Example #1
    def upload(self, id, max_size=10):
        if not self.storage_path:
            return

        directory = self.get_directory(id)
        filepath = self.get_path(id)

        if self.filename:
            try:
                os.makedirs(directory)
            except OSError as e:
                # errno 17: the directory already exists
                if e.errno != 17:
                    raise
            tmp_filepath = filepath + '~'
            output_file = open(tmp_filepath, 'wb+')
            self.upload_file.seek(0)
            current_size = 0

            while True:

                current_size = current_size + 1
                data = self.upload_file.read(2**20)
                if not data:
                    break
                """
				# formati di file che potrebbero essere malevoli
				ckan.black_list.upload_mimetype_blacklist = application/exe application/octet-stream application/x-msdownload application/x-exe application/dos-exe vms/exe application/x-winexe application/msdos-windows application/x-msdos-program application/bin application/binary application/bat application/x-bat application/textedit application/x-sh
				"""

                mimetype = magic.from_buffer(data, mime=True)
                # Check if the resource mimetype is blacklisted (defined in development.ini)

                blacklist = config.get(
                    'ckan.black_list.upload_mimetype_blacklist', '').split()
                #print blacklist

                #from ckan.lib import helpers
                #formati_ammissibili = helpers.resource_formats()
                #print formati_ammissibili.keys()

                if current_size == 1:
                    if mimetype in blacklist:
                        os.remove(tmp_filepath)
                        raise logic.ValidationError({
                            'upload': [
                                'File format >> ' + str(mimetype) +
                                ' << is not allowed'
                            ]
                        })
                    #else:
                    #    print 'File format >> ' + str(mimetype) + ' << accepted :-)'

                output_file.write(data)
                if current_size > max_size:
                    os.remove(tmp_filepath)
                    raise logic.ValidationError(
                        {'upload': ['File upload too large']})
            output_file.close()
            os.rename(tmp_filepath, filepath)
            return
def __privacy_state_for_package_dict(data_dict):
    private = data_dict.get('private', None)
    capacity = data_dict.get('capacity', None)

    def __validate_private_property(property_value):
        return str(property_value).lower() in ('true', 'false')

    def __validate_capacity_property(property_value):
        return str(property_value).lower() in (
            'private', 'draft', 'public', 'published')

    if __validate_private_property(private):
        if str(private).lower() == 'true':
            return DCATAPOP_PRIVATE_DATASET
        if str(private).lower() == 'false':
            return DCATAPOP_PUBLIC_DATASET
    elif private is not None:
        raise logic.ValidationError(
            "'private' property can only be 'true' or 'false': '{0}' is not supported"
            .format(private))
    else:
        if __validate_capacity_property(capacity):
            if capacity.lower() in ('private', 'draft'):
                return DCATAPOP_PRIVATE_DATASET
            if capacity.lower() in ('public', 'published'):
                return DCATAPOP_PUBLIC_DATASET
        elif capacity is not None:
            raise logic.ValidationError(
                "'capacity' property can only be 'private', 'draft', 'public' or 'published': '{0}' is not supported"
                .format(capacity))

    return DCATAPOP_PUBLIC_DATASET
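
A minimal usage sketch of the helper above (the DCATAPOP_* constants are whatever module-level values the extension defines; note that 'private' takes precedence over 'capacity'):

assert __privacy_state_for_package_dict({'private': 'True'}) == DCATAPOP_PRIVATE_DATASET
assert __privacy_state_for_package_dict({'capacity': 'draft'}) == DCATAPOP_PRIVATE_DATASET
assert __privacy_state_for_package_dict({}) == DCATAPOP_PUBLIC_DATASET  # the default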
def comment_update(context, data_dict):
    model = context['model']

    logic.check_access("comment_update", context, data_dict)

    cid = logic.get_or_bust(data_dict, 'id')
    comment = comment_model.Comment.get(cid)
    if not comment:
        abort(404)

    # Validate that we have the required fields.
    if not all([data_dict.get('comment')]):
        raise logic.ValidationError("Comment text is required")

    # Cleanup the comment
    cleaned_comment = util.clean_input(data_dict.get('comment'))

    # Run profanity check
    if toolkit.asbool(config.get('ckan.comments.check_for_profanity', False)) \
            and (helpers.profanity_check(cleaned_comment) or helpers.profanity_check(data_dict.get('subject', ''))):
        raise logic.ValidationError("Comment blocked due to profanity.")

    comment.subject = data_dict.get('subject')
    comment.comment = cleaned_comment
    comment.modified_date = datetime.datetime.utcnow()

    comment.flagged = data_dict.get('flagged')

    model.Session.add(comment)
    model.Session.commit()

    return comment.as_dict()
Example #4
def get_feature_data(data_dict, path):
    query = data_dict["query"]
    # sanity checks
    if query == "feature" and "feature" not in data_dict:
        raise logic.ValidationError("Please specify 'feature' parameter!")

    feat = data_dict["feature"]
    is_scalar = dclab.dfn.scalar_feature_exists(feat)
    path_condensed = path.with_name(path.name + "_condensed.rtdc")

    if is_scalar and path_condensed.exists():
        path = path_condensed

    feature_list = get_feature_list(path)
    if feat in feature_list:
        with dclab.rtdc_dataset.fmt_hdf5.RTDC_HDF5(path) as ds:
            if is_scalar:
                data = ds[feat].tolist()
            else:
                if "event" not in data_dict:
                    raise logic.ValidationError("Please specify 'event' for "
                                                + f"non-scalar feature {feat}!"
                                                )
                if feat == "trace":
                    data = get_trace_data(data_dict, path)
                else:
                    event = int(data_dict["event"])
                    data = ds[feat][event].tolist()
    elif not dclab.dfn.feature_exists(feat):
        raise logic.ValidationError(f"Unknown feature name '{feat}'!")
    else:
        raise logic.ValidationError(f"Feature '{feat}' unavailable!")
    return data
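
A hedged call sketch for the action body above; the file path is hypothetical, and 'deform'/'image' are standard dclab feature names (scalar and non-scalar, respectively):

import pathlib

path = pathlib.Path('/data/calibration_beads.rtdc')  # hypothetical .rtdc file
deform = get_feature_data({'query': 'feature', 'feature': 'deform'}, path)
image = get_feature_data(
    {'query': 'feature', 'feature': 'image', 'event': 42}, path)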
def comment_create(context, data_dict):
    model = context['model']
    user = context['user']

    userobj = model.User.get(user)

    logic.check_access("comment_create", context, data_dict)

    # Validate that we have the required fields.
    if not all([data_dict.get('comment')]):
        raise logic.ValidationError("Comment text is required")

    thread_id = data_dict.get('thread_id')

    if not thread_id:
        url = data_dict.get('url')
        if url:
            thread = comment_model.CommentThread.from_url(url)
            thread_id = thread.id if thread else None

    if not thread_id:
        raise logic.ValidationError("Thread identifier or URL is required")

    # Cleanup the comment
    cleaned_comment = util.clean_input(data_dict.get('comment'))

    # Run profanity check
    if toolkit.asbool(config.get('ckan.comments.check_for_profanity', False)) \
            and (helpers.profanity_check(cleaned_comment) or helpers.profanity_check(data_dict.get('subject', ''))):
        raise logic.ValidationError(
            {"message": "Comment blocked due to profanity."})

    # Create the object
    cmt = comment_model.Comment(thread_id=thread_id, comment=cleaned_comment)
    cmt.user_id = userobj.id
    cmt.subject = data_dict.get('subject', 'No subject')

    if 'creation_date' in context:
        cmt.creation_date = datetime.datetime.fromtimestamp(
            context['creation_date'])

    # Check if there is a parent ID and that it is valid
    # TODO, validity in this case includes checking parent is not
    # deleted.
    prt = data_dict.get('parent_id')
    if prt:
        parent = comment_model.Comment.get(prt)
        if parent:
            cmt.parent_id = parent.id

    # approval and spam checking removed

    model.Session.add(cmt)
    model.Session.commit()

    return cmt.as_dict()
Example #6
def comment_create(context, data_dict):
    model = context['model']
    user = context['user']

    userobj = model.User.get(user)

    #logic.check_access("comment_create", context, data_dict)

    # Validate that all required fields are present.
    if not all([data_dict.get('comment')]):
        raise logic.ValidationError("Comment text is required")

    thread_id = data_dict.get('thread_id')

    if not thread_id:
        url = data_dict.get('url')
        if url:
            thread = comment_model.CommentThread.from_url(url)
            thread_id = thread.id if thread else None

    if not thread_id:
        raise logic.ValidationError("Thread identifier or URL is required")

    # Cleanup the comment
    cleaned_comment = util.clean_input(data_dict.get('comment'))

    # Create the object
    cmt = comment_model.Comment(thread_id=thread_id, comment=cleaned_comment)
    cmt.user_id = userobj.id if userobj else None
    cmt.subject = data_dict.get('subject', 'No subject')
    cmt.email = data_dict.get('email', 'No Email')

    if 'creation_date' in context:
        cmt.creation_date = datetime.datetime.fromtimestamp(
            context['creation_date'])

    # Check if there is a parent ID and that it is valid
    # TODO, validity in this case includes checking parent is not
    # deleted.
    prt = data_dict.get('parent_id')
    if prt:
        parent = comment_model.Comment.get(prt)
        if parent:
            cmt.parent_id = parent.id

    # approval and spam checking removed
    commentThread = comment_model.CommentThread.get(thread_id)
    commentThread.active_thread = 'active'
    commentThread.state_thread = 'active'
    model.Session.add(commentThread)
    model.Session.add(cmt)
    model.Session.commit()

    return cmt.as_dict()
def resource_filters_create(context, data_dict):
    """Create custom extract filters for a given resource.

    :param resource_id: id of the resource that filters should be added to.
    :type resource_id: string

    :param filters: filters i.e. ['electricity', 'date']
    :type filters: list of strings

    :returns: the newly created resource
    :rtype: dictionary
    """

    resource_id = l.get_or_bust(data_dict, 'resource_id')
    filters = data_dict.pop('filters')

    # Fetch available columns from the datastore and check against filters
    ds = l.get_action('datastore_info')(context, {'id': resource_id})

    # Loop through datastore schema and validate filters
    available_filters = [val.lower() for val in ds.get('schema').keys()]
    for _f in filters:
        if _f.lower() not in available_filters:
            raise l.ValidationError(
                _('\'{0}\' filter type not supported. Supported types are: {1}'
                  .format(_f, available_filters)))

    # Check if filter already exist for the given resource
    active_filters = l.get_action('active_resource_filters')(
        context, {
            'resource_id': resource_id
        })
    for _f in active_filters:
        if _f['name'] in filters:
            raise l.ValidationError(
                _('\'{0}\' already exists for the given resource'.format(_f['name'])))

    resource_meta = l.get_action('datastore_search')(context, {
        'resource_id': resource_id,
        'limit': 1
    })
    fields = resource_meta['fields']

    # Add new filters
    payload = {'id': resource_id}
    for _f in fields:
        if _f['id'] in filters:
            active_filters.append({'name': _f['id'], 'type': _f['type']})
    payload.update({'filters': json.dumps(active_filters)})
    l.get_action('resource_patch')(context, payload)

    return l.get_action('resource_show')(context, {'id': resource_id})
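
A hedged usage sketch, assuming the action above is registered under its function name and that the resource's datastore already has 'electricity' and 'date' columns:

result = l.get_action('resource_filters_create')(
    context, {'resource_id': 'my-resource-id',  # hypothetical id
              'filters': ['electricity', 'date']})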
Example #8
    def _validate_new_org_request_field(self, data, context):
        errors = {}
        for field in ['name', 'description', 'description_data', 'work_email', 'your_name', 'your_email']:
            if data[field] is None or data[field].strip() == '':
                errors[field] = [_('should not be empty')]

        if len(errors) > 0:
            raise logic.ValidationError(errors)

        user_email_validator = tk.get_validator('email_validator')
        schema = {'work_email': [user_email_validator, unicode]}
        data_dict, _errors = _validate(data, schema, context)

        if _errors:
            raise logic.ValidationError(_errors.get('work_email'))
Example #9
def get_last_publish_status(context, data_dict):
    """
    Return the French and English values for the given lastpublishstatuscode.

    :param lastPublishStatusCode: Publishing Status Code (i.e. '10')
    :type lastPublishStatusCode: str
    :return: English, French and code values for given lastpublishstatuscode
    :rtype: dict
    :raises: ValidationError
    """
    massage = lambda in_: {
        'last_publish_status_code': in_['value'],
        'en': in_['label'].get('en'),
        'fr': in_['label'].get('fr')
    }

    publish_status = _get_or_bust(
        data_dict,
        'lastPublishStatusCode'
    ).zfill(2)

    presets = scheming_helpers.scheming_get_preset('ndm_publish_status')
    publish_statuses = presets['choices']

    for ps in publish_statuses:
        if unicode(ps['value']) == unicode(publish_status):
            return massage(ps)
    else:
        raise logic.ValidationError(
            ('lastPublishStatusCode: \'{0}\' invalid'.format(publish_status),)
        )
Example #10
def comment_update(context, data_dict):
    model = context['model']

    #logic.check_access("comment_update", context, data_dict)

    cid = logic.get_or_bust(data_dict, 'id')
    comment = comment_model.Comment.get(cid)
    if not comment:
        abort(404)

    # Validate that we have the required fields.
    if not all([data_dict.get('comment')]):
        raise logic.ValidationError("Comment text is required")

    # Cleanup the comment
    cleaned_comment = util.clean_input(data_dict.get('comment'))

    comment.subject = data_dict.get('subject')
    comment.email = data_dict.get('email')
    comment.comment = cleaned_comment
    comment.modified_date = datetime.datetime.now()

    model.Session.add(comment)
    model.Session.commit()

    return comment.as_dict()
Example #11
def get_format_description(context, data_dict):
    # noinspection PyUnresolvedReferences
    """
    Return the French and English values for the given formatCode.

    :param formatCode: Format Code (i.e. '10')
    :type formatCode: str

    :return: English, French and code values for given formatCode
    :rtype: dict

    :raises: ValidationError
    """
    massage = lambda in_: {
        'format_code': in_['value'],
        'en': in_['label'].get('en'),
        'fr': in_['label'].get('fr')
    }

    format_code = _get_or_bust(
        data_dict,
        'formatCode'
    ).zfill(2)

    preset = scheming_helpers.scheming_get_preset(u'ndm_format')
    format_codes = preset['choices']

    for fc in format_codes:
        if fc['value'] == format_code:
            return massage(fc)
    else:
        raise logic.ValidationError(
            ('formatCode \'{0}\' invalid'.format(format_code),)
        )
Example #12
def taxonomy_term_show_all(context, data_dict):
    """
    Shows a single taxonomy term and its children, the taxonomy id is not
    required, just a term_id.

    :returns: The matching taxonomy terms
    :rtype: A list of dictionaries
    """
    _check_access('taxonomy_term_show', context, data_dict)

    label = data_dict.get('label')
    taxonomy_id = data_dict.get('taxonomy_id')

    if not label:
        raise logic.ValidationError("Either id, uri or label is required")

    if taxonomy_id:
        term = TaxonomyTerm.get_from_taxonomy(label, taxonomy_id)
    else:
        term = TaxonomyTerm.get_all(label)

    if not term:
        raise logic.NotFound()

    return [u.as_dict() for u in term]
Example #13
def sub_theme_delete(context, data_dict):
    ''' Deletes a sub-theme

    :param id: the sub-theme's ID
    :type id: string

    :returns: OK
    :rtype: string
    '''

    try:
        check_access('sub_theme_delete', context, data_dict)
    except NotAuthorized:
        raise NotAuthorized(
            _(u'Need to be system '
              u'administrator to administer'))

    id = logic.get_or_bust(data_dict, 'id')

    questions = Session.query(ResearchQuestion) \
        .filter_by(sub_theme=id) \
        .count()
    if questions:
        raise logic.ValidationError(
            _('Sub-Theme cannot be deleted while it '
              'still has research questions'))

    try:
        filter = {'id': id}
        SubThemes.delete(filter)
    except NotFound:
        raise NotFound(_(u'Sub-theme'))

    return 'OK'
Example #14
def vocabulary_show_without_package_detail(context, data_dict):
    '''Return a single tag vocabulary without package details.

    :param id: the id or name of the vocabulary
    :type id: string
    :return: the vocabulary.
    :rtype: dictionary

    '''

    vocab_id = data_dict.get('id')
    if not vocab_id:
        raise logic.ValidationError({'id': 'id not in data'})
    vocabulary = ckan_model.vocabulary.Vocabulary.get(vocab_id)
    if vocabulary is None:
        raise NotFound(_('Could not find vocabulary "%s"') % vocab_id)
    vocabulary_dict = d.table_dictize(vocabulary, context)
    assert 'tags' not in vocabulary_dict
    vocabulary_dict['tag_count'] = vocabulary.tags.count()
    vocabulary_dict['tags'] = [
        tag_dictize_without_package_detail(tag, context)
        for tag in vocabulary.tags]
    tags = vocabulary_dict['tags']
    tagsorted = sorted(tags, key=lambda k: k['package_count'], reverse=True)
    vocabulary_dict['tags'] = tagsorted
    return vocabulary_dict
Example #15
def _check_new_user_quota():
    redis_conn = connect_to_redis()
    new_users_list = 'new_latest_users'
    if 'new_latest_users' not in redis_conn.keys():
        redis_conn.lpush(new_users_list, datetime.now().isoformat())
    else:
        # TODO: read this from config
        max_new_users = 10
        period = 60 * 10
        begin_date = datetime.now() - timedelta(seconds=period)

        count = 0
        elements_to_remove = []

        for i in range(0, redis_conn.llen(new_users_list)):
            value = redis_conn.lindex(new_users_list, i)
            new_user_creation_date = dateutil.parser.parse(value)
            if new_user_creation_date >= begin_date:
                count += 1
            else:
                elements_to_remove += [value]

        for value in elements_to_remove:
            redis_conn.lrem(new_users_list, value)

        if count >= max_new_users:
            log.error("new user temporary quota exceeded ({0})".format(count))
            raise logic.ValidationError({
                'user':
                "******"
                .format(period / 60)
            })
        else:
            # add new user creation
            redis_conn.lpush(new_users_list, datetime.now().isoformat())
Example #16
def hdx_qa_sdcmicro_run(context, data_dict):
    '''
    Add sdc micro flag "running" to resource
    Post to aws endpoint to start the sdc micro check
    parameters for R script:
    -d "idp_settlement|settlement|resp_gender|resp_age|breadwinner|total_hh|person_with_disabilities" -w weights_general -s Feuil1 -f data11.xlsx -t "text|text|text|text|numeric|text|text|text|text|text|numeric|text|numeric"
    :param data_dict: dictionary containing parameters
    :type data_dict: dict
    Parameters from data_dict
    :param dataset_id: the id or name of the dataset
    :type dataset_id: str
    :param resource_id: the id or name of the resource
    :type resource_id: str
    :param data_columns_list: list with data columns
    :param weight_column: the weight column
    :param columns_type_list: list with types for each column - text, double, numeric, date
    :param sheet: in case of excel/xlsx/xls we need the sheet id (0-n)
    :param skip_rows: how many rows to skip until data
    :return: True or False or data_dict
    :rtype: bool
    '''
    # resource_patch to mark sdc micro flag in "running" mode
    # post to aws endpoint to start the sdc micro (sdc micro will have to mark the flag and upload the result)
    _check_access('qa_sdcmicro_run', context, {})
    resource_id = data_dict.get("resource_id")
    if resource_id:
        try:
            # resource_dict = get_action("resource_show")(context, {"id": resource_id})
            resource_dict = get_action("hdx_qa_resource_patch")(context, {"id": resource_id, "sdc_report_flag": "QUEUED"})
            _run_sdcmicro_check(resource_dict, data_dict.get("data_columns_list"), data_dict.get("weight_column"),
                                data_dict.get("columns_type_list"), data_dict.get("sheet", 0), context)
        except Exception as e:
            ex_msg = e.message if hasattr(e, 'message') and e.message else str(e)
            message = e.error_summary if hasattr(e, 'error_summary') and e.error_summary else 'Something went wrong while processing the request: ' + str(ex_msg)
            raise logic.ValidationError({'message': message}, error_summary=message)
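
A hedged sketch of the data_dict this action expects, assembled from the docstring above and assuming the action is registered under its function name; whether the column parameters are pipe-delimited strings (as in the R invocation shown) or lists is an assumption, and all values are illustrative:

data_dict = {
    'resource_id': 'abc-123',
    'data_columns_list': 'idp_settlement|settlement|resp_gender|resp_age',
    'weight_column': 'weights_general',
    'columns_type_list': 'text|text|text|numeric',
    'sheet': 'Feuil1',  # only needed for excel/xlsx/xls resources
    'skip_rows': 0,
}
get_action('hdx_qa_sdcmicro_run')(context, data_dict)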
Example #17
 def upload(self, id, max_size=10):
     if not self.storage_path:
         return
     directory = self.get_directory(id)
     filepath = self.get_path(id)
     if self.filename:
         try:
             os.makedirs(directory)
         except OSError as e:
             ## errno 17 is file already exists
             if e.errno != 17:
                 raise
         tmp_filepath = filepath + '~'
         output_file = open(tmp_filepath, 'wb+')
         self.upload_file.seek(0)
         current_size = 0
         while True:
             current_size = current_size + 1
             #MB chunks
             data = self.upload_file.read(2**20)
             if not data:
                 break
             output_file.write(data)
             if current_size > max_size:
                 os.remove(tmp_filepath)
                 raise logic.ValidationError(
                     {'upload': ['File upload too large']})
         output_file.close()
         os.rename(tmp_filepath, filepath)
Example #18
def taxonomy_create(context, data_dict):
    """
    Creates a new taxonomy. Terms are not created here, they must be
    created using taxonomy_term_create with the taxonomy id from this
    call.

    :param owner_org: the id of the dataset's owning organization, see


    :returns: The newly created taxonomy
    :rtype: A dictionary.
    """
    _check_access('taxonomy_create', context, data_dict)

    model = context['model']

    name = data_dict.get('name')

    title = logic.get_or_bust(data_dict, 'title')
    uri = logic.get_or_bust(data_dict, 'uri')

    if not name:
        name = munge_name(title)

    # Check the name has not been used
    if model.Session.query(Taxonomy).filter(Taxonomy.name == name).count() > 0:
        raise logic.ValidationError("Name is already in use")

    t = Taxonomy(name=name, title=title, uri=uri)
    model.Session.add(t)
    model.Session.commit()

    return t.as_dict()
Example #19
def taxonomy_show(context, data_dict):
    """ Shows a single taxonomy.

    :param id: The name or id of the taxonomy

    :returns: A single taxonomy
    :rtype: A dictionary
    """
    _check_access('taxonomy_show', context, data_dict)

    model = context['model']
    id = data_dict.get('id')
    uri = data_dict.get('uri')
    name = data_dict.get('name')

    if not id and not uri and not name:
        raise logic.ValidationError("Neither id, name or uri were provided")

    item = Taxonomy.get(id or name)
    if not item and uri:
        item = Taxonomy.by_uri(uri)

    if not item:
        raise logic.NotFound()

    return item.as_dict(with_terms=True)
Example #20
def taxonomy_term_create(context, data_dict):
    """ Allows for the creation of a new taxonomy term.

    :returns: The newly created term
    :rtype: A dictionary
    """
    _check_access('taxonomy_term_create', context, data_dict)
    model = context['model']

    taxonomy_id = logic.get_or_bust(data_dict, 'taxonomy_id')
    taxonomy = logic.get_action('taxonomy_show')(context, {'id': taxonomy_id})

    label = logic.get_or_bust(data_dict, 'label')
    uri = logic.get_or_bust(data_dict, 'uri')
    description = data_dict.get('description')

    if model.Session.query(TaxonomyTerm).\
            filter(TaxonomyTerm.uri == uri).\
            filter(TaxonomyTerm.taxonomy_id == taxonomy_id).count() > 0:
        raise logic.ValidationError("Term uri already used in this taxonomy")

    term = TaxonomyTerm(**data_dict)
    model.Session.add(term)
    model.Session.commit()

    return term.as_dict()
Example #21
def harvest_object_create(context, data_dict):
    """ Create a new harvest object

    :type guid: string (optional)
    :type content: string (optional)
    :type job_id: string 
    :type source_id: string (optional)
    :type package_id: string (optional)
    :type extras: dict (optional)
    """
    check_access('harvest_object_create', context, data_dict)
    data, errors = _validate(data_dict, harvest_object_create_schema(),
                             context)

    if errors:
        raise logic.ValidationError(errors)

    obj = HarvestObject(guid=data.get('guid'),
                        content=data.get('content'),
                        job=data['job_id'],
                        harvest_source_id=data.get('source_id'),
                        package_id=data.get('package_id'),
                        extras=[
                            HarvestObjectExtra(key=k, value=v)
                            for k, v in data.get('extras', {}).items()
                        ])

    obj.save()
    return harvest_object_dictize(obj, context)
Example #22
def string_to_timedelta(s: str) -> datetime.timedelta:
    """Parse a string s and return a standard datetime.timedelta object.

    Handles days, hours, minutes, seconds, and microseconds.

    Accepts strings in these formats:

    2 days
    14 days
    4:35:00 (hours, minutes and seconds)
    4:35:12.087465 (hours, minutes, seconds and microseconds)
    7 days, 3:23:34
    7 days, 3:23:34.087465
    .087465 (microseconds only)

    :raises ckan.logic.ValidationError: if the given string does not match any
        of the recognised formats

    """
    patterns = []
    days_only_pattern = r"(?P<days>\d+)\s+day(s)?"
    patterns.append(days_only_pattern)
    hms_only_pattern = r"(?P<hours>\d?\d):(?P<minutes>\d\d):(?P<seconds>\d\d)"
    patterns.append(hms_only_pattern)
    ms_only_pattern = r".(?P<milliseconds>\d\d\d)(?P<microseconds>\d\d\d)"
    patterns.append(ms_only_pattern)
    hms_and_ms_pattern = hms_only_pattern + ms_only_pattern
    patterns.append(hms_and_ms_pattern)
    days_and_hms_pattern = r"{0},\s+{1}".format(days_only_pattern,
                                                hms_only_pattern)
    patterns.append(days_and_hms_pattern)
    days_and_hms_and_ms_pattern = days_and_hms_pattern + ms_only_pattern
    patterns.append(days_and_hms_and_ms_pattern)

    match = None
    for pattern in patterns:
        match = re.match("^{0}$".format(pattern), s)
        if match:
            break

    if not match:
        raise logic.ValidationError(
            {"message": "Not a valid time: {0}".format(s)})

    gd = match.groupdict()
    days = int(gd.get("days", "0"))
    hours = int(gd.get("hours", "0"))
    minutes = int(gd.get("minutes", "0"))
    seconds = int(gd.get("seconds", "0"))
    milliseconds = int(gd.get("milliseconds", "0"))
    microseconds = int(gd.get("microseconds", "0"))
    delta = datetime.timedelta(
        days=days,
        hours=hours,
        minutes=minutes,
        seconds=seconds,
        milliseconds=milliseconds,
        microseconds=microseconds,
    )
    return delta
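
A quick sanity sketch of the accepted formats from the docstring (using the same datetime module the function relies on):

assert string_to_timedelta("2 days") == datetime.timedelta(days=2)
assert string_to_timedelta("7 days, 3:23:34") == datetime.timedelta(
    days=7, hours=3, minutes=23, seconds=34)
assert string_to_timedelta("4:35:12.087465") == datetime.timedelta(
    hours=4, minutes=35, seconds=12, milliseconds=87, microseconds=465)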
Example #23
def taxonomy_term_update(context, data_dict):
    """ Allows a taxonomy term to be updated.

    :returns: The newly updated term
    :rtype: A dictionary
    """

    _check_access('taxonomy_term_update', context, data_dict)
    model = context['model']

    id = data_dict.get('id')
    uri = data_dict.get('uri')

    if not id and not uri:
        raise logic.ValidationError("Either id or uri is required")

    term = TaxonomyTerm.get(id or uri)

    if not term:
        raise logic.NotFound()

    term.label = data_dict.get('label', term.label)
    term.parent_id = data_dict.get('parent_id', term.parent_id)
    term.uri = logic.get_or_bust(data_dict, 'uri')
    term.description = data_dict.get('description', term.description)
    #term.extras = data_dict.get('extras', '')

    model.Session.add(term)
    model.Session.commit()

    return term.as_dict()
Example #24
def hdx_qa_pii_run(context, data_dict):
    '''
    Add PII flag "running" to resource
    Post to aws endpoint to start the PII check
    :param data_dict: dictionary containing parameters
    :type data_dict: dict
    Parameters from data_dict
    :param dataset_id: the id or name of the dataset
    :type dataset_id: str
    :param resource_id: the id or name of the resource
    :type resource_id: str
    :return: True or False or data_dict
    :rtype: bool
    '''
    # resource_patch to mark sdc micro flag in "running" mode
    # post to aws endpoint to start the sdc micro (sdc micro will have to mark the flag and upload the result)
    _check_access('qa_pii_run', context, {})
    resource_id = data_dict.get("resourceId")
    if resource_id:
        try:
            resource_dict = get_action("hdx_qa_resource_patch")(context, {"id": resource_id, "pii_report_flag": "QUEUED"})
            _run_pii_check(resource_dict, context)
        except Exception as e:
            ex_msg = e.message if hasattr(e, 'message') and e.message else str(e)
            message = e.error_summary if hasattr(e, 'error_summary') and e.error_summary else 'Something went wrong while processing the request: ' + str(ex_msg)
            raise logic.ValidationError({'message': message}, error_summary=message)
Example #25
def _check_reset_attempts(email):
    redis_conn = connect_to_redis()
    if email not in redis_conn.keys():
        log.debug("Redis: first login attempt for {0}".format(email))
        redis_conn.hmset(email, {
            'attempts': 1,
            'latest': datetime.now().isoformat()
        })
    else:
        base = 3
        attempts = int(redis_conn.hmget(email, 'attempts')[0])
        latest = dateutil.parser.parse(redis_conn.hmget(email, 'latest')[0])

        waiting_seconds = base**attempts
        limit_date = latest + timedelta(seconds=waiting_seconds)

        log.debug(
            'Redis: wait {0} seconds after {1} attempts => after date {2}'.
            format(waiting_seconds, attempts, limit_date.isoformat()))

        if limit_date > datetime.now():
            raise logic.ValidationError({
                'user':
                "******"
                .format(int((limit_date - datetime.now()).total_seconds()),
                        limit_date.isoformat())
            })
        else:
            # increase counter
            redis_conn.hmset(email, {
                'attempts': attempts + 1,
                'latest': datetime.now().isoformat()
            })
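
The lockout window grows exponentially with the attempt count (waiting_seconds = base ** attempts, base = 3 here), so the reset endpoint is throttled roughly as follows:

# attempts:  1   2    3    4     5     6
# wait (s):  3   9   27   81   243   729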
Example #26
    def upload(self, max_size=2):
        ''' Actually upload the file.
        This should happen just before a commit but after the data has
        been validated and flushed to the db. This is so we do not store
        anything unless the request is actually good.
        max_size is size in MB maximum of the file'''

        if self.filename:
            output_file = open(self.tmp_filepath, 'wb')
            self.upload_file.seek(0)
            current_size = 0
            while True:
                current_size = current_size + 1
                # MB chunks
                data = self.upload_file.read(2**20)
                if not data:
                    break
                output_file.write(data)
                if current_size > max_size:
                    os.remove(self.tmp_filepath)
                    raise logic.ValidationError(
                        {self.file_field: ['File upload too large']})
            output_file.close()
            os.rename(self.tmp_filepath, self.filepath)
            self.clear = True

        if (self.clear and self.old_filename
                and not self.old_filename.startswith('http')):
            try:
                os.remove(self.old_filepath)
            except OSError:
                pass
Example #27
def page_update(context, data_dict):
    logic.check_access('page_update', context, data_dict)

    validation.page_name_validator(data_dict, context)

    try:
        session = context['session']
        page = pages_model.Page.get_by_id(id=data_dict['id'])
        if page is None:
            raise NotFound

        populate_page(page, data_dict)

        groups = data_dict.get('groups')
        process_groups(context, page, groups)

        tags = data_dict.get('tags')
        process_tags(context, page, tags)

        session.add(page)
        session.commit()
        return dictize.page_dictize(page)
    except Exception as e:
        ex_msg = e.message if hasattr(e, 'message') else str(e)
        message = 'Something went wrong while processing the request: {}'.format(
            ex_msg)
        raise logic.ValidationError({'message': message},
                                    error_summary=message)
Example #28
def check_file_extension(file_name):
    log.info('upload: checking file * %s * extension ', file_name)
    file_extensions = config.get('ckan.upload.file_extensions', '').lower()
    name, ext = os.path.splitext(file_name)
    # check if not empty first
    if len(ext[1:]) > 0:
        if ext[1:].lower() not in file_extensions:
            log.error(
                "upload: the file * %s * was not uploaded - File extension * %s *  is not allowed",
                file_name, ext[1:].lower())
            raise logic.ValidationError(
                {'upload': ['File extension is not allowed']})
    else:
        log.error(
            "upload: the file * %s * was not uploaded - File extension is empty",
            file_name)
        raise logic.ValidationError({'upload': ['File extension is empty']})
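
A hedged sketch of the matching configuration and two calls; the space-separated, lower-case option format is an assumption:

# in the CKAN .ini file, e.g.:
#   ckan.upload.file_extensions = csv xls xlsx pdf txt
check_file_extension('observations.csv')  # allowed extension, returns normally
check_file_extension('payload.exe')       # raises logic.ValidationError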
Example #29
def resource_update(context, data_dict):
    attributes = data_dict.pop('attributes', [])
    errors = validate_resource_attributes_list(attributes)

    if any(errors):
        raise l.ValidationError(errors)

    data_dict['attributes'] = attributes
    return l.action.update.resource_update(context, data_dict)
Example #30
 def __init__(self, config, section):
     self.cas_role = config.get(section, 'role.name')
     self.group_name = config.get(section, 'role.group.name')
     self.group_role = config.get(section, 'role.group.role')
     self.is_org = config.getboolean(section, 'role.group.is_org')
     
     if self.group_role not in ['member', 'editor', 'admin']:
         raise logic.ValidationError('Invalid group role for CAS role {0}'
                                     .format(self.cas_role))
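
A hypothetical ConfigParser section this constructor could consume (the section name and values are illustrative; only the option names are taken from the code above):

# [role:harvester]
# role.name = cas-harvester
# role.group.name = harvesting-team
# role.group.role = editor
# role.group.is_org = false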