def validate(self, **kwargs):
        """
        Validates a data file.

        :param file_path: path to file to be loaded.
        :param data: pre loaded YAML object (optional).
        :return: Bool to indicate the validity of the file.
        """

        with open(self.default_schema_file, "r") as schema_file:
            default_data_schema = json.load(schema_file)

        # Although we load the file with the yaml package,
        # it supports both JSON and YAML.
        data = kwargs.pop("data", None)
        file_path = kwargs.pop("file_path", None)

        if file_path is None:
            raise LookupError("file_path argument must be supplied")

        if data is None:
            try:
                # Use the C loader when available for a speed improvement;
                # fall back to the pure-Python loader otherwise.
                loader = getattr(yaml, "CLoader", yaml.Loader)
                with open(file_path, "r") as data_file:
                    data = yaml.load(data_file, Loader=loader)
            except ScannerError as se:
                self.add_validation_message(
                    ValidationMessage(file=file_path, message="There was a problem parsing the file.\n" + str(se))
                )
                return False

        try:

            if "type" in data:
                custom_schema = self.load_custom_schema(data["type"])
                json_validate(data, custom_schema)
            else:
                json_validate(data, default_data_schema)

        except ValidationError as ve:

            self.add_validation_message(
                ValidationMessage(file=file_path, message=ve.message + " in " + str(ve.instance))
            )

        return not self.has_errors(file_path)
Example #2
def validate(data):
    if 'version' not in data:
        raise Exception("Data does not specify schema version")

    if data['version'] == 1:
        return json_validate(data, SCHEMA_V1)
    elif data['version'] == 3:
        return json_validate(data, SCHEMA_V3)

    raise Exception("Unknown schema version: %s" % data['version'])
Example #3
    def validate_config(self, config):
        name = config.get('metadata', {}).get('name') or ""
        if not RE_NAME.match(name):
            raise ValidationError(
                _("Secret name is malformed: it may only contain lowercase letters, digits and hyphens (-); it must start with a letter and be shorter than 256 characters"))

        if settings.IS_TEMPLATE_VALIDATE:
            try:
                json_validate(config, SECRET_SCHEM)
            except JsonValidationError as e:
                raise ValidationError('Secret {prefix_msg}{e}'.format(
                    prefix_msg=_("configuration format error: "), e=e.message))
            except SchemaError as e:
                raise ValidationError('Secret {prefix_msg}{e}'.format(
                    prefix_msg=_("configuration format error: "), e=e))

        return json.dumps(config)
Example #4
    def validate_config(self, config):
        # Service names may contain variables, so the name check is disabled:
        # name = config.get('metadata', {}).get('name') or ""
        # if not RE_NAME.match(name):
        #     raise ValidationError(
        #         u"Service name is malformed: it may only contain lowercase letters, digits and hyphens (-); it must start with a letter and be shorter than 256 characters")

        if settings.IS_TEMPLATE_VALIDATE:
            try:
                json_validate(config, SERVICE_SCHEM)
            except JsonValidationError as e:
                raise ValidationError(
                    _('Service configuration format error: {}').format(e.message))
            except SchemaError as e:
                raise ValidationError(_('Service configuration format error: {}').format(e))

        return json.dumps(config)
Example #5
    def import_snippets(self, filename):
        errors = {}
        total_snippets = 0
        valid_snippets = 0
        with open(filename, 'r') as f:
            try:
                snippets = json.load(f)
            except ValueError as e:
                errors['loading'] = e.msg

        if len(errors) == 0:
            try:
                json_validate(instance=snippets, schema=SNIPPETS_SCHEMA)
            except ValidationError as e:
                index_path = ['snippets']
                for part in e.absolute_path:
                    index_path.append('[{0}]'.format(part))
                full_message = '{0} on instance {1}:<br>{2}'.format(
                    e.message, ''.join(index_path), e.instance)
                errors['validation'] = full_message

        if len(errors) == 0:
            for language_info in snippets:
                language = language_info['language']
                triggers = language_info['triggers']
                for trigger_info in triggers:
                    trigger = trigger_info['trigger']
                    descriptions = trigger_info['descriptions']
                    for description_info in descriptions:
                        description = description_info['description']
                        snippet = description_info['snippet']
                        snippet_text = snippet['text']
                        total_snippets += 1
                        try:
                            build_snippet_ast(snippet_text)
                            self.update_or_enqueue(language, trigger,
                                                   description, snippet)
                            valid_snippets += 1
                        except SyntaxError as e:
                            syntax_errors = errors.get('syntax', {})
                            key = '{0}/{1}/{2}'.format(language, trigger,
                                                       description)
                            syntax_errors[key] = e.msg
                            errors['syntax'] = syntax_errors

        return valid_snippets, total_snippets, errors
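The SNIPPETS_SCHEMA itself is not shown, but the traversal above implies the nested structure it enforces. A hypothetical conforming document:

# Illustrative snippets document matching the keys the loop reads.
example_snippets = [
    {
        "language": "python",
        "triggers": [
            {
                "trigger": "def",
                "descriptions": [
                    {
                        "description": "function definition",
                        "snippet": {"text": "def ${1:name}($2):\n    $0"},
                    }
                ],
            }
        ],
    }
]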
Example #6
    def validate_config(self, config):
        # k8s Service names may contain variables, so the name check is disabled:
        # name = config.get('metadata', {}).get('name') or ""
        # if not K8S_RENAME.match(name):
        #     raise ValidationError(
        #         u"Service %s" % K8S_NAME_ERROR_MSG)

        if settings.IS_TEMPLATE_VALIDATE:
            try:
                json_validate(config, K8S_SERVICE_SCHEM)
            except JsonValidationError as e:
                raise ValidationError(
                    _('Service configuration format error: {}').format(e.message))
            except SchemaError as e:
                raise ValidationError(_('Service configuration format error: {}').format(e))

        return json.dumps(config)
Example #7
 def validate_location(self, value):
     schema = {
         "type" : "object",
         "properties": {
             "latitude" : {"type" : "number"},
             "name" : {"type": "number"}
         },
         "required" : ["latitude", "longitude"] 
     }
     try:
         json_validate(value, schema)
     except json_ValidationError:
         raise serializers.ValidationError('This is not a valid JSON for geolocation')
     
     # Additional bounds check on the validated JSON
     if value['latitude'] > 4:
         raise serializers.ValidationError('This latitude is out of bounds')
     return value
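With the longitude property restored above, a quick sanity check of what the validator accepts and rejects (values are illustrative):

# {"latitude": 1.5, "longitude": 103.8}   -> passes schema and bounds checks
# {"latitude": 1.5}                       -> schema ValidationError (missing "longitude")
# {"latitude": 7.0, "longitude": 0}       -> schema-valid, rejected by the latitude bound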
Example #8
def validate(obj):
    with open(schema_dir() / "recipe.v1.json") as schema_in:
        schema = json.load(schema_in)
    try:
        validation_result = json_validate(instance=obj, schema=schema)
    except ValidationError as e:
        console.print(e, style="red")
        exit(1)
    return validation_result
Example #9
    def validate(cls, data, component):
        """
        Validates a request body against a component in an OpenAPI 3.0 template
        """
        if not cls._swagger_doc:
            cls.set_schema(cls.default_swagger_template())

        components = cls._resolved_schema.get("components")
        if components is not None:
            schemas = {**components.get("schemas", {})}
            if component in schemas:
                try:
                    json_validate(instance=data, schema=schemas[component])
                except Exception as err:
                    raise ValidationException(str(err))
            else:
                raise ValidationException(
                    f"SCHEMA: Does not contain {component}")
Example #10
def scrap_to_payload(scrap):
    """Translates scrap data to the output format"""
    # Apply new keys here as needed (like `ref`)
    payload = {
        "name": scrap.name,
        "data": scrap.data,
        "encoder": scrap.encoder,
        "version": LATEST_SCRAP_VERSION,
    }
    # Ensure we're conforming to our schema
    try:
        json_validate(payload, scrap_schema(LATEST_SCRAP_VERSION))
    except ValidationError as e:
        raise ScrapbookDataException(
            "Scrap (name={name}) contents do not conform to required type structures: {error}"
            .format(name=scrap.name or "None", error=str(e)),
            [e],
        )
    return payload
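scrap_schema() is defined elsewhere in scrapbook; a plausible sketch of a version-aware schema factory consistent with the payload keys above (not the library's actual schema):

# Illustrative only.
def scrap_schema_sketch(version):
    return {
        "type": "object",
        "required": ["name", "data", "encoder", "version"],
        "properties": {
            "name": {"type": "string"},
            "encoder": {"type": "string"},
            "version": {"const": version},
        },
    }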
Example #11
    def patch(self):
        """
        Updates a service. The id parameter is required; all other parameters are optional.

        :return:
            :success:
                0
            :failure:
                -1, "no service matches the given id"
                -2, "task names must be unique; a task with the same name already exists"
        """
        try:
            srv = Service.objects.get(id=request.args.get('id'))
        except DoesNotExist:
            return -1, "no service matches the given id"

        tmp_dict = {}
        if request.args.get("title"):
            tmp_dict.update({"title": request.args.get("title")})
        if request.args.get('spec'):
            spec = json.loads(request.args.get('spec'))
            json_validate(spec, json.loads(spec_schema))
            tmp_dict.update({"spec": spec})
        if request.args.get('crawler_count'):
            tmp_dict.update(
                {"crawler_count": request.args.get('crawler_count')})
        if request.args.get('service_params_spec'):
            tmp_dict.update({
                "params": [
                    ServiceParamsSpec(name=elm['name'],
                                      default=elm.get('default', ''),
                                      description=elm.get('desc', ''))
                    for elm in request.args.get('service_params_spec')
                ]
            })
        try:
            srv.update(**tmp_dict)
        except (DuplicateKeyError, NotUniqueError):
            return -2, "task names must be unique; a task with the same name already exists."

        return 0
Example #12
def _validate_and_clean_config(config: dict) -> dict:
    """Validates a config as a dict. And adds default
    properties to that config.

    Args:
        config (dict): Config for data linter validation run.

    Returns:
        dict: The same config but with default params added.
    """
    json_validate(config, config_schema)

    for table_name, params in config["tables"].items():
        if (not params.get("expect-header")) and params.get("headers-ignore-case"):
            log.warning(
                f"Table '{table_name}' has a 'headers-ignore-case' parameter "
                "but no 'expect-header'. Setting 'expect-header' to True."
            )
            params["expect-header"] = True

    return config
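A hypothetical config and config_schema pair consistent with the keys this function touches (the real data-linter schema is more elaborate):

config_schema = {  # illustrative stand-in
    "type": "object",
    "required": ["tables"],
    "properties": {"tables": {"type": "object"}},
}

config = {
    "tables": {
        # 'headers-ignore-case' without 'expect-header' triggers the warning
        # path and forces 'expect-header' to True.
        "orders": {"headers-ignore-case": True},
    }
}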
Example #13
    def validate(self, **kwargs):
        """
        Validates a data file.

        :param file_path: path to file to be loaded.
        :param file_type: file data type (optional).
        :param data: pre loaded YAML object (optional).
        :return: Bool to indicate the validity of the file.
        """

        file_path = kwargs.pop("file_path", None)
        file_type = kwargs.pop("file_type", None)
        data = kwargs.pop("data", None)

        if file_path is None:
            raise LookupError("file_path argument must be supplied")

        if data is None:

            try:
                # The yaml package supports both JSON and YAML
                with open(file_path, 'r') as df:
                    data = yaml.load(df, Loader=Loader)
            except Exception as e:
                self.add_validation_message(
                    ValidationMessage(
                        file=file_path,
                        message='There was a problem parsing the file.\n' +
                        e.__str__(),
                    ))
                return False

        try:
            if file_type:
                custom_schema = self.load_custom_schema(file_type)
                json_validate(data, custom_schema)
            elif 'type' in data:
                custom_schema = self.load_custom_schema(data['type'])
                json_validate(data, custom_schema)
            else:
                with open(self.default_schema_file, 'r') as f:
                    default_data_schema = json.load(f)
                    json_validate(data, default_data_schema)
                if self._get_major_version() > 0:
                    check_for_zero_uncertainty(data)
                    check_length_values(data)

        except ValidationError as ve:
            self.add_validation_message(
                ValidationMessage(
                    file=file_path,
                    message=ve.message + ' in ' + str(ve.instance),
                ))

        return not self.has_errors(file_path)
Example #14
def add_request():
    payload = request.json

    try:
        json_validate(payload, schema=BOOK_REQUEST_SCHEMA)
    except ValidationError as e:
        return jsonify({'error': e.message}), 400

    user_email = payload.get('email')
    book_title = payload.get('title')

    if not get_redis_client().sismember('books', book_title):
        return jsonify(
            {'error':
             f'Book with title \'{book_title}\' not in the library'}), 400

    try:
        validate_email(user_email)
    except EmailNotValidError as e:
        return jsonify({'error':
                        f'User email \'{user_email}\' is invalid'}), 400

    request_id = generate_id(book_title, user_email)
    if get_redis_client().hexists('requests', request_id):
        return jsonify({'error': 'This request is already recorded'}), 400

    new_book_request = {
        'title': book_title,
        'email': user_email,
        'id': request_id,
        'timestamp': datetime.utcnow(),
    }
    get_redis_client().hset('requests',
                            key=new_book_request['id'],
                            value=pickle.dumps(new_book_request))

    return jsonify(new_book_request)
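BOOK_REQUEST_SCHEMA is not reproduced in the source; a minimal sketch consistent with the two fields the handler reads:

# Hypothetical schema; the real one may constrain more.
BOOK_REQUEST_SCHEMA = {
    "type": "object",
    "required": ["email", "title"],
    "properties": {
        "email": {"type": "string"},
        "title": {"type": "string"},
    },
}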
Example #15
 def test_get_excluded_properties(self):
     ontology_content = json.load(self.test_ontology_schema.file)
     json_validate(ontology_content, QCONFIG_SCHEMA)
     actual_excluded_properties = get_excluded_properties(ontology_content, "test_package.model")
     test_excluded_properties = []
     self.assertSetEqual(set(actual_excluded_properties), set(test_excluded_properties))
Example #16
def _parse_validate_and_resample_stf(request, db_info):
    """
    Parses the JSON based STF, validates it, and resamples it.

    :param request: The request.
    :param db_info: Information about the current database.
    """
    if not request.body:
        msg = ("The source time function must be given in the body of the "
               "POST request.")
        return tornado.web.HTTPError(400, log_message=msg, reason=msg)

    # Try to parse it as a JSON file.
    with io.BytesIO(request.body) as buf:
        try:
            j = json.loads(buf.read().decode())
        except Exception:
            msg = "The body of the POST request is not a valid JSON file."
            return tornado.web.HTTPError(400, log_message=msg, reason=msg)

    # Validate it.
    try:
        json_validate(j, _json_schema)
    except JSONValidationError as e:
        # Replace the u'' unicode string specifier for consistent error
        # messages.
        msg = "Validation Error in JSON file: " + re.sub(r"u'", "'", e.message)
        return tornado.web.HTTPError(400, log_message=msg, reason=msg)

    # Make sure the sampling rate is ok.
    if j["sample_spacing_in_sec"] < db_info.dt:
        msg = ("'sample_spacing_in_sec' in the JSON file must not be smaller "
               "than the database dt [%.3f seconds]." % db_info.dt)
        return tornado.web.HTTPError(400, log_message=msg, reason=msg)

    # Convert to numpy array.
    j["data"] = np.array(j["data"], np.float64)

    # A couple more custom validations.
    message = None

    # Make sure it's not all zeros.
    if np.abs(j["data"]).max() < 1e-20:
        message = ("All zero (or nearly all zero) source time functions don't "
                   "make any sense.")

    # The data must begin and end with zero. The user is responsible for the
    # tapering.
    if j["data"][0] != 0.0 or j["data"][-1] != 0.0:
        message = "Must begin and end with zero."

    if message:
        msg = "STF data did not validate: %s" % message
        return tornado.web.HTTPError(400, log_message=msg, reason=msg)

    missing_length = (db_info.length -
                      (len(j["data"]) - 1) * j["sample_spacing_in_sec"])
    missing_samples = max(
        int(missing_length / j["sample_spacing_in_sec"]) + 1, 0)

    # Add a buffer of 20 samples at the beginning and at the end.
    data = np.concatenate(
        [np.zeros(20), j["data"],
         np.zeros(missing_samples + 20)])

    # Resample it using sinc reconstruction.
    data = lanczos_interpolation(
        data,
        # Account for the additional samples at the beginning.
        old_start=-20 * j["sample_spacing_in_sec"],
        old_dt=j["sample_spacing_in_sec"],
        new_start=0.0,
        new_dt=db_info.dt,
        new_npts=db_info.npts,
        # The large a is okay because we add zeros at the beginning and the
        # end.
        a=12,
        window="blackman",
    )

    # There is potentially some numerical noise on the first sample.
    assert data[0] < 1e-10 * np.abs(data.ptp())
    data[0] = 0.0

    # Normalize the integral to one.
    data /= np.trapz(np.abs(data), dx=db_info.dt)
    j["data"] = data

    return j
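_json_schema is referenced but never shown; judging from the fields accessed above, it presumably constrains at least the following (a guess, not the project's actual schema):

# Hypothetical sketch of the STF request schema.
_json_schema = {
    "type": "object",
    "required": ["sample_spacing_in_sec", "data"],
    "properties": {
        "sample_spacing_in_sec": {"type": "number", "exclusiveMinimum": 0},
        "data": {"type": "array", "items": {"type": "number"}},
    },
}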
Example #17
                'model': 'metax_api.file',
            }

            new['fields']['file_name'] = file_name % loop
            new['fields']['identifier'] = identifier % loop
            new['fields']['download_url'] = download_url % loop
            new['fields']['modified_by_api'] = '2017-05-23T10:07:22.559656Z'
            new['fields']['created_by_api'] = '2017-05-23T10:07:22.559656Z'
            new['fields']['file_characteristics']['title'] = json_title % loop
            new['fields']['file_characteristics'][
                'description'] = json_description % loop
            new['fields']['file_storage_id'] = file_storage_id
            new['pk'] = str(uuid4())

            if validate_json:
                json_validate(new['fields']['file_characteristics'],
                              json_schema)

            test_data_list.append(new)

        else:
            # http POST requests

            new = row_template.copy()

            new['file_name'] = file_name % loop
            new['identifier'] = identifier % loop
            new['download_url'] = download_url % loop
            new['file_characteristics']['title'] = json_title % loop
            new['file_characteristics'][
                'description'] = json_description % loop
Example #18
        return uploaded_task
    schema_json = None
    try:
        with open(path.join(settings.AUX_FILES_DIR, 'task-schema', 'v0.2',
                            'task-schema-0.2.json')) as schema_file:
            schema_str = schema_file.read()
        schema_json = json_loads(schema_str)
    except Exception as ex:
        msg = 'Error reading JSON schema file: %s' % str(ex)
        error_status.message = msg
        error_status.save()
        return uploaded_task
    try:
        json_validate(task_json, schema_json)
    except JsonValidationError as ex:
        msg = 'File "task/task.json" is incorrect: %s' % str(ex)
        error_status.message = msg
        error_status.save()
        return uploaded_task

    mentioned_files = [
        task_json['desc_ru'],
        task_json['desc_en'],
        task_json['writeup_ru'],
        task_json['writeup_en'],
    ]
    mentioned_images = []
    if 'images' in task_json:
        for image_obj in task_json['images']:
Example #19
def product_fields_validator(value):
    try:
        json_validate(value, FIELDS_SCHEMA)
    except Exception as e:
        raise ValidationError(e)
Example #20
        def put(self, output_id):

            # Request Validation: MIME type
            # http://flask.pocoo.org/docs/1.0/api/#flask.Request.is_json
            if not request.is_json:
                err_message = 'Request MIME-Type: ' + request.mimetype + \
                    '. API only accepts: application/json'
                out_resp_obj = jsonErrorClientSide(err_message=err_message)
                return make_response(jsonify(out_resp_obj), 400)

            # Request Validation: URI resource
            if output_id not in current_channel_valid:
                err_message = 'URI Resource ' + output_id + \
                    ' not found.'
                out_resp_obj = jsonErrorClientSide(err_message=err_message)
                return make_response(jsonify(out_resp_obj), 404)

            # Request Validation: JSON according to schema
            # http://json-schema.org/latest/json-schema-validation.html
            json_schema = {
                'type': 'object',
                'required': ['output_id', 'set_point'],
                'properties': {
                    'output_id': {
                        'type': 'string',
                        'enum': current_channel_valid
                    },
                    'set_point': {
                        'type': 'number',
                        'minimum': 0.0,
                        'maximum': 100.0
                    }
                }
            }

            try:
                json_validate(request.json, schema=json_schema)
            except JSONValidationError as err:
                err_message = 'JSON validation error: ' + err.message
                out_resp_obj = jsonErrorClientSide(err_message=err_message)
                return make_response(jsonify(out_resp_obj), 400)

            # Request Validation: JSON output_id match URI resource
            if output_id != request.json['output_id']:
                err_message = 'URI Resource ' + output_id + \
                    ' not equal to JSON output_id: ' + \
                    request.json['output_id']
                out_resp_obj = jsonErrorClientSide(err_message=err_message)
                return make_response(jsonify(out_resp_obj), 400)

            # Request Validation OK
            # Send request to MQTT server for processing in paho_control

            out_mqtt_obj = {
                'output_id': output_id,
                'set_point': request.json['set_point']
            }

            out_mqtt_topic = \
                'api/v1.0/control/currentoutputs/' + \
                output_id + \
                '/PUT'
            out_mqtt_payload = json.dumps(out_mqtt_obj)

            client.publish(topic=out_mqtt_topic,
                           payload=out_mqtt_payload,
                           qos=2,
                           retain=True)

            return marshal(out_mqtt_obj, current_channel_fields)
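A hedged client-side usage sketch; the base URL and channel id are assumptions, not taken from the source:

import requests  # illustrative client call

resp = requests.put(
    "http://localhost:5000/api/v1.0/control/currentoutputs/output_1",  # hypothetical URL
    json={"output_id": "output_1", "set_point": 42.0},
)
# A set_point outside [0.0, 100.0], a non-JSON body, or a JSON output_id that
# does not match the URI resource all yield a 400 response.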
Example #21
 def decorator(self, message, *args, **kwargs):
     try:
         json_validate(message, message_schema)
     except ValidationError:
         raise Messages.RequestSchemaError
     return message_handler(self, message, *args, **kwargs)
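Only the inner wrapper appears above; the enclosing decorator factory presumably looks something like this sketch (the outer function names are assumptions, and json_validate, ValidationError and Messages are imported as in the example):

import functools

def validates_with(message_schema):  # hypothetical factory name
    def wrap(message_handler):
        @functools.wraps(message_handler)
        def decorator(self, message, *args, **kwargs):
            try:
                json_validate(message, message_schema)
            except ValidationError:
                raise Messages.RequestSchemaError
            return message_handler(self, message, *args, **kwargs)
        return decorator
    return wrap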
Example #22
    def patch(self):
        """
        Updates a job. Only the title, crawler count and crawl schedule can be
        updated; in addition, an ordinary task may update its spec, and a
        service task may update its parameters (but not its service id). The
        job category cannot be changed.

        :return:
            :success:
                0
            :failure:
                -1, "no job with that id exists"
                -2, "task names must be unique; a task with the same name already exists"
        """
        try:
            job = Job.objects.get(id=request.args.get('id'))
        except DoesNotExist:
            return -1, "no job with that id exists"

        tmp_dict = {}
        if request.args.get("title"):
            tmp_dict.update({"title": request.args.get("title")})
        if job.category == 'TASK' and request.args.get('spec'):
            spec = json.loads(request.args.get('spec'))
            json_validate(spec, json.loads(spec_schema))
            tmp_dict.update({"content__spec": spec})
        elif job.category == 'SERVICE' and request.args.get('service_params'):
            service_params = {
                elm['name']: elm['value']
                for elm in request.args.get('service_params')
            }
            tmp_dict.update({
                "content__service_inst":
                JobContentServiceInstance(
                    service=Service(id=job.content.service_inst.service.id),
                    params=service_params)
            })
        if request.args.get('schedule_at'):
            tmp_dict.update({"schedule__at": request.args.get('schedule_at')})
            tmp_dict.update({"schedule__cron": None})
        if request.args.get("crawler_count"):
            tmp_dict.update(
                {"crawler_count": request.args.get("crawler_count")})
        try:
            if request.args.get('schedule_cron_second') and \
                    request.args.get('schedule_cron_minute') and \
                    request.args.get('schedule_cron_hour') and \
                    request.args.get('schedule_cron_day_of_month') and \
                    request.args.get('schedule_cron_month') and \
                    request.args.get('schedule_cron_day_of_week'):

                cron = JobScheduleCron(
                    second=request.args.get('schedule_cron_second'),
                    minute=request.args.get('schedule_cron_minute'),
                    hour=request.args.get('schedule_cron_hour'),
                    day_of_month=request.args.get(
                        'schedule_cron_day_of_month'),
                    month=request.args.get('schedule_cron_month'),
                    day_of_week=request.args.get('schedule_cron_day_of_week'),
                )
                tmp_dict.update({"schedule__cron": cron})
                tmp_dict.update({"schedule__at": None})

            job.update(**tmp_dict)
        except (DuplicateKeyError, NotUniqueError):
            return -2, "task names must be unique; a task with the same name already exists."

        # reschedule
        sj = SpiderJob(job)
        sj.schedule()

        return 0
Example #23
import simplejson
import sys
from jsonschema import FormatChecker, ValidationError
from jsonschema import validate as json_validate

if len(sys.argv) < 2:
    print(f"""
Usage: {sys.argv[0]} <schema file name> [<test data file name>]

Note that the entire file will be loaded by simplejson.load() and should
only contain one JSON object.
""")
    sys.exit(-1)

schema_file_name = sys.argv[1]
test_file_name = None
if len(sys.argv) == 3:
    test_file_name = sys.argv[2]

with open(schema_file_name, 'r') as schema_file:
    schema = simplejson.load(schema_file)
    print('Schema file loaded OK...')

    if test_file_name is not None:
        with open(test_file_name, 'r') as test_file:
            test_event = simplejson.load(test_file)

        json_validate(test_event, schema, format_checker=FormatChecker())

        print('Input file validated against schema OK.')
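Typical invocations of the script above (the script and file names are placeholders):

# python check_schema.py my-schema.json
# python check_schema.py my-schema.json sample-event.json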
Example #24
 def test_get_inherited_classes(self):
     ontology_content = json.load(self.test_ontology_schema.file)
     json_validate(ontology_content, QCONFIG_SCHEMA)
     actual_inherited_classes = get_inherited_classes(ontology_content)
     test_inherited_classes = []
     self.assertSetEqual(set(actual_inherited_classes), set(test_inherited_classes))
Example #25
    def post(self):
        """
        Creates a job.

        :return:
            :success:
                {
                    "job_id": "5fe756b270d69643f25be490"
                }
            :failure:
                -1, "spec或service_id不可同时为空"
                -2, "任务名不可重复。有相同名称的任务已存在。"
                -3, "任务关联的服务不存在。"
        """
        job = Job()
        job.title = request.args.get('title')
        # not sure why the default value is not taken
        job.category = request.args.get('category') if request.args.get(
            'category') else "TASK"
        job.create_time = datetime.now()
        job.crawler_count = request.args.get('crawler_count')
        # content: either a spec or a service instance (exactly one of the two)
        spec = None
        job_srv_inst = None
        if request.args.get('service_id'):
            try:
                srv = Service.objects.get(id=request.args.get('service_id'))
            except DoesNotExist:
                return -3, "the service referenced by the job does not exist."
            service_params = {
                elm['name']: elm['value']
                for elm in request.args.get('service_params')
            }
            job_srv_inst = JobContentServiceInstance(service=srv,
                                                     params=service_params)
        elif request.args.get('spec'):
            spec = json.loads(request.args.get('spec'))
            json_validate(spec, json.loads(spec_schema))
        else:
            return -1, "spec and service_id cannot both be empty"

        job.content = JobContent(spec=spec, service_inst=job_srv_inst)
        # schedule
        cron = None
        if request.args.get('schedule_cron_second') and \
                request.args.get('schedule_cron_minute') and \
                request.args.get('schedule_cron_hour') and \
                request.args.get('schedule_cron_day_of_month') and \
                request.args.get('schedule_cron_month') and \
                request.args.get('schedule_cron_day_of_week'):
            cron = JobScheduleCron(
                second=request.args.get('schedule_cron_second'),
                minute=request.args.get('schedule_cron_minute'),
                hour=request.args.get('schedule_cron_hour'),
                day_of_month=request.args.get('schedule_cron_day_of_month'),
                month=request.args.get('schedule_cron_month'),
                day_of_week=request.args.get('schedule_cron_day_of_week'),
            )
        job.schedule = JobSchedule(at=request.args.get('schedule_at'),
                                   cron=cron)

        try:
            job.save()
        except (DuplicateKeyError, NotUniqueError):
            return -2, "task names must be unique; a task with the same name already exists."

        sj = SpiderJob(job)
        sj.schedule()

        return {"job_id": str(job.id)}
Example #26
def tiles(request):
    """Retrieve a set of tiles

    A call to this API function should retrieve a few tiles.

    Args:
        request (django.http.HTTPRequest): The request object containing
            the parameters (e.g. d=x.0.0) that identify the tiles being
            requested.

    Returns:
        django.http.JsonResponse: A JSON object containing all of the tile
            data being requested. The JSON object is just a dictionary of
            (tile_id, tile_data) items.

    """
    tileids_to_fetch = set()
    tileset_to_options = dict()

    TILE_LIMIT = 1000

    if request.method == "POST":
        # This is a POST request, so try to parse the request body as JSON.
        try:
            body = json.loads(request.body.decode("utf-8"))
        except Exception:
            return JsonResponse(
                {"error": "Unable to parse request body as JSON."},
                status=rfs.HTTP_400_BAD_REQUEST,
            )

        # Validate against the JSON schema.
        try:
            json_validate(instance=body, schema=tjs.tiles_post_schema)
        except JsonValidationError as e:
            return JsonResponse(
                {
                    "error": f"Invalid request body: {e.message}.",
                },
                status=rfs.HTTP_400_BAD_REQUEST,
            )

        # Iterate over tilesets to obtain the associated tile IDs and options.
        for tileset_info in body:
            tileset_uid = tileset_info["tilesetUid"]
            # Prepend the tileset UID to each tile ID suffix.
            tile_ids = [
                f"{tileset_uid}.{tile_id}" for tile_id in tileset_info["tileIds"]
            ]
            tileids_to_fetch.update(tile_ids)

            tileset_options = tileset_info.get("options", None)
            # The "options" property is optional.
            if isinstance(tileset_options, dict):
                tileset_to_options[tileset_uid] = tileset_options
                # Hash the options object so that the tile can be cached.
                tileset_to_options[tileset_uid]["options_hash"] = hashlib.md5(
                    json.dumps(tileset_options).encode("utf-8")
                ).hexdigest()

    elif request.method == "GET":
        # create a set so that we don't fetch the same tile multiple times
        tileids_to_fetch = set(request.GET.getlist("d"))

    if len(tileids_to_fetch) > TILE_LIMIT:
        return JsonResponse(
            {
                "error": "Too many tiles were requested.",
            },
            status=rfs.HTTP_400_BAD_REQUEST,
        )

    # with ProcessPoolExecutor() as executor:
    #       res = executor.map(parallelize, hargs)
    """
    p = mp.Pool(4)
    res = p.map(parallelize, hargs)
    """

    # Return the raw data if only one tile is requested. This currently only
    # works for `imtiles`
    raw = request.GET.get("raw", False)

    tileids_by_tileset = col.defaultdict(set)
    generated_tiles = []

    tilesets = {}
    transform_id_to_original_id = {}

    # sort tile_ids by the dataset they come from
    for tile_id in tileids_to_fetch:
        tileset_uuid = tgt.extract_tileset_uid(tile_id)

        # get the tileset object first
        if tileset_uuid in tilesets:
            tileset = tilesets[tileset_uuid]
        else:
            tileset = tm.Tileset.objects.get(uuid=tileset_uuid)
            tilesets[tileset_uuid] = tileset

        if tileset.filetype == "cooler":
            # cooler tiles can have a transform (e.g. 'ice', 'kr') which
            # needs to be added if it's not there (e.g. 'default')
            new_tile_id = add_transform_type(tile_id)
            transform_id_to_original_id[new_tile_id] = tile_id
            tile_id = new_tile_id
        else:
            transform_id_to_original_id[tile_id] = tile_id

        # see if the tile is cached
        tile_value = None
        try:
            if tileset_uuid in tileset_to_options:
                tileset_options = tileset_to_options[tileset_uuid]
                tile_value = rdb.get(tile_id + tileset_options["options_hash"])
            else:
                tile_value = rdb.get(tile_id)
        except Exception as ex:
            # there was an error accessing the cache server
            # log the error and carry forward fetching the tile
            # from the original data
            logger.warning(ex)

        # tile_value = None

        if tile_value is not None:
            # we found the tile in the cache, no need to fetch it again
            tile_value = pickle.loads(tile_value)
            generated_tiles += [(tile_id, tile_value)]
            continue

        tileids_by_tileset[tileset_uuid].add(tile_id)

    # fetch the tiles
    tilesets = [tilesets[tu] for tu in tileids_by_tileset]
    accessible_tilesets = [
        (t, tileids_by_tileset[t.uuid], raw, tileset_to_options.get(t.uuid, None))
        for t in tilesets
        if (
            ((not t.private) or request.user == t.owner)
            and (not t.requiresAuthentication or request.user.is_authenticated)
            and (
                not t.requiresAuthentication
                or (
                    ("accessibleTilesets" in request.session)
                    and (t.uuid in request.session["accessibleTilesets"])
                )
            )
        )
    ]

    # pool = mp.Pool(6)

    generated_tiles += list(it.chain(*map(tgt.generate_tiles, accessible_tilesets)))

    """
    for tileset_uuid in tileids_by_tileset:
        # load the tileset object
        tileset = tilesets[tileset_uuid]

        # check permissions
        if tileset.private and request.user != tileset.owner:
            generated_tiles += [(tile_id, {'error': "Forbidden"}) for tile_id in tileids_by_tileset[tileset_uuid]]
        else:
            generated_tiles += generate_tiles(tileset, tileids_by_tileset[tileset_uuid])
    """

    # store the tiles in redis

    tiles_to_return = {}

    for (tile_id, tile_value) in generated_tiles:
        tileset_uuid = tgt.extract_tileset_uid(tile_id)
        try:
            if tileset_uuid in tileset_to_options:
                tileset_options = tileset_to_options[tileset_uuid]
                rdb.set(
                    tile_id + tileset_options["options_hash"], pickle.dumps(tile_value)
                )
            else:
                rdb.set(tile_id, pickle.dumps(tile_value))
        except Exception as ex:
            # error caching a tile
            # log the error and carry forward, this isn't critical
            logger.warning(ex)

        if tile_id in transform_id_to_original_id:
            original_tile_id = transform_id_to_original_id[tile_id]
        else:
            # not in our list of reformatted tile ids, so it probably
            # wasn't requested
            continue

        if original_tile_id in tileids_to_fetch:
            tiles_to_return[original_tile_id] = tile_value

    if len(generated_tiles) == 1 and raw and "image" in generated_tiles[0][1]:
        return HttpResponse(generated_tiles[0][1]["image"], content_type="image/jpeg")

    return JsonResponse(tiles_to_return, safe=False)
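tjs.tiles_post_schema is not shown, but the POST branch above iterates a list of tileset requests; an illustrative body (UIDs and option keys are placeholders):

example_body = [
    {
        "tilesetUid": "abc123",              # placeholder UID
        "tileIds": ["0.0.0", "1.0.1"],       # suffixes; the view prepends the UID
        "options": {"some_option": [1, 2]},  # optional; hashed into the cache key
    }
]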
Example #27
 def validate_schema(self):
     try:
         json_validate(instance=self.r_paper, schema=self.schema)
         return True
     except ValidationError:
         return False
Example #28
def _parse_validate_and_resample_stf(request, db_info):
    """
    Parses the JSON based STF, validates it, and resamples it.

    :param request: The request.
    :param db_info: Information about the current database.
    """
    if not request.body:
        msg = "The source time function must be given in the body of the " \
              "POST request."
        return tornado.web.HTTPError(400, log_message=msg, reason=msg)

    # Try to parse it as a JSON file.
    with io.BytesIO(request.body) as buf:
        try:
            j = json.loads(buf.read().decode())
        except Exception:
            msg = "The body of the POST request is not a valid JSON file."
            return tornado.web.HTTPError(400, log_message=msg, reason=msg)

    # Validate it.
    try:
        json_validate(j, _json_schema)
    except JSONValidationError as e:
        # Replace the u'' unicode string specifier for consistent error
        # messages.
        msg = "Validation Error in JSON file: " + re.sub(r"u'", "'", e.message)
        return tornado.web.HTTPError(400, log_message=msg, reason=msg)

    # Make sure the sampling rate is ok.
    if j["sample_spacing_in_sec"] < db_info.dt:
        msg = "'sample_spacing_in_sec' in the JSON file must not be smaller " \
              "than the database dt [%.3f seconds]." % db_info.dt
        return tornado.web.HTTPError(400, log_message=msg, reason=msg)

    # Convert to numpy array.
    j["data"] = np.array(j["data"], np.float64)

    # A couple more custom validations.
    message = None

    # Make sure it's not all zeros.
    if np.abs(j["data"]).max() < 1E-20:
        message = ("All zero (or nearly all zero) source time functions don't "
                   "make any sense.")

    # The data must begin and end with zero. The user is responsible for the
    # tapering.
    if j["data"][0] != 0.0 or j["data"][-1] != 0.0:
        message = "Must begin and end with zero."

    if message:
        msg = "STF data did not validate: %s" % message
        return tornado.web.HTTPError(400, log_message=msg, reason=msg)

    missing_length = db_info.length - (
        len(j["data"]) - 1) * j["sample_spacing_in_sec"]
    missing_samples = max(int(missing_length / j["sample_spacing_in_sec"]) + 1,
                          0)

    # Add a buffer of 20 samples at the beginning and at the end.
    data = np.concatenate([
        np.zeros(20), j["data"], np.zeros(missing_samples + 20)])

    # Resample it using sinc reconstruction.
    data = lanczos_interpolation(
        data,
        # Account for the additional samples at the beginning.
        old_start=-20 * j["sample_spacing_in_sec"],
        old_dt=j["sample_spacing_in_sec"],
        new_start=0.0,
        new_dt=db_info.dt,
        new_npts=db_info.npts,
        # The large a is okay because we add zeros at the beginning and the
        # end.
        a=12, window="blackman")

    # There is potentially some numerical noise on the first sample.
    assert data[0] < 1E-10 * np.abs(data.ptp())
    data[0] = 0.0

    # Normalize the integral to one.
    data /= np.trapz(np.abs(data), dx=db_info.dt)
    j["data"] = data

    return j
Example #29
    def test_get_defined_classes(self):
        ontology_content = json.load(self.test_ontology_schema.file)
        json_validate(ontology_content, QCONFIG_SCHEMA)
        actual_defined_classes = get_defined_classes(ontology_content)
        test_defined_classes = [
            {
                u'name': u'model',
                u'package': u'test_package',
                u'id': u'1',
                u'documentation': u'this is a test model',
                u'is_document': True,
                u'is_meta': False,
                u'properties': {
                    u'excluded': [],
                    u'inherited': [],
                    u'defined': [
                        {
                            u'name': u'name',
                            u'package': u'test_package',
                            u'id': u'id.1.1',
                            u'cardinality': u'1.1',
                            u'is_meta': False,
                            u'is_nillable': True,
                            u'property_type': u'ATOMIC',
                            u'atomic_type': u'STRING'
                        },
                        {
                            u'name': u'enumeration',
                            u'package': u'test_package',
                            u'id': u'1.2',
                            u'documentation': u'this is a test enumeration',
                            u'cardinality': u'0.1',
                            u'is_meta': False,
                            u'is_nillable': True,
                            u'property_type': u'ENUMERATION',
                            u'enumeration_is_open': True,
                            u'enumeration_members': [
                                {u'documentation': u'documentation for one', u'order': 1, u'value': u'one'},
                                {u'documentation': u'documentation for two', u'order': 2, u'value': u'two'},
                                {u'documentation': u'documentation for three', u'order': 3, u'value': u'three'}
                            ]
                        },
                        {
                            u'name': u'thing',
                            u'package': u'test_package',
                            u'id': u'1.3',
                            u'documentation': u'a relationship property;            there are lots of spaces in this documentation',
                            u'cardinality': u'0.1',
                            u'is_meta': False,
                            u'is_nillable': True,
                            u'property_type': u'RELATIONSHIP',
                            u'relationship_targets': [
                                u'test_package.recursive_thing'
                            ],
                        }
                    ]
                }
            },
            {
                u'name': u'recursive_thing',
                u'package': u'test_package',
                u'id': u'2',
                u'is_document': False,
                u'is_meta': False,
                u'properties': {
                    u'excluded': [],
                    u'inherited': [],
                    u'defined': [
                        {
                            u'name': u'name',
                            u'package': u'test_package',
                            u'id': u'2.1',
                            u'cardinality': u'1.1',
                            u'is_nillable': True,
                            u'is_meta': False,
                            u'property_type': u'ATOMIC',
                            u'atomic_type': u'STRING',
                        },
                        {
                            u'name': u'child',
                            u'package': u'test_package',
                            u'id': u'2.2',
                            u'cardinality': u'0.N',
                            u'is_nillable': True,
                            u'is_meta': False,
                            u'property_type': u'RELATIONSHIP',
                            u'relationship_targets': [
                                u'test_package.recursive_thing'
                            ]
                        },
                        {
                            u'name': u'multiple_targets',
                            u'package': u'test_package',
                            u'id': u'2.3',
                            u'cardinality': u'0.1',
                            u'is_nillable': True,
                            u'is_meta': False,
                            u'property_type': u'RELATIONSHIP',
                            u'relationship_targets': [
                                u'test_package.other_thing_one',
                                u'test_package.other_thing_two'
                            ]
                        }
                    ]
                }
            },
            {
                u'name': u'other_thing_one',
                u'package': u'test_package',
                u'id': u'3',
                u'is_document': False,
                u'is_meta': False,
                u'properties': {
                    u'inherited': [],
                    u'excluded': [],
                    u'defined': [
                        {
                            u'name': u'name',
                            u'package': u'test_package',
                            u'id': u'3.1',
                            u'cardinality': u'1.1',
                            u'is_nillable': True,
                            u'is_meta': False,
                            u'property_type': u'ATOMIC',
                            u'atomic_type': u'STRING'
                        }
                    ]
                }
            },
            {
                u'name': u'other_thing_two',
                u'package': u'test_package',
                u'id': u'4',
                u'is_document': False,
                u'is_meta': False,
                u'properties': {
                    u'inherited': [],
                    u'excluded': [],
                    u'defined': [
                        {
                            u'name': u'name',
                            u'package': u'test_package',
                            u'id': u'4.1',
                            u'cardinality': u'1.1',
                            u'is_meta': False,
                            u'is_nillable': True,
                            u'property_type': u'ATOMIC',
                            u'atomic_type': u'STRING'
                        }
                    ]
                }
            }
        ]

        for actual_defined_class, test_defined_class in zip(actual_defined_classes, test_defined_classes):
            self.assertDictEqual(actual_defined_class, test_defined_class)
Example #30
def validate(payload, schema_file):
    """ Validates a payload against a schema """

    with payload_schema(schema_file) as schema:
        json_validate(payload, schema)
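payload_schema is an assumed helper; one plausible implementation consistent with its use as a context manager yielding a parsed schema:

import json
from contextlib import contextmanager

@contextmanager
def payload_schema(schema_file):  # hypothetical implementation
    with open(schema_file) as f:
        yield json.load(f)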
Example #31
def validate(datadict):
    return json_validate(instance=datadict, schema=schema)
Example #32
File: tasks.py  Project: Gfif/blackbox3
    schema_json = None
    try:
        with open(path.join(settings.AUX_FILES_DIR,
                            'task-schema',
                            'v0.2',
                            'task-schema-0.2.json')) as schema_file:
            schema_str = schema_file.read()
        schema_json = json_loads(schema_str)
    except Exception as ex:
        msg = 'Error reading JSON schema file: %s' % str(ex)
        error_status.message = msg
        error_status.save()
        return uploaded_task
    try:
        json_validate(task_json, schema_json)
    except JsonValidationError as ex:
        msg = 'File "task/task.json" is incorrect: %s' % str(ex)
        error_status.message = msg
        error_status.save()
        return uploaded_task

    mentioned_files = [
        task_json['desc_ru'],
        task_json['desc_en'],
        task_json['writeup_ru'],
        task_json['writeup_en'],
    ]
    mentioned_images = []
    if 'images' in task_json:
        for image_obj in task_json['images']:
Example #33
 def validate(self, raw):
     return json_validate(raw, self.schema)
Example #34
 def validate(self, raw):
     return json_validate(raw, self.schema)