Code Example #1
File: sat.py Project: meantheory/sat
	def json(self, schema, key=None, cls=None, *args, **kwargs):
		'''
		will validate the returned json using jsonschema 
		'''
		data = self._json_data(key)

		if data is None:
			print('No data :-(')
			sys.exit()

		try:
			schema_validate(schema, data, *args, **kwargs)
		except ValidationError:

			print('\n >> Validation Error <<')
			print(data)
			print('\n Goodbye :-)')
			sys.exit()
		
		except SchemaError as e:

			print('\n ## You provided a bad schema :-( ##\n')
			print(schema,'\n')
			
			print(e)
			print('\n Goodbye :-)')
			sys.exit()

		return self
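
A note on the two exception types the method above distinguishes: jsonschema raises ValidationError when the data does not satisfy the schema, and SchemaError when the schema itself is malformed. Below is a minimal, self-contained sketch of both failure modes, using jsonschema's standard validate(instance, schema) argument order; the schema and data are made up for illustration.

from jsonschema import SchemaError, ValidationError, validate as schema_validate

example_schema = {"type": "object", "required": ["id"]}

try:
    # The data is missing the required "id" property, so validation fails.
    schema_validate({"name": "no id here"}, example_schema)
except ValidationError as e:
    print("data rejected:", e.message)

try:
    # The schema itself is invalid: "type" must be a string or list of strings.
    schema_validate({"id": 1}, {"type": 42})
except SchemaError as e:
    print("bad schema:", e.message)
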
Code Example #2
def get_cohort_counts():

    cohort_counts = None

    try:
        request_data = request.get_json()
        schema_validate(request_data, COHORT_FILTER_SCHEMA)

        if 'filters' not in request_data:
            cohort_counts = {
                'message': 'No filters were provided; ensure that the request body contains a \'filters\' property.'
            }
        else:
            cohort_counts = get_sample_case_list_bq(None, request_data['filters'])

            if cohort_counts:
                for prog in cohort_counts:
                    if cohort_counts[prog]['case_count'] <= 0:
                        cohort_counts[prog]['message'] = "No cases or samples found which meet the filter criteria for this program."
                    cohort_counts[prog]['provided_filters'] = request_data['filters'][prog]

    except BadRequest as e:
        logger.warning("[WARNING] Received bad request - couldn't load JSON.")
        cohort_counts = {
            'message': 'The JSON provided in this request appears to be improperly formatted.',
        }
    except ValidationError as e:
        logger.warning('[WARNING] Filters rejected for improper formatting: {}'.format(e))
        cohort_counts = {
            'message': 'Filters were improperly formatted.'
        }
    except Exception as e:
        logger.exception(e)

    return cohort_counts
Code Example #3
def post_query(user, cohort_id):
    try:
        request_data = request.get_json()

        if 'fields' not in request_data:
            return dict(
                message = 'No queryFields provided; ensure that the request body contains a \'queryFields\' component.',
                code = 400)

        schema_validate(request_data, QUERY_FIELDS)

        data = {"request_data": request_data}

        query_info = perform_query(request,
                             func=requests.post,
                             url="{}/cohorts/api/{}/query/".format(settings.BASE_URL,cohort_id),
                             data=data,
                             user=user)

    except BadRequest as e:
        logger.warning("[WARNING] Received bad request - couldn't load JSON.")
        query_info = dict(
            message='The JSON provided in this request appears to be improperly formatted.',
            code = 400)

    except ValidationError as e:
        logger.warning('[WARNING] Filters rejected for improper formatting: {}'.format(e))
        query_info = dict(
            message= 'Filters were improperly formatted.',
            code = 400)

    return query_info
Code Example #4
File: schema.py Project: thomas-b-jackson/yac
def validate(object_dict, schema_rel_path):
    """ Validate that a dictionary satisfies a json schema

    Args:
        object_dict: A dictionary representing some object
        schema_rel_path: The path to the schema that the object should satisfy

    Returns:
        None

    Raises:
        ValidationError.
    """

    schema, err = _load_schema(schema_rel_path)

    if not err:

        # only perform the validation against
        # a non-null dictionary
        if object_dict:

            try:
                schema_validate(object_dict, schema)
            except ExtValidationError as e:
                # provide some context before re-raising exception
                raise ExtValidationError(_pp_failures(e, schema))

    else:
        raise ExtValidationError(err)
Code Example #5
def post_query_preview(user):
    try:
        request_data = request.get_json()

        if 'cohort_def' not in request_data:
            return dict(
                message = 'No cohort_def provided; ensure that the request body contains a \'cohort_def\' component.',
                code = 400)
        if 'filters' not in request_data['cohort_def']:
            return dict(
                message = 'No filters were provided; ensure that the cohort_def contains a \'filters\' component.',
                code = 400)
        if 'queryFields' not in request_data:
            return dict(
                message = 'No queryFields provided; ensure that the request body contains a \'queryFields\' component.',
                code = 400)

        schema_validate(request_data, QUERY_PREVIEW_BODY)

        if 'name' not in request_data["cohort_def"] or request_data["cohort_def"]['name'] == "":
            return dict(
                message = 'A name was not provided for this cohort. The cohort was not made.',
                code = 400
            )

        blacklist = re.compile(BLACKLIST_RE, re.UNICODE)
        match = blacklist.search(str(request_data["cohort_def"]['name']))

        if not match and 'description' in request_data["cohort_def"]:
            match = blacklist.search(str(request_data["cohort_def"]['description']))

        if match:
            return dict(
                message = "Your cohort's name or description contains invalid characters; " +
                            "please edit them and resubmit. [Saw {}]".format(str(match)),
                code = 400
            )

        data = {"request_data": request_data}

        query_info = perform_query(request,
                             func=requests.post,
                             url="{}/cohorts/api/preview/query/".format(settings.BASE_URL),
                             data=data,
                             user=user)

    except BadRequest as e:
        logger.warning("[WARNING] Received bad request - couldn't load JSON.")
        query_info = dict(
            message='The JSON provided in this request appears to be improperly formatted.',
            code = 400)

    except ValidationError as e:
        logger.warning('[WARNING] Filters rejected for improper formatting: {}'.format(e))
        query_info = dict(
            message= 'Filters were improperly formatted.',
            code = 400)

    return query_info
Code Example #6
    def from_dict(cls, data):
        """

        Throws:
            ValidationError if the data object does not match the required schema.
        """
        schema_validate(data, cls.SCHEMA)
        return cls(data['service_account_blacklist'])
Code Example #7
    def from_dict(cls, data):
        """

        Throws:
            ValidationError if the data object does not match the required schema.
        """
        schema_validate(data, cls.SCHEMA)
        return cls(data['google_org_whitelist'])
Code Example #8
File: configuration.py Project: qedalab/josim-tools
    def from_dict(value: Dict) -> "MarginParameterConfiguration":
        """ Create a verify configuration from a dict """

        schema_validate(instance=value, schema=SCHEMA_MARGIN_PARAMETER)

        nominal = value.get("nominal")

        return MarginParameterConfiguration(nominal)
Code Example #9
File: configuration.py Project: qedalab/josim-tools
    def from_dict(value: Dict) -> "YieldAnalysisConfiguration":
        """ Create a yield analysis configuration from a dict """

        schema_validate(instance=value, schema=SCHEMA_YIELD)

        num_samples = value["num_samples"]

        return YieldAnalysisConfiguration(num_samples)
Code Example #10
File: configuration.py Project: qedalab/josim-tools
    def from_dict(value: Dict) -> "YieldParameterConfiguration":
        """ Create a yield parameter configuration from a dict """

        schema_validate(instance=value, schema=SCHEMA_YIELD_PARAMETER)

        nominal = value.get("nominal")
        variance = value["variance"]

        return YieldParameterConfiguration(variance, nominal)
Code Example #11
def validate(yml: str) -> bool:
    try:
        schema_validate(load(yml, Loader=FullLoader),
                        load(ZOO_JSON_SCHEMA, FullLoader))
    except ValidationError as err:
        log.info("repos.sync_zoo_yml.validation_error", error=err)
        return False
    else:
        return True
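
The example above loads both the document and the schema from YAML and turns validation failures into a boolean instead of letting them propagate. Here is a self-contained sketch of the same pattern; the is_valid helper, the schema, and the documents are made up for illustration, and yaml.safe_load stands in for the project's FullLoader-based load.

from jsonschema import ValidationError, validate as schema_validate
from yaml import safe_load

EXAMPLE_SCHEMA = """
type: object
required: [name, repos]
properties:
  name: {type: string}
  repos: {type: array, items: {type: string}}
"""

def is_valid(yml: str) -> bool:
    # Both the document and the schema are parsed from YAML before validating.
    try:
        schema_validate(safe_load(yml), safe_load(EXAMPLE_SCHEMA))
    except ValidationError:
        return False
    return True

print(is_valid("name: demo\nrepos: [repo-a, repo-b]"))  # True
print(is_valid("name: demo\nrepos: repo-a"))            # False: repos must be an array
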
Code Example #12
def create_cohort(user):
    cohort_info = None

    try:
        request_data = request.get_json()
        if 'filters' not in request_data:
            return dict(
                message = 'No filters were provided; ensure that the request body contains a \'filters\' property.',
                code = 400)

        schema_validate(request_data['filters'], COHORT_FILTERS_SCHEMA)

        if 'name' not in request_data:
            return dict(
                message = 'A name was not provided for this cohort. The cohort was not made.',
                code = 400
            )

        blacklist = re.compile(BLACKLIST_RE, re.UNICODE)
        match = blacklist.search(str(request_data['name']))

        if not match and 'description' in request_data:
            match = blacklist.search(str(request_data['description']))

        if match:
            return dict(
                message = "Your cohort's name or description contains invalid characters; " +
                            "please edit them and resubmit. [Saw {}]".format(str(match)),
                code = 400
            )

        path_params = {'email': user}
        try:
            auth = get_auth()
            data = {"request_data": request_data}
            response = requests.post("{}/{}/".format(settings.BASE_URL, 'cohorts/api/save_cohort'),
                            params=path_params, json=data, headers=auth)
            cohort_info = response.json()
        except Exception as e:
            logger.exception(e)

    except BadRequest as e:
        logger.warning("[WARNING] Received bad request - couldn't load JSON.")
        cohort_info = {
            'message': 'The JSON provided in this request appears to be improperly formatted.',
            'code': 400
        }

    except ValidationError as e:
        logger.warning("[WARNING] Cohort information rejected for improper formatting: {}".format(e))
        cohort_info = {
            'message': 'Cohort information was improperly formatted - cohort not created.',
            'code': 400
        }

    return cohort_info
Code Example #13
File: configuration.py Project: qedalab/josim-tools
    def from_dict(value: Dict) -> "OptimizerParameterConfiguration":
        """ Create a optimize parameter configuration from a dict """

        schema_validate(instance=value, schema=SCHEMA_OPTIMIZE_PARAMETER)

        nominal = value["nominal"]
        min_ = value.get("min")
        max_ = value.get("max")

        return OptimizerParameterConfiguration(nominal, min_, max_)
Code Example #14
def get_cohort_preview_manifest(user):
    try:
        # if 'next_page' in request.args and request.args['next_page'] not in ["", None]:
        #     data = {"request_data": None}
        # else:
        if True:
            request_data = request.get_json()

            if 'filters' not in request_data:
                return dict(
                    message = 'No filters were provided; ensure that the request body contains a \'filters\' property.',
                    code = 400)

            schema_validate(request_data['filters'], COHORT_FILTERS_SCHEMA)

            if 'name' not in request_data:
                return dict(
                    message = 'A name was not provided for this cohort. The cohort was not made.',
                    code = 400
                )

            blacklist = re.compile(BLACKLIST_RE, re.UNICODE)
            match = blacklist.search(str(request_data['name']))

            if not match and 'description' in request_data:
                match = blacklist.search(str(request_data['description']))

            if match:
                return dict(
                    message = "Your cohort's name or description contains invalid characters; " +
                                "please edit them and resubmit. [Saw {}]".format(str(match)),
                    code = 400
                )

            data = {"request_data": request_data}
        manifest_info = get_manifest(request,
                             func=requests.post,
                             url="{}/cohorts/api/preview/manifest/".format(settings.BASE_URL),
                             data=data,
                             user=user)

    except BadRequest as e:
        logger.warning("[WARNING] Received bad request - couldn't load JSON.")
        manifest_info = dict(
            message='The JSON provided in this request appears to be improperly formatted.',
            code = 400)

    except ValidationError as e:
        logger.warning('[WARNING] Filters rejected for improper formatting: {}'.format(e))
        manifest_info = dict(
            message= 'Filters were improperly formatted.',
            code = 400)

    return manifest_info
Code Example #15
 def from_dict(cls: Any, data: JsonDict, validate=True) -> Any:
     """Returns a dataclass instance with all nested classes converted from the dict given"""
     decoded_data = {}
     if validate:
         schema_validate(data, cls.json_schema())
     for field, field_type in cls.__annotations__.items():
         mapped_field = cls.field_mapping().get(field, field)
         decoded_data[field] = cls._decode_field(field, field_type,
                                                 data.get(mapped_field),
                                                 validate)
     return cls(**decoded_data)
Code Example #16
def create_cohort(user):
    cohort_info = None

    try:
        request_data = request.get_json()
        schema_validate(request_data, COHORT_FILTER_SCHEMA)

        if 'name' not in request_data:
            cohort_info = {
                'message': 'A name was not provided for this cohort. The cohort was not made.',
            }
            return cohort_info

        if 'filters' not in request_data:
            cohort_info = {
                'message': 'Filters were not provided; at least one filter must be provided for a cohort to be valid.' +
                       ' The cohort was not made.',
            }
            return cohort_info

        blacklist = re.compile(BLACKLIST_RE, re.UNICODE)
        match = blacklist.search(str(request_data['name']))

        if not match and 'desc' in request_data:
            match = blacklist.search(str(request_data['desc']))

        if match:
            cohort_info = {
                'message': 'Your cohort\'s name or description contains invalid characters; please edit them and resubmit. ' +
                    '[Saw {}]'.format(str(match)),
            }

        else:
            result = make_cohort(user, **request_data)

            if 'message' in result:
                cohort_info = result
            else:
                cohort_info = get_cohort_info(result['cohort_id'])

    except BadRequest as e:
        logger.warning("[WARNING] Received bad request - couldn't load JSON.")
        cohort_info = {
            'message': 'The JSON provided in this request appears to be improperly formatted.',
        }

    except ValidationError as e:
        logger.warning("[WARNING] Cohort information rejected for improper formatting: {}".format(e))
        cohort_info = {
            'message': 'Cohort information was improperly formatted - cohort not created.',
        }

    return cohort_info
Code Example #17
def test_detector(user_client, rf):
    # Put an entry in the schedule that we can refer to
    rjson = post_schedule(user_client, TEST_SCHEDULE_ENTRY)
    entry_name = rjson['name']
    task_id = rjson['next_task_id']

    # use mock_acquire set up in conftest.py
    by_name['mock_acquire'](entry_name, task_id)
    acquisition = Acquisition.objects.get(task_id=task_id)
    sigmf_metadata = acquisition.sigmf_metadata
    assert sigmf_validate(sigmf_metadata)
    schema_validate(sigmf_metadata, schema)
Code Example #18
File: configuration.py Project: qedalab/josim-tools
    def from_dict(value: Dict) -> "MarginAnalysisConfiguration":
        """ Create a marginal analysis configuration from a dict """

        schema_validate(instance=value, schema=SCHEMA_MARGIN)

        max_search = value.get("max_search", 1.9)
        min_search = value.get("min_search", 0.1)
        scan_steps = value.get("scan_steps", 4)
        binary_search_steps = value.get("binary_search_steps", 3)

        return MarginAnalysisConfiguration(max_search, min_search, scan_steps,
                                           binary_search_steps)
Code Example #19
File: configuration.py Project: qedalab/josim-tools
    def from_dict(value: Dict) -> "VerifyConfiguration":
        """ Create a verify configuration from a dict """

        schema_validate(instance=value, schema=SCHEMA_VERIFY)

        method = value["method"]
        file_path = value["file"]
        circuit_path = value["circuit"]
        threshold = value.get("threshold", 0.05)
        wrspice_compatibility = value.get("wrspice_compatibility", False)

        return VerifyConfiguration(method, file_path, circuit_path, threshold,
                                   wrspice_compatibility)
Code Example #20
    def from_dict(cls, param):
        """
        Throws:
            ValidationError if the data object does not match the required schema.
        """
        schema_validate(param, cls.SCHEMA)

        supported_genomic_builds = param['supported_genomic_builds']
        data_table_list = [
            GNABTableConfig.from_dict(item) for item in param['tables']
        ]

        return cls(supported_genomic_builds, data_table_list)
Code Example #21
def edit_cohort(cohort_id, user, delete=False):
    match = None

    try:
        if delete:
            cohort = Cohort.objects.get(id=cohort_id)
            cohort.active = False
            cohort.save()
            cohort_info = {
                'notes': 'Cohort {} (\'{}\') has been deleted.'.format(cohort_id, cohort.name),
                'data': {'filters': cohort.get_current_filters(unformatted=True)},
            }
        else:
            request_data = request.get_json()
            if len(request_data.keys()):
                schema_validate(request_data, COHORT_FILTER_SCHEMA)

            if 'name' in request_data:
                blacklist = re.compile(BLACKLIST_RE, re.UNICODE)
                match = blacklist.search(str(request_data['name']))

            if match:
                cohort_info = {
                    'message': 'Your cohort\'s name or description contains invalid characters; please edit them and resubmit. ' +
                               '[Saw {}]'.format(str(match)),
                }
            else:
                result = make_cohort(user, source_id=cohort_id, **request_data)
                if 'message' in result:
                    cohort_info = result
                else:
                    cohort_info = get_cohort_info(result['cohort_id'])


    except BadRequest as e:
        logger.warning("[WARNING] Received bad request - couldn't load JSON.")
        cohort_info = {
            'message': 'The JSON provided in this request appears to be improperly formatted.',
        }

    except ObjectDoesNotExist as e:
        logger.error("[ERROR] During {} for cohort ID {}:".format(request.method,str(cohort_id)))
        logger.error("Couldn't find a cohort with that ID!")
        # Make sure cohort_info is defined before the return below.
        cohort_info = {
            'message': "A cohort with ID {} was not found.".format(cohort_id),
        }

    except ValidationError as e:
        logger.warning("[WARNING] Cohort information rejected for improper formatting: {}".format(e))
        cohort_info = {
            'message': 'Cohort information was improperly formatted - cohort not edited.',
        }

    return cohort_info
Code Example #22
File: configuration.py Project: qedalab/josim-tools
    def from_dict(cls, value: Dict) -> "OptimizeConfiguration":
        """ Create a optimize configuration """

        schema_validate(value, SCHEMA_OPTIMIZE)

        method: str = value["method"]

        search_radius: float = value.get("search_radius", 0.05)
        converge: float = value.get("converge", 0.01)
        max_iterations: float = value.get("max_iterations", 1000)

        output: Optional[str] = value.get("output", None)

        return cls(method, search_radius, converge, max_iterations, output)
Code Example #23
        def wrapper(*args, **kwargs):
            instance = args[0]

            if isinstance(self.schema_or_schema_parameter_name, str):
                schema = getattr(instance,
                                 self.schema_or_schema_parameter_name)
            else:
                schema = self.schema_or_schema_parameter_name

            try:
                schema_validate(self.object(), schema)
                return f(*args, **kwargs)
            except ValidationError as e:
                raise ValidationException(e.message, e.instance)
Code Example #24
 def _check_basic(self, key, schema, identifier):
     """ Verify schema and no data duplicated
     """
     issues = []
     ids = set()
     for d in self.data:
         data = d.get(key, {})
         try:
             schema_validate({key: data}, yaml.load(schema))
         except Exception as e:
             issues.append(e.message)
         duplicated = set(data.keys()) & ids
         if duplicated:
             issues.append("%s IDs [%s,] are duplicated" %
                           (identifier, ",".join(duplicated)))
         ids.update(set(data.keys()))
Code Example #25
    def to_dict(self,
                omit_none: bool = True,
                validate: bool = False) -> JsonDict:
        """Converts the dataclass instance to a JSON encodable dict, with optional JSON schema validation.

        If omit_none (default True) is specified, any items with value None are removed
        """
        data = {}
        for f in fields(self):
            value = self._encode_field(getattr(self, f.name), omit_none)
            if omit_none and value is None:
                continue
            data[self.field_mapping().get(f.name, f.name)] = value
        if validate:
            schema_validate(data, self.json_schema())
        return data
Code Example #26
def test_cose_schema(config_env: Dict):
    if EXPECTED_SCHEMA_VALIDATION not in config_env[EXPECTED_RESULTS].keys():
        skip(f'Test not requested: {EXPECTED_SCHEMA_VALIDATION}')
    if COSE not in config_env.keys():
        skip(f'Test dataset does not contain {COSE}')

    if config_env[EXPECTED_RESULTS][EXPECTED_SCHEMA_VALIDATION]:
        dgc = _dgc(config_env)
        cose_payload = loads(dgc.payload)
        assert PAYLOAD_HCERT in cose_payload.keys()
        assert len(cose_payload[PAYLOAD_HCERT]) == 1
        assert 1 in cose_payload[PAYLOAD_HCERT].keys()
        hcert = cose_payload[PAYLOAD_HCERT][1]
        schema_validate(hcert, _get_hcert_schema())
        assert len(
            set(hcert.keys()) & {'v', 'r', 't'}
        ) == 1, 'DGC adheres to schema but contains multiple certificates'
Code Example #27
File: __init__.py Project: morucci/repoxplorer
 def _check_basic(self, key, schema, identifier):
     """ Verify schema and no data duplicated
     """
     issues = []
     ids = set()
     for d in self.data:
         data = d.get(key, {})
         try:
             schema_validate({key: data},
                             yaml.load(schema))
         except Exception as e:
             issues.append(e.message)
         duplicated = set(data.keys()) & ids
         if duplicated:
             issues.append("%s IDs [%s,] are duplicated" % (
                           identifier, ",".join(duplicated)))
         ids.update(set(data.keys()))
     return ids, issues
Code Example #28
File: maruval.py Project: uzh/marugoto-validator
def validate(path=None, fail_first=False, no_warnings=False):
    """
    Main validator routine

    Iterate over all JSON files, check that they can be loaded, and then run jsonschema.
    Handle fail fast option both during loading and validating.
    """
    # get the absolute expanded path and always use this
    path = os.path.abspath(os.path.expanduser(path))
    errors = list()
    to_check = _get_json_files(path)
    schemata = _get_schemata(path)
    ok = 0
    for json_file in sorted(to_check):
        schema_name, schema = _get_correct_schema(json_file, schemata)
        with open(json_file, "r", encoding="utf-8") as f:
            try:
                data = json.load(f)
            except Exception as err:
                errors.append((err, json_file, True))
                if fail_first:
                    break
                continue
        try:
            schema_validate(instance=data, schema=schema)
            _custom_validate(json_file, data, path)

        except (ValidationError, OSError) as err:
            errors.append((err, json_file, False))
            if fail_first:
                break
            continue
        ok += 1
    _print_errors(errors)
    msg = "\nAll done. {} errors found".format(len(errors))
    if len(to_check) > 1:
        msg += ". {} files OK.\n".format(ok)
    else:
        msg += " in {}".format(to_check[0])
    print(msg)
    if len(errors):
        sys.exit(1)
    else:
        sys.exit(0)
Code Example #29
def test_cose_schema(config_env: Dict):
    if CONFIG_ERROR in config_env.keys():
        fail(f'Config Error: {config_env[CONFIG_ERROR]}')
    if EXPECTED_SCHEMA_VALIDATION not in config_env[EXPECTED_RESULTS].keys():
        skip(f'Test not requested: {EXPECTED_SCHEMA_VALIDATION}')
    if COSE not in config_env.keys():
        skip(f'Test dataset does not contain {COSE}')

    if config_env[EXPECTED_RESULTS][EXPECTED_SCHEMA_VALIDATION]:
        dgc = _dgc(config_env)
        cose_payload = loads(dgc.payload, object_hook=_object_hook_e)
        assert PAYLOAD_HCERT in cose_payload.keys()
        assert len(cose_payload[PAYLOAD_HCERT]) == 1
        assert 1 in cose_payload[PAYLOAD_HCERT].keys()
        hcert = cose_payload[PAYLOAD_HCERT][1]
        schema_validate(hcert, _get_hcert_schema())
        # assert len(set(hcert.keys()) & {'v', 'r', 't'}) == 1,
        # 'DGC adheres to schema but contains multiple certificates'
        assert len([key for key in hcert.keys() if key in ['v', 'r', 't']]) == 1, \
            'DGC adheres to schema but contains multiple certificates'
Code Example #30
def validate(path=None, fail_first=False, no_warnings=False):
    """
    Main validator routine

    Iterate over all JSON files, check that they can be loaded, and then run jsonschema.
    Handle fail fast option both during loading and validating.
    """
    errors = list()
    to_check = _get_json_files(path)
    schemata = _get_schemata()
    ok = 0
    for json_file in sorted(to_check):
        schema = _get_correct_schema(json_file, schemata)
        with open(json_file, "r") as f:
            try:
                data = json.load(f)
            except json.JSONDecodeError as err:
                errors.append((err, json_file, True))
                if fail_first:
                    break
                continue
        try:
            schema_validate(instance=data, schema=schema)
        except ValidationError as err:
            errors.append((err, json_file, False))
            if fail_first:
                break
            continue
        ok += 1
    _print_errors(errors)
    msg = "\nAll done. {} errors found".format(len(errors))
    if len(to_check) > 1:
        msg += ". {} files OK.\n".format(ok)
    else:
        msg += " in {}".format(to_check[0])
    print(msg)
Code Example #31
def validate(data, schema):
    schema_validate(
        instance=data,
        schema=schema,
        format_checker=draft7_format_checker,
    )
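
The wrapper above passes a FormatChecker so that "format" keywords (date, email, and so on) are actually enforced; by default jsonschema treats "format" as an annotation only. A small illustration of the difference, with a hypothetical schema and data:

from jsonschema import ValidationError, draft7_format_checker, validate as schema_validate

schema = {"type": "object", "properties": {"contact": {"type": "string", "format": "email"}}}
bad = {"contact": "not-an-email"}

# Without a format checker this passes: "format" is not enforced by default.
schema_validate(instance=bad, schema=schema)

# With the Draft 7 format checker the same data is rejected.
try:
    schema_validate(instance=bad, schema=schema, format_checker=draft7_format_checker)
except ValidationError as e:
    print(e.message)  # reports that the value does not conform to the 'email' format
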
Code Example #32
File: swagger.py Project: piotrbulinski/swagger2rst
 def schema_validate(cls, obj, json_schema):
     schema_validate(obj, json_schema, format_checker=cls._json_format_checker)
Code Example #33
def validate(model_name, instance):
    base_url = os.path.abspath('resources') + '/schemas/'
    resolver = RefResolver('file://{base_url}'.format(base_url=base_url), None)
    schema = load_schema(model_name)
    schema_validate(instance=instance, schema=schema, resolver=resolver)
Code Example #34
File: compilespec.py Project: futoin/specs
def compilespec( spec_file ) :
    #---
    spec_dir = os.path.dirname( spec_file )
    meta_dir = os.path.join( spec_dir, 'meta' )
    preview_dir = os.path.join( spec_dir, 'preview' )
    html_file = os.path.join( preview_dir, os.path.basename( spec_file ).replace( '.md', '.html' ) )

    #---
    spec_time = os.path.getmtime(spec_file)
    try :
        html_time = os.path.getmtime(html_file)
    except OSError:
        spec_time = 1
        html_time = 0

    if spec_time < html_time :
        print( "- Skipping " + spec_file + "\n" )
        return

    #---
    input_file = codecs.open( spec_file, mode="r", encoding="utf-8" )


    #---
    text = []
    json_text = []
    parsing_iface = False
    parsing_schema = False
    schema_re = re.compile( '^`Schema\\(([a-z0-9\-_]+)\\){`$' )
    curr_line = 1
    in_header = True
    spec_ver = ''
    end_of_spec_seen = False

    for l in input_file:
        try:
            if in_header :
                pair = l.split( ':', 2 )

                if len( pair ) == 2 :
                    tag, value = pair
                    value = value.strip()

                    if tag == 'Version' :
                        spec_ver = value.replace('DV','')
                    elif tag in ('Copyright','Authors','Date') :
                        pass
                    elif re.match( 'FTN[0-9]+', l ) :
                        pass
                    else :
                        die(str(curr_line) + " Unknown header field")

                if l == '\n' :
                    in_header = False

                    if not spec_ver :
                        die( str(curr_line) + " Missing spec Version" )
                        
            #---
            if end_of_spec_seen and l != '\n':
                die( str(curr_line) + " Text after end of spec" )
                        
            #---
            m = schema_re.match( l )

            if m is not None:
                if parsing_iface or parsing_schema :
                    print( "Current Schema: " + str( parsing_schema ) + "\n" )
                    die( str(curr_line) + ': Unable to parse Schema in scope of another Schema or Iface\n' )

                parsing_schema = m.group(1)
                text.append('<p class="futoin-schema">Schema: ' + parsing_schema + '</p>\n')

            elif l == '`}Schema`\n' :
                schema_obj = json.loads(
                        ''.join( json_text ),
                        object_pairs_hook = lambda pairs: collections.OrderedDict( pairs )
                )
                Draft4Validator.check_schema(schema_obj)
                schema = json.dumps(schema_obj, indent=2, separators=(',', ': ') )

                schema_file = os.path.join( meta_dir, parsing_schema + '-' + spec_ver + '-schema.json' )

                with codecs.open( schema_file,
                                "w",
                                encoding="utf-8",
                                errors="xmlcharrefreplace"
                ) as f:
                    f.write( schema )
                    
                    
                # mjr ver
                spec_major_ver = spec_ver.split('.')
                spec_major_ver = spec_major_ver[0]

                schema_mjr_file = os.path.join( meta_dir, parsing_schema + '-' + spec_major_ver + '-schema.json' )
                try:
                    os.unlink( schema_mjr_file )
                except OSError:
                    pass
                
                os.symlink( os.path.basename( schema_file ), schema_mjr_file )
                    
                # no ver
                schema_file_nover = os.path.join( meta_dir, parsing_schema + '-schema.json' )
                try:
                    os.unlink( schema_file_nover )
                except OSError:
                    pass
                os.symlink( os.path.basename( schema_mjr_file ), schema_file_nover )

                parsing_schema = False
                json_text = []

            elif l == '`Iface{`\n' :
                if parsing_iface or parsing_schema :
                    die( str(curr_line) + ': Unable to parse Iface in scope of Schema or another Iface\n' )

                parsing_iface = True

            elif l == '`}Iface`\n' :
                if not parsing_iface:
                    die( str(curr_line) + ': Unexpected end of Iface' )

                iface = json.loads(
                        ''.join( json_text ),
                        object_pairs_hook = lambda pairs: collections.OrderedDict( pairs )
                )
                iface_name = iface['iface']
                iface["version"] = spec_ver
                
                if 'imports' in iface:
                    iface['imports'] = [ v.replace('{ver}', spec_ver) for v in iface['imports'] ]
                if 'inherit' in iface:
                    iface['inherit'] = iface['inherit'].replace('{ver}', spec_ver)
                
                # validate schema
                schema_file = os.path.join(meta_dir, 'futoin-interface-' + iface['ftn3rev'] + '-schema.json')
                with open(schema_file, 'r') as sf:
                    schema = json.load(sf)
                schema_validate(iface, schema)
                
                # version file
                iface_ver_file = os.path.join( meta_dir, iface_name + '-' + iface['version'] + '-iface.json' )

                with codecs.open( iface_ver_file,
                                "w",
                                encoding="utf-8",
                                errors="xmlcharrefreplace"
                ) as f:
                    f.write( json.dumps( iface, indent=2, separators=(',', ': ') ) )

                # mjr symlink
                iface_major_ver = iface['version'].split('.')
                iface_major_ver = iface_major_ver[0]
                iface_mjr_file = os.path.join( meta_dir, iface_name + '-' + iface_major_ver + '-iface.json' )
                
                try:
                    os.unlink( iface_mjr_file )
                except OSError:
                    pass
                os.symlink( os.path.basename( iface_ver_file ), iface_mjr_file )

                
                # no ver symlink
                iface_file_nover = os.path.join( meta_dir, iface_name + '-iface.json' )
                try:
                    os.unlink( iface_file_nover )
                except OSError:
                    pass
                os.symlink( os.path.basename( iface_mjr_file ), iface_file_nover )

                parsing_iface = False
                json_text = []

            else :
                if l == '=END OF SPEC=\n' :
                    end_of_spec_seen = True
                    
                if parsing_iface or parsing_schema :
                    json_text.append( l )

                l = l.replace( '.md', '.html' )
                text.append( l )

            curr_line += 1
        except Exception as e :
            if len( json_text ) :
                i = 1
                for jl in json_text :
                    sys.stderr.write( "%s: %s"  % ( i, jl ) )
                    i += 1
            die( "At line %s: Exception: %s\n" % ( curr_line, e )  )
            
    #---
    if not end_of_spec_seen:
        die( "Missing '=END OF SPEC='"  )

    #---
    html_ver_file = html_file.replace( '.html', '-' + spec_ver + '.html' )
    
    spec_major_ver = spec_ver.split('.')
    spec_major_ver = spec_major_ver[0]
    html_mjrver_file = html_file.replace( '.html', '-' + spec_major_ver + '.html' )
    
    if False :
        raw_file = codecs.open( html_file + '.raw', "w",
                                encoding="utf-8",
                                errors="xmlcharrefreplace"
        )

        raw_file.write( ''.join( text ) )
        raw_file.close()

    output_file = codecs.open( html_ver_file, "w",
                            encoding="utf-8",
                            errors="xmlcharrefreplace"
    )

    # mjr.mnr symlink
    try :
        os.unlink( html_mjrver_file )
    except OSError:
        pass
    os.symlink( os.path.basename( html_ver_file ), html_mjrver_file )

    # mjr symlink
    try :
        os.unlink( html_file )
    except OSError:
        pass
    os.symlink( os.path.basename( html_mjrver_file ), html_file )

    # update html
    output_file.write( '<!DOCTYPE html>\n' )
    output_file.write( '<html>\n<head>\n' )
    output_file.write( '<title>' + os.path.basename( spec_file ) + '</title>\n' )
    output_file.write( '<link rel="stylesheet" type="text/css" href="../../css/specs.css">\n' )
    output_file.write( '</head><body>\n' )
    output_file.write( markdown.markdown( ''.join( text ), output_format='html5' ) )
    output_file.write( '\n</body></html>' )
    output_file.close()
    input_file.close()

    #---
    print( "Compiled " + spec_file + "\n" )