def existing_image(filename: str) -> str:
    """Validate that *filename* refers to an existing image file.

    Returns the canonical (symlink-resolved) path to the file.

    Raises:
        SchemaError: if no filename was given or the file does not exist.
    """
    if not filename:
        raise SchemaError('No file given')
    if not os.path.exists(filename):
        raise SchemaError('File "{0}" does not exist'.format(filename))
    # Extra project-specific check that OpenCV can work with this filename.
    ensure_opencv_filename(filename)
    return os.path.realpath(filename)
def plugin_list(value):
    """Turn a space-delimited series of plugin names into a ListAndAll of
    Plugins. Never return the core plugin.
    """
    # NOTE: `basestring` suggests Python 2 heritage; under Python 3 this
    # name does not exist -- confirm the target interpreter.
    if not isinstance(value, basestring):  # Probably can't happen
        raise SchemaError(
            '"%s" is neither * nor a whitespace-delimited list '
            'of plugin names.' % (value, ),
            None)

    plugins = all_plugins_but_core()
    names = value.strip().split()
    is_all = names == ['*']
    if is_all:
        names = plugins.keys()
    try:
        ret = ListAndAll([plugins[name] for name in names])
        ret.is_all = is_all
        return ret
    except KeyError as exc:
        # BUG FIX: the comprehension variable `name` does not leak out of a
        # list comprehension in Python 3, so referencing it here raised
        # NameError instead of the intended SchemaError.  Read the unknown
        # plugin name from the KeyError itself.
        raise SchemaError(
            'Never heard of plugin "%s". I\'ve heard of '
            'these: %s.' % (exc.args[0], ', '.join(plugins.keys())),
            None)
def validate_pai_config_path(self, experiment_config):
    '''validate paiConfigPath field

    Only applies when trainingServicePlatform is 'pai'.  If a
    paiConfigPath is given, the referenced YAML file must define
    taskRoles; otherwise the required trial/pai fields must be present
    inline in the experiment configuration.

    Raises:
        SchemaError: when a required field is missing.
    '''
    if experiment_config.get('trainingServicePlatform') != 'pai':
        return
    if experiment_config.get('trial', {}).get('paiConfigPath'):
        # A pai config file is provided: it must define taskRoles.
        pai_config = get_yml_content(
            experiment_config['trial']['paiConfigPath'])
        if not pai_config.get('taskRoles'):
            raise SchemaError(
                'Please set taskRoles in paiConfigPath config file!')
        return
    # No pai config file: required fields must appear inline.
    # BUG FIX: the original error messages contained a backslash line
    # continuation *inside* the string literal, embedding a stray '\' and
    # run-on indentation whitespace in the user-facing message.
    pai_trial_fields_required_list = ['image', 'paiStorageConfigName', 'command']
    for trial_field in pai_trial_fields_required_list:
        if experiment_config['trial'].get(trial_field) is None:
            raise SchemaError(
                'Please set {0} in trial configuration, or set additional '
                'pai configuration file path in paiConfigPath!'
                .format(trial_field))
    pai_resource_fields_required_list = ['gpuNum', 'cpuNum', 'memoryMB']
    for required_field in pai_resource_fields_required_list:
        if experiment_config['trial'].get(required_field) is None and \
                experiment_config['paiConfig'].get(required_field) is None:
            raise SchemaError(
                'Please set {0} in trial or paiConfig configuration, or set '
                'additional pai configuration file path in paiConfigPath!'
                .format(required_field))
def __call__(self, parent_schema, series, index):
    # Run this check's function (self.fn) against `series`.
    # `index` identifies this check within the parent schema and is used
    # only for error reporting.  Returns True when the check passes;
    # raises SchemaError on data failure, TypeError on a misbehaving
    # validator function.
    if self.element_wise:
        # Element-wise mode: apply fn to each element; every result must
        # be truthy for the series to pass.
        val_result = series.map(self.fn)
        if val_result.all():
            return True
        # Report only the elements that failed (~val_result mask).
        raise SchemaError(
            self.vectorized_error_message(parent_schema, index,
                                          series[~val_result]))
    else:
        # series-wise validator can return either a boolean or a
        # pd.Series of booleans.
        val_result = self.fn(series)
        if isinstance(val_result, pd.Series):
            if not val_result.dtype == PandasDtype.Bool.value:
                # A Series of the wrong dtype is a bug in the validator
                # itself, not in the data -- hence TypeError.
                raise TypeError(
                    "validator %d: %s must return bool or Series of type "
                    "bool, found %s" %
                    (index, self.fn.__name__, val_result.dtype))
            if val_result.all():
                return True
            raise SchemaError(
                self.vectorized_error_message(parent_schema, index,
                                              series[~val_result]))
        else:
            # Scalar boolean result: pass/fail for the whole series.
            if val_result:
                return True
            else:
                raise SchemaError(
                    "series did not pass series validator %d: %s" %
                    (index, self.error_message))
def validate_self_content(keypairs, datadict):
    """Validate a dict by comparing keypairs.

    Args:
        keypairs(obj): Iterable of tuples strings with len==2
        datadict(dict): a dict

    Returns:
        None: if all entries are equal.

    Raises:
        SchemaError: if any key pair is different.
    """
    datakeys = set(datadict.keys())
    reqkeys = {y for x in keypairs for y in x}
    # BUG FIX: `missing` used to be the boolean `not reqkeys.issubset(...)`,
    # so the error message printed "True" instead of the offending keys.
    # Compute the actual set of missing keys instead.
    missing = reqkeys - datakeys
    if missing:
        # Also fixed: the message previously embedded a stray backslash and
        # indentation whitespace from a line continuation inside the literal.
        errmsg = ("Content validation failed.\n"
                  "Cannot compare missing keys: %s " % (missing))
        raise SchemaError(errmsg)
    for k1, k2 in keypairs:
        v1 = datadict[k1]
        v2 = datadict[k2]
        if v1 != v2:
            errmsg = ("Content validation failed.\n"
                      "(d[%s] = %s) != (d[%s] = %s)" % (k1, v1, k2, v2))
            raise SchemaError(errmsg)
def __call__(self, series):
    """Validate a series."""
    # Resolve the target dtype string: self._pandas_dtype is either a plain
    # dtype string or an enum-like object exposing the string via .value.
    _dtype = self._pandas_dtype if isinstance(self._pandas_dtype, str) \
        else self._pandas_dtype.value
    if self._nullable:
        # Nullable column: ignore missing values for the type check.
        series = series.dropna()
        if (_dtype == Int.value):
            # Pandas stores nullable integer columns as float, so compare
            # against float -- but verify the values are still integral.
            _dtype = Float.value
            if (series.astype(_dtype) != series).any():
                # in case where dtype is meant to be int, make sure that
                # casting to int results in the same values.
                raise SchemaError(
                    "after dropping null values, expected series values "
                    "to be int, found: %s" % set(series))
    else:
        # Non-nullable column: any null value is an immediate failure.
        nulls = series.isnull()
        if nulls.sum() > 0:
            raise SchemaError(
                "non-nullable series contains null values: %s" %
                series[nulls].head(N_FAILURE_CASES).to_dict())
    type_val_result = series.dtype == _dtype
    if not type_val_result:
        # NOTE(review): this message reads self._pandas_dtype.value, which
        # would raise AttributeError when _pandas_dtype is a plain str --
        # confirm whether str dtypes can reach this branch.
        raise SchemaError(
            "expected series '%s' to have type %s, got %s" %
            (series.name, self._pandas_dtype.value, series.dtype))
    # Run every registered check; a failing check raises inside check().
    check_results = []
    for i, check in enumerate(self._checks):
        check_results.append(check(self, series, i))
    return all([type_val_result] + check_results)
def validate(self, x):
    """Validate that *x* is a non-empty numeric sequence and return it as a
    flat float numpy array, checking the expected size when configured."""
    if not isinstance(x, (list, tuple, numpy.ndarray)):
        raise SchemaError("Sequence is not list, tuple or numpy array", [])
    if isinstance(x, numpy.ndarray):
        # Arrays must already hold floats; flatten to one dimension.
        if x.dtype.kind != "f":
            raise SchemaError(
                "Array dtype must be float, "
                "but was {}".format(x.dtype), [])
        x = x.ravel()
    if len(x) == 0:
        raise ValueError("Expecting a non-empty sequence but "
                         "got {}".format(x))
    if self.size is not None and len(x) != self.size:
        raise SchemaError(
            "Expecting sequence length {} but got "
            "{}".format(self.size, len(x)), [])
    if not isinstance(x, numpy.ndarray):
        # Plain list/tuple: every element must be numeric before converting.
        if any(not isinstance(element, (int, float)) for element in x):
            raise SchemaError(
                "Values in sequence are expected to be "
                "numeric", [])
        x = numpy.array(x, dtype=float)
    return x
def validate(self, value):
    """Check that *value* is a numeric salary inside the accepted range.

    Raises:
        SchemaError: for non-numeric values or out-of-range salaries.
    """
    if not (is_integer(value) or is_float(value)):
        raise SchemaError(
            "Salary has unexpected type {t}".format(t=type(value)))
    if value <= 10000:
        raise SchemaError("Salary {s} is too low".format(s=value))
    if value >= 99999.9:
        raise SchemaError("Salary {s} is too high".format(s=value))
def validate(self, data):
    """Apply the wrapped callable to *data* and return its result.

    A nested SchemaError is re-raised with this schema's error prepended;
    any other exception is wrapped into a new SchemaError describing the
    failed call.
    """
    try:
        return self._callable(data)
    except SchemaError as exc:
        # Extend the nested failure's error chains with our own entry.
        raise SchemaError([None] + exc.autos, [self._error] + exc.errors)
    except BaseException as exc:
        fname = self._callable.__name__
        raise SchemaError('%s(%r) raised %r' % (fname, data, exc), self._error)
def validate(self, x):
    """Validate that *x* is a tuple of length self.N whose elements each
    pass the corresponding sub-schema in self.tt.

    Returns the tuple of validated elements.

    Raises:
        SchemaError: when x is not a tuple or has the wrong length.
    """
    if not isinstance(x, tuple):
        raise SchemaError("Expecting tuple, got {}".format(type(x)), [])
    if len(x) != self.N:
        # BUG FIX: the message had one placeholder but two format args, so
        # the actual length was silently dropped ("but got" with no value).
        raise SchemaError(
            "Expecting a tuple of size {}, but got {}".format(self.N, len(x)),
            [])
    return tuple(schema.validate(y) for y, schema in zip(x, self.tt))
def _validate_endpoint_list(value):
    """Check that *value* is a list whose items are all valid endpoint URLs.

    Returns True on success; raises SchemaError otherwise.
    """
    if value is None:
        raise SchemaError('Invalid configuration: \'currentlist\' is not set')
    if not isinstance(value, list):
        raise SchemaError(
            'Invalid configuration: \'currentlist\' is not a list of items')
    # Each entry must itself be a valid URL.
    for endpoint in value:
        _validate_valid_url(endpoint, caller="'currentlist' items")
    return True
def validate(self, data):
    """Return the first sub-schema validation of *data* that succeeds.

    When every alternative fails, raise a SchemaError combining a summary
    of this Or() with the last recorded failure.
    """
    last_error = SchemaError([], [])
    for candidate in [Schema(s, error=self._error) for s in self._args]:
        try:
            return candidate.validate(data)
        except SchemaError as err:
            # Remember the most recent failure for the final report.
            last_error = err
    raise SchemaError(
        ['%r did not validate %r' % (self, data)] + last_error.autos,
        [self._error] + last_error.errors)
def validate_tuner_adivosr_assessor(self, experiment_config):
    """Check advisor/tuner/assessor settings are mutually consistent.

    An advisor excludes both tuner and assessor; without an advisor a
    tuner specification is mandatory.
    """
    if experiment_config.get('advisor'):
        if experiment_config.get('assessor') or experiment_config.get('tuner'):
            raise SchemaError('advisor could not be set with assessor or tuner simultaneously!')
        self.validate_annotation_content(experiment_config, 'advisor', 'builtinAdvisorName')
        return
    if not experiment_config.get('tuner'):
        raise SchemaError('Please provide tuner spec!')
    self.validate_annotation_content(experiment_config, 'tuner', 'builtinTunerName')
def validate(self, ets_id):
    """Validate an entity-type selection id.

    Accepts anything int() can convert; returns the integer id.

    Raises:
        SchemaError: if the value is not an integer or not a known id.
    """
    try:
        ets_id = int(ets_id)
    except (TypeError, ValueError):
        # BUG FIX: the previous blanket `except Exception` also swallowed
        # the SchemaError raised for unknown ids and re-raised a fresh one;
        # catch only int() conversion failures here.
        raise SchemaError('entity-type selection %s not valid' % ets_id)
    if ets_id not in self.valid_ets_ids:
        raise SchemaError('entity-type selection %s not valid' % ets_id)
    return ets_id
def parse_time(time):
    '''Change the time to seconds'''
    # Map each supported unit suffix to its length in seconds.
    seconds_per_unit = {'s': 1, 'm': 60, 'h': 3600, 'd': 86400}
    unit = time[-1]
    if unit not in seconds_per_unit:
        raise SchemaError('the unit of time could only from {s, m, h, d}')
    amount = time[:-1]
    if not amount.isdigit():
        raise SchemaError('time format error!')
    return int(amount) * seconds_per_unit[unit]
def _validate_valid_url(value, caller):
    """Check that *value* is a string matching valid_url_re.

    *caller* names the config entry, used in error messages.
    Returns the value unchanged on success.
    """
    if value is None:
        problem = '{} is not set'.format(caller)
    elif not isinstance(value, str):
        problem = '{} must be strings'.format(caller)
    elif not valid_url_re.search(value):
        problem = '{} must be a valid url'.format(caller)
    else:
        return value
    raise SchemaError('Invalid configuration: ' + problem)
def _validate_mode(value):
    """Validate the MODE configuration value and return it upper-cased."""
    if value is None:
        raise SchemaError('Invalid configuration: \'MODE\' is not set')
    if not isinstance(value, str):
        raise SchemaError('Invalid configuration: \'MODE\' must be a string')
    allowed_modes = {"PRIVATE", "PUBLIC [EU]", "PUBLIC [NAM]", "PUBLIC [APJC]"}
    mode = value.upper()
    if mode not in allowed_modes:
        raise SchemaError(
            'Invalid configuration: \'MODE\' must be one of the following {}'.
            format(allowed_modes))
    return mode
def validate_search_space_content(self, experiment_config):
    '''Validate searchspace content,
    if the searchspace file is not json format or its values does not
    contain _type and _value which must be specified,
    it will not be a valid searchspace file'''
    try:
        # FIX: use a context manager so the file handle is always closed
        # (the original json.load(open(...)) leaked the handle).
        with open(experiment_config.get('searchSpacePath'), 'r') as search_space_file:
            search_space_content = json.load(search_space_file)
    except Exception as e:
        raise SchemaError('searchspace file is not a valid json format! ' + str(e))
    # FIX: validate the content *outside* the try block -- previously the
    # SchemaError raised here was caught by `except Exception` and
    # re-labelled as a JSON-format error, hiding the real problem.
    for value in search_space_content.values():
        if not value.get('_type') or not value.get('_value'):
            raise SchemaError('please use _type and _value to specify searchspace!')
def validate(self, x):
    """Validate that *x* is a list/set/tuple with homogeneous element type.

    Uses self.elem_type when configured, otherwise infers the expected
    type from the data.  Returns x unchanged.

    Raises:
        SchemaError: when x is not a supported sequence or an element has
            the wrong type.
    """
    if not isinstance(x, (list, set, tuple)):
        raise SchemaError("Sequence is not list, tuple nor set", [])
    if x:
        if self.elem_type:
            elem_type = self.elem_type
        else:
            elem_type = self.infer_type_from_data(x)
        if not all(isinstance(x_i, elem_type) for x_i in x):
            # BUG FIX: the message used self.elem_type, which is None when
            # the type was inferred above -- report the type actually checked.
            msg = "Expecting all elements to be {}".format(elem_type)
            raise SchemaError(msg, [])
    return x
def check_institution(d):
    """If the agreement is institution, then we have to have an institution."""
    # NOTE(review): when agreement == 'institution' but no 'institution'
    # key is present, nothing is raised -- the docstring suggests that case
    # should probably fail too; confirm intent before tightening.
    agreement = d['agreement']
    if agreement == 'institution' and 'institution' in d:
        if d['institution'] not in ALL_ORGS:
            raise SchemaError(
                "Institution {!r} isn't in orgs.yaml: {}".format(
                    d['institution'], d))
    if agreement == 'none' and 'institution' in d:
        raise SchemaError("No-agreement should have no institution")
    return True
def validate_class_args(self, class_args, algo_type, builtin_name):
    """Validate classArgs of a builtin algorithm via its registered validator.

    No-op when there is no builtin name or no classArgs to check.
    """
    if not builtin_name or not class_args:
        return
    meta = get_builtin_algo_meta(algo_type + 's', builtin_name)
    # Deliberately `== False`: only an explicit False forbids classArgs.
    if meta and 'accept_class_args' in meta and meta['accept_class_args'] == False:
        raise SchemaError('classArgs is not allowed.')
    validator = create_validator_instance(algo_type + 's', builtin_name)
    if validator is None:
        return
    try:
        validator.validate_class_args(**class_args)
    except Exception as e:
        raise SchemaError(str(e))
def validate_class_args(self, class_args, algo_type, builtin_name):
    """Validate classArgs of a registered algorithm using its validator.

    No-op when there is no builtin name or no classArgs to check.
    """
    if not builtin_name or not class_args:
        return
    meta = get_registered_algo_meta(builtin_name, algo_type + 's')
    # Deliberately `== False`: only an explicit False forbids classArgs.
    if meta and 'acceptClassArgs' in meta and meta['acceptClassArgs'] == False:
        raise SchemaError('classArgs is not allowed.')
    # we know IPC is not there, don't complain
    logging.getLogger('nni.protocol').setLevel(logging.ERROR)
    validator = create_validator_instance(algo_type + 's', builtin_name)
    if validator is None:
        return
    try:
        validator.validate_class_args(**class_args)
    except Exception as e:
        raise SchemaError(str(e))
def validate_exclude_quant_types_quant_bits(data):
    """Require either an 'exclude' key or both 'quant_types' and 'quant_bits'."""
    has_exclude = 'exclude' in data
    has_quant_pair = 'quant_types' in data and 'quant_bits' in data
    if not (has_exclude or has_quant_pair):
        raise SchemaError(
            'Either (quant_types and quant_bits) or exclude must be specified.'
        )
    return True
def get_validator(event_type: str) -> Tuple[str, callable]:
    """Map a celery event type to its category and compiled event schema.

    Returns ("task", ...) or ("worker", ...) depending on the event type.

    Raises:
        SchemaError: for unknown event types.
    """
    if event_type in TASK_EVENT_TYPES:
        return "task", CompiledTaskEventSchema
    if event_type in WORKER_EVENT_TYPES:
        return "worker", CompiledWorkerEventSchema
    raise SchemaError(f"{event_type} is not a valid celery event type!")
def __init__(self, value):
    """Store a percentile value, validating it lies strictly in (0, 100)."""
    numeric = float(value)
    # Chained comparison kept as-is: it also rejects NaN (both bounds fail).
    if not 0 < numeric < 100:
        raise SchemaError("Invalid percentile value")
    self.value = numeric
def json_path(txt):
    """Parse *txt* as a JsonPath expression, wrapping any failure in SchemaError."""
    try:
        logging.debug("validating as json path - '%s'" % txt)
        return jsonpath_ng.parse(txt)
    except Exception as err:
        # Log and surface the parse failure as a schema problem.
        logging.error("Bad JsonPath format: '%s'" % txt)
        raise SchemaError(['Bad JsonPath format: %s' % txt], str(err))
def validate(self, *args, **kwargs):
    """Validate this object's item data and return a fresh Currency.

    Any SchemaError from validation (or construction) is replaced with a
    generic message.
    """
    try:
        validated_item = self.validation_schema.validate(self.to_item())
        return Currency(self.mongo, **validated_item)
    except SchemaError:
        raise SchemaError(
            'Error while validating data or code already exists')
def migrate(settings: dict) -> dict:
    """
    Migration of the settings ``settings`` to version V3.3.1 settings

    :param settings: The settings dict to migrate
    :return: The migrated dict
    """
    # Defaults for keys introduced in V3.3.1 that may be absent from the
    # incoming settings (name -> default value).
    missing_keys = {
        'auto_migrate_settings': True,
        'ldap_tls_cacertdir': "",
        'ldap_tls_reqcert': "hard",
        'ldap_tls_cipher_suite': "",
        'bootloaders_shim_folder': "@@shim_folder@@",
        'bootloaders_shim_file': "@@shim_file@@",
        'bootloaders_ipxe_folder': "@@ipxe_folder@@",
        'syslinux_memdisk_folder': "@@memdisk_folder@@",
        'syslinux_pxelinux_folder': "@@pxelinux_folder@@",
    }
    for key, value in missing_keys.items():
        helper.key_add(helper.Setting(key, value), settings)
    # Validate the migrated settings before normalizing them.
    if not validate(settings):
        raise SchemaError("V3.3.1: Schema error while validating")
    return normalize(settings)
def validate_meta(meta=None, hard_validation=False):
    """
    Validate meta data.

    :param meta: Meta data.
    :type meta: dict

    :param hard_validation: Add extra data validations.
    :type hard_validation: bool

    :return: Validated meta data.
    :rtype: dict
    """
    # Base schema validation: i = validated inputs, e = collected errors.
    i, e = _validate_base_with_schema(meta or {}, depth=2)
    if hard_validation:
        # Deferred imports: the `schema` package and the hard-validation
        # rules are only needed when hard validation is requested.
        from schema import SchemaError
        from .hard import _hard_validation
        for k, v in sorted(sh.stack_nested_keys(i, depth=1)):
            for c, msg in _hard_validation(v, 'meta'):
                # Record each hard-validation failure as a SchemaError under
                # the corresponding nested key in the error tree.
                sh.get_nested_dicts(e, *k)[c] = SchemaError([], [msg])
    if _log_errors_msg(e):
        # Errors were logged: signal failure with the sentinel NONE value.
        return sh.NONE
    return i
def test_skelebot_schema_error(self, mock_yaml, exit_mock, print_mock):
    # When loading skelebot.yaml raises a SchemaError, main() must print a
    # red ERROR line naming the file and the validation message, then exit
    # with status 1.
    mock_yaml.side_effect = SchemaError("Validation Failed")

    sb.main()

    print_mock.assert_called_once_with(Fore.RED + "ERROR" + Style.RESET_ALL + " | skelebot.yaml | Validation Failed")
    exit_mock.assert_called_once_with(1)