Example #1
def validate_args(args):
    """ Validate arguments.
        Checks:
            that the specified file exists and is readable
            that LEVEL is between 1 and 7
    """

    schema = Schema({
        "FILE": Use(open,
                    error="FILE doesn't exist or isn't readable!"),

        "--level": Or(None,
                        And(Use(int), lambda n: 1 <= n <= 7),
                        error="LEVEL should be between 1 and 7"),

        Optional("--help"): Or(True, False),
        })

    try:
        # Don't return the validated args here, just make sure they are valid.
        # Schema will return an object containing an open file object,
        # when we just wanted to make sure the file was readable.
        schema.validate(args)
        return args

    except SchemaError as e:
        exit(e)
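The comment above flags the wart in this approach: Use(open) proves the file is readable, but the validated result then carries an open file handle that nobody closes. A minimal sketch of the same checks without opening the file, assuming an os.access readability test is acceptable (it has the usual check-then-use caveat):

import os
from schema import And, Or, Schema, Use

schema = Schema({
    "FILE": And(os.path.isfile, lambda f: os.access(f, os.R_OK),
                error="FILE doesn't exist or isn't readable!"),
    "--level": Or(None, And(Use(int), lambda n: 1 <= n <= 7),
                  error="LEVEL should be between 1 and 7"),
}, ignore_extra_keys=True)  # lets docopt's remaining keys (e.g. --help) pass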
Example #2
    def __init__(self):
        self.status_ok_schema = Schema({'status': And(str)})
        
        self.error = {"login_syntax" : '{"type":"login", "nick":"4 <= len(str) <= 10", "passwd":"str"}',
                      "register_syntax" : '{"type":"register", "nick":"4 <= len(str) <= 10", "passwd":"str", "email":"str"}',
                      "pub_public_msg_syntax" : '{"type":"public", "nick":"4 <= len(str) <= 10", "token_id":"str", "msg":"str"}',
                      "pub_public_msg_syntax" : '{"type":"public", "nick":"4 <= len(str) <= 10", "token_id":"str", "msg":"str"}'}

        self.user_join_left_schema = Schema({'user_join_left': And(str), 'user_list': And(list), 'user': And(str)})
        
        self.login_ok_schema = Schema({'type': And(str, Use(str.lower), lambda n: n == 'login'),
                                       'nick': And(str, Use(str.lower), lambda n: 4 <= len(n) <= 10),
                                       'token_id': And(str), 'user_list': And(list)})
        self.login_nok_schema = Schema({'type': And(str, Use(str.lower), lambda n: n == 'err_login'),
                                        'msg': And(str)})

        self.register_ok_schema = Schema({'type': And(str, Use(str.lower), lambda n: n == 'register'),
                                          'succ': And(str)})
        self.msg_ok_schema = Schema({'type': And(str, Use(str.lower), lambda n: n == 'public'),
                                     'from_': And(str, Use(str.lower), lambda n: 4 <= len(n) <= 10),
                                     'msg': And(str)})
        self.prv_msg_ok_schema = Schema({'type': And(str, Use(str.lower), lambda n: n == 'private'),
                                         'to_': And(str), 'from_': And(str, Use(str.lower), lambda n: 4 <= len(n) <= 10),
                                         'msg': And(str)})

        self.access_folder_schema = Schema({'type': And(str, Use(str.lower), lambda n: n == 'folder'),
                                            'to_': And(str), 'from_': And(str)})

        self.status_ = Schema({'type': And(str, Use(str.lower), lambda n: n == 'status'),
                               'status': And(str), 'nick': And(str)})

        self.access_folder_valid = Schema({'type': And(str, Use(str.lower), lambda n: n == 'access_folder_valid'),
                                           'from_': And(str, Use(str.lower), lambda n: 4 <= len(n) <= 10),
                                           'to_': And(str), 'passwd': And(str)})
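As a usage sketch (not part of the original class): validating a decoded JSON message against one of these schemas normalizes the case-folded fields and raises SchemaError on malformed input. The message contents below are invented for illustration.

from schema import And, Schema, SchemaError, Use

login_ok_schema = Schema({'type': And(str, Use(str.lower), lambda n: n == 'login'),
                          'nick': And(str, Use(str.lower), lambda n: 4 <= len(n) <= 10),
                          'token_id': And(str), 'user_list': And(list)})

message = {'type': 'LOGIN', 'nick': 'Alice',
           'token_id': 'abc123', 'user_list': ['alice', 'bob']}
try:
    validated = login_ok_schema.validate(message)  # 'type'/'nick' come back lowercased
except SchemaError as e:
    print(e)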
Example #3
def test_json_schema_object_or_array_of_object():
    # Complex test where "test" accepts either an object or an array of that object
    o = {"param1": "test1", Optional("param2"): "test2"}
    s = Schema({"test": Or(o, [o])})
    assert s.json_schema("my-id") == {
        "$schema": "http://json-schema.org/draft-07/schema#",
        "id": "my-id",
        "properties": {
            "test": {
                "anyOf": [
                    {
                        "additionalProperties": False,
                        "properties": {"param1": {}, "param2": {}},
                        "required": ["param1"],
                        "type": "object",
                    },
                    {
                        "type": "array",
                        "items": {
                            "additionalProperties": False,
                            "properties": {"param1": {}, "param2": {}},
                            "required": ["param1"],
                            "type": "object",
                        },
                    },
                ]
            }
        },
        "required": ["test"],
        "additionalProperties": False,
        "type": "object",
    }
Example #4
def validate_args(args):
    """
    Validates command line arguments
    """
    schema = Schema({
        '--user': And(str, len, Use(str.lower), error="missing or invalid user"),
        '--repo': And(str, len, Use(str.lower), error="missing or invalid repo"),
        '--type': And(str, len, Use(str.lower), lambda s: s in ('deb', 'rpm'), error="type must be one of deb or rpm"),
        '--distro': And(str, len, Use(str.lower), error="missing or invalid distro"),
        '--distro_version': And(str, len, Use(str.lower), error="missing or invalid distro_version"),
        '--arch': And(str, len, Use(str.lower), error="missing or invalid arch"),
        '--pkg_name': And(str, len, Use(str.lower), error="missing or invalid pkg_name"),
        '--filename': And(str, len, Use(str.lower), error="missing or invalid filename"),
        '--timeout': And(Use(float), lambda f: f > 0, lambda f, i=float(args['--poll_interval']): f > i,
                         error="timeout must be a number greater than 0, and greater than poll_interval (default 30)"),
        '--poll_interval': And(Use(float), lambda f: f > 0, error="poll_interval must be a number greater than 0"),
        '--page_interval': And(Use(float), lambda f: f > 0, error="page_interval must be a number greater than 0"),
        '--help': bool,
        '--version': bool,
        '--log-level': And(str, len, Use(str.upper),
                           lambda s: s in ('DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'),
                           error="invalid log level specified")
    })

    try:
        return schema.validate(args)
    except SchemaError as ex:
        utils.abort("Invalid argument: {}. ".format(ex.code))
Example #5
def test_generate_signature_schema():
    """Test generate_signature_schema() function."""
    def f(a, b, camelCase=True, none=None, quantity=3.0*unit.angstroms):
        pass

    f_schema = generate_signature_schema(f)
    assert len(f_schema) == 3
    for k in f_schema.keys():
        assert isinstance(k, Optional)

    # Remove Optional() marker for comparison
    stripped_schema = {k._schema: v for k, v in f_schema.items() if k._schema != 'quantity'}
    assert {'camel_case': bool, 'none': object} == stripped_schema

    # Check conversion
    f_schema = Schema(f_schema)
    assert f_schema.validate({'quantity': '5*angstrom'}) == {'quantity': 5*unit.angstrom}

    # Check update
    optional_instance = Optional('camel_case')
    updated_schema = generate_signature_schema(f, update_keys={'none': float, optional_instance: int},
                                               exclude_keys={'quantity'})
    assert len(updated_schema) == 2
    assert updated_schema['none'] == float
    assert updated_schema[optional_instance] == int
Example #6
def run(raw_args):
    '''
    Validates the arguments by converting the VCF files into lists of VCFRecords, converting
    threshold to int, and calling validate_value on <VALUE>.
    Returns the command string for dispatching, the new args dictionary after validation, and the first
    filename so that file can later be used for a vcf.VCFWriter object.
    :param dict raw_args: the result of docopt(__doc__)
    :return str, dict, str: the command string, the new args dict, and the first filename.
    '''
    commands = ['vcall', 'filter', 'diff', 'stat', 'statdiff', 'exists', 'ambiguous']
    schema_dict = {
        '<FILE1>': Use(validate_vcf_file),
        Optional('<FILE2>'): Use(validate_vcf_file),
        Optional('<VALUE>'): Use(validate_value),
        '--count': bool,
        '--threshold': Use(int, error='Threshold must be integer'),
        Optional('--tag'): lambda t: True,
        '--csv': bool
        # tags.__contains__  # error='Tag was not valid, should be one of {0}'.format(' '.join(tags))
    }
    schema_dict.update(dict((arg, bool) for arg in commands + ops))
    _args = Schema(schema_dict).validate(raw_args)
    cmd_str = [k for (k, arg) in _args.items() if k in commands and arg][0]
    filename = raw_args['<FILE1>']
    return cmd_str, _args, filename
Example #7
def parse_args():
    opts = docopt(__doc__, version='.'.join(VERSION))
    schema = Schema({
        Optional('PACK_TYPE'):
        Or(None, lambda s: s.lower() in PACKS_TYPE,
           error="PACK_TYPE should be either %s" % (', '.join(PACKS_TYPE))),
        Optional('--config'):
        Or(None, Use(open),
           error="--config must be a readable file"),
        Optional('--attempts'):
        And(Use(int), lambda n: n > 0,
            error='--attempts must be a strictly positive integer'),
        Optional('--score'):
        And(Use(int),
            error='--score must be an integer'),
        Optional('--threshold'):
        And(Use(int), lambda n: n >= 0,
            error='--threshold must be a positive integer'),
        Optional('--low-threshold'):
        And(Use(int), lambda n: n > 0,
            error='--low-threshold must be a strictly positive integer'),
        Optional('--wait'):
        And(Use(int), lambda n: n >= 0,
            error='--wait must be a positive integer'),
        object: object,
    })
    opts = schema.validate(opts)
    opts['PACK_TYPE'] = opts['PACK_TYPE'].lower() if opts['PACK_TYPE'] else "wild"
    if opts['--config']:
        config = simplejson.loads(opts['--config'].read())
        opts.update(config)
    return opts
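The object: object entry at the end is what keeps the unvalidated docopt keys from tripping the schema: object matches any key and any value, so unknown pairs pass through untouched. A small self-contained demo:

from schema import Schema, Use

s = Schema({'--attempts': Use(int), object: object})
assert s.validate({'--attempts': '3', '--other': True}) == {'--attempts': 3, '--other': True}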
Example #8
def test_optional_key_convert_failed_randomly_while_with_another_optional_object():
    """
    In this test, created_at string "2015-10-10 00:00:00" is expected to be converted
    to a datetime instance.
        - it works when the schema is

            s = Schema({
                    'created_at': _datetime_validator,
                    Optional(basestring): object,
                })

        - but when wrapping the key 'created_at' with Optional, it fails randomly
    :return:
    """
    import datetime
    fmt = '%Y-%m-%d %H:%M:%S'
    _datetime_validator = Or(None, Use(lambda i: datetime.datetime.strptime(i, fmt)))
    # FIXME: iterate enough times to reliably expose the nondeterministic failure
    for i in range(1024):
        s = Schema({
            Optional('created_at'): _datetime_validator,
            Optional('updated_at'): _datetime_validator,
            Optional('birth'): _datetime_validator,
            Optional(basestring): object,
        })
        data = {
            'created_at': '2015-10-10 00:00:00'
        }
        validated_data = s.validate(data)
        # is expected to be converted to a datetime instance, but fails randomly
        # (most of the time)
        assert isinstance(validated_data['created_at'], datetime.datetime)
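The flakiness described in the docstring comes down to key-matching order: both Optional('created_at') and Optional(basestring) can match the key, and in affected schema versions same-priority candidates were tried in an unspecified order, so the generic object branch sometimes won and the value skipped the datetime validator. Current releases try comparable (literal) keys before type keys, so the equivalent Python 3 sketch below passes deterministically:

import datetime
from schema import Optional, Schema, Use

fmt = '%Y-%m-%d %H:%M:%S'
s = Schema({
    Optional('created_at'): Use(lambda i: datetime.datetime.strptime(i, fmt)),
    Optional(str): object,  # generic fallback; literal keys are matched first
})
assert isinstance(s.validate({'created_at': '2015-10-10 00:00:00'})['created_at'],
                  datetime.datetime)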
Example #9
    def _fit_first(self, first):
        # Check for a tuples of numbers, strings or "sequences".
        schema = Schema((int, float, basestring, SequenceValidator()))
        schema.validate(first)
        if not first:
            raise ValueError("Cannot fit with no empty features")

        # Build validation schema using the first data point
        self.indexes = {}  # Tuple index to matrix column mapping
        self.reverse = []  # Matrix column to tuple index mapping
        self.schema = [None] * len(first)
        self.str_tuple_indexes = []
        for i, data in enumerate(first):
            if isinstance(data, (int, float)):
                type_ = Use(float)  # ints and floats are all mapped to float
                self._add_column(i, None)
            elif isinstance(data, basestring):
                type_ = basestring  # One-hot encoded indexes are added last
                self.str_tuple_indexes.append(i)
            else:
                type_ = SequenceValidator(data)
                for j in xrange(type_.size):
                    self._add_column(i, j)
            self.schema[i] = type_
        assert None not in self.schema
        self.schema = tuple(self.schema)
        self.validator = TupleValidator(self.schema)
Example #10
def main():
    """
    Main function
    """
    try:
        name = indexfile.__name__
        version = indexfile.__version__
        log = indexfile.getLogger(__name__)

        # local variables
        index = None

        # load commands
        commands = load_commands()
        helpstr = __doc__ % (name, name) + get_commands_help(commands)

        # create validation schema
        sch = Schema({
            'index': Or(None,
                        And(Or('-', 'stdin'),
                            Use(lambda x: sys.stdin)),
                        open),
            Optional('format'): open,
            Optional('loglevel'): And(str,
                                      Use(str.lower),
                                      Or('error',
                                         'warn',
                                         'info',
                                         'debug')),
            '<command>': Command(commands=commands),
            str: object
        })

        # parse args and remove dashes
        args = docopt(helpstr, version="%s v%s" % (name, version), options_first=True)
        args = dict([(k.replace('-', ''), v) for k, v in args.iteritems()])

        # validate args
        args = sch.validate(args)

        # deal with 'help' command
        if args.get('<command>') == 'help':
            docopt(helpstr, version="%s v%s" % (name, version), argv=['--help'])

        # load the index and delegate command
        config = load_config(os.getcwd(), args)

        indexfile.setLogLevel(config.get('loglevel'))
        index = open_index(config)

        command_ = get_command(args.get('<command>'), commands)
        argv = [name, command_] + args['<args>']
        sys.argv = argv
        module_ = "indexfile.cli.indexfile_%s" % command_
        runpy.run_module(module_,
                         run_name="__main__",
                         init_globals={'index': index, 'command': '{0} {1}'.format(name, command_)})

    except KeyboardInterrupt, e:
        sys.exit(1)
Example #11
    def _validate_neighbor(self):
        """Validate neighbor against Schema."""

        neighbor_schema = Schema({
            'remote_ip': basestring,
            'remote_as': And(basestring, lambda n: 0 <= int(n) <= 4294967295),
            Optional('password'): basestring,
            Optional('maximum_hops'): And(basestring,
                                          lambda n: 1 <= int(n) <= 255),
            Optional('timer_keepalive'): And(basestring,
                                             lambda n: 1 <= int(n) <= 65535),
            Optional('timer_timeout'): And(basestring,
                                           lambda n: 3 <= int(n) <= 65536),
            Optional('description'): basestring,
            Optional('soft_reconfiguration'): bool,
            Optional('community'): bool,
            Optional('remove_private_as'): bool,
            Optional('next_hop_self'): bool
        })

        try:
            neighbor_schema.validate(self.neighbor)
        except SchemaWrongKeyError:
            # It doesn't matter if neighbor dict has other keys besides these.
            pass
        except SchemaError as e:
            raise InvalidNeighborException(e.code)
Example #12
def main(args=None):
    # The console script created by setuptools takes the cwd off the path.
    sys.path.insert(0, os.getcwd())

    scheme = Schema({'--debug': bool,
                     '--delim': str,
                     '--demo': bool,
                     '--help': bool,
                     '--float': Or(None, str),
                     '--from': Or(None, Use(lambda x: list(map(int, x.split(','))))),
                     '--nan': Or(None, str),
                     '--next': Or(None, str),
                     '--no-index-label': bool,
                     '--not-finished': bool,
                     '-o': Or(None, str),
                     '--skip': Or(None, Use(lambda x: x.split(','))),
                     '--skip-parents': bool,
                     '--version': bool,
                     '<data-file>': Or(None, str),
                     '<exp-file>': Or(lambda x: x is None, os.path.exists, error='Invalid <exp-file>'),
                     '<level>': [str],
                     '<n>': [And(Use(int), lambda n: n > 0)],
                     'export': bool,
                     'resume': bool,
                     'run': bool,
                     })

    options = scheme.validate(docopt(__doc__, argv=args, version=__version__))

    if options['--debug']:
        logging.basicConfig(level=logging.DEBUG)

    if options['run'] or options['resume']:
        exp = Experiment.load(options['<exp-file>'])
        kwargs = {'demo': options['--demo'],
                  'parent_callbacks': not options['--skip-parents'],
                  'resume': options['resume'],
                  'session_options': options['-o'],
                  'from_section': options['--from'],
                  }

        if options['--next']:
            kwargs.update(section_obj=exp.find_first_not_run(
                options['--next'], by_started=not options['--not-finished']))

        elif options['resume'] and not options['<n>']:
            kwargs.update(section_obj=exp.find_first_partially_run(options['<level>'][0]))

        else:
            kwargs.update(zip(options['<level>'], options['<n>']))

        run_experiment_section(exp, **kwargs)

    elif options['export']:
        export_experiment_data(options['<exp-file>'], options['<data-file>'],
                               float_format=options['--float'],
                               skip_columns=options['--skip'],
                               index_label=False if options['--no-index-label'] else None,
                               na_rep=options['--nan'],
                               sep=options['--delim'])
Example #13
def main():
    args = docopt(__doc__)

    schema = Schema({
        '--help': bool,
        '--headless': bool,
        '--width': Use(int),
        '--height': Use(int),
        '<save_path>': str,
    })

    try:
        args = schema.validate(args)
    except SchemaError as e:
        exit(e)

    model_path = 'models'

    loadPrcFileData('', 'window-title Babble')
    loadPrcFileData('', 'win-size %d %d' % (args['--width'], args['--height']))
    loadPrcFileData('', 'audio-library-name null') # suppress warning
    loadPrcFileData('', 'model-path %s' % model_path)
    loadPrcFileData('', 'bullet-filter-algorithm groups-mask')

    if args['--headless']:
        loadPrcFileData('', 'window-type none')

    app = App(args)
    app.run()
Example #14
    def is_valid(self):
        """Checks if the input dictionary contains all valid types

        Checks the __dict__ attribute and ensures it follows the correct
        schema

        Args:
            None

        Returns:
            A Boolean if dictionary follows schema
        """

        schema = Schema({
            'region': unicode,
            'subnet': unicode,
            'purchase_type': And(unicode, lambda x: x in ["on_demand", "spot"]),
            'image': unicode,
            'price': unicode,
            'num_instances': int,
            'key_name': unicode,
            'security_group_ids': list,
            'instance_type': unicode,
            'tag_name': unicode,
            'vol_size': int,
            'bdm': dict})

        try:
            schema.validate(self.__dict__)
            return True
        except Exception as exc:
            print exc
            print "Invalid instance template"
            return False
Example #15
def optparser(argv=sys.argv[1:]):
    __usage__ = """
Bayesian analysis of ITC data. Uses MicroCal .itc files, or custom format .yml files for analysing experiments.

Usage:
  ITC.py <datafiles>... [-w <workdir> | --workdir=<workdir>] [-n <name> | --name=<name>] [-q <file> | --heats=<file>] [-i <ins> | --instrument=<ins> ] [-v | -vv | -vvv] [-r <file> | --report=<file>] [ -l <logfile> | --log=<logfile>]
  ITC.py mcmc <datafiles>...  (-m <model> | --model=<model>) [-w <workdir> | --workdir=<workdir>] [ -r <receptor> | --receptor=<receptor>] [-n <name> | --name=<name>] [-q <file> | --heats=<file>] [-i <ins> | --instrument=<ins> ] [ -l <logfile> | --log=<logfile>] [-v | -vv | -vvv] [--report=<file>] [options]
  ITC.py (-h | --help)
  ITC.py --license
  ITC.py --version

Options:
  -h, --help                            Show this screen
  --version                              Show version
  --license                              Show license
  -l <logfile>, --log=<logfile>          File to write logs to. Will be placed in workdir.
  -v,                                    Verbose output level. Multiple flags increase verbosity.
  <datafiles>                            Datafile(s) to perform the analysis on, .itc, .yml
  -w <workdir>, --workdir=<workdir>      Directory for output files                      [default: ./]
  -r <receptor>, --receptor=<receptor>   The name of the receptor for a Competitive Binding model.
  -n <name>, --name=<name>               Name for the experiment. Will be used for output files. Defaults to inputfile name.
  -i <ins>, --instrument=<ins>           The name of the instrument used for the experiment. Overrides .itc file instrument.
  -q <file>, --heats=<file>              Origin format integrated heats file. (From NITPIC use .dat file)
  -m <model>, --model=<model>            Model to use for mcmc sampling                  [default: TwoComponent]
  --nfit=<n>                             No. of iteration for maximum a posteriori fit   [default: 20000]
  --niters=<n>                           No. of iterations for mcmc sampling             [default: 2000000]
  --nburn=<n>                            No. of Burn-in iterations for mcmc sampling     [default: 500000]
  --nthin=<n>                            Thinning period for mcmc sampling               [default: 250]
  --report=<file>                        Output file with summary in markdown
"""
    arguments = docopt(__usage__, argv=argv, version='ITC.py, pre-alpha')
    schema = Schema({'--heats': Or(None, And(str, os.path.isfile, Use(os.path.abspath))),  # str, verify that it exists
                     '--help': bool,  # True or False are accepted
                     '--license': bool,  # True or False are accepted
                     '-v': And(int, lambda n: 0 <= n <= 3),  # integer between 0 and 3
                     '--model': And(str, lambda m: m in known_models),  # str and found in this dict
                     '--nfit': And(Use(int), lambda n: n > 0),
                     # Convert str to int, make sure that it is larger than 0
                     '--nburn': And(Use(int), lambda n: n > 0),
                     # Convert str to int, make sure that it is larger than 0
                     '--niters': And(Use(int), lambda n: n > 0),
                     # Convert str to int, make sure that it is larger than 0
                     '--nthin': And(Use(int), lambda n: n > 0),
                     # Convert str to int, make sure that it is larger than 0
                     '--name': Or(None, And(str, len)),  # Not an empty string
                     '--instrument': Or(None, And(str, lambda m: m in known_instruments)),
                     # None, or str and found in this dict
                     '--version': bool,  # True or False are accepted
                     '--receptor': str,  # str
                     '--workdir': str,  # str
                     # list and ensure it contains existing files
                     '<datafiles>': And(list, lambda inpfiles : [os.path.isfile(inpfile) for inpfile in inpfiles], Use(lambda inpfiles: [os.path.abspath(inpfile) for inpfile in inpfiles])),
                     'mcmc': bool,  # True or False are accepted
                     '--report': Or(None, Use(lambda f: open(f, 'w'))),
                     # Don't use, or open file with writing permissions
                     '--log': Or(None, str),  # Don't use, or str
                    })

    return schema.validate(arguments)
Example #16
    def __init__(self, input_file=None, output_file=None, dfxml_file=None, report_file=None,
                 commit=False, ignore_patterns=[], key=None, rules=[]):
        #  Validate configuration
        from schema import Schema, Optional, Or, Use, And, SchemaError
        schema = Schema({
            'input_file': Use(lambda f: open(f, 'r'), error='Cannot read the input file'),
            Optional('output_file'):
            Or(None,
               Use(lambda f: open(f, 'w'), error='Cannot write to the output file')),
            Optional('dfxml_file'):
            Or(None,
               Use(lambda f: open(f, 'r'), error='Cannot read DFXML file')),
            Optional('report_file'):
            Or(None,
               Use(lambda f: open(f, 'w'), error='Cannot write to the report file')),
            'commit': Or(True, False),
            'ignore_patterns':
                Use(lambda f: re.compile(convert_fileglob_to_re('|'.join(f))),
                    error='Cannot compile unified ignore regex'),
            'key': Or(None, str),
            'rules': And([(redact_rule, redact_action)], lambda f: len(f) > 0)})
        try:
            kwargs = {
                'input_file': input_file,
                'output_file': output_file,
                'dfxml_file': dfxml_file,
                'report_file': report_file,
                'commit': commit,
                'ignore_patterns': ignore_patterns,
                'key': key,
                'rules': rules
            }
            self.conf = schema.validate(kwargs)
        except SchemaError as e:
            logging.warning('The redact configuration did not validate:')
            exit(e)
        if self.conf['commit'] and self.conf['output_file'] is None:
            logging.error('An output file is required when COMMIT is on.')
            exit(1)
        # TODO Check input and output are not same file

        logging.debug('Configuration:\n%s' % self.conf)

        # Print rules
        logging.debug(json.dumps([(x.line,
                                   x.__class__.__name__,
                                   y.__class__.__name__,
                                   x.lgpattern if hasattr(x, 'lgpattern') else '')
                                  for x, y in self.conf['rules']],
                                 indent=4))

        self.input_file = self.conf['input_file']
        from os import path
        self.image_size = path.getsize(self.input_file.name)
        self.output_file = self.conf['output_file']
        self.report_file = self.conf['report_file']
        self.dfxml_file = self.conf['dfxml_file']
        self.commit = self.conf['commit']
        self.configure_report_logger()
Example #17
def validate_args(args):
    schema = Schema({
        '--date': Or(None, lambda n: re.match(r'\d{8}', n) is not None, error='--date should be date_format=yyyymmdd'),
        '--help': bool,
        '--version': bool,
    })

    return schema.validate(args)
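One caveat: re.match only anchors at the start of the string, so a nine-digit value like '201801011' would also pass. A stricter Python 3 sketch using re.fullmatch:

import re
from schema import Or, Schema

date_schema = Schema(Or(None, lambda n: re.fullmatch(r'\d{8}', n) is not None,
                        error='--date should be date_format=yyyymmdd'))
assert date_schema.validate('20180101') == '20180101'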
Example #18
def validate_descriptor(descriptor):
    "return True if the given descriptor is correctly structured."
    try:
        descr_schema = Schema(
            {lambda v: v in ['files', 'tar-gzipped', 'mysql-database', 'postgresql-database']: [str]})
        return descr_schema.validate(descriptor)
    except SchemaError as err:
        raise AssertionError(str(err))
Example #19
def checkSchema(schemadict, args):
    '''Pythonic schema check.'''
    schema = Schema(schemadict)
    try:
        args = schema.validate(args)
    except SchemaError as e:
        exit(log(e, error=True))
    return args
Example #20
    def validate(self, req):
        schema = Schema(
            {
                'code': And(str, len, error='Code is required')
            }
        )

        return schema.validate(req)
Example #21
def main():
    args = docopt(__doc__)

    # validating arguments
    schema = Schema({
        '-r': And(Use(int), lambda n: 10 <= n <= 95,
                      error='-r must be integer and 10 <= N <= 95'),
        '-R': And(boto.ec2.get_region,
                      error='-R must be a valid region name'),
        '-a': And(Use(int), lambda s: s >= 60,
                      error='-a must be integer and S >= 60'),
        '-e': And(Use(int), lambda n: n >= 1,
                      error='-e must be integer and N >= 1'),
        str: object})

    try:
        args = schema.validate(args)
    except SchemaError as e:
        exit(e)

    # setting arguments
    global DEBUG, RATIO, REGION, SNS, PREFIX, ALARM_PERIOD, EVALUATION_PERIOD

    DEBUG = args['-d']
    RATIO = args['-r'] / 100.0
    REGION = args['-R']
    SNS = args['-s']
    PREFIX = args['-p']
    ALARM_PERIOD = args['-a']
    EVALUATION_PERIOD = args['-e']

    ddb_tables = get_ddb_tables()
    aws_cw_connect = boto.ec2.cloudwatch.connect_to_region(REGION)

    (alarms_to_create,
     alarms_to_update) = get_ddb_alarms_to_create(ddb_tables, aws_cw_connect)

    # creating new alarms
    if alarms_to_create:
        if DEBUG:
            for alarm in alarms_to_create:
                print 'DEBUG CREATED:', alarm
        else:
            print 'New DynamoDB table(s) Alarms created:'
            for alarm in alarms_to_create:
                aws_cw_connect.create_alarm(alarm)
                print alarm

    # updating existing alarms
    if alarms_to_update:
        if DEBUG:
            for alarm in alarms_to_update:
                print 'DEBUG UPDATED:', alarm
        else:
            print 'DynamoDB table(s) Alarms updated:'
            for alarm in alarms_to_update:
                aws_cw_connect.update_alarm(alarm)
                print alarm
Example #22
def test_issue_9_prioritized_key_comparison_in_dicts():
    # http://stackoverflow.com/questions/14588098/docopt-schema-validation
    s = Schema({'ID': Use(int, error='ID should be an int'),
                'FILE': Or(None, Use(open, error='FILE should be readable')),
                Optional(str): object})
    data = {'ID': 10, 'FILE': None, 'other': 'other', 'other2': 'other2'}
    assert s.validate(data) == data
    data = {'ID': 10, 'FILE': None}
    assert s.validate(data) == data
Example #23
def test_issue_56_cant_rely_on_callables_to_have_name():
    s = Schema(methodcaller('endswith', '.csv'))
    assert s.validate('test.csv') == 'test.csv'
    with SE:
        try:
            s.validate('test.py')
        except SchemaError as e:
            assert "operator.methodcaller" in e.args[0]
            raise
Example #24
def validate(optdict):
    result={}
    argdict = clicommonargs.argvalidators
    #extract sub argdict here
    myvalidables = ['-c','-r','--hltpath','--hltconfig',str]
    argdict = dict((k,v) for k,v in clicommonargs.argvalidators.iteritems() if k in myvalidables)
    schema = Schema(argdict)
    result = schema.validate(optdict)
    return result
Example #25
def test_exception_handling_with_bad_validators():
    BadValidator = namedtuple("BadValidator", ["validate"])
    s = Schema(BadValidator("haha"))
    with SE:
        try:
            s.validate("test")
        except SchemaError as e:
            assert "TypeError" in e.args[0]
            raise
Example #26
def test_complex():
    s = Schema({'<file>': And([Use(open)], lambda l: len(l)),
                '<path>': os.path.exists,
                Optional('--count'): And(int, lambda n: 0 <= n <= 5)})
    data = s.validate({'<file>': ['./LICENSE-MIT'], '<path>': './'})
    assert len(data) == 2
    assert len(data['<file>']) == 1
    assert data['<file>'][0].read().startswith('Copyright')
    assert data['<path>'] == './'
Example #27
def test_missing_keys_exception_with_non_str_dict_keys():
    s = Schema({And(str, Use(str.lower), 'name'): And(str, len)})
    with SE: s.validate(dict())
    with SE:
        try:
            Schema({1: 'x'}).validate(dict())
        except SchemaMissingKeyError as e:
            assert e.args[0] == "Missing keys: 1"
            raise
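The And(...) key in the first schema both validates and transforms: the key must be a string whose lowercased form equals 'name', and the transformed key is what lands in the validated result. A small demo of that behavior:

from schema import And, Schema, Use

s = Schema({And(str, Use(str.lower), 'name'): And(str, len)})
assert s.validate({'NAME': 'Grace'}) == {'name': 'Grace'}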
Example #28
def test_issue_56_cant_rely_on_callables_to_have_name():
    s = Schema(methodcaller("endswith", ".csv"))
    assert s.validate("test.csv") == "test.csv"
    with SE:
        try:
            s.validate("test.py")
        except SchemaError as e:
            assert "operator.methodcaller" in e.args[0]
            raise
Example #29
def validate(optdict):
    result={}
    argdict = clicommonargs.argvalidators
    #extract sub argdict here
    myvalidables = ['-c','-n','-f','-r','-i','-o','--amodetag','-b','--beamenergy','--datatag','--begin','--end','--output-style',str]
    argdict = dict((k,v) for k,v in clicommonargs.argvalidators.iteritems() if k in myvalidables)
    schema = Schema(argdict)
    result = schema.validate(optdict)
    return result
Example #30
	def set_schema_props(self, schema_file):
		
		schema = Schema(schema_file)
		
		self.schema_version = schema.version
		self.simple_elements = schema.get_element_list("complexType", "simpleAddressType")
		self.detail_elements = schema.get_element_list("complexType", "detailAddressType")
		self.element_list = schema.get_element_list("element","vip_object")
		self.elem_fields = self.get_fields(schema, self.element_list)
Example #31
PARAM_SCHEMA = {
    Optional("notification_endpoint"): str,
    Optional("schedule"): str,
    Optional("restart_execution_on_update"): bool,
}

AWS_ACCOUNT_ID_REGEX_STR = r"\A[0-9]{12}\Z"
AWS_ACCOUNT_ID_SCHEMA = Schema(
    And(
        Or(int, str), Use(str),
        Regex(
            AWS_ACCOUNT_ID_REGEX_STR,
            error=(
                "The specified account id is incorrect. "
                "This typically happens when you specify the account id as a "
                "number, while the account id starts with a zero. If this is "
                "the case, please wrap the account id in quotes to make it a "
                "string. An AWS Account Id is a number of 12 digits, which "
                "should start with a zero if the Account Id has a zero at "
                "the start too. "
                "The number shown to not match the regular expression could "
                "be interpreted as an octal number due to the leading zero. "
                "Therefore, it might not match the account id as specified "
                "in the deployment map."))))

# CodeCommit Source
CODECOMMIT_SOURCE_PROPS = {
    "account_id": AWS_ACCOUNT_ID_SCHEMA,
    Optional("repository"): str,
    Optional("branch"): str,
    Optional("poll_for_changes"): bool,
Example #32
str_or_empty = Or(str, None)

if __name__ == '__main__':
    args = docopt(__doc__, version='My CLI command 0.1.0')

    schema = Schema({
        '--boolean-param': bool,
        '--help': bool,
        '--named-param': str_or_empty,
        '--param-array': list_with_str_content,
        '--repeatable-param': non_negative_int,
        '--that_m': bool,
        '--that_o': bool,
        '--this_m': bool,
        '--this_o': bool,
        '--version': bool,
        '-v': non_negative_int,
        '<argv>': list_with_str_content,
        '<in-array1>': list_with_str_content,
        '<in-array2>': list_with_str_content,
        '<in-array3>': list_with_str_content,
        '<in-value1>': str_or_empty,
        '<in-value2>': str_or_empty,
        'action1': bool,
        'action2': bool
    })

    try:
        args = schema.validate(args)
    except SchemaError as e:
        exit(e)
Example #33
def bayesitc_mcmc_parser(argv=sys.argv[1:]):
    __usage__ = """Analyze ITC data using Markov chain Monte Carlo (MCMC). Uses MicroCal .itc files, or custom format .yml files for modeling experiments.
    When running the program you can select one of two options:

    competitive
      A competitive binding model. Requires multiple experiments to be specified.

    twocomponent
      A twocomponent binding model. Analyzes only a single experiment

    Usage:
      bayesitc_mcmc.py twocomponent <datafile> <heatsfile> [-v | -vv | -vvv] [--cc=<c_cell>] [--cs=<c_syringe> ] [--dc=<dc_cell>] [--ds=<dc_syringe>] [options]
      bayesitc_mcmc.py competitive (<datafile> <heatsfile>)... (-r <receptor> | --receptor <receptor>) [-v | -vv | -vvv] [options]
      bayesitc_mcmc.py (-h | --help)
      bayesitc_mcmc.py --license
      bayesitc_mcmc.py --version

    Options:
      -h, --help                             Show this screen
      --version                              Show version
      --license                              Show license
      -l <logfile>, --log=<logfile>          File to write logs to. Will be placed in workdir.
      --cc <c_cell>                          Concentration of component in cell in mM. Defaults to value in input file
      --cs <c_syringe>                       Concentration of component in syringe in mM. Defaults to value in input file
      --dc <dc_cell>                         Relative uncertainty in cell concentration      [default: 0.1]
      --ds <dc_syringe>                      Relative uncertainty in syringe concentration   [default: 0.1]
      -v,                                    Verbose output level. Multiple flags increase verbosity.
      -w <workdir>, --workdir <workdir>      Directory for output files                      [default: ./]
      -r <receptor>, --receptor <receptor>   The name of the receptor for a competitive binding model.
      -n <name>, --name <name>               Name for the experiment. Will be used for output files. Defaults to inputfile name.
      -i <ins>, --instrument <ins>           The name of the instrument used for the experiment. Overrides .itc file instrument.
      --nfit=<n>                             No. of iteration for maximum a posteriori fit   [default: 20000]
      --niters=<n>                           No. of iterations for mcmc sampling             [default: 2000000]
      --nburn=<n>                            No. of Burn-in iterations for mcmc sampling     [default: 500000]
      --nthin=<n>                            Thinning period for mcmc sampling               [default: 500]
"""
    arguments = docopt(__usage__,
                       argv=argv,
                       version='bayesitc_mcmc.py, pre-alpha')
    schema = Schema({
        '--help':
        bool,  # True or False are accepted
        '--license':
        bool,  # True or False are accepted
        # integer between 0 and 3
        '-v':
        And(int, lambda n: 0 <= n <= 3),
        # str and found in this dict
        'twocomponent':
        bool,
        'competitive':
        bool,
        '--nfit':
        And(Use(int), lambda n: n > 0),
        # Convert str to int, make sure that it is larger than 0
        '--nburn':
        And(Use(int), lambda n: n > 0),
        # Convert str to int, make sure that it is larger than 0
        '--niters':
        And(Use(int), lambda n: n > 0),
        # Convert str to int, make sure that it is larger than 0
        '--nthin':
        And(Use(int), lambda n: n > 0),
        # Convert str to int, make sure that it is larger than 0
        '--name':
        Or(None, And(str, len)),  # Not an empty string
        '--instrument':
        Or(None, And(str, lambda m: m in known_instruments)),
        # None, or str and found in this dict
        '--version':
        bool,  # True or False are accepted
        '--receptor':
        Or(None, str),  # str or None
        '--workdir':
        str,  # str
        # str and ensure file exists
        # list and ensure it contains existing files
        '<datafile>':
        And(
            list,
            lambda inpfiles: [os.path.isfile(inpfile) for inpfile in inpfiles],
            Use(lambda inpfiles:
                [os.path.abspath(inpfile) for inpfile in inpfiles])),
        # list and ensure it contains existing files
        '<heatsfile>':
        And(
            list,
            lambda inpfiles: [os.path.isfile(inpfile) for inpfile in inpfiles],
            Use(lambda inpfiles:
                [os.path.abspath(inpfile) for inpfile in inpfiles])),
        # Don't use, or open file with writing permissions
        '--log':
        Or(None, str),  # Don't use, or str
        '--cc':
        Or(None,
           And(Use(float),
               lambda n: n > 0.0)),  # Not specified, or a float greater than 0
        '--cs':
        Or(None, And(Use(float),
                     lambda n: n > 0.0)),  # Not specified, or a float
        '--dc':
        And(Use(float), lambda n: n > 0.0),  # a float greater than 0
        '--ds':
        And(Use(float), lambda n: n > 0.0),  # a float greater than 0
    })

    return schema.validate(arguments)
Example #34
def _pay_check(kwargs, call, operator):
    """
    Initiate a payment request: fetch a WeChat/Alipay payment QR code.
        1. Update f_shop.flow_top_up.
        2. Append to aeolus.call's transact_list [ {transact_num, cash, number_list, code_url, trade_no} ].
        Note: the callback side must, keyed on transact_num,
            update flow_top_up, flow, and flow_statistics;
            update trade_no in aeolus.call.transact_list;
            update the statuses of the waybills from number_list in aeolus.express.
    :param kwargs:
    :param call:
    :param operator:
    :return:
    """
    kwargs = Schema({
        'cash': schema_float_2,
        'number_list': [schema_unicode],
        "pay_type": schema_unicode,  # WXPAY/ALIPAY
    }).validate(kwargs)
    # 0.1 The requested payment amount must be greater than 0
    if kwargs['cash'] <= 0:
        raise ValueError('The total payment amount requested must be greater than 0 yuan')
    # 0.2 Cross-check the waybill information
    cash = Express.objects(
        number__in=kwargs['number_list'],  # the waybills passed in
        assignee__id=operator['id'],  # waybills claimed or filled in by this courier
        creator__id=call.shop_id,  # waybills belonging to this merchant
        status__sub_status=ExprState.SUB_STATUS_PRE_CREATED,  # none may already have a pending payment request (PRE_CREATED.PRE_CREATED)
        node__node_n__tel={
            "$exists": True,
            "$ne": ""
        },  # recipient info has been filled in for all of them
        fee__fh={
            "$exists": True,
            "$ne": None
        }).aggregate_sum('fee.fh')
    if cash == 0:
        raise ValueError("Waybill validation failed, please refresh the page and try again")
    elif round(cash, 2) != kwargs['cash']:
        raise ValueError("Waybill and fee validation failed; try refreshing the page or paying per waybill")

    # 1. Validation succeeded: ask DAS-shop to record the payment and send WeChat a NATIVE-type pre-transaction request
    if kwargs['pay_type'] == 'WXPAY':
        ret = yield shop_wxpay_code_url(call.shop_id, call.shop_name,
                                        call.shop_tel, kwargs['cash'])
    elif kwargs['pay_type'] == 'ALIPAY':
        ret = yield shop_alipay_code_url(call.shop_id, call.shop_name,
                                         call.shop_tel, kwargs['cash'])
    else:
        logging.error('pay_type[%s] should be either WXPAY or ALIPAY.' %
                      kwargs['pay_type'])
        raise ValueError('Unknown payment type')
    # 2. Append to aeolus.call's transact_list [ {transact_num, cash, number_list, code_url, trade_no} ]
    if ret:
        kwargs = {
            'push__transact_list': {
                'transact_num': ret['transact_num'],
                'cash': kwargs['cash'],
                'number_list': kwargs['number_list'],
                'code_url': ret['code_url'],
            }
        }
        raise Return(kwargs)
    else:
        # An error log should already have been emitted above.
        raise ValueError('Pre-transaction request to WeChat/Alipay failed, please retry later')
Example #35
import promote
from schema import Schema, And

# schema is optional https://pypi.python.org/pypi/schema
@promote.validate_json(Schema({'name': And(str, lambda s: len(s) > 1)}))
def helloWorld(data):
    return {'response': 'Hello ' + data['name'] + '!'}

USERNAME = '******'
API_KEY = 'your_api_key'
PROMOTE_URL = "https://promote.c.yhat.com/"

p = promote.Promote(USERNAME, API_KEY, PROMOTE_URL)

# test data
TESTDATA = {'name': 'austin'}

# test model locally
print(helloWorld(TESTDATA))

# 1. test that TESTDATA is valid json
# 2. if there is test data, run helloWorld(TESTDATA) before deployment
p.deploy("HelloModel", helloWorld, TESTDATA, confirm=True, dry_run=False, verbose=1)
Example #36
class Registry(Component):
    """
    Registry Class

    Provides the ability to define the registry that will be used when publishing the Docker image
    for the project, and provides the capability to publish by logging into the registry through
    the CLI.
    """

    activation = Activation.PROJECT
    commands = ["publish"]

    schema = Schema(
        {
            Optional('host'):
            And(str, error='Registry \'host\' must be a String'),
            Optional('port'):
            And(int, error='Registry \'port\' must be an Integer'),
            Optional('user'):
            And(str, error='Registry \'user\' must be a String'),
            Optional('aws'):
            And(dict, error='Registry \'aws\' must be a Dictionary')
        },
        ignore_extra_keys=True)

    host = None
    port = None
    user = None
    aws = None

    @classmethod
    def load(cls, config):
        """Instantiate the AWS Class Object if it is present"""

        cls.validate(config)
        aws = Aws.load(config["aws"]) if ("aws" in config) else None

        return cls(config.get("host"), config.get("port"), config.get("user"),
                   aws)

    def __init__(self, host=None, port=None, user=None, aws=None):
        """Instantiate the Registry Class Object based on the provided parameters"""
        self.host = host
        self.port = port
        self.user = user
        self.aws = aws

    def addParsers(self, subparsers):
        """
        SkeleParser Hook

        Adds a parser for the publish command to allow the user to login and push the project's
        Docker image to the defined registry, or Docker Hub, if one is not defined.
        """

        helpMessage = "Publish your versioned Docker Image to the registry"
        registryParser = subparsers.add_parser("publish", help=helpMessage)
        registryParser.add_argument("-t",
                                    "--tags",
                                    nargs='*',
                                    help="Additional image tags")

        return subparsers

    def execute(self, config, args, host=None):
        """
        Execution Hook

        Executes when the publish command is provided and prompts for username and password before
        building the project's Docker Image and pushing it to the defined registry.
        """

        # Login to the registry
        if self.aws is not None:
            docker.loginAWS(self.host,
                            self.aws.region,
                            self.aws.profile,
                            docker_host=host)
        else:
            docker.login(host=self.host, docker_host=host)

        # Build and Push with the different tags (LATEST and VERSION default)
        if (not args.skip_build_global):
            docker.build(config, host=host)

        docker.push(config,
                    self.host,
                    self.port,
                    self.user,
                    tags=args.tags,
                    docker_host=host)
Example #37
def _validate_cloud_section(cloud_section):
    """
    Run provider-specific schema validation.
    """
    provider = cloud_section['provider']
    return Schema(CLOUD_PROVIDER_SCHEMAS[provider]).validate(cloud_section)
Example #38
def main(argv, session):
    args = docopt(__doc__, argv=argv)

    # Validate args.
    s = Schema({
        six.text_type:
        bool,
        '<identifier>':
        list,
        '--modify':
        list,
        '--append':
        list,
        '--append-list':
        list,
        '--remove':
        list,
        '--spreadsheet':
        Or(
            None,
            And(lambda f: os.path.exists(f),
                error='<file> should be a readable file or directory.')),
        '--target':
        Or(None, str),
        '--priority':
        Or(None, Use(int, error='<priority> should be an integer.')),
    })
    try:
        args = s.validate(args)
    except SchemaError as exc:
        print('{0}\n{1}'.format(str(exc), printable_usage(__doc__)),
              file=sys.stderr)
        sys.exit(1)

    formats = set()
    responses = []

    for i, identifier in enumerate(args['<identifier>']):
        item = session.get_item(identifier)

        # Check existence of item.
        if args['--exists']:
            if item.exists:
                responses.append(True)
                print('{0} exists'.format(identifier))
            else:
                responses.append(False)
                print('{0} does not exist'.format(identifier), file=sys.stderr)
            if (i + 1) == len(args['<identifier>']):
                if all(r is True for r in responses):
                    sys.exit(0)
                else:
                    sys.exit(1)

        # Modify metadata.
        elif args['--modify'] or args['--append'] or args['--append-list'] \
                or args['--remove']:
            if args['--modify']:
                metadata_args = args['--modify']
            elif args['--append']:
                metadata_args = args['--append']
            elif args['--append-list']:
                metadata_args = args['--append-list']
            if args['--remove']:
                metadata_args = args['--remove']
            try:
                metadata = get_args_dict(metadata_args)
                if any('/' in k for k in metadata):
                    metadata = get_args_dict_many_write(metadata)
            except ValueError:
                print(
                    "error: The value of --modify, --remove, --append or --append-list "
                    "is invalid. It must be formatted as: --modify=key:value",
                    file=sys.stderr)
                sys.exit(1)

            if args['--remove']:
                responses.append(remove_metadata(item, metadata, args))
            else:
                responses.append(modify_metadata(item, metadata, args))
            if (i + 1) == len(args['<identifier>']):
                if all(r.status_code == 200 for r in responses):
                    sys.exit(0)
                else:
                    for r in responses:
                        if r.status_code == 200:
                            continue
                        # We still want to exit 0 if the non-200 is a
                        # "no changes to xml" error.
                        elif 'no changes' in r.content.decode('utf-8'):
                            continue
                        else:
                            sys.exit(1)

        # Get metadata.
        elif args['--formats']:
            for f in item.get_files():
                formats.add(f.format)
            if (i + 1) == len(args['<identifier>']):
                print('\n'.join(formats))

        # Dump JSON to stdout.
        else:
            metadata = json.dumps(item.item_metadata)
            print(metadata)

    # Edit metadata for items in bulk, using a spreadsheet as input.
    if args['--spreadsheet']:
        if not args['--priority']:
            args['--priority'] = -5
        with io.open(args['--spreadsheet'], 'rU', newline='',
                     encoding='utf-8') as csvfp:
            spreadsheet = csv.DictReader(csvfp)
            responses = []
            for row in spreadsheet:
                if not row['identifier']:
                    continue
                item = session.get_item(row['identifier'])
                if row.get('file'):
                    del row['file']
                metadata = dict((k.lower(), v) for (k, v) in row.items() if v)
                responses.append(modify_metadata(item, metadata, args))

            if all(r.status_code == 200 for r in responses):
                sys.exit(0)
            else:
                for r in responses:
                    if r.status_code == 200:
                        continue
                    # We still want to exit 0 if the non-200 is a
                    # "no changes to xml" error.
                    elif 'no changes' in r.content.decode('utf-8'):
                        continue
                    else:
                        sys.exit(1)
Example #39
class Lucene(HoaxyCommand):
    """
usage:
  hoaxy lucene --index [--mode=<mode>]
  hoaxy lucene --search --query=<q> [--top=<n>]
  hoaxy lucene -h | --help

Use Apache Lucene to build an index from the parsed articles, and also
provide a simple interface to query the indexed articles.
--index             Create, append to, or update the index.
--search            Run a Lucene search.

Options:
--mode=<mode>       Mode for create index, available choices are:
                    create_or_append, create, append
                    [default: create_or_append]
--query=<q>         String to query.
--top=<n>           Number of top results to show.
                    [default: 5]
-h --help           Show help.

Examples:

    1. Create an index of all non-indexed documents
        hoaxy lucene --index --mode=create_or_append

    2. If you want to replace the old indexes and create a new one:
        hoaxy lucene --index --mode=create

    3. Search the top 5 most relevant articles containing the keyword 'trump'
        hoaxy lucene --search --query=trump
    """
    name = 'lucene'
    short_description = 'Lucene Indexing and Searching'
    args_schema = Schema({
        '--query':
        Or(None, lambda s: len(s) > 0),
        '--mode':
        Or(
            None,
            And(Use(str.lower), lambda s: s in
                ('create_or_append', 'create', 'append'))),
        '--top':
        Or(None, And(Use(int), lambda x: x > 0)),
        object:
        object
    })

    @classmethod
    def prepare_article(cls, article_data):
        article_id, group_id, canonical_url, title, meta, content,\
            date_published, domain, site_type = article_data
        article = dict(article_id=article_id,
                       group_id=group_id,
                       canonical_url=canonical_url,
                       title=title,
                       content=content,
                       date_published=date_published,
                       domain=domain,
                       site_type=site_type)
        article['meta'] = unicode(meta)
        article['uq_id_str'] = unicode(group_id) + title
        if article['content'] is None:
            article['content'] = u'NULL'
        return article

    @classmethod
    def index(cls, session, mode, articles_iter, mgid):
        lucene.initVM()
        index_dir = cls.conf['lucene']['index_dir']
        indexer = Indexer(index_dir,
                          mode,
                          date_format=cls.conf['lucene']['date_format'])
        article = None
        for i, data in enumerate(articles_iter):
            article = cls.prepare_article(data)
            indexer.index_one(article)
            if i % cls.conf['window_size'] == 1:
                logger.info('Indexed %s articles', i)
        indexer.close()
        if article is not None:
            mgid.value = str(article['group_id'])
            session.commit()
            logger.info('Indexed article pointer updated!')
        else:
            logger.warning('No new articles are found!')
        logger.info('Done!')

    @classmethod
    def search(cls, query, n):
        lucene.initVM()
        index_dir = cls.conf['lucene']['index_dir']
        searcher = Searcher(index_dir)
        rs = searcher.search(query, n)
        pprint.pprint(rs)

    @classmethod
    def run(cls, args):
        try:
            # print(args)
            args = cls.args_schema.validate(args)
        except SchemaError as e:
            sys.exit(e)
        session = Session()
        # make sure lucene be inited
        lucene.initVM()
        lucene.getVMEnv().attachCurrentThread()
        if args['--index'] is True:
            configure_logging('lucene.index', console_level='INFO')
            mgid = get_or_create_m(
                session,
                MetaInfo,
                data=dict(
                    name='article_group_id_lucene_index',
                    value='0',
                    value_type='int',
                    description='article.group_id used for lucene index'),
                fb_uk='name')
            if args['--mode'] == 'create':
                mgid.set_value(0)
                session.commit()
            q = """
            SELECT DISTINCT ON (a.group_id) a.id, a.group_id,
                a.canonical_url,
                a.title, a.meta, a.content,
                coalesce(a.date_published, a.date_captured) AS pd,
                s.domain, s.site_type
            FROM article AS a
                JOIN site AS s ON s.id=a.site_id
            WHERE a.site_id IS NOT NULL AND s.is_enabled IS TRUE
                AND a.group_id>:gid
            ORDER BY group_id, pd ASC
            """
            articles_iter = session.execute(
                sqlalchemy.text(q).bindparams(gid=mgid.get_value()))
            cls.index(session, args['--mode'], articles_iter, mgid)
        elif args['--search'] is True:
            configure_logging('lucene.search', console_level='INFO')
            cls.search(args['--query'], args['--top'])
        else:
            print("Unrecognized command!")
            sys.exit(2)
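A stand-alone sketch of how a docopt args dict flows through a schema shaped
like `args_schema` above (the option keys come from the usage text; the input
values are illustrative assumptions):

from schema import And, Or, Schema, Use

args_schema = Schema({
    '--query': Or(None, lambda s: len(s) > 0),
    '--mode': Or(None, And(Use(str.lower),
                           lambda s: s in ('create_or_append', 'create', 'append'))),
    '--top': Or(None, And(Use(int), lambda x: x > 0)),
    object: object,
})

# Roughly what docopt produces for: hoaxy lucene --search --query=trump --top=5
args = args_schema.validate({'--index': False, '--search': True,
                             '--query': 'trump', '--mode': None, '--top': '5'})
assert args['--top'] == 5      # Use(int) converted the string
assert args['--mode'] is None  # Or(None, ...) accepts unset options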
Example #40
from schema import Schema, And, Use, Optional, SchemaError
import os

rect_field_schema = And(Use(int), lambda x: x >= 0)

screen_rect_schema = Schema({
    "left": rect_field_schema,
    "top": rect_field_schema,
    "width": rect_field_schema,
    "height": rect_field_schema,
})

screen_rect_yaml = {
    "left": 526,
    "top": 422,
    "width": 438,
    "height": 440,
}
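The snippet defines `screen_rect_yaml` but never validates it; a minimal usage
sketch, assuming the definitions above:

rect = screen_rect_schema.validate(screen_rect_yaml)
assert rect == {"left": 526, "top": 422, "width": 438, "height": 440}

# Use(int) also normalizes string input, e.g. rects read from YAML as text:
assert screen_rect_schema.validate(
    {"left": "0", "top": "0", "width": "800", "height": "600"})["width"] == 800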
Example #41
machine_list_schema = {
    Optional('machineList'): [
        Or(
            {
                'ip': str,
                'port': And(int, lambda x: 0 < x < 65535),
                'username': str,
                'passwd': str
            }, {
                'ip': str,
                'port': And(int, lambda x: 0 < x < 65535),
                'username': str,
                'sshKeyPath': os.path.exists,
                Optional('passphrase'): str
            })
    ]
}

LOCAL_CONFIG_SCHEMA = Schema({**common_schema, **common_trial_schema})

REMOTE_CONFIG_SCHEMA = Schema({
    **common_schema,
    **common_trial_schema,
    **machine_list_schema
})

PAI_CONFIG_SCHEMA = Schema({
    **common_schema,
    **pai_trial_schema,
    **pai_config_schema
})
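The `Schema({**a, **b})` calls above compose one schema from plain dict
fragments. A toy sketch of the same pattern with hypothetical fragments
(`common_schema` and friends are defined elsewhere in the original source):

from schema import Optional, Schema

common = {'experimentName': str}
trial = {Optional('gpuNum', default=0): int}

config_schema = Schema({**common, **trial})
assert config_schema.validate({'experimentName': 'mnist'}) == \
    {'experimentName': 'mnist', 'gpuNum': 0}  # Optional default injected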
Example #42

if __name__ == "__main__":
    from schema import Schema
    import argparse
    import time
    from entity_ranker import EntityRanker

    parser = argparse.ArgumentParser("arguments for basic testing lexicon")
    parser.add_argument("--schema", type=str, help="path to schema to use")
    parser.add_argument("--ranker-data", type=str, help="path to train data")
    parser.add_argument("--annotated-examples-path", help="Json of annotated examples", type=str)
    parser.add_argument("--scenarios-json", help="Json of scenario information", type=str)
    parser.add_argument("--transcripts", help="Json file of all transcripts collected")

    args = parser.parse_args()

    path = args.schema
    start_build = time.time()

    ranker = EntityRanker(args.annotated_examples_path, args.scenarios_json, args.ranker_data, args.transcripts)
    schema = Schema(path)
    lex = Lexicon(schema, learned_lex=True, entity_ranker=ranker, scenarios_json=args.scenarios_json)
    print "Building complete: ", time.time() - start_build
    start_test = time.time()
    lex.test()
    print "Testing Complete: ", time.time() - start_test



Example #43
class Stage(object):
    STAGE_FILE = 'Dvcfile'
    STAGE_FILE_SUFFIX = '.dvc'

    PARAM_MD5 = 'md5'
    PARAM_CMD = 'cmd'
    PARAM_DEPS = 'deps'
    PARAM_OUTS = 'outs'
    PARAM_LOCKED = 'locked'

    SCHEMA = {
        Optional(PARAM_MD5): Or(str, None),
        Optional(PARAM_CMD): Or(str, None),
        Optional(PARAM_DEPS): Or(And(list, Schema([dependency.SCHEMA])), None),
        Optional(PARAM_OUTS): Or(And(list, Schema([output.SCHEMA])), None),
        Optional(PARAM_LOCKED): bool,
    }

    def __init__(self,
                 project,
                 path=None,
                 cmd=None,
                 cwd=os.curdir,
                 deps=[],
                 outs=[],
                 md5=None,
                 locked=False):
        self.project = project
        self.path = path
        self.cmd = cmd
        self.cwd = cwd
        self.outs = outs
        self.deps = deps
        self.md5 = md5
        self.locked = locked

    @property
    def relpath(self):
        return os.path.relpath(self.path)

    @property
    def is_data_source(self):
        return self.cmd is None

    @staticmethod
    def is_stage_filename(path):
        if not path.endswith(Stage.STAGE_FILE_SUFFIX) \
           and os.path.basename(path) != Stage.STAGE_FILE:
            return False

        return True

    @staticmethod
    def is_stage_file(path):
        if not os.path.isfile(path):
            return False

        return Stage.is_stage_filename(path)

    def changed_md5(self):
        md5 = self._get_md5()
        assert md5 is not None

        if self.md5 == md5:
            return False

        return True

    @property
    def is_callback(self):
        return not self.is_data_source and len(self.deps) == 0

    @property
    def is_import(self):
        return not self.cmd and \
               len(self.deps) == 1 and \
               len(self.outs) == 1

    def _changed_deps(self, log):
        if self.locked:
            return False

        if self.is_callback:
            msg = "Dvc file '{}' is a 'callback' stage (has a command and " \
                  "no dependencies) and thus always considered as changed."
            self.project.logger.warn(msg.format(self.relpath))
            return True

        for dep in self.deps:
            if not dep.changed():
                continue
            log("Dependency '{}' of '{}' changed.".format(dep, self.relpath))
            return True

        return False

    def _changed_outs(self, log):
        for out in self.outs:
            if not out.changed():
                continue
            log("Output '{}' of '{}' changed.".format(out, self.relpath))
            return True

        return False

    def _changed_md5(self, log):
        if self.changed_md5():
            log("Dvc file '{}' changed.".format(self.relpath))
            return True
        return False

    def changed(self, print_info=False):
        if print_info:
            log = self.project.logger.info
        else:
            log = self.project.logger.debug

        ret = any([self._changed_deps(log),
                   self._changed_outs(log),
                   self._changed_md5(log)])

        if ret:
            msg = "Stage '{}' changed.".format(self.relpath)
            color = 'yellow'
        else:
            msg = "Stage '{}' didn't change.".format(self.relpath)
            color = 'green'

        log(Logger.colorize(msg, color))

        return ret

    def remove_outs(self, ignore_remove=False):
        for out in self.outs:
            out.remove(ignore_remove=ignore_remove)

    def unprotect_outs(self):
        for out in self.outs:
            if out.path_info['scheme'] != 'local' or not out.exists:
                continue
            self.project.unprotect(out.path)

    def remove(self):
        self.remove_outs(ignore_remove=True)
        os.unlink(self.path)

    def reproduce(self, force=False, dry=False, interactive=False):
        if not self.changed(print_info=True) and not force:
            return None

        if (self.cmd or self.is_import) and not self.locked and not dry:
            # Removing outputs only if we actually have command to reproduce
            self.remove_outs(ignore_remove=False)

        msg = "Going to reproduce '{}'. Are you sure you want to continue?"
        msg = msg.format(self.relpath)
        if interactive \
           and not self.project.prompt.prompt(msg):
            raise DvcException('Reproduction aborted by the user')

        self.project.logger.info(u'Reproducing \'{}\''.format(self.relpath))

        self.run(dry=dry)

        msg = u'\'{}\' was reproduced'.format(self.relpath)
        self.project.logger.debug(msg)

        return self

    @staticmethod
    def validate(d, fname=None):
        try:
            Schema(Stage.SCHEMA).validate(d)
        except SchemaError as exc:
            raise StageFileFormatError(fname, exc)

    @staticmethod
    def loadd(project, d, path):
        Stage.validate(d, fname=os.path.relpath(path))
        path = os.path.abspath(path)
        cwd = os.path.dirname(path)
        cmd = d.get(Stage.PARAM_CMD, None)
        md5 = d.get(Stage.PARAM_MD5, None)
        locked = d.get(Stage.PARAM_LOCKED, False)

        stage = Stage(project=project,
                      path=path,
                      cmd=cmd,
                      cwd=cwd,
                      md5=md5,
                      locked=locked)

        stage.deps = dependency.loadd_from(stage, d.get(Stage.PARAM_DEPS, []))
        stage.outs = output.loadd_from(stage, d.get(Stage.PARAM_OUTS, []))

        return stage

    @classmethod
    def _stage_fname_cwd(cls, fname, cwd, outs, add):
        if fname and cwd:
            return (fname, cwd)

        if not outs:
            return (cls.STAGE_FILE, cwd if cwd else os.getcwd())

        out = outs[0]
        if out.path_info['scheme'] == 'local':
            path = os.path
        else:
            path = posixpath

        if not fname:
            fname = path.basename(out.path) + cls.STAGE_FILE_SUFFIX

        if not cwd or (add and out.is_local):
            cwd = path.dirname(out.path)

        return (fname, cwd)

    @staticmethod
    def _check_inside_project(project, cwd):
        assert project is not None
        proj_dir = os.path.realpath(project.root_dir)
        if not os.path.realpath(cwd).startswith(proj_dir):
            raise StageBadCwdError(cwd)

    def is_cached(self):
        """
        Checks whether this stage has already been run and saved to the
        same dvc file.
        """
        from dvc.remote.local import RemoteLOCAL
        from dvc.remote.s3 import RemoteS3

        old = Stage.load(self.project, self.path)
        if old._changed_outs(log=self.project.logger.debug):
            return False

        # NOTE: need to save checksums for deps in order to compare them
        # with what is written in the old stage.
        for dep in self.deps:
            dep.save()

        old_d = old.dumpd()
        new_d = self.dumpd()

        # NOTE: need to remove checksums from old dict in order to compare
        # it to the new one, since the new one doesn't have checksums yet.
        old_d.pop(self.PARAM_MD5, None)
        new_d.pop(self.PARAM_MD5, None)
        outs = old_d.get(self.PARAM_OUTS, [])
        for out in outs:
            out.pop(RemoteLOCAL.PARAM_MD5, None)
            out.pop(RemoteS3.PARAM_ETAG, None)

        return old_d == new_d

    @staticmethod
    def loads(project=None,
              cmd=None,
              deps=[],
              outs=[],
              outs_no_cache=[],
              metrics_no_cache=[],
              fname=None,
              cwd=os.curdir,
              locked=False,
              add=False,
              overwrite=True,
              ignore_build_cache=False,
              remove_outs=False):

        stage = Stage(project=project,
                      cwd=cwd,
                      cmd=cmd,
                      locked=locked)

        stage.outs = output.loads_from(stage, outs, use_cache=True)
        stage.outs += output.loads_from(stage, outs_no_cache, use_cache=False)
        stage.outs += output.loads_from(stage, metrics_no_cache,
                                        use_cache=False, metric=True)
        stage.deps = dependency.loads_from(stage, deps)

        if fname is not None and os.path.basename(fname) != fname:
            msg = "Stage file name '{}' should not contain subdirectories. " \
                  "Use '-c|--cwd' to change location of the stage file."
            raise StageFileBadNameError(msg.format(fname))

        fname, cwd = Stage._stage_fname_cwd(fname, cwd, stage.outs, add=add)

        Stage._check_inside_project(project, cwd)

        cwd = os.path.abspath(cwd)
        path = os.path.join(cwd, fname)

        stage.cwd = cwd
        stage.path = path

        # NOTE: remove outs before we check build cache
        if remove_outs:
            stage.remove_outs(ignore_remove=False)
            project.logger.warn("Build cache is ignored when using "
                                "--remove-outs.")
            ignore_build_cache = True
        else:
            stage.unprotect_outs()

        if os.path.exists(path):
            if not ignore_build_cache and stage.is_cached():
                Logger.info('Stage is cached, skipping.')
                return None

            msg = "'{}' already exists. Do you wish to run the command and " \
                  "overwrite it?".format(stage.relpath)
            if not overwrite and not project.prompt.prompt(msg, False):
                raise StageFileAlreadyExistsError(stage.relpath)

        return stage

    @staticmethod
    def _check_dvc_filename(fname):
        if not Stage.is_stage_filename(fname):
            msg = "Bad stage filename '{}'. Stage files should be named " \
                  "'Dvcfile' or have a '.dvc' suffix(e.g. '{}.dvc')."
            raise StageFileBadNameError(msg.format(os.path.relpath(fname),
                                                   os.path.basename(fname)))

    @staticmethod
    def _check_dvc_file(fname):
        sname = fname + Stage.STAGE_FILE_SUFFIX
        if Stage.is_stage_file(sname):
            Logger.info("Do you mean '{}'?".format(sname))

    @staticmethod
    def load(project, fname):
        if not os.path.exists(fname):
            Stage._check_dvc_file(fname)
            raise StageFileDoesNotExistError(fname)

        Stage._check_dvc_filename(fname)

        if not Stage.is_stage_file(fname):
            Stage._check_dvc_file(fname)
            raise StageFileIsNotDvcFileError(fname)

        with open(fname, 'r') as fd:
            return Stage.loadd(project, yaml.safe_load(fd) or dict(), fname)

    def dumpd(self):
        ret = {}

        if self.cmd is not None:
            ret[Stage.PARAM_CMD] = self.cmd

        if len(self.deps):
            ret[Stage.PARAM_DEPS] = [d.dumpd() for d in self.deps]

        if len(self.outs):
            ret[Stage.PARAM_OUTS] = [o.dumpd() for o in self.outs]

        ret[Stage.PARAM_MD5] = self.md5

        if self.locked:
            ret[Stage.PARAM_LOCKED] = self.locked

        return ret

    def dump(self, fname=None):
        if not fname:
            fname = self.path

        self._check_dvc_filename(fname)

        msg = "Saving information to '{}'.".format(os.path.relpath(fname))
        Logger.info(msg)

        with open(fname, 'w') as fd:
            yaml.safe_dump(self.dumpd(), fd, default_flow_style=False)

        self.project._files_to_git_add.append(os.path.relpath(fname))

    def _get_md5(self):
        from dvc.output.local import OutputLOCAL

        # NOTE: excluding parameters that don't affect the state of the
        # pipeline. Not excluding OutputLOCAL.PARAM_CACHE, because if
        # it has changed, we might not have that output in our cache.
        exclude = [self.PARAM_LOCKED,
                   OutputLOCAL.PARAM_METRIC]

        d = self.dumpd()

        # NOTE: removing md5 manually in order to not affect md5s in deps/outs
        if self.PARAM_MD5 in d.keys():
            del d[self.PARAM_MD5]

        return dict_md5(d, exclude)

    def save(self):
        for dep in self.deps:
            dep.save()

        for out in self.outs:
            out.save()

        self.md5 = self._get_md5()

    def _check_missing_deps(self):
        missing = []
        for dep in self.deps:
            if not dep.exists:
                missing.append(dep)

        if len(missing) > 0:
            raise MissingDep(missing)

    def _check_if_fish(self, executable):  # pragma: no cover
        if executable is None \
           or os.path.basename(os.path.realpath(executable)) != 'fish':
            return

        msg = "DVC detected that you are using fish as your default " \
              "shell. Be aware that it might cause problems by overwriting " \
              "your current environment variables with values defined " \
              "in '.fishrc', which might affect your command. See " \
              "https://github.com/iterative/dvc/issues/1307. "
        self.project.logger.warn(msg)

    def _run(self):
        self._check_missing_deps()
        executable = os.getenv('SHELL') if os.name != 'nt' else None
        self._check_if_fish(executable)

        p = subprocess.Popen(self.cmd,
                             cwd=self.cwd,
                             shell=True,
                             env=fix_env(os.environ),
                             executable=executable)
        p.communicate()

        if p.returncode != 0:
            raise StageCmdFailedError(self)

    def run(self, dry=False):
        if self.locked:
            msg = u'Verifying outputs in locked stage \'{}\''
            self.project.logger.info(msg.format(self.relpath))
            if not dry:
                self.check_missing_outputs()
        elif self.is_import:
            msg = u'Importing \'{}\' -> \'{}\''
            self.project.logger.info(msg.format(self.deps[0].path,
                                                self.outs[0].path))

            if not dry:
                self.deps[0].download(self.outs[0].path_info)
        elif self.is_data_source:
            msg = u'Verifying data sources in \'{}\''.format(self.relpath)
            self.project.logger.info(msg)
            if not dry:
                self.check_missing_outputs()
        else:
            msg = u'Running command:\n\t{}'.format(self.cmd)
            self.project.logger.info(msg)

            if not dry:
                self._run()

        if not dry:
            self.save()

    def check_missing_outputs(self):
        outs = [out for out in self.outs if not out.exists]
        paths = [out.path if out.path_info['scheme'] != 'local' else
                 out.rel_path for out in outs]
        if paths:
            raise MissingDataSource(paths)

    def checkout(self, force=False):
        for out in self.outs:
            out.checkout(force=force)

    def _status(self, entries, name):
        ret = {}

        for entry in entries:
            ret.update(entry.status())

        if ret:
            return {name: ret}

        return {}

    def status(self):
        ret = {}

        if not self.locked:
            ret.update(self._status(self.deps, 'deps'))

        ret.update(self._status(self.outs, 'outs'))

        if ret or self.changed_md5() or self.is_callback:
            return {self.relpath: ret}

        return {}
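A stand-alone sketch of what `Stage.validate` accepts, with the nested
`dependency.SCHEMA`/`output.SCHEMA` entries replaced by plain `dict` for
illustration:

from schema import And, Optional, Or, Schema

STAGE_SCHEMA = Schema({
    Optional('md5'): Or(str, None),
    Optional('cmd'): Or(str, None),
    Optional('deps'): Or(And(list, Schema([dict])), None),
    Optional('outs'): Or(And(list, Schema([dict])), None),
    Optional('locked'): bool,
})

# Every key is Optional, so partial stage files pass:
STAGE_SCHEMA.validate({'cmd': 'python train.py',
                       'deps': [{'path': 'data.csv', 'md5': None}],
                       'md5': None})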
Example #44
    return data


ops_schema = Schema({
    Optional('weight', default=None): {
        Optional('granularity', default=None):
        And(list,
            lambda s: all(i in ['per_channel', 'per_tensor'] for i in s)),
        Optional('scheme', default=None):
        And(list, lambda s: all(i in ['asym', 'sym'] for i in s)),
        Optional('dtype', default=None):
        And(list,
            lambda s: all(i in ['int8', 'uint8', 'fp32', 'bf16'] for i in s)),
        Optional('algorithm', default=None):
        And(list, lambda s: all(i in ['minmax', 'kl'] for i in s))
    },
    Optional('activation', default=None): {
        Optional('granularity', default=None):
        And(list,
            lambda s: all(i in ['per_channel', 'per_tensor'] for i in s)),
        Optional('scheme', default=None):
        And(list, lambda s: all(i in ['asym', 'sym'] for i in s)),
        Optional('dtype', default=None):
        And(list,
            lambda s: all(i in ['int8', 'uint8', 'fp32', 'bf16'] for i in s)),
        Optional('algorithm', default=None):
        And(list, lambda s: all(i in ['minmax', 'kl'] for i in s))
    }
})

transform_schema = Schema({
    Optional('RandomResizedCrop'): {
Example #45
def validate(d, fname=None):
    try:
        Schema(Stage.SCHEMA).validate(d)
    except SchemaError as exc:
        raise StageFileFormatError(fname, exc)
Example #46
SCHEMA = Schema({
    "api_server": {
        "port": And(int, lambda port: port > 0),
        "enable_microbatch": bool,
        "run_with_ngrok": bool,
        "enable_swagger": bool,
        "enable_metrics": bool,
        "enable_feedback": bool,
        "max_request_size": And(int, lambda size: size > 0),
        "workers": Or(And(int, lambda workers: workers > 0), None),
        "timeout": And(int, lambda timeout: timeout > 0),
    },
    "marshal_server": {
        "max_batch_size": Or(And(int, lambda size: size > 0), None),
        "max_latency": Or(And(int, lambda latency: latency > 0), None),
        "workers": Or(And(int, lambda workers: workers > 0), None),
        "request_header_flag": str,
    },
    "yatai": {
        "url": Or(str, None)
    },
    "tracing": {
        "type":
        Or(And(str, Use(str.lower), lambda s: s in ('zipkin', 'jaeger')),
           None),
        Optional("zipkin"): {
            "url": Or(str, None)
        },
        Optional("jaeger"): {
            "address": Or(str, None),
            "port": Or(int, None)
        },
    },
    "instrument": {
        "namespace": str
    },
    "logging": {
        "level": str
    },
})
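A recurring idiom in this config schema is `Or(And(int, predicate), None)` for
nullable, range-checked settings; a minimal sketch:

from schema import And, Or, Schema

workers = Schema(Or(And(int, lambda w: w > 0), None))
workers.validate(4)     # ok: positive int
workers.validate(None)  # ok: explicit null means "use the default"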
Example #47
def _add_expr(kwargs, call, operator):
    """
    Add an order.
    :param kwargs:
    :param call:
    :param operator:
    :return:
    """
    kwargs = Schema({
        'name': schema_unicode_empty,
        'tel': schema_unicode,
        'address': schema_unicode_empty,
        'lat': schema_float,
        'lng': schema_float,
        'fence': {
            'id': schema_unicode,
            'name': schema_unicode_empty,
            'node': object
        },
        Optional('category', default=DEFAULT_CATEGORY): schema_unicode,
        Optional('remark', default=''): schema_unicode_empty,
    }).validate(kwargs)
    # 0. Assemble some info
    creator = {
        'id': call.shop_id,
        'name': call.shop_name,
        'tel': call.shop_tel,
        'm_type': ''
    }
    node = {
        "node_0": {
            "name": call.shop_name,
            "tel": call.shop_tel,
            "addr": call.loc['address'],
            "lat": call.loc['lat'],
            "lng": call.loc['lng'],
            "fence": call.loc['fence']
        },
        "node_n": {
            "name": kwargs['name'],
            "tel": kwargs['tel'],
            "addr": kwargs['address'],
            "lat": kwargs['lat'],
            "lng": kwargs['lng'],
            "fence": kwargs['fence']
        }
    }
    # 1. Pre-set the default delivery fee
    fee = yield get_fee(call.shop_id, kwargs['category'])
    # 2. Preset the status
    status = dict(status=ExprState.STATUS_PRE_CREATED,
                  sub_status=ExprState.SUB_STATUS_PRE_CREATED)
    # 3. Create
    expr = yield Express.create(
        creator=creator,
        status=status,
        node=node,
        remark=kwargs['remark'],
        fee=fee,
        assignee=operator  # for orders placed on behalf of the merchant, preset the assignee to the courier performing this operation
    )
    kwargs['add_to_set__number_list'] = expr.number
    raise Return(kwargs)
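Note how `kwargs` is re-bound to the validated result, so every
`Optional(..., default=...)` entry gets filled in. A toy sketch of that
behaviour (the `schema_unicode*` validators above are project-specific):

from schema import Optional, Schema

kwargs = Schema({
    'tel': str,
    Optional('remark', default=''): str,
}).validate({'tel': '555-0100'})
assert kwargs == {'tel': '555-0100', 'remark': ''}  # default injected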
Example #48
def main(argv, session):
    args = docopt(__doc__, argv=argv)

    # Validation error messages.
    invalid_id_msg = (
        '<identifier> should be between 3 and 80 characters in length, and '
        'can only contain alphanumeric characters, underscores ( _ ), or '
        'dashes ( - )')

    # Validate args.
    s = Schema({
        text_type:
        Use(lambda x: bool(x)),
        '<file>':
        list,
        '--format':
        list,
        '--glob':
        list,
        'delete':
        bool,
        '<identifier>':
        Or(None, And(str, validate_ia_identifier, error=invalid_id_msg)),
    })
    try:
        args = s.validate(args)
    except SchemaError as exc:
        print('{0}\n{1}'.format(str(exc), printable_usage(__doc__)),
              file=sys.stderr)
        sys.exit(1)

    verbose = not args['--quiet']
    item = session.get_item(args['<identifier>'])
    if not item.exists:
        print('{0}: skipping, item doesn\'t exist.'.format(args['<identifier>']))

    # Files that cannot be deleted via S3.
    no_delete = ['_meta.xml', '_files.xml', '_meta.sqlite']

    if verbose:
        sys.stdout.write('Deleting files from {0}\n'.format(item.identifier))

    if args['--all']:
        files = [f for f in item.iter_files()]
        args['--cascade'] = True
    elif args['--glob']:
        files = item.get_files(glob_pattern=args['--glob'])
    elif args['--format']:
        files = item.get_files(formats=args['--format'])
    else:
        fnames = []
        if args['<file>'] == ['-']:
            fnames = [f.strip().decode('utf-8') for f in sys.stdin]
        else:
            fnames = [f.strip().decode('utf-8') for f in args['<file>']]

        files = [f for f in [item.get_file(f) for f in fnames] if f]

    if not files:
        sys.stderr.write(' warning: no files found, nothing deleted.\n')
        sys.exit(1)

    for f in files:
        if not f:
            if verbose:
                sys.stderr.write(' error: "{0}" does not exist\n'.format(
                    f.name))
            sys.exit(1)
        if any(f.name.endswith(s) for s in no_delete):
            continue
        if args['--dry-run']:
            sys.stdout.write(' will delete: {0}/{1}\n'.format(
                item.identifier, f.name.encode('utf-8')))
            continue
        resp = f.delete(verbose=verbose, cascade_delete=args['--cascade'])
        if resp.status_code != 204:
            msg = get_xml_text(resp.content)
            sys.stderr.write(' error: {0} ({1})\n'.format(
                msg, resp.status_code))
            sys.exit(1)
Example #49
from schema import Schema, And, Use, SchemaError

config_schema_dict = {
    "SERVER": {
        "pc_ip": And(Use(str)),
        "pc_port": And(Use(str)),
        "pc_username": And(Use(str)),
        "pc_password": And(Use(str)),
    },
    "PROJECT": {
        "name": And(Use(str))
    },
    "DB": {
        "location": And(Use(str))
    },
    "CATEGORIES": {},
}

config_schema = Schema(config_schema_dict)


def validate_config(config):

    config_dict = {s: dict(config.items(s)) for s in config.sections()}
    try:
        config_schema.validate(config_dict)
        return True
    except SchemaError:
        return False
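A usage sketch for `validate_config`, feeding it a hypothetical INI whose
sections mirror the schema above:

from configparser import ConfigParser

config = ConfigParser()
config.read_string("""
[SERVER]
pc_ip = 10.0.0.1
pc_port = 9440
pc_username = admin
pc_password = secret

[PROJECT]
name = demo

[DB]
location = /tmp/demo.db

[CATEGORIES]
""")
print(validate_config(config))  # True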
Example #50
def integrate_parser(argv=sys.argv[1:]):
    __usage__ = """
Integrate ITC data using Gaussian process regression. Uses MicroCal .itc files, or custom format .yml files for analysing experiments.

Usage:
  bayesitc_integrate.py <datafiles>... [-w <workdir> | --workdir=<workdir>] [-v | -vv | -vvv] [options]
  bayesitc_integrate.py (-h | --help)
  bayesitc_integrate.py --license
  bayesitc_integrate.py --version

Options:
  -h, --help                             Show this screen
  --version                              Show version
  --license                              Show license
  -l <logfile>, --log=<logfile>          File to write logs to. Will be placed in workdir.
  -v,                                    Verbose output level. Multiple flags increase verbosity.
  <datafiles>                            Datafile(s) to perform the analysis on, .itc, .yml
  -w <workdir>, --workdir=<workdir>      Directory for output files                      [default: ./]
  -n <name>, --name=<name>               Name for the experiment. Will be used for output files. Defaults to input file name.
  -i <ins>, --instrument=<ins>           The name of the instrument used for the experiment. Overrides .itc file instrument.
  -f <frac>, --fraction=<frac>           The fraction of the injection to fit, measured from the end [default: 0.2]
  --theta0=<theta0>                      The parameters in the autocorrelation model. [default: 5.0]
  --nugget=<nugget>                      Size of nugget effect to allow smooth predictions from noisy data. [default: 1.0]
  --plot                                 Generate plots of the baseline fit
"""

    arguments = docopt(__usage__,
                       argv=argv,
                       version='bayesitc_integrate.py, pre-alpha')
    schema = Schema({
        '--help':
        bool,  # True or False are accepted
        '--license':
        bool,  # True or False are accepted
        # integer between 0 and 3
        '-v':
        And(int, lambda n: 0 <= n <= 3),
        # Float greater than 0
        '--fraction':
        And(Use(float), lambda n: 0 < n <= 1.0),
        '--nugget':
        And(Use(float), lambda n: n > 0),
        '--theta0':
        And(Use(float), lambda n: n > 0),
        # Convert str to int, make sure that it is larger than 0
        '--name':
        Or(None, And(str, len)),  # Not an empty string
        '--instrument':
        Or(None, And(str, lambda m: m in known_instruments)),
        # None, or str and found in this dict
        '--version':
        bool,  # True or False are accepted
        '--plot':
        bool,  # True or False are accepted
        '--workdir':
        str,  # str
        # list and ensure it contains existing files
        '<datafiles>':
        And(
            list,
            lambda inpfiles: [os.path.isfile(inpfile) for inpfile in inpfiles],
            Use(lambda inpfiles:
                [os.path.abspath(inpfile) for inpfile in inpfiles])),
        # Don't use, or open file with writing permissions
        '--log':
        Or(None, str),  # Don't use, or str
    })

    return schema.validate(arguments)
Example #51

from schema import Schema, And, Use, Or, Optional
from logger import configure_logger

LOGGER = configure_logger(__name__)

# Pipeline Params
PARAM_SCHEMA = {
    Optional("notification_endpoint"): str,
    Optional("schedule"): str,
    Optional("restart_execution_on_update"): bool,
}

# CodeCommit Source
CODECOMMIT_SOURCE_PROPS = {
    "account_id": Schema(And(Use(int), lambda n: len(str(n)) == 12)),
    Optional("repository"): str,
    Optional("branch"): str,
    Optional("poll_for_changes"): bool,
    Optional("owner"): str,
    Optional("role"): str
}
CODECOMMIT_SOURCE = {
    "provider": 'codecommit',
    "properties": CODECOMMIT_SOURCE_PROPS
}

# GitHub Source
GITHUB_SOURCE_PROPS = {
    Optional("repository"): str,
    Optional("branch"): str,
Example #52
def bayesitc_util_parser(argv=sys.argv[1:]):
    __usage__ = """
Bayesian analysis of ITC data. Uses MicroCal .itc files, or custom format .yml files for analysing experiments.

Usage:
  ITC.py <datafiles>... [-w <workdir> | --workdir=<workdir>] [-n <name> | --name=<name>] [-q <file> | --heats=<file>] [-i <ins> | --instrument=<ins> ] [-v | -vv | -vvv] [-r <file> | --report=<file>] [ -l <logfile> | --log=<logfile>]
  ITC.py mcmc <datafiles>...  (-m <model> | --model=<model>) [-w <workdir> | --workdir=<workdir>] [ -r <receptor> | --receptor=<receptor>] [-n <name> | --name=<name>] [-q <file> | --heats=<file>] [-i <ins> | --instrument=<ins> ] [ -l <logfile> | --log=<logfile>] [-v | -vv | -vvv] [--report=<file>] [options]
  ITC.py (-h | --help)
  ITC.py --license
  ITC.py --version

Options:
  -h, --help                            Show this screen
  --version                              Show version
  --license                              Show license
  -l <logfile>, --log=<logfile>          File to write logs to. Will be placed in workdir.
  -v,                                    Verbose output level. Multiple flags increase verbosity.
  <datafiles>                            Datafile(s) to perform the analysis on, .itc, .yml
  -w <workdir>, --workdir=<workdir>      Directory for output files                      [default: ./]
  -r <receptor> | --receptor=<receptor>  The name of the receptor for a Competitive Binding model.
  -n <name>, --name=<name>               Name for the experiment. Will be used for output files. Defaults to inputfile name.
  -i <ins>, --instrument=<ins>           The name of the instrument used for the experiment. Overrides .itc file instrument.
  -q <file>, --heats=<file>              Origin format integrated heats file. (From NITPIC use .dat file)
  -m <model>, --model=<model>            Model to use for mcmc sampling                  [default: TwoComponent]
  --nfit=<n>                             No. of iteration for maximum a posteriori fit   [default: 20000]
  --niters=<n>                           No. of iterations for mcmc sampling             [default: 6000]
  --nburn=<n>                            No. of Burn-in iterations for mcmc sampling     [default: 1000]
  --nthin=<n>                            Thinning period for mcmc sampling               [default: 5]
  --report=<file>                        Output file with summary in markdown
"""
    arguments = docopt(__usage__, argv=argv, version='ITC.py, pre-alpha')
    schema = Schema({
        '--heats':
        Or(None, And(str, os.path.isfile,
                     Use(os.path.abspath))),  # str, verify that it exists
        '--help':
        bool,  # True or False are accepted
        '--license':
        bool,  # True or False are accepted
        # integer between 0 and 3
        '-v':
        And(int, lambda n: 0 <= n <= 3),
        # str and found in this dict
        '--model':
        And(str, lambda m: m in known_models),
        '--nfit':
        And(Use(int), lambda n: n > 0),
        # Convert str to int, make sure that it is larger than 0
        '--nburn':
        And(Use(int), lambda n: n > 0),
        # Convert str to int, make sure that it is larger than 0
        '--niters':
        And(Use(int), lambda n: n > 0),
        # Convert str to int, make sure that it is larger than 0
        '--nthin':
        And(Use(int), lambda n: n > 0),
        # Convert str to int, make sure that it is larger than 0
        '--name':
        Or(None, And(str, len)),  # Not an empty string
        '--instrument':
        Or(None, And(str, lambda m: m in known_instruments)),
        # None, or str and found in this dict
        '--version':
        bool,  # True or False are accepted
        '--receptor':
        Or(None, str),  # str or None
        '--workdir':
        str,  # str
        # list and ensure it contains existing files
        '<datafiles>':
        And(
            list,
            lambda inpfiles: [os.path.isfile(inpfile) for inpfile in inpfiles],
            Use(lambda inpfiles:
                [os.path.abspath(inpfile) for inpfile in inpfiles])),
        'mcmc':
        bool,  # True or False are accepted
        '--report':
        Or(None, Use(lambda f: open(f, 'w'))),
        # Don't use, or open file with writing permissions
        '--log':
        Or(None, str),  # Don't use, or str
    })

    return schema.validate(arguments)
Example #53
def test_issue_83_iterable_validation_return_type():
    TestSetType = type("TestSetType", (set, ), dict())
    data = TestSetType(["test", "strings"])
    s = Schema(set([str]))
    assert isinstance(s.validate(data), TestSetType)
Example #54

def __init__(self, map_input: dict):
    self.validated = Schema(TOP_LEVEL_SCHEMA).validate(map_input)
Example #55
class TaskView(View):
    update_schema = Schema({
        Optional('name'):
        And(str, lambda s: 0 < len(s) < 256),
        Optional('description'):
        str,
        Optional('project_id'):
        int
    })

    # Optional marks a field as optional; And requires every condition to hold

    def get(self, request, task_id, *args, **kwargs):
        """
        Fetch a single record.
        :param request:
        :param task_id:
        :param args:
        :param kwargs:
        :return:
        """
        task = Task.objects.filter(id=task_id).first()
        if not task:
            return response_failed(code=ErrorCode.task, message='record not found')
        task_dict = model_to_dict(task)
        return response_success(data=task_dict)

    def put(self, request, task_id, *args, **kwargs):
        """
        Update a record.
        :param request:
        :param task_id:
        :param args:
        :param kwargs:
        :return:
        """
        task = Task.objects.filter(id=task_id).first()
        if not task:
            return response_failed(code=ErrorCode.task, message='record not found')

        body = request.body
        data = json.loads(body, encoding='utf-8')
        if not self.update_schema.is_valid(data):
            return response_failed()

        data = self.update_schema.validate(data)
        if not data:  # no data was passed, nothing to update
            pass
        else:
            Task.objects.filter(id=task_id).update(**data)
            task = Task.objects.filter(id=task_id).first()

        task_dict = model_to_dict(task)
        return response_success(data=task_dict)

    def delete(self, request, task_id, *args, **kwargs):
        """
        Delete a record.
        :param request:
        :param task_id:
        :param args:
        :param kwargs:
        :return:
        """
        Task.objects.filter(id=task_id).delete()
        return response_success(data=True)
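`is_valid` (used in `put` above) is the non-raising counterpart of `validate`:
it returns a bool instead of raising SchemaError. A minimal sketch:

from schema import And, Optional, Schema

update_schema = Schema({Optional('name'): And(str, lambda s: 0 < len(s) < 256)})
assert update_schema.is_valid({'name': 'task-1'})
assert not update_schema.is_valid({'name': ''})  # fails the length check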
Example #56
from schema import Schema, Optional

from hil import model
from hil.model import db
from hil.auth import get_auth_backend
from hil.config import cfg
from hil.rest import rest_call
from hil.class_resolver import concrete_class_for
from hil.network_allocator import get_network_allocator
from hil.errors import *


# Project Code #
################
@rest_call('GET', '/projects', Schema({}))
def list_projects():
    """List all projects.

    Returns a JSON array of strings representing a list of projects.

    Example:  '["project1", "project2", "project3"]'
    """
    get_auth_backend().require_admin()
    projects = model.Project.query.all()
    projects = sorted([p.label for p in projects])
    return json.dumps(projects)


@rest_call('PUT', '/project/<project>', Schema({'project': basestring}))
def project_create(project):
Example #57
def test_issue_9_prioritized_key_comparison():
    validate = Schema({"key": 42, object: 42}).validate
    assert validate({"key": 42, 777: 42}) == {"key": 42, 777: 42}
Example #58
class TaskIntervalRunTestCasesView(View):
    update_schema = Schema({
        'days': And(int, lambda s: 0 <= s),
        'hours': And(int, lambda s: 0 <= s),
        'minutes': And(int, lambda s: 0 <= s),
        'start_time': str
    })

    def post(self, request, task_id, *args, **kwargs):
        """
        Run the task on a repeating interval.
        :param request:
        :param task_id:
        :param args:
        :param kwargs:
        :return:
        """
        task = Task.objects.filter(id=task_id).first()
        if not task:
            return response_failed(code=ErrorCode.task, message='record not found')

        body = request.body
        data = json.loads(body, encoding='utf-8')
        if not self.update_schema.is_valid(data):
            return response_failed()

        data = self.update_schema.validate(data)
        if not data:  # no data was passed, nothing to update
            pass
        else:
            data['interval_switch'] = True
            Task.objects.filter(id=task_id).update(**data)

        job = scheduler.get_job("task" + str(task_id))
        if job:
            scheduler.remove_job("task" + str(task_id))

        scheduler.add_job(run_task_common,
                          'interval',
                          args=[task_id],
                          days=data["days"],
                          hours=data["hours"],
                          minutes=data["minutes"],
                          start_date=data["start_time"],
                          id="task" + str(task_id))
        return response_success()

    def delete(self, request, task_id, *args, **kwargs):
        """
        Stop the task's interval execution.
        :param request:
        :param task_id:
        :param args:
        :param kwargs:
        :return:
        """
        task = Task.objects.filter(id=task_id).first()
        if not task:
            return response_failed(code=ErrorCode.task, message='record not found')
        data = {
            "interval_switch": False,
            "days": 0,
            "hours": 0,
            "minutes": 0,
            "start_time": None
        }
        Task.objects.filter(id=task_id).update(**data)

        job = scheduler.get_job("task" + str(task_id))
        if job:
            scheduler.remove_job("task" + str(task_id))
        return response_success()
Example #59
  --attacking-general            Enables the general when attacking (+1 to all pips) [default: False]
  --defending-general            Enables the general when defending (+1 to all pips) [default: False]
"""

from docopt import docopt
from schema import Schema, And, Use, SchemaError

from .model import Army, Context
from .simulator import simulate
from .stats import make_simulation_summary, terminal_result

validation_schema = Schema({
    '<attacker>': And(Use(int), lambda n: n > 1),
    '<defender>': And(Use(int), lambda n: n > 0),
    '--iterations': And(Use(int), lambda n: 1 <= n <= 100000),
    '--attacking-general': bool,
    '--defending-general': bool,
    '--help': bool,
    '--version': bool
})


def parse_args():
    args = docopt(__doc__, version='Risk Simulator 1.0')
    try:
        args = validation_schema.validate(args)
    except SchemaError as e:
        exit(e)

    return Context(
        iterations=args['--iterations'],
Example #60
schema = Schema(
    {
        "auto_migrate_settings":
        bool,
        "allow_duplicate_hostnames":
        bool,
        "allow_duplicate_ips":
        bool,
        "allow_duplicate_macs":
        bool,
        "allow_dynamic_settings":
        bool,
        "always_write_dhcp_entries":
        bool,
        "anamon_enabled":
        bool,
        "auth_token_expiration":
        int,
        "authn_pam_service":
        str,
        "autoinstall_snippets_dir":
        str,
        "autoinstall_templates_dir":
        str,
        "bind_chroot_path":
        str,
        "bind_zonefile_path":
        str,
        "bind_master":
        str,
        "boot_loader_conf_template_dir":
        str,
        Optional("bootloaders_dir", default="/var/lib/cobbler/loaders"):
        str,
        Optional("bootloaders_formats",
                 default={
                     'aarch64': {
                         'binary_name': 'grubaa64.efi'
                     },
                     'arm': {
                         'binary_name': 'bootarm.efi'
                     },
                     'arm64-efi': {
                         'binary_name': 'grubaa64.efi',
                         'extra_modules': ['efinet']
                     },
                     'i386': {
                         'binary_name': 'bootia32.efi'
                     },
                     'i386-pc-pxe': {
                         'binary_name': 'grub.0',
                         'mod_dir': 'i386-pc',
                         'extra_modules': ['chain', 'pxe', 'biosdisk']
                     },
                     'i686': {
                         'binary_name': 'bootia32.efi'
                     },
                     'IA64': {
                         'binary_name': 'bootia64.efi'
                     },
                     'powerpc-ieee1275': {
                         'binary_name': 'grub.ppc64le',
                         'extra_modules': ['net', 'ofnet']
                     },
                     'x86_64-efi': {
                         'binary_name': 'grubx86.efi',
                         'extra_modules': ['chain', 'efinet']
                     }
                 }):
        dict,
        Optional("bootloaders_modules",
                 default=[
                     'btrfs', 'ext2', 'xfs', 'jfs', 'reiserfs', 'all_video',
                     'boot', 'cat', 'configfile', 'echo', 'fat', 'font',
                     'gfxmenu', 'gfxterm', 'gzio', 'halt', 'iso9660', 'jpeg',
                     'linux', 'loadenv', 'minicmd', 'normal', 'part_apple',
                     'part_gpt', 'part_msdos', 'password_pbkdf2', 'png',
                     'reboot', 'search', 'search_fs_file', 'search_fs_uuid',
                     'search_label', 'sleep', 'test', 'true', 'video',
                     'mdraid09', 'mdraid1x', 'lvm', 'serial', 'regexp', 'tr',
                     'tftp', 'http', 'luks', 'gcry_rijndael', 'gcry_sha1',
                     'gcry_sha256'
                 ]):
        list,
        Optional("syslinux_dir", default="/usr/share/syslinux"):
        str,
        Optional("grub2_mod_dir", default="/usr/share/grub"):
        str,
        Optional("grubconfig_dir", default="/var/lib/cobbler/grub_config"):
        str,
        "build_reporting_enabled":
        bool,
        "build_reporting_email": [str],
        "build_reporting_ignorelist": [str],
        "build_reporting_sender":
        str,
        "build_reporting_smtp_server":
        str,
        "build_reporting_subject":
        str,
        Optional("buildisodir", default="/var/cache/cobbler/buildiso"):
        str,
        "cheetah_import_whitelist": [str],
        "client_use_https":
        bool,
        "client_use_localhost":
        bool,
        Optional("cobbler_master", default=""):
        str,
        Optional("convert_server_to_ip", default=False):
        bool,
        "createrepo_flags":
        str,
        "autoinstall":
        str,
        "default_name_servers": [str],
        "default_name_servers_search": [str],
        "default_ownership": [str],
        "default_password_crypted":
        str,
        "default_template_type":
        str,
        "default_virt_bridge":
        str,
        Optional("default_virt_disk_driver", default="raw"):
        str,
        "default_virt_file_size":
        int,
        "default_virt_ram":
        int,
        "default_virt_type":
        str,
        "enable_ipxe":
        bool,
        "enable_menu":
        bool,
        "http_port":
        int,
        "include": [str],
        Optional("iso_template_dir", default="/etc/cobbler/iso"):
        str,
        Optional("jinja2_includedir", default="/var/lib/cobbler/jinja2"):
        str,
        "kernel_options":
        dict,
        "ldap_anonymous_bind":
        bool,
        "ldap_base_dn":
        str,
        "ldap_port":
        int,
        "ldap_search_bind_dn":
        str,
        "ldap_search_passwd":
        str,
        "ldap_search_prefix":
        str,
        "ldap_server":
        str,
        "ldap_tls":
        bool,
        "ldap_tls_cacertfile":
        str,
        "ldap_tls_certfile":
        str,
        "ldap_tls_keyfile":
        str,
        Optional("bind_manage_ipmi", default=False):
        bool,
        # TODO: Remove following line
        "manage_dhcp":
        bool,
        "manage_dhcp_v4":
        bool,
        "manage_dhcp_v6":
        bool,
        "manage_dns":
        bool,
        "manage_forward_zones": [str],
        "manage_reverse_zones": [str],
        Optional("manage_genders", False):
        bool,
        "manage_rsync":
        bool,
        "manage_tftpd":
        bool,
        "mgmt_classes": [str],
        # TODO: Validate Subdict
        "mgmt_parameters":
        dict,
        "next_server_v4":
        str,
        "next_server_v6":
        str,
        Optional("nsupdate_enabled", False):
        bool,
        Optional("nsupdate_log", default="/var/log/cobbler/nsupdate.log"):
        str,
        Optional("nsupdate_tsig_algorithm", default="hmac-sha512"):
        str,
        Optional("nsupdate_tsig_key", default=[]): [str],
        "power_management_default_type":
        str,
        "proxy_url_ext":
        str,
        "proxy_url_int":
        str,
        "puppet_auto_setup":
        bool,
        Optional("puppet_parameterized_classes", default=True):
        bool,
        Optional("puppet_server", default="puppet"):
        str,
        Optional("puppet_version", default=2):
        int,
        "puppetca_path":
        str,
        "pxe_just_once":
        bool,
        "nopxe_with_triggers":
        bool,
        "redhat_management_permissive":
        bool,
        "redhat_management_server":
        str,
        "redhat_management_key":
        str,
        "register_new_installs":
        bool,
        "remove_old_puppet_certs_automatically":
        bool,
        "replicate_repo_rsync_options":
        str,
        "replicate_rsync_options":
        str,
        "reposync_flags":
        str,
        "reposync_rsync_flags":
        str,
        "restart_dhcp":
        bool,
        "restart_dns":
        bool,
        "run_install_triggers":
        bool,
        "scm_track_enabled":
        bool,
        "scm_track_mode":
        str,
        "scm_track_author":
        str,
        "scm_push_script":
        str,
        "serializer_pretty_json":
        bool,
        "server":
        str,
        "sign_puppet_certs_automatically":
        bool,
        Optional("signature_path",
                 default="/var/lib/cobbler/distro_signatures.json"):
        str,
        Optional("signature_url",
                 default="https://cobbler.github.io/signatures/3.0.x/latest.json"):
        str,
        "tftpboot_location":
        str,
        "virt_auto_boot":
        bool,
        "webdir":
        str,
        "webdir_whitelist": [str],
        "xmlrpc_port":
        int,
        "yum_distro_priority":
        int,
        "yum_post_install_mirror":
        bool,
        "yumdownloader_flags":
        str,
        Optional("windows_enabled", default=False):
        bool,
        Optional("windows_template_dir", default="/etc/cobbler/windows"):
        str,
        Optional("samba_distro_share", default="DISTRO"):
        str,
    },
    ignore_extra_keys=False)
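
With `ignore_extra_keys=False` (also the library default), any settings key not
declared above is a hard error; a minimal sketch of that failure mode:

from schema import Schema, SchemaError

strict = Schema({'server': str}, ignore_extra_keys=False)
try:
    strict.validate({'server': 'cobbler.example.org', 'sevrer_typo': True})
except SchemaError as exc:
    print(exc)  # Wrong key 'sevrer_typo' in {...}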