Example #1
def send_bytes(p: Player, c: Messageable, msg: List[str]) -> str:
    if len(msg) < 2:
        return 'Invalid syntax: !sbytes <name> <packetid>'

    content = ' '.join(msg)
    if not (re := re_match(_sbytes_re, content)):
        return 'Invalid syntax.'
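
The `_sbytes_re` pattern itself is not part of this snippet; the sketch below uses a hypothetical stand-in to show how the joined command string might be matched.

from re import compile as re_compile, match as re_match

# Hypothetical stand-in for _sbytes_re: a target name followed by one or more numeric packet IDs.
_sbytes_re = re_compile(r'^(\S+)((?: \d+)+)$')

content = 'someplayer 5 11 20'
if (m := re_match(_sbytes_re, content)):
    name = m.group(1)
    packet_ids = [int(pid) for pid in m.group(2).split()]
    print(name, packet_ids)  # someplayer [5, 11, 20]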
Example #2
def parse_args():
    description = "A tool to extract cellular and molecular identifiers " +\
        "from single cell RNA sequencing experiments"

    parser = ArgumentParser(description=description)

    parser.add_argument("-i", "--input", nargs=2,
            help="R1 and R2 from a paired end sequencing experiment")
    parser.add_argument("-o", "--output", required=True,
            help="Tagged unmapped BAM file (use '-' to output to stdout)")

    parser.add_argument("--pipeline", default="dropseq", choices=["dropseq", "scpipe"],
        help="Set output type depending on pipeline tool chosen")

    parser.add_argument("-s", "--summary-prefix",
        help="Prefix for summary files (including absolute or relative paths)")

    parser.add_argument("-c","--cores", type=int, default=1,
        help="Number of processing units (CPUs) to use (default=1)")

    parser.add_argument("--tag-bc", type=str, default="XC",
        help="Tag for single cell barcode (default=XC)")

    parser.add_argument("--tag-bc-q", type=str, default="XQ",
        help="Tag for single cell barcode base quality (default=XQ)")

    parser.add_argument("--tag-umi", type=str, default="XM",
        help="Tag for Unique Molecular Identifier (default=XM)")

    parser.add_argument("--tag-umi-q", type=str, default="Xq",
        help="Tag for Unique Molecular Identifier base quality (default=Xq)")

    parser.add_argument("--tag-error", type=str, default="XE",
        help="Tag for errors (default=XE)")

    parser.add_argument("--subset", type=int,
        help="Select a lower number of reads to analyze [debugging]")

    parser.add_argument("-v", '--version', action='version', version='%(prog)s 1.2.1')

    args = parser.parse_args()

    # check parameters
    if not (args.tag_bc != args.tag_umi != args.tag_error != args.tag_bc):
        logging.error("Tags provided with '--tag' must be all different.")
        sys.exit(1)

    tag_pattern = "^[A-Za-z][A-Za-z0-90]$"
    if not (re_match(tag_pattern, args.tag_bc) and re_match(tag_pattern, args.tag_umi) and re_match(tag_pattern, args.tag_error)):
        logging.error("Tags provided with '--tag' must be two-character strings matching /[A-Za-z][A-Za-z0-9]/")
        sys.exit(1)

    for x in args.input:
        ext = "".join(Path(x).suffixes)
        if ext != ".fastq.gz":
            logging.error("Invalid input file extensions '{}': '.fastq.gz' is required".format(ext))
            sys.exit(1)

    return args
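
The tag checks above lean on two details worth spelling out: the chained comparison `a != b != c != a` is pairwise distinctness for three values, and the two-character tag pattern is anchored on both ends. A standalone illustration, not part of the tool:

from re import match as re_match

tag_pattern = r"^[A-Za-z][A-Za-z0-9]$"
tags = ("XC", "XM", "XE")

# Equivalent to tags[0] != tags[1] and tags[1] != tags[2] and tags[2] != tags[0]
assert tags[0] != tags[1] != tags[2] != tags[0]
assert all(re_match(tag_pattern, tag) for tag in tags)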
Example #3
 def _process_start_state(self, str_request):
     if re_match(r"add source.*", str_request) is not None:
         self._state = SourceProcessorState.SETUP_SOURCE
         return self._process_adding_source_state(
             re_match(r"add source(.*)", str_request).group(1).strip())
     elif re_match(r"remove source.*", str_request) is not None:
         self._remove_source(str_request)
     return False
Example #4
 def _compareAttribute(self, node1, node2, attr):
     attr1 = node1.getAttribute(attr)
     attr2 = node2.getAttribute(attr)
     if attr2.startswith("regexp:"):
         return bool(re_match(attr2[7:], attr1))
     elif attr2.startswith("glob:"):
         return bool(re_match(glob_trans(attr2[5:]), attr1))
     else:
         return attr1 == attr2
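
The same prefix convention can be exercised outside the class; the sketch below assumes `glob_trans` behaves like `fnmatch.translate`, which is only a guess at the helper used above.

from fnmatch import translate as glob_trans  # assumed equivalent of the helper above
from re import match as re_match

def compare_attribute_value(actual, expected):
    # "regexp:" and "glob:" prefixes pick the matching strategy; anything else is a literal compare.
    if expected.startswith("regexp:"):
        return bool(re_match(expected[7:], actual))
    if expected.startswith("glob:"):
        return bool(re_match(glob_trans(expected[5:]), actual))
    return actual == expected

print(compare_attribute_value("frame0", "regexp:frame[0-9]"))  # True
print(compare_attribute_value("frame0", "glob:frame*"))        # True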
Example #5
  def get_outputs(self, input_data):
    # Step 0: check the type of input parameters
    if not isinstance(self.ns.host, str):
      print("The type of \"host\" must be str (string)!")
      raise IllegalArgumentException
    
    if not re_match("^[0-9localhost.:/]+$", self.ns.host):
      print("hostport does not match preseted character-set" )
      raise IllegalArgumentException
    
    if not isinstance(self.ns.port, int):
      print("The type of \"port\* must be int!")
      raise IllegalArgumentException

    if not isinstance(self.ns.model_name, str):
      print("the type of \"model_name\" must be str (string)!")
      raise IllegalArgumentException
        
    if not re_match("^[0-9A-Za-z_. \-/]+$", self.ns.model_name):
      print("model_name does not match preseted character-set" )
      raise IllegalArgumentException

    if not isinstance(input_data, dict):
      print("the type of \"input_data\" must be dict!")
      raise IllegalArgumentException
        
    if (not isinstance(MAX_RESPONSE_TIME, int)) and (not isinstance(MAX_RESPONSE_TIME, float)):
      print("the type of \"max_response_time\" must be int or float!")
      raise IllegalArgumentException

    # Setup connection
    channel = implementations.insecure_channel(self.ns.host, self.ns.port)
    stub = prediction_service_pb2.beta_create_PredictionService_stub(channel)
    
    # Initialize the request
    request = predict_pb2.PredictRequest()
    request.model_spec.name = self.ns.model_name
    request.model_spec.signature_name = self.ns.model_signature_name
    #request.model_spec.version = self.ns.model_version_num
    # Set the input variables of the request
    for key, value in input_data.items():
      if not re_match("^[0-9A-Za-z_. \-/]+$", key):
        print("model_name does not match preseted character-set" )
        raise IllegalArgumentException
      if isinstance(value, numpy_ndarray):
        request.inputs[key].CopyFrom(make_tensor_proto(value, shape=list(value.shape)))
      elif isinstance(value, int) or isinstance(value, float):
        request.inputs[key].CopyFrom(make_tensor_proto(value) )
      else:
        request.inputs[key].CopyFrom(make_tensor_proto(value, shape=list(value.shape)))
    
    # Obtain the result of prediction
    response = stub.Predict(request, MAX_RESPONSE_TIME)
    responseDict = None
    if PRINT_RESPONSE:
      responseDict = self.print_response(response)

    return responseDict
Example #6
    def request(self,
                vat_id: (str, NoneType),
                country_code: (str, NoneType) = '',
                bypass_ratelimit: bool = False):
        allowed_arg_types = (NoneType, str)
        vat_re = r'^([0-9A-Za-z]{2,12})$'
        country_code_re = r'^([A-Z]{2})$'

        if not isinstance(vat_id, allowed_arg_types):
            raise TypeError(
                'vat_id should be either str, or NoneType, not %s' %
                type(vat_id))
        elif not isinstance(country_code, allowed_arg_types):
            raise TypeError(
                'country_code should be either str, or NoneType, not %s' %
                type(country_code))

        country_code = country_code or ''
        country_code = country_code.upper()

        if country_code in self.COUNTRY_CODE_ALIASES:
            country_code = self.COUNTRY_CODE_ALIASES[country_code]

        vat_id = vat_id.strip().upper() if vat_id else ''
        vat_id = ''.join([c for c in vat_id if c not in '\n\t -'])

        request = ViesRequest(vat_id, country_code)

        if len(vat_id) < 2:
            request.error = 'vat_id (%s) should be at least 2 characters long' % vat_id
        elif country_code and vat_id[:2] == country_code:
            vat_id = vat_id[2:]
        elif not country_code:
            country_code, vat_id = vat_id[:2], vat_id[2:]

        if request.error:
            request.is_valid = False
            return request

        if not re_match(vat_re, vat_id):
            request.error = "vat_id '%s' doesn't match the pattern '%s'" % (
                vat_id, vat_re)
        elif not re_match(country_code_re, country_code):
            request.error = "country_code '%s' doesn't match the pattern '%s'" % (
                country_code, country_code_re)
        elif country_code not in self.EU_COUNTRY_CODES:
            request.error = 'unsupported country code: "%s"' % country_code

        if request.error:
            request.is_valid = False
            return request

        request.country_code = country_code
        request.vat_id = vat_id
        request.post(bypass_ratelimit)

        return request
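
The normalisation and validation steps in `request()` can be reduced to a small standalone helper; this is a sketch of the same regex checks, not the class's API.

from re import match as re_match

VAT_RE = r'^([0-9A-Za-z]{2,12})$'
COUNTRY_CODE_RE = r'^([A-Z]{2})$'

def split_vat(raw):
    # Strip separators, upper-case, then peel off the leading two-letter country code.
    vat_id = ''.join(c for c in raw.strip().upper() if c not in '\n\t -')
    country_code, vat_id = vat_id[:2], vat_id[2:]
    if not (re_match(COUNTRY_CODE_RE, country_code) and re_match(VAT_RE, vat_id)):
        raise ValueError('malformed VAT identifier: %r' % raw)
    return country_code, vat_id

print(split_vat(' de 1234-567-89 '))  # ('DE', '123456789')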
Example #7
def pull_remote_info(some_list):
    pattern_full = r'^.+@.+'
    pattern_host = r'^.+:.*'
    for element in some_list:
        remote_full = re_match(pattern_full, element)
        remote_host = re_match(pattern_host, element)
        if remote_full or remote_host:
            return element
    return ''
Example #8
 def route_for_task(self, task, args=None, kwargs=None):
     parts = task.split('.')
     if re_match(r'^mp[a-z_]+\.sync\.[a-z_]+$', task) is not None:
         return {
             'routing_key': task,
             'queue': parts[0] + '.sync',
             }
     elif re_match(r'^mp[a-z_]+\.async\.[a-z_]+$', task) is not None:
         return {
             'routing_key': task,
             'queue': parts[0] + '.async',
             }
     return None
Example #9
    def create_user(self, first, last, gender="M", username=None, password=None, role="Provider"):
        url = self.base + USER_FORM_URL

        if not first:
            raise CreateUserException("Given name can't be blank")
        first_names = first.split()

        if not last:
            raise CreateUserException("Family name can't be blank")

        if not username:
            username = "".join([c[0] for c in first_names] + [last]).lower()

        if not password:
            password = []
            for i in range(0, 3):
                password += [random.choice(string.digits), random.choice(string.lowercase)]
            random.shuffle(password)
            password = "".join(password)
        else:
            if (
                len(password) < 6
                # re_match anchors at the start of the string, so prefix .* to test the whole password
                or not (re_match(r".*\d", password) and re_match(r".*[a-zA-Z]", password))
                or re_match(r".*\s", password)
            ):
                raise CreateUserException(
                    "Password must be at least six " "characters and contain at least " "one letter and number"
                )

        params = {}
        if len(first_names) > 1:
            params.update({"names[0].middleName": first_names.pop().title()})

        params.update(
            {
                "names[0].givenName": " ".join(first_names).title(),
                "names[0].familyName": last.title(),
                "gender": gender,
                "username": username.lower(),
                "userFormPassword": password,
                "confirm": password,
                "roleStrings": role,
            }
        )

        data = urlencode(params)
        response = self.opener.open(url, data).read()
        if response.find("Username or System Id taken") != -1:
            raise CreateUserException("Username taken")
Example #10
def versions_from_file(filename):
    versions = {}
    try:
        with open(filename) as f:
            for line in f.readlines():
                mo = re_match("version_version = '([^']+)'", line)
                if mo:
                    versions['version'] = mo.group(1)
                mo = re_match("version_full = '([^']+)'", line)
                if mo:
                    versions['full'] = mo.group(1)
    except EnvironmentError:
        return {}

    return versions
Example #11
def versions_from_file(filename):
    versions = {}
    try:
        with open(filename) as f:
            for line in f.readlines():
                mo = re_match("version_version = '([^']+)'", line)
                if mo:
                    versions['version'] = mo.group(1)
                mo = re_match("version_full = '([^']+)'", line)
                if mo:
                    versions['full'] = mo.group(1)
    except EnvironmentError:
        return {}

    return versions
Example #12
def import_path(path):
    # type: (str) -> Any
    """
    Import object from a full import path.

    .. code:: python

        >>> from objetto.utils.lazy_import import import_path

        >>> import_path("abc|abstractmethod")
        <function abstractmethod at ...>

    :param path: Import path.
    :type path: str

    :return: Imported object.

    :raises ValueError: Invalid or empty path.
    :raises AttributeError: No object with the provided name.
    """
    if "|" not in path:
        if not path:
            error = "can't import from empty path"
        else:
            error = (
                "import path '{}' does not specify a module name (missing '|' "
                "separator between module path and qualified name)"
            ).format(path)
        raise ValueError(error)
    elif path.startswith("."):
        error = "import path '{}' is not absolute".format(path)
        raise ValueError(error)

    match = re_match(PRE_IMPORT_PATH_VALIDATION_REGEX, path) and re_match(
        IMPORT_PATH_REGEX, path)
    if not match:
        error = "invalid import path '{}'".format(path)
        raise ValueError(error)

    module, qual_name = match.groups()
    name_parts = qual_name.split(".")
    module_obj = __import__(module, fromlist=[name_parts[0]])

    obj = module_obj
    for name_part in name_parts:
        obj = getattr(obj, name_part)

    return obj
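
`PRE_IMPORT_PATH_VALIDATION_REGEX` and `IMPORT_PATH_REGEX` are module-level constants that are not included in this snippet; the stand-ins below are only plausible guesses that satisfy the `match.groups()` call above.

from re import match as re_match

# Hypothetical stand-ins for the constants referenced above.
PRE_IMPORT_PATH_VALIDATION_REGEX = r'^[^|]+\|[^|]+$'  # exactly one '|' separator
IMPORT_PATH_REGEX = r'^([\w.]+)\|([\w.]+)$'           # captures (module, qualified name)

path = 'collections|OrderedDict'
match = re_match(PRE_IMPORT_PATH_VALIDATION_REGEX, path) and re_match(IMPORT_PATH_REGEX, path)
module, qual_name = match.groups()
print(module, qual_name)  # collections OrderedDict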
Example #13
def b64decode(s, altchars=None, validate=False):
    """Decode bytes encoded with the standard Base64 alphabet.

    Argument ``s`` is a :term:`bytes-like object` or ASCII string to
    decode.

    Optional ``altchars`` must be a :term:`bytes-like object` or ASCII
    string of length 2 which specifies the alternative alphabet used instead
    of the '+' and '/' characters.

    If ``validate`` is ``False`` (the default), characters that are neither in
    the normal base-64 alphabet nor the alternative alphabet are discarded
    prior to the padding check.
    If ``validate`` is ``True``, these non-alphabet characters in the input
    result in a :exc:`binascii.Error`.

    The result is returned as a :class:`bytes` object.

    A :exc:`binascii.Error` is raised if ``s`` is incorrectly padded.
    """
    if version_info < (3, 0):
        s = _get_bytes(s)
        if altchars is not None:
            altchars = _get_bytes(altchars)
            assert len(altchars) == 2, repr(altchars)
            s = s.translate(maketrans(altchars, b'+/'))
        if validate and not re_match(b'^[A-Za-z0-9+/]*={0,2}$', s):
            raise BinAsciiError('Non-base64 digit found')
        try:
            return builtin_decode(s, altchars)
        except TypeError as e:
            raise BinAsciiError(str(e))
    return builtin_decode(s, altchars, validate)
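
A quick check of the validation regex used in the Python 2 branch above, assuming `builtin_decode` is the stdlib `base64.b64decode`; this only illustrates the `validate` semantics described in the docstring.

from base64 import b64decode as builtin_decode
from re import match as re_match

for payload in (b'aGVsbG8=', b'aGVsbG8=\n'):  # the second contains a non-alphabet newline
    ok = bool(re_match(b'^[A-Za-z0-9+/]*={0,2}$', payload))
    print(payload, 'valid' if ok else 'rejected', builtin_decode(payload) if ok else None)
# b'aGVsbG8=' valid b'hello'
# b'aGVsbG8=\n' rejected None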
Example #14
    def __contains(pattern, doc):
        def __find(p, d):
            for k in d:
                if re_match(p, k):
                    return k
                #
            raise AssertionError()

        #
        if isinstance(doc, dict):
            if isinstance(pattern, dict):
                for p in pattern:
                    VarSearch.__contains(pattern[p], doc[__find(p, doc)])
            elif isinstance(pattern, list):
                for p in pattern:
                    VarSearch.__contains({'.*': '.*'}, doc[__find(p, doc)])
            else:
                VarSearch.__contains({'.*': '.*'}, doc[__find(pattern, doc)])
        elif isinstance(doc, list):
            for d in doc:
                VarSearch.__contains(pattern, d)
            #
        else:
            if not re_match(pattern, doc):
                raise AssertionError()
Example #15
    def profile_template(cls) -> Dict[Text, Any]:
        def sequence_closure(
                sequence: Union[List,
                                Tuple]) -> Union[Dict[Text, Any], List[Any]]:
            if len(sequence) > 0:
                if isinstance(sequence[0], Definition):
                    return dict(map(closure, sequence))
                elif isinstance(sequence[0], list) or isinstance(
                        sequence[0], tuple):
                    return list(map(sequence_closure, sequence))
                elif sequence[0] == int:
                    return [0]
                elif sequence[0] == float:
                    return [0.0]
                elif sequence[0] == bool:
                    return [True]
                elif sequence[0] == str:
                    return ['']
                else:
                    return []
            else:
                return []

        def closure(definition: Definition) -> Tuple[Text, Any]:
            if definition.type == str:
                return definition.name, ''
            elif definition.type == int:
                return definition.name, 0,
            elif definition.type == float:
                return definition.name, 0.0
            elif definition.type == bool:
                return definition.name, True
            elif definition.type == list or definition.type == tuple:
                if definition.children is not None and len(
                        definition.children) > 0:
                    if isinstance(definition.children[0], Definition):
                        return definition.name, dict(
                            map(closure, definition.children))
                    elif isinstance(definition.children[0],
                                    list) or isinstance(
                                        definition.children[0], tuple):
                        return definition.name, list(
                            filter(lambda it: len(it) > 0,
                                   map(sequence_closure, definition.children)))
                    return '_', None
                else:
                    return definition.name, []
            else:
                return '_', None

        class_str = str(cls)
        schema = class_str[slice(
            *re_match(r'.*\'([a-zA-Z._]+)\'.*', class_str).regs[-1])]
        if schema.startswith('abc.'):
            schema = schema[4:]
        template = OrderedDict({'__schema__': schema})
        template.update(
            filter(lambda it: it[0] != '_' and it[1] != [],
                   map(closure, cls.define())))
        return template
Example #16
 def _glob_match(self, pattern, string):
     """
     Match given string, by escaping regex characters
     """
     # regex flags Multi-line, Unicode, Locale
     return bool(re_match(glob_trans(pattern), string,
                          re.M | re.U | re.L))
Example #17
    def retrieve_credentials(self, request, token):
        # Try to retrieve and verify captcha
        self.verify_captcha(request)

        email = request.POST['vltrgstremail']

        # Verify email format:
        if '"' in email or "'" in email or not re_match(
                ".*@.*\..{2,4}", email):
            raise CredentialsError("Bad format for email : '{}'".format(email))

        # Verify if the user already exists in application repository
        try:
            backend = self.workflow.repository
            user_infos = self.search_user_by_mail(email, backend)

            if not user_infos:
                raise UserNotFound()
            else:
                raise UserAlreadyExistsError(
                    "REGISTER::search_user: User '{}' already found on repository '{}' "
                    "with email '{}'".format(user_infos['user'],
                                             backend.repo_name, email))
        except (User.DoesNotExist, UserNotFound) as e:
            pass

        # Verify if an email has already been sent
        for key in self.redis_base.keys("registration_*"):
            if self.redis_base.hget(key, "email") == email:
                raise CredentialsError(
                    "The registration key has already been sent.")

        return email
Example #18
def get_platform():

    from os.path import exists
    from re import match as re_match

    platforms = []
    arch = run_output('uname', '-m').replace('686', '386')
    platforms.append(arch)

    if exists('/etc/debian_version'):
        platforms.append('debian')

    if exists('/etc/redhat-release'):
        platforms.append('redhat')

        with open('/etc/redhat-release') as release_file:
            version = release_file.read().strip()
        badge = r'Red Hat Enterprise Linux Server release (\d+)\.(\d+)'
        match = re_match(badge, version)
        if match:
            platforms.append('rhel%s' % match.group(1))

    if exists('/etc/arch-release'):
        platforms.append('archlinux')

    verbose('PLATFORMS: %s' % ",".join(platforms))
    return platforms
Example #19
    def filter_droplets(self, matcher=None):
        """
        Basic droplet filter helper.
        Filters out droplets which pass a substring match on the name
        for the provided matcher.
        Matcher defaults to empty string and returns all instances
        :param matcher: Token to match droplet names against.
        :type  matcher: basestring
        :rtype: list<Droplet>
        """
        if matcher is None:
            return self.droplets

        if not isinstance(matcher, (int, basestring)):
            raise InvalidArgumentError(
                "Method requires a string filter token or droplet ID")

        if isinstance(matcher, int):
            return [x for x in self.droplets if x.id == matcher]

        # See if a Droplet ID is passed in (an integer) and filter
        # based on ID.
        try:
            _id = literal_eval(matcher)
            return [x for x in self.droplets if x.id == _id]
        except (TypeError, ValueError):
            matcher = re_compile(".*?{0}.*?".format(matcher))
            return [x for x in self.droplets
                    if re_match(matcher, x.name) is not None]
Example #20
    def __init__( self, data, b0_thr = 0 ) :
        """Initialize the acquisition scheme.

        Parameters
        ----------
        data : string or numpy.ndarray
            The filename of the scheme or a matrix containing the actual values
        b0_thr : float
            The threshold on the b-values to identify the b0 images (default: 0)
        """
        if type(data) is str :
            # try loading from file
            try :
                n = 0 # headers lines to skip to get to the numeric data
                with open(data) as fid :
                    for line in fid :
                        if re_match( r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?', line.strip() ) :
                            break
                        n += 1
                tmp = np.loadtxt( data, skiprows=n )
            except :
                raise IOError( 'Unable to open scheme file' )
            self.load_from_table( tmp, b0_thr )
        else :
            # try loading from matrix
            self.load_from_table( data, b0_thr )
Example #21
 def _stem_with_duplicate_character_check(word, del_len):
     if word[-1] == 's':
         del_len += 1
     stemmed_word = word[:-del_len]
     if re_match(r'.*(\w)\1$', stemmed_word):
         stemmed_word = stemmed_word[:-1]
     return stemmed_word
Example #22
    def check_barcode(bc, code):
        """
        This method checks if the barcode is in the proper format.
        The validation concerns the barcode length and the set of characters, but won't compute/validate any checksum.
        The full set of requirement for each barcode type is available in the ESC/POS documentation.

        As an example, using EAN13, the barcode `12345678901` will be correct, because it can be rendered by the
        printer. But it does not suit the EAN13 standard, because the checksum digit is missing. Adding a wrong
        checksum in the end will also be considered correct, but adding a letter won't (EAN13 is numeric only).

        .. todo:: Add a method to compute the checksum for the different standards

        .. todo:: For fixed-length standards with mandatory checksum (EAN, UPC),
            compute and add the checksum automatically if missing.

        :param bc: barcode format, see :py:func`~escpos.Escpos.barcode`
        :param code: alphanumeric data to be printed as bar code, see :py:func`~escpos.Escpos.barcode`
        :return: bool
        """
        if bc not in BARCODE_FORMATS:
            return False

        bounds, regex = BARCODE_FORMATS[bc]
        return any(bound[0] <= len(code) <= bound[1]
                   for bound in bounds) and re_match(regex, code)
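
`BARCODE_FORMATS` is defined elsewhere in the library; the entry below only mirrors the shape implied by the code (length bounds plus a regex) and is not the real table.

from re import match as re_match

# Hypothetical entry shaped like the lookup used above: ([(min_len, max_len), ...], regex).
BARCODE_FORMATS = {
    'EAN13': ([(12, 13)], r'^[0-9]+$'),
}

def check_barcode(bc, code):
    if bc not in BARCODE_FORMATS:
        return False
    bounds, regex = BARCODE_FORMATS[bc]
    return any(lo <= len(code) <= hi for lo, hi in bounds) and bool(re_match(regex, code))

print(check_barcode('EAN13', '4006381333931'))  # True: 13 digits
print(check_barcode('EAN13', '40063813339A1'))  # False: contains a letter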
Example #23
 def _glob_match(self, pattern, string):
     """
     Match given string, by escaping regex characters
     """
     # regex flags Multi-line, Unicode, Locale
     return bool(re_match(glob_trans(pattern), string,
                          re.M | re.U | re.L))
Example #24
 def _stem_with_duplicate_character_check(word, del_len):
     if word[-1] == 's':
         del_len += 1
     stemmed_word = word[:-del_len]
     if re_match(r'.*(\w)\1$', stemmed_word):
         stemmed_word = stemmed_word[:-1]
     return stemmed_word
Example #25
 def _parse_sanitizer(cls, input_line):
     if "#" not in input_line:
         return None
     m = cls._re_sanitizer.match(input_line)
     if m is None:
         return None
     sframe = cls(mode=cls.MODE_SANITIZER, stack_line=m.group("num"))
     input_line = m.group("line")
     # check if line is symbolized
     if m.group("in"):
         # find function/method name
         m = cls._re_func_name.match(input_line)
         if m is not None:
             sframe.function = m.group("func")
     if input_line.startswith("("):
         input_line = input_line.strip("()")
     # find location (file name or module) and offset (line # or offset)
     offset = re_match(r"(.+?)(\:([0-9a-f]+)|\+(0x[0-9a-f]+)).*",
                       input_line)
     if offset:
         sframe.location = basename(offset.group(1))
         sframe.offset = offset.group(3) or offset.group(4)
     else:
         sframe.location = input_line
     return sframe
Example #26
def do_get_blog_tag_posts(parser, token):
    """
    Get the blog Posts for a specified tag and store it in a context variable.
    
    Usage::

      {% get_blog_tag_posts [tag_name] as [varname] %}
    
    tag should be a variable or a quoted string

    Example::
    
      {% get_blog_tag_posts "django" as blog_posts %}
    
    """
    try:
        tag_name, arg = token.contents.split(None, 1)
    except ValueError:
        raise template.TemplateSyntaxError(
            '%r tag requires arguments' % token.contents.split()[0])
    m = re_match(r'(.*?) as (\w+)', arg)
    if not m:
        raise template.TemplateSyntaxError(
            '%r tag had invalid arguments' % tag_name)
    tag, var_name = m.groups()
    return BlogTagPostsNode(tag, var_name)
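
The argument handling comes down to one regular expression; a minimal check of how `(.*?) as (\w+)` splits the tag token from the context variable name:

from re import match as re_match

arg = '"django" as blog_posts'
m = re_match(r'(.*?) as (\w+)', arg)
tag, var_name = m.groups()
print(tag, var_name)  # "django" blog_posts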
Example #27
def parse_option(args, spec, options={}, header='', footer=''):
    from re import match as re_match
    from getopt import getopt, GetoptError
    help_message = header
    short_opt, long_opt, opt_cond = '', [], {}
    for line in spec.splitlines():
        match = re_match(r'([a-z])\|(\w+)([:=])(.+)', line)
        if not match:
            continue
        opt_s, opt_l, opt_v, opt_h = match.groups()
        help_message = (help_message +
                        '\n-%s, --%s\t\t%s' % (opt_s, opt_l, opt_h))
        opt_cond['-' + opt_s] = opt_l
        opt_cond['--' + opt_l] = opt_l
        if opt_v == '=':
            opt_s = opt_s + ':'
            opt_l = opt_l + '='
        short_opt = short_opt + opt_s
        long_opt.append(opt_l)

    help_message = help_message + '\n' + footer
    try:
        opts, args = getopt(args, short_opt, long_opt)
        for optname, optval in opts:
            if optname in opt_cond:
                options[opt_cond[optname]] = optval
        return options
    except GetoptError as err:
        from sys import stderr, exit
        print('ERROR:', str(err), file=stderr)
        print('\n', help_message, '\n', file=stderr)
        exit(1)
Example #28
    def validate_userule_syntax(self, snakefile: TokenIterator):
        identifier = r"[a-zA-Z_]\S*"
        use_syntax_regexp = (
            r"use rule (?:(?:{id})|\*)"
            r"(?: from {id})?(?: as {id})?( with[ ]?:)?$".format(
                id=identifier))
        use_ebnf_syntax = ('"use" "rule" (identifier | "*") '
                           '"from" identifier ["as" identifier] ["with" ":"]')
        while not is_newline(self.token):
            if self.token.type == tokenize.COMMENT:
                break
            # Tokenizing splits up '<identifier>*' into two tokens
            if self.token.string != "*":
                self.keyword_line += " "
            self.keyword_line += self.token.string
            try:
                self.token = next(snakefile)
            except StopIteration:
                break

        self.keyword_line = self.keyword_line.replace("use rule*",
                                                      "use rule *").replace(
                                                          "as*", "as *")
        match = re_match(use_syntax_regexp, self.keyword_line)
        if match is None:
            raise SyntaxFormError(self.line_nb, self.keyword_line, use_ebnf_syntax)
        if match.groups()[0] is None:
            self.enter_context = False
        else:
            # Gets added at formatting
            self.keyword_line = self.keyword_line.rstrip(": ")
Example #29
    def _parse_core(self, path: OMPath) -> Tuple[OMCore, int]:
        """Parse a single core.

           :param path: the path to the node
        """
        node = path.node
        hartids = node['hartIds']
        if len(hartids) != 1:
            raise ValueError(f'HartIds not handled {hartids}')
        isa = node['isa']
        exts = []
        xlens = set()
        iset = None
        for kname, value in isa.items():
            if kname == 'baseSpecification':
                continue
            if kname == 'xLen':
                xlens.add(value)
            if isinstance(value, dict):
                types = value.get('_types', None)
                if not types:
                    continue
                if 'OMSpecification' in types:
                    exts.extend(kname)
                elif 'OMBaseInstructionSet' in types:
                    iset = types[0]
                    imo = re_match(r'RV(?P<xlen>\d+)(?P<ext>\w+)', iset)
                    if not imo:
                        raise ValueError(f'Unsupported type: {iset}')
                    xlens.add(int(imo.group('xlen')))
                    exts.extend(imo.group('ext').lower())
        if len(xlens) != 1:
            raise ValueError(f'xLen issue detected: {xlens}')
        core = OMCore(xlens.pop(), ''.join(exts))
        return core, hartids[0]
Example #30
    def __init__( self, data, b0_thr = 0 ) :
        """Initialize the acquisition scheme.

        Parameters
        ----------
        data : string or numpy.ndarray
            The filename of the scheme or a matrix containing the actual values
        b0_thr : float
            The threshold on the b-values to identify the b0 images (default: 0)
        """
        if type(data) is str :
            # try loading from file
            try :
                n = 0 # headers lines to skip to get to the numeric data
                with open(data) as fid :
                    for line in fid :
                        if re_match( r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?', line.strip() ) :
                            break
                        n += 1

                tmp = np.loadtxt( data, skiprows=n )

            except :
                ERROR( 'Unable to open scheme file' )

            self.load_from_table( tmp, b0_thr )
        else :
            # try loading from matrix
            self.load_from_table( data, b0_thr )
Example #31
    def list_archive(self, archive_name):
        """
        function calls << 7z l -slt -ba {container_path}>> function, the
        output of which looks something like ...
            Path = first_file.py
            Size = 123

            Path = second_file.py
            Size = 123
        extract the key and the value from the output
        """

        # Parse input
        container_path = self.clean_filename(archive_name)

        path_args = format_path_args(container_path)
        command = self.get_command('list', path_args=path_args)
        output = execute_subprocess(command)

        file_list = [{}]
        for line in output.strip().splitlines():
            if not line:
                file_list.append({})
                continue
            file_item = file_list[-1]

            key = re_match(r"^\w+", line)[0].lower()
            value = re_sub(r"^\w+\s=\s", "", line)
            file_item.update({key: value})

        return [f for f in file_list if f]
Example #32
def is_numeric(val):
    if isinstance(val, int) or isinstance(val, float):
        return True
    else:
        if re_match("^\d+?\.\d+?$", val) is None:
            return val.isdigit()
        return True
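
A few sample calls showing what the anchored `^\d+?\.\d+?$` pattern accepts: decimal strings pass through the regex, plain integer strings fall back to `str.isdigit`, and everything else (including scientific notation) is rejected.

from re import match as re_match

for sample in ("2.5", "42", "abc", "1e3"):
    decimal = re_match(r"^\d+?\.\d+?$", sample) is not None
    print(sample, decimal or sample.isdigit())
# 2.5 True / 42 True / abc False / 1e3 False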
Example #33
def check_for_match(pattern, some_list):
    match_list = []
    for element in some_list:
        match = re_match(pattern, element)
        if match:
            match_list.append(element)
    return " ".join(match_list)
Example #34
	def get_timeshift_buffer_secs(self):
		timeshift_buffer = mpd_parser.get_attrib(self.mpd_tree, 'timeShiftBufferDepth')
		if timeshift_buffer is not None:
			from re import match as re_match
			matches = re_match(r'PT(\d+)S', timeshift_buffer)
			if matches is not None and len(matches.groups()) == 1:
				return int(matches.group(1))
		return None
Example #35
def o_b58(r160, magicbyte=0):
    """ Base58 encoding w leading zero compact
    """
    from re import match as re_match
    inp_fmtd = chr(int(magicbyte)) + r160
    leadingzbytes = len(re_match('^\x00*', inp_fmtd).group(0))
    checksum = hashlib.sha256(hashlib.sha256(inp_fmtd).digest()).digest()[:4]
    return '1' * leadingzbytes + encode(decode(inp_fmtd + checksum, 256), 58, 0)
Example #36
def base58Encode(r160, magicbyte=0, prefix=1, length=0):
	""" Base58 encoding w leading zero compact
	"""
	from re import match as re_match
	inp_fmtd = chr(int(magicbyte if magicbyte < 255 else 255)) + r160
	leadingzbytes = len(re_match('^\x00*', inp_fmtd).group(0))
	checksum = hashlib.sha256(hashlib.sha256(inp_fmtd).digest()).digest()[:4]
	return str(prefix) * leadingzbytes + enc.encode(enc.decode(inp_fmtd + checksum, 256), 58, 0)
Example #37
 def get_classname(clazz: Type):
     python_class_name_string = str(clazz)
     pattern = r""".* \'(?P<classname>.*)\'.*"""
     result = re_match(pattern, python_class_name_string)
     if result is None:
         return None
     name = result['classname']
     return name
Example #38
def rem_user_parse(some_list):
    pattern = r'^\w+[^\:\.\,\@]*'
    remote_info = pull_remote_info(some_list)
    if '@' in remote_info:
        user = re_match(pattern, remote_info)
        if user:
            return user.group()
    return ''
Example #39
def extract(fp):
    fcs = {}
    name = None
    for line in read_file(fp):
        if re_match(r'^\s*#', line) and not re_match(r'^\s*##[ACDE]\s+', line):
            continue
        if name:
            if re_match(r'^\s*}\s*$', line):
                fcs[name]['C'] = san(fcs[name].get('C', name))
                name = None
                continue
            mtch = re_match(r'^\s*##C\s+(.+)$', line)
            if mtch:
                fcs[name]['C'] = '{} {}'.format(name, mtch.group(1))
                continue
            mtch = re_match(r'^\s*##(A|D|E)\s+(.+)$', line)
            if mtch:
                typ = mtch.group(1)
                msg = mtch.group(2)
                if typ == 'A':
                    mtch = re_match(r'^(.+?)=(.+)$', msg)
                    arg = san(mtch.group(1))
                    msg = san(mtch.group(2))
                    fcs[name].setdefault(typ, {})[arg] = msg
                elif typ == 'D':
                    fcs[name].setdefault(typ, []).append(san(msg))
                else:  # E
                    fcs[name].setdefault(typ, []).append(san(msg, 'simple'))
                continue
        else:
            mtch = re_match(r'^(\w+)\(\)\s+{\s*$', line)
            if mtch:
                name = mtch.group(1)
                fcs[name] = {}
    return fcs
Example #40
 def validate_seed(self, seed):
     regex = r'^[\w\s]+$'
     if bool(re_match(regex, self.seed.data)) is False:
         raise ValidationError(
             'Invalid seed provided; must be alphanumeric characters only')
     if len(self.seed.data.split()) != 25:
         raise ValidationError(
             "Invalid seed provided; must be standard Wownero 25 word format"
         )
Example #41
def base58Encode(r160, magicbyte=0, prefix=1, length=0):
    """ Base58 encoding w leading zero compact
	"""
    from re import match as re_match
    inp_fmtd = chr(int(magicbyte if magicbyte < 255 else 255)) + r160
    leadingzbytes = len(re_match('^\x00*', inp_fmtd).group(0))
    checksum = hashlib.sha256(hashlib.sha256(inp_fmtd).digest()).digest()[:4]
    return str(prefix) * leadingzbytes + enc.encode(
        enc.decode(inp_fmtd + checksum, 256), 58, 0)
Example #42
def syntax_check(email: str) -> bool:
    """
    function brought from verify-email library
    :param email: email for check
    :return: True if email is possible else false
    """
    if re_match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", email):
        return True
    return False
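
A couple of sample calls against the same pattern; this is only an illustration, since the regex accepts most common addresses but is not a full RFC 5322 validator.

from re import match as re_match

EMAIL_RE = r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)"

for candidate in ("user.name+tag@example.co", "not-an-email", "user@host"):
    print(candidate, bool(re_match(EMAIL_RE, candidate)))
# user.name+tag@example.co True / not-an-email False / user@host False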
Example #43
    def strip_url(url):
        split_results = urlsplit(url)
        path = split_results.path

        path_match = re_match("/(sales|lettings)/(.*?)/(\d*)/?", path)
        category, tag, prop_id = path_match.groups() \
                if path_match \
                else ("", "", "")

        return category, tag, prop_id
Example #44
 def normalise_email(self):
     """
     Normalise the email address provides into an account URL
     """
     url = urlparse(self.request_email, "acct")
     if url.scheme != "acct":
         raise TypeError()
     self.request_email = url
     match = re_match(".*\@(.*)", url.path)
     self.hostmeta = HostMeta(match.group(1))
Example #45
def match_regexp(transition, event, _token, _fsa):
    """
    The 'regexp' matcher.

    With this matcher,
    transition conditions are interpreted as regular expressions.
    Note that the *whole* event must match the regular expression
    (this is ensured by automatically prepending ``^`` and appending ``$`` to the condition).
    """
    return re_match('^%s$' % transition['condition'], event)
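
A small check of the anchoring described in the docstring, using a hypothetical transition dictionary:

from re import match as re_match

transition = {'condition': r'go+al'}  # hypothetical FSA transition
for event in ('gooooal', 'goal!'):
    # Same anchoring as match_regexp above: the whole event must match the condition.
    print(event, bool(re_match('^%s$' % transition['condition'], event)))
# gooooal True / goal! False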
Example #46
def event_nonum_arg_count(e):
    """Given an EventAnnotation, returns a dictionary containing for each of
    its argument without trailing numbers (e.g. "Theme1" -> "Theme") the number
    of times the argument appears."""
    from re import match as re_match

    nnc = {}
    for arg, aid in e.args:
        m = re_match(r'^(.*?)\d*$', arg)
        if m:
            arg = m.group(1)
        nnc[arg] = nnc.get(arg, 0) + 1
    return nnc
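
A quick look at how the `^(.*?)\d*$` pattern strips only trailing digits from argument names:

from re import match as re_match

for arg in ('Theme1', 'Theme12', 'Cause', 'Site2a'):
    print(arg, '->', re_match(r'^(.*?)\d*$', arg).group(1))
# Theme1 -> Theme / Theme12 -> Theme / Cause -> Cause / Site2a -> Site2a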
Example #47
def event_nonum_args(e):
    """Given an EventAnnotatation, returns its arguments without trailing
    numbers (e.g. "Theme1" -> "Theme")."""
    from re import match as re_match

    nna = {}
    for arg, aid in e.args:
        m = re_match(r'^(.*?)\d*$', arg)
        if m:
            arg = m.group(1)
        if arg not in nna:
            nna[arg] = []
        nna[arg].append(aid)
    return nna
Example #48
def main():

    opts = parseCli()
    rmap = parseParams(opts)
    file_path = rmap['config']['file_dir']
    if not isdir(file_path):
        print("dir %s does not exist!!" % file_path)
        sys.exit(0)

    file_list = os.listdir(file_path)
    for the_file in file_list:
        if re_match(r'.*\.bak$', the_file):
            continue
        if re_match(r'adduser.*', the_file):
            add_list = loadXlsx((file_path + the_file))
            challengeDB((file_path + the_file), rmap['radcheck']['insert'], add_list, **rmap['sqlconn'])
        if re_match(r'deluser.*', the_file):
            del_list = map(lambda x: x['username'],
                            loadXlsx((file_path + the_file)))
            challengeDB((file_path + the_file), rmap['radcheck']['insert'], del_list, **rmap['sqlconn'])
        if re_match(r'updateuser.*', the_file):
            update_list = loadXlsx((file_path + the_file))
            challengeDB((file_path + the_file), rmap['radcheck']['insert'], update_list, **rmap['sqlconn'])
Example #49
def parseParams(opts):

    found = opts.conf_Path

    if not found:
        print "configuration directory is not exit!"
        sys.exit(0)

    recipe = found
    trmap = dict()
    for root, dirs, files in os.walk(recipe):
        for filespath in files:
            if re_match('.*ml$', filespath):
                filename = re_search(r'(.*)\..*ml$', filespath).group(1)
                trmap[filename] = expYaml(os.path.join(root, filespath))

    return trmap
Example #50
def parseParams(config_Path):

    found = [x for x in (config_Path, '/etc/secdd/conf') if isdir(x)]

    if not found:
        print("configuration directory does not exist!")
        sys_exit(0)

    recipe = found[0]
    trmap = dict()
    for root, dirs, files in os.walk(recipe):
        for filespath in files:
            if re_match('.*ml$', filespath):
                trmap[filespath.split('.')[0]] = expYaml(os.path.join(root, filespath))
    
    return trmap
Example #51
 def findMovieDirs(self, path=None):
      if path is None or not os.path.isdir(path):
         self.log('Abort scanning! invalid dir: %s' % path)
         return False
     for f in os.listdir(path):
         m = {}
         m['rel_path']   = os.path.join( path, f )
         m['abs_path']   = os.path.abspath( m['rel_path'] )
         m['basename']   = os.path.basename( f )
         m['title']      = ''
         m['year']       = ''
         m['rated']      = ''
         if self.args.user_ignores and m['rel_path'] in self.args.user_ignores:
             self.log('Ignoring predefined ignored directory: %s' % m['rel_path'])
             continue
         if os.path.islink( m['abs_path'] ):
             if self.abs_path not in os.path.realpath( m['abs_path'] ):
                 self.log('Ignoring link: %s' % m['abs_path'] )
                 continue
         if not os.path.exists( m['abs_path'] ):
             self.log('Ignoring Inexistent: %s' % m['abs_path'] )
         elif not os.access( m['abs_path'], os.R_OK ):
             self.log('Ignoring Unreadable: %s' % m['abs_path'] )
         elif stat_S_ISSOCK(os.stat(m['abs_path']).st_mode):
             self.log('Ignoring Socket: %s' % m['abs_path'] )
         elif os.path.isfile( m['abs_path'] ):
             self.log('Ignoring File: %s' % m['abs_path'] )
         elif os.path.isdir( m['abs_path'] ):
             match = re_match(r'(.*) \((\d{4})\)', m['basename'])        # by: cyphase
             #match = re_match(r'^([^(]+) \((\d+)\)$', m['basename'])    # by: Francisco
             if match:
                 m['title'] = match.groups()[0]
                 m['year'] = match.groups()[1]
                 self.addMovie(m)
             else:
                 if not self.isMovie(m):
                     self.log('Confirmed not a movie: %s' % m['rel_path'])
                     self.ignored_dirs['not_movie'].append(m)
                 else:
                     self.log('Might be a movie: %s' % m['rel_path'])
                     self.maybe_movies.append(m)
                 self.findMovieDirs( m['rel_path'] )
         else:
              self.log('Ignoring Unknown type: %s' % f)
     return True
Example #52
def writeFile(root, in_file, dest):
    base, file = split(realpath(join(dest, in_file)))
    if not exists(base):
        os.makedirs(base)
    in_file = str(in_file)
    out_includes = []
    input_file = open(realpath(join(root, in_file)), 'r')
    output_file = open(realpath(join(dest, in_file)), 'wt')
    output_file.write('//Auto generated file. DO NOT EDIT!\n')
    output_file.write('option optimize_for=LITE_RUNTIME;\n')
    for line in input_file.readlines():
        match = re_match(r'\s*?import\s+?"(.+?)"', line)
        if match:
            out_includes.append(match.group(1))
        output_file.write(line)
    input_file.close()
    output_file.close()
    return out_includes
Example #53
def parseParams(config_path):

    found = [x for x in (config_path, '/etc/secdd/conf') if isdir(x)]

    if not found:
        print("configuration directory does not exist!")
        sys.exit(0)

    recipe = found[0]
    trmap = dict()
    for root, dirs, files in os.walk(recipe):
        for filespath in files:
            if re_match('.*ml$', filespath):
                filename = re_search(r'(.*)\..*ml$', filespath).group(1)
                trmap[filename] = expYaml(os.path.join(root, filespath))

    return trmap
Example #54
    def load_tests(_, tests, __):  # pylint: disable=redefined-outer-name,unused-argument
        finder = DocTestFinder(exclude_empty=False)

        for root_mod in roots:
            if isinstance(root_mod, ModuleType):
                root_mod_path, root_mod_name = root_mod.__file__, root_mod.__name__
            else:
                root_mod_path, root_mod_name = root_mod

            if splitext(basename(root_mod_path))[0] == "__init__":
                root_mod_path = dirname(root_mod_path)

            if isfile(root_mod_path):
                root_mod_iter = ((dirname(root_mod_path), None, (basename(root_mod_path),)),)
            else:
                root_mod_iter = os_walk(root_mod_path)

            for dir_name, _, file_names in root_mod_iter:
                if not re_match(re_escape(root_mod_path) + _PATH_RE, dir_name):
                    continue

                mod_name = dir_name[len(root_mod_path) :].replace(ospath_sep, ".").strip(".")

                if mod_name:
                    mod_name = root_mod_name + "." + mod_name
                else:
                    mod_name = root_mod_name

                for file_name in file_names:
                    if not file_name.endswith(".py"):
                        continue

                    if file_name == "__init__.py":
                        test_mod_name = mod_name
                    else:
                        test_mod_name = mod_name + "." + splitext(file_name)[0]

                    try:
                        tests.addTest(DocTestSuite(test_mod_name, test_finder=finder))
                    except Exception as err:  # pylint: disable=broad-except
                        _LOGGER.warning("unable to load doctests from %s (%s)", test_mod_name, err, exc_info=True)

        return tests
Example #55
 def check_answer(self, answer_str):
     if self.check == self.EQUALS_CHECK:
         ans = answer_str
         correct = self.flag
         if self.is_case_insensitive_check:
             ans = ans.lower()
             correct = correct.lower()
         if self.is_trimmed_check:
             ans = ans.strip()
             correct = correct.strip()
         return ans == correct
     elif self.check == self.REGEX_CHECK:
         ans = answer_str
         flags = 0
         if self.is_case_insensitive_check:
             flags |= re_I
         if self.is_trimmed_check:
             ans = ans.strip()
         return bool(re_match(self.flag, ans, flags))
     return False
Example #56
def _comment(header, filepath, pattern, align, width):
    # Generate header-comment and place it into the file
    with open(filepath, 'r+', encoding='utf-8') as file:
        # Capture INFO comments
        try:
            stream = file.read()
            match = re_match(pattern, stream)
            # If there was a match
            if match:
                opening, pad, closing = match.group('opening', 'pad', 'closing')
                file.seek(match.start('opening'))
                file.write(_OPENING.format(pad, width).format(opening))
                # Substitute variables with values
                for line in header.split('\n'):
                    file.write(_CONTENT.format(align, width - 6).format(pad, line))
                file.write(_CLOSING.format(pad, width).format(closing))
                # Write back content of file
                file.write(stream[match.end():])
                file.truncate()
                return True
        except UnicodeDecodeError:
            print('CLIC: cannot decode {!r}'.format(filepath))
Example #57
def search(_user):
    from pyaspora.diaspora.models import DiasporaContact
    term = request.args.get('searchterm', None) or \
        abort(400, 'No search term provided')
    if re_match('[A-Za-z0-9._]+@[A-Za-z0-9.]+$', term):
        try:
            DiasporaContact.get_by_username(term)
        except:
            current_app.logger.debug(format_exc())

    matches = db.session.query(Contact).outerjoin(DiasporaContact).filter(or_(
        DiasporaContact.username.contains(term),
        Contact.realname.contains(term)
    )).order_by(Contact.realname).limit(99)

    data = {
        'contacts': [json_contact(c, _user) for c in matches]
    }

    add_logged_in_user_to_data(data, _user)

    return render_response('contacts_search_results.tpl', data)
Example #58
def main():
    for unity_dir in args.directory:
        for root, dirs, files in os.walk(unity_dir):
            for excluded_dir in args.excludedirectory: 
                if excluded_dir in dirs:
                    dirs.remove(excluded_dir)
            for file in files:
                full_path = realpath(join(root, file))
                exclude = False
                for regex in args.excludefile:
                    if re_match(regex, full_path):
                        exclude = True
                        break

                if not exclude:
                    unity_file_list.append(full_path)

    output_path = join(args.output, args.name+".unity.cpp")
    output_file = open(output_path,'w')
    output_file.write('/** Auto generated file. Do not edit. **/\n')
    for unity_file in unity_file_list:
        output_file.write('#include "'+unity_file+'"\n')
    output_file.close()
Example #59
    def encode(self, word):
        """Return the Naval Research Laboratory phonetic encoding of a word.

        Parameters
        ----------
        word : str
            The word to transform

        Returns
        -------
        str
            The NRL phonetic encoding

        Examples
        --------
        >>> pe = NRL()
        >>> pe.encode('the')
        'DHAX'
        >>> pe.encode('round')
        'rAWnd'
        >>> pe.encode('quick')
        'kwIHk'
        >>> pe.encode('eaten')
        'IYtEHn'
        >>> pe.encode('Smith')
        'smIHTH'
        >>> pe.encode('Larsen')
        'lAArsEHn'


        .. versionadded:: 0.3.0
        .. versionchanged:: 0.3.6
            Encapsulated in class

        """

        def _to_regex(pattern, left_match=True):
            new_pattern = ''
            replacements = {
                '#': '[AEIOU]+',
                ':': '[BCDFGHJKLMNPQRSTVWXYZ]*',
                '^': '[BCDFGHJKLMNPQRSTVWXYZ]',
                '.': '[BDVGJLMNTWZ]',
                '%': '(ER|E|ES|ED|ING|ELY)',
                '+': '[EIY]',
                ' ': '^',
            }
            for char in pattern:
                new_pattern += (
                    replacements[char] if char in replacements else char
                )

            if left_match:
                new_pattern += '$'
                if '^' not in pattern:
                    new_pattern = '^.*' + new_pattern
            else:
                new_pattern = '^' + new_pattern.replace('^', '$')
                if '$' not in new_pattern:
                    new_pattern += '.*$'

            return new_pattern

        word = word.upper()

        pron = ''
        pos = 0
        while pos < len(word):
            left_orig = word[:pos]
            right_orig = word[pos:]
            first = word[pos] if word[pos] in self._rules else ' '
            for rule in self._rules[first]:
                left, match, right, out = rule
                if right_orig.startswith(match):
                    if left:
                        l_pattern = _to_regex(left, left_match=True)
                    if right:
                        r_pattern = _to_regex(right, left_match=False)
                    if (not left or re_match(l_pattern, left_orig)) and (
                        not right
                        or re_match(r_pattern, right_orig[len(match) :])
                    ):
                        pron += out
                        pos += len(match)
                        break
            else:
                pron += word[pos]
                pos += 1

        return pron
Example #60
 def Address(self):
     "Return compressed public key address"
     vh160 = chr(25) + self.Identifier()
     leadingzbytes = len(re_match('^\x00*', vh160).group(0))
     return 'B' * leadingzbytes + Base58.check_encode(vh160)