Exemple #1
0
def run_pep257():
    """Parse options, check the given files and report errors to stderr.

    Returns 0 when no violations were found and 1 otherwise.  When
    ``--count`` is given, also prints the number of errors found.
    """
    log.setLevel(logging.DEBUG)
    opt_parser = get_option_parser()
    # Configure the logger before the config file is parsed so that the
    # command-line debug/verbose flags already apply.
    options, arguments = opt_parser.parse_args()
    setup_stream_handler(options)
    # Parse the file arguments before opening the config file, since they
    # change where the config file is looked for.
    options = get_options(arguments, opt_parser)
    # Re-apply the handler setup with values from the config file.
    setup_stream_handler(options)

    targets = arguments or ['.']
    collected = collect(targets,
                        match=re(options.match + '$').match,
                        match_dir=re(options.match_dir + '$').match)

    log.debug("starting pep257 in debug mode.")

    Error.explain = options.explain
    Error.source = options.source
    collected = list(collected)
    exit_code = 0
    error_count = 0
    for error in check(collected, ignore=options.ignore.split(',')):
        sys.stderr.write('%s\n' % error)
        exit_code = 1
        error_count += 1
    if options.count:
        print(error_count)
    return exit_code
Exemple #2
0
    def check_triple_double_quotes(self, definition, docstring):
        r'''D300: Use """triple double quotes""".

        For consistency, always use """triple double quotes""" around
        docstrings. Use r"""raw triple double quotes""" if you use any
        backslashes in your docstrings. For Unicode docstrings, use
        u"""Unicode triple-quoted strings""".

        Note: Exception to this is made if the docstring contains
              """ quotes in its body.

        '''
        if not docstring:
            return
        # A body containing """ can only be written with ''' quotes, so
        # accept those in that case (this exception is not in PEP 257).
        if '"""' in ast.literal_eval(docstring):
            expected = re(r"[uU]?[rR]?'''[^'].*")
        else:
            expected = re(r'[uU]?[rR]?"""[^"].*')
        if expected.match(docstring):
            return
        opening_quotes = re(r"""[uU]?[rR]?("+|'+).*""").match(docstring).group(1)
        return violations.D300(opening_quotes)
Exemple #3
0
def run_pep257():
    """Validate options, check the given files and report violations.

    Returns ``INVALID_OPTIONS_RETURN_CODE`` for invalid options,
    ``NO_VIOLATIONS_RETURN_CODE`` when all checked files are clean and
    ``VIOLATIONS_RETURN_CODE`` otherwise.  With ``--count`` the number
    of reported errors is printed as well.
    """
    log.setLevel(logging.DEBUG)
    opt_parser = get_option_parser()
    # Configure logging before the config file is parsed so command-line
    # debug/verbose flags take effect immediately.
    options, arguments = opt_parser.parse_args()
    setup_stream_handlers(options)
    # Parse the file arguments before opening the config file, since they
    # change where the config file is searched for.
    options = get_options(arguments, opt_parser)
    if not validate_options(options):
        return INVALID_OPTIONS_RETURN_CODE
    # Re-apply handler setup with values coming from the config file.
    setup_stream_handlers(options)

    collected = collect(arguments or ['.'],
                        match=re(options.match + '$').match,
                        match_dir=re(options.match_dir + '$').match)

    log.debug("starting pep257 in debug mode.")

    Error.explain = options.explain
    Error.source = options.source
    collected = list(collected)
    checked_codes = get_checked_error_codes(options)
    violations_found = 0
    for error in check(collected, select=checked_codes):
        sys.stderr.write('%s\n' % error)
        violations_found += 1
    if options.count:
        print(violations_found)
    if violations_found:
        return VIOLATIONS_RETURN_CODE
    return NO_VIOLATIONS_RETURN_CODE
Exemple #4
0
def main(options, arguments):
    """Check `arguments` (or the current directory) and print errors.

    Every error is written to stderr.  Returns 1 when at least one
    error was reported, otherwise 0.
    """
    Error.explain = options.explain
    Error.source = options.source
    # Default to checking the current directory when no paths are given.
    targets = arguments or ['.']
    collected = collect(targets,
                        match=re(options.match + '$').match,
                        match_dir=re(options.match_dir + '$').match)
    found_errors = False
    for error in check(collected, ignore=options.ignore.split(',')):
        sys.stderr.write('%s\n' % error)
        found_errors = True
    return 1 if found_errors else 0
Exemple #5
0
def main(options, arguments):
    """Run the checks over `arguments`, printing each error to stderr.

    Enables debug logging when ``--debug`` is set.  Returns 1 if at
    least one error was emitted, otherwise 0.
    """
    if options.debug:
        log.setLevel(logging.DEBUG)
    log.debug("starting pep257 in debug mode.")
    Error.explain = options.explain
    Error.source = options.source
    match_file = re(options.match + '$').match
    match_dir = re(options.match_dir + '$').match
    collected = collect(arguments or ['.'], match=match_file,
                        match_dir=match_dir)
    exit_code = 0
    for error in check(collected, ignore=options.ignore.split(',')):
        sys.stderr.write('%s\n' % error)
        exit_code = 1
    return exit_code
Exemple #6
0
 def _get_ignore_decorators(config):
     """Return the `ignore_decorators` as None or regex."""
     if config.ignore_decorators:  # not None and not ''
         ignore_decorators = re(config.ignore_decorators)
     else:
         ignore_decorators = None
     return ignore_decorators
Exemple #7
0
    def _get_leading_words(line):
        """Return any leading set of words from `line`.

        For example, if `line` is "  Hello world!!!", returns "Hello world".
        """
        result = re("[A-Za-z ]+").match(line.strip())
        if result is not None:
            return result.group()
Exemple #8
0
 def is_public(self):
     # Setter/deleter implementations are decorated with '<name>.something'
     # (e.g. 'foo.setter'); treat those as private.  The anchored pattern
     # matches 'foo.bar' but not 'foobar' or 'sfoo'.
     own_prefix = re(r"^{0}\.".format(self.name))
     if any(own_prefix.match(decorator.name) for decorator in self.decorators):
         return False
     # Dunder methods count as public even though they start with '_'.
     name_is_public = is_magic(self.name) or not self.name.startswith('_')
     return self.parent.is_public and name_is_public
Exemple #9
0
    def keys(self, pattern):
        """Return all cached keys matching the wildcard `pattern`.

        The key prefix added by `make_key` is stripped from the returned
        keys.

        :returns: list of matching keys

        """
        prefix_length = len(self.make_key(''))
        matcher = re(fnmatch.translate(self.make_key(pattern)))
        result = []
        for key in self._cache.keys():
            if matcher.match(key):
                result.append(key[prefix_length:])
        return result
Exemple #10
0
def parse_options(args=None, config=True, **overrides): # noqa
    """ Parse options from command line and configuration files.

    Precedence is: command line > `overrides` > ini-file defaults.

    :return argparse.Namespace:

    """
    if args is None:
        args = []

    # Parse args from command string
    options = PARSER.parse_args(args)
    options.file_params = dict()
    options.linter_params = dict()

    # Override options
    # Only fill in overrides for options the command line did not set:
    # a value still wrapped in _Default means "not given explicitly".
    for k, v in overrides.items():
        passed_value = getattr(options, k, _Default())
        if isinstance(passed_value, _Default):
            setattr(options, k, _Default(v))

    # Compile options from ini
    if config:
        cfg = get_config(str(options.options))
        for k, v in cfg.default.items():
            LOGGER.info('Find option %s (%s)', k, v)
            passed_value = getattr(options, k, _Default())
            if isinstance(passed_value, _Default):
                setattr(options, k, _Default(v))

        # Parse file related options
        for name, opts in cfg.sections.items():

            # Only sections whose name starts with "pylama" are relevant.
            if not name.startswith('pylama'):
                continue

            if name == cfg.default_section:
                continue

            # Strip the "pylama:" prefix, leaving a linter name or file mask.
            name = name[7:]

            if name in LINTERS:
                options.linter_params[name] = dict(opts)
                continue

            # Remaining sections are per-file masks (fnmatch syntax),
            # stored keyed by their compiled regex.
            mask = re(fnmatch.translate(name))
            options.file_params[mask] = dict(opts)

    # Postprocess options
    # Unwrap any remaining _Default markers into real, processed values.
    opts = dict(options.__dict__.items())
    for name, value in opts.items():
        if isinstance(value, _Default):
            setattr(options, name, process_value(name, value.value))

    return options
Exemple #11
0
 def is_public(self):
     """Return True iff this method should be considered public."""
     # Setter/deleter implementations are decorated with '<name>.something'
     # (matches 'foo.bar' but not 'foobar' or 'sfoo'); treat those as
     # private implementations of an existing property.
     own_prefix = re(r"^{}\.".format(self.name))
     for decorator in self.decorators:
         if own_prefix.match(decorator.name):
             return False
     if self.name in VARIADIC_MAGIC_METHODS or self.is_magic:
         name_is_public = True
     else:
         name_is_public = not self.name.startswith('_')
     return self.parent.is_public and name_is_public
Exemple #12
0
def Rue(f,A="",s=0):
	# Grammar-rewriting interpreter (heavily golfed).  `f` is a grammar
	# name or file path, `A` is the argument text substituted for "@@",
	# and `s` is the recursion depth (used only to indent debug output).
	# NOTE(review): relies on module globals G (grammar cache), k (a
	# substitution helper), D (debug flag) and Smod (built-in modules);
	# their semantics are inferred from usage here -- confirm against
	# the rest of the file before editing.
	from random import choice
	if f in G:f=G[f]
	else:
		c=f
		f=k("",open(f).read())
		# A leading "import <path>" line splices that file's content in.
		if f.startswith("import "):
			a=f.find("\n",8)
			f=open(f[7:a]).read()+f[a:]
		# Inline every other "import <path>" line in place.
		a=f.find("\nimport ")+1
		while a:
			b=f.find("\n",a+8)
			f=f[:a]+open(f[a+7:b]).read()+f[b:]
			a=f.find("\nimport ",a)+1
		# Split the grammar into sections separated by "\n::=\n" and cache it.
		f=f.split("\n::=\n")
		G[c]=f
	c=""
	R=[]
	for lf,C in zip(range(len(f)-1,-1,-1),f):
		# Each non-empty line of the previous section is "pattern::=replacement".
		# Patterns are compiled with flags=16 (re.DOTALL in CPython); the
		# resulting triples drive substitution below.  The replacement
		# selection logic is golfed -- verify carefully before changing.
		R+=((re(R[0],16).sub,R[1] if len(R) == 2 else R[1:] or "",len(R) == 1) for R in (R.split("::=") for R in c.split("\n") if R))
		while 1:
			while 1:
				# Substitute the call argument, then apply rules until a
				# full pass over all rules changes nothing.
				c=C=C.replace("@@",A)
				for p0,p1,p2 in R:
					C=p0(choice(p1) if p2 else p1,C,1)
					if c is not C:break
				else:break
				if D:print(" "*s+C)
			if lf:break
			# In the final section, expand "{...}" groups.  A "name:" prefix
			# selects either a built-in module (Smod) or a recursive call.
			a=C.find("}")
			if a == -1:break
			while 1:
				b=C.rfind("{",0,a)
				c=C.rfind(":",0,b)
				f=C[c+1:b]
				b=C[b+1:a]
				C=C[:c]+(Smod[f](b) if f in Smod else Rue(f,b,s+1))+C[a+1:]
				a=C.find("}",c)
				if a == -1:break
	return C
Exemple #13
0
from html.parser import HTMLParser
from re import compile as re
from unicodedata import normalize as unicode_normalize

from .base import Service
from .common import Trait

__all__ = ['Duden']


# Maximum accepted input length -- presumably characters; confirm at call site.
INPUT_MAXIMUM = 100
# German definite articles; how they are treated is decided by the callers.
IGNORE_ARTICLES = ['der', 'das', 'die']
CASE_MATTERS = ['Weg']

# Search endpoint used to find the detail page for a word.
SEARCH_FORM = 'http://www.duden.de/suchen/dudenonline'
# Captures a detail-page link: group 1 = full URL, group 2 = word slug.
RE_DETAIL = re(r'href="(https?://www\.duden\.de/rechtschreibung/(.+?))"')
# Captures the pronounced form (group 3) and its MP3 URL (group 5) from a
# "Betonung:" / "Bei der Schreibung" fragment of the detail page.
RE_MP3 = re(r'(Betonung:|Bei der Schreibung) .*?'
            r'(<em>|&raquo;)(.+?)(</em>|&laquo;).+?'
            r'<a .*? href="(https?://www\.duden\.de/_media_/audio/.+?\.mp3)"')

# Shared parser instance, used for unescaping HTML entities.
HTML_PARSER = HTMLParser()


class Duden(Service):
    """
    Provides a Service-compliant implementation for Duden.
    """

    __slots__ = []

    NAME = "Duden"
Exemple #14
0
"""Content Negotiation"""

from re import compile as re

# Capture the final extension of a file name, e.g. "a.tar.gz" -> "gz".
file_type_re = re(r'\.([^\.]+)$')

# Map file extensions to MIME content types.
content_types = {
    'text': 'text/plain',
    'txt': 'text/plain',
    'html': 'text/html',
    'js': 'text/javascript',
    'css': 'text/css',
    'png': 'image/png',
    'gif': 'image/gif',
    'jpg': 'image/jpg',
    'py': 'text/plain',
}

# Reverse map: MIME content type back to an extension.
# NOTE(review): duplicate values in `content_types` ('text/plain') mean the
# last key iterated wins here -- confirm this is intended.
file_types = dict(
    (value, key)
    for key, value in content_types.items()
)

def get_content_type(file_name = None, file_type = None, content_type = None):
    if content_type is None:
        content_type = 'text/plain'
    if file_type is None and file_name is not None:
        file_type_search = file_type_re.search(file_name)
        if file_type_search:
            file_type_groups = file_type_search.groups()
            if file_type_groups:
Exemple #15
0
from threading import get_ident as gettid
from os import getpid, unlink, F_OK, O_RDONLY, O_WRONLY, O_EXCL, O_CREAT

from pathlib import Path
from re import compile as re

from time import sleep
from random import choice
from itertools import chain

# Alternative base64 alphabet: '+' and '_' as the two extra characters
# (standard base64 uses '+' and '/').
b64bytes = b'+_'
b64chars = b64bytes.decode()

# The 64 characters of this alphabet in value order: encoding i << 2 puts
# the 6-bit value `i` entirely in the first output character.
# NOTE(review): b64encode comes from a file-level import not visible here.
b64seq = bytes(b64encode(bytes((i << 2,)), b64bytes)[0] for i in range(64)).decode()
#b64set = set(b64seq)
# Matches a two-character name drawn from this base64 alphabet.
directory_re = re('[A-Za-z0-9'+b64chars+']{2}')

def my_b64encode(b):
	"""Base64-encode `b` with the '+_' alphabet, dropping '=' padding."""
	encoded = b64encode(b, b64bytes)
	return encoded.rstrip(b'=').decode()

def my_b64decode(s):
	"""Decode `s` (as produced by my_b64encode), restoring '=' padding."""
	padding = '=' * (-len(s) & 3)
	return b64decode(s + padding, b64bytes)

class FilesystemListAction(PoolAction):
	# Directory this listing action operates on; set by callers/subclasses.
	directory = None
	# Result cursor produced by the action; returned from sync().
	cursor = None

	def sync(self):
		"""Synchronise with the pool, then return the listing cursor."""
		super().sync()
		return self.cursor
Exemple #16
0
 def _get_ignore_decorators(conf):
     """Return the `ignore_decorators` as None or regex."""
     return (re(conf.ignore_decorators)
             if conf.ignore_decorators else None)
Exemple #17
0
def get_parser():
    """ Make command parser for pylama.

    :return ArgumentParser:

    """
    # PEP 8 (E731): use a def, not a lambda bound to a name.
    def split_csp_str(s):
        """Split a comma-separated string into a list of unique items."""
        return list(set(i for i in s.strip().split(',') if i))

    parser = ArgumentParser(description="Code audit tool for python.")
    parser.add_argument(
        "path", nargs='?', default=_Default(CURDIR),
        help="Path on file or directory.")

    parser.add_argument(
        "--verbose", "-v", action='store_true', help="Verbose mode.")

    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + version)

    parser.add_argument(
        "--format", "-f", default=_Default('pep8'), choices=['pep8', 'pylint'],
        help="Error format.")

    parser.add_argument(
        "--select", "-s", default=_Default(''), type=split_csp_str,
        help="Select errors and warnings. (comma-separated)")

    parser.add_argument(
        "--linters", "-l", default=_Default(','.join(DEFAULT_LINTERS)),
        type=split_csp_str,
        help=(
            "Select linters. (comma-separated). Choices are %s."
            % ','.join(s for s in utils.__all__)
        ))

    parser.add_argument(
        "--ignore", "-i", default=_Default(''), type=split_csp_str,
        help="Ignore errors and warnings. (comma-separated)")

    # Masks use fnmatch syntax and are compiled to regexes here.
    parser.add_argument(
        "--skip", default=_Default(''),
        type=lambda s: [re(fnmatch.translate(p)) for p in s.split(',') if p],
        help="Skip files by masks (comma-separated, Ex. */messages.py)")

    parser.add_argument(
        "--complexity", "-c", default=_Default(DEFAULT_COMPLEXITY), type=int,
        help="Set mccabe complexity.")

    parser.add_argument("--report", "-r", help="Filename for report.")
    parser.add_argument(
        "--hook", action="store_true", help="Install Git (Mercurial) hook.")

    parser.add_argument(
        "--async", action="store_true",
        help="Enable async mode. Usefull for checking a lot of files. "
        "Dont supported with pylint.")

    parser.add_argument(
        "--options", "-o", default=_Default(DEFAULT_INI_PATH),
        help="Select configuration file. By default is '<CURDIR>/pylama.ini'")

    return parser
Exemple #18
0
""" Code runnning support. """
import sys
from io import StringIO
from re import compile as re

from .environment import env

# PEP 263 source-encoding declaration, e.g. "# -*- coding: utf-8 -*-".
encoding = re(r'#.*coding[:=]\s*([-\w.]+)')


def run_code():
    """ Run python code in current buffer.

    :returns: None

    """
    errors, err = [], ''
    line1, line2 = env.var('a:line1'), env.var('a:line2')
    lines = __prepare_lines(line1, line2)
    if encoding.match(lines[0]):
        lines.pop(0)
        if encoding.match(lines[0]):
            lines.pop(0)
    elif encoding.match(lines[1]):
        lines.pop(1)

    context = dict(__name__='__main__',
                   __file__=env.var('expand("%:p")'),
                   input=env.user_input,
                   raw_input=env.user_input)
Exemple #19
0
 def _get_matches(conf):
     """Return the `match` and `match_dir` functions for `config`."""
     match_func = re(conf.match + '$').match
     match_dir_func = re(conf.match_dir + '$').match
     return match_func, match_dir_func
Exemple #20
0
def humanize(string):
    """Insert a space before upper-case runs and lower-case the result.

    For example, 'CamelCase' becomes 'camel case'.
    """
    spaced = re(r'(.)([A-Z]+)').sub(r'\1 \2', string)
    return spaced.lower()
Exemple #21
0
    def next(obj, default=nothing):
        """Emulate the builtin next() for iterators with a .next() method.

        If `default` is given, it is returned when the iterator is
        exhausted instead of letting StopIteration propagate.
        """
        if default == nothing:
            return obj.next()
        try:
            return obj.next()
        except StopIteration:
            return default


__version__ = '0.5.0-alpha'
__all__ = ('check', 'collect')

# Configuration files searched for pep257 settings -- precedence is decided
# by the config-loading code, not here.
PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep257')

# Insert a space before upper-case runs, then lower-case: 'OneTwo' -> 'one two'.
humanize = lambda string: re(r'(.)([A-Z]+)').sub(r'\1 \2', string).lower()
# True for dunder names such as '__init__'.
is_magic = lambda name: name.startswith('__') and name.endswith('__')
# True when every character is 7-bit ASCII.
is_ascii = lambda string: all(ord(char) < 128 for char in string)
# True for empty or whitespace-only strings.
is_blank = lambda string: not string.strip()
# Leading whitespace of `string` (may be the empty string).
leading_space = lambda string: re('\s*').match(string).group()


class Value(object):

    __init__ = lambda self, *args: vars(self).update(zip(self._fields, args))
    __hash__ = lambda self: hash(repr(self))
    __eq__ = lambda self, other: other and vars(self) == vars(other)

    def __repr__(self):
        format_arg = lambda arg: '{}={!r}'.format(arg, getattr(self, arg))
        kwargs = ', '.join(format_arg(arg) for arg in self._fields)
Exemple #22
0
from re import compile as re
import traceback
import botocore

def get_object_access_logs(session, since=None):
    """Yield GET.OBJECT access-log records from every reachable S3 bucket.

    Buckets we cannot read are skipped silently; any other failure is
    printed and the remaining buckets are still processed.
    """
    client = session.client('s3')
    for bucket in get_s3_buckets(session):
        try:
            for o in get_gets(client, bucket['Name'], since):
                yield o
        except botocore.exceptions.ClientError:
            pass # Most likely we don't have sufficient permissions to read that bucket.
        except Exception:
            # A bare `except:` here would also swallow KeyboardInterrupt and
            # SystemExit; keep the best-effort behaviour but let those through.
            traceback.print_exc()

# Parse an S3 server-access-log line for GET.OBJECT requests, capturing
# (1) the bucket name, (2) the "dd/Mon/yyyy:HH:MM:SS" timestamp and
# (3) the object key.
log_line_re = re(r'\w+ *([A-Za-z-]+) *\[(\d+/\w+/\d+:\d+:\d+:\d+).{4,10}\].*GET\.OBJECT *([^ ]+) *.*')
# Matches keys with a date-stamped name of the form used by access-log
# objects themselves (optionally under a prefix).
key_name_re = re(r'(.*/)?\d{4}-\d{2}-\d{2}-\d{2}-\d{2}-\d{2}-\w+')

def parse(lines):
    """Yield dicts (bucket, time, object) for GET.OBJECT log lines."""
    for line in lines:
        # Cheap substring test before paying for the regex.
        if 'GET.OBJECT' not in line:
            continue
        match = log_line_re.match(line)
        if not match:
            continue
        when = datetime.strptime(match.group(2), '%d/%b/%Y:%H:%M:%S')
        yield dict(bucket=match.group(1), time=when, object=match.group(3))

def get_gets(client, bucket_name, since=None):
    def ls(**kw):
        return client.list_objects_v2(Bucket=bucket_name, **kw)
    if since:
Exemple #23
0
from re import compile as re
import operator as op

from funcparserlib.lexer import make_tokenizer, Token
from funcparserlib.parser import (some, a, maybe, finished, skip, many)


# Leading (possibly fractional) number at the start of a value string.
# NOTE(review): the pattern can match the empty string, and it is not a
# raw string literal ('\d'); it works, but r'...' would be cleaner.
NUMBER_RE = re('(\d*\.?\d*)')
CONVERT = {
    "bytes": (
        ("TB", 1099511627776), ("GB", 1073741824.0), ("MB", 1048576.0), ("KB", 1024.0),
    ),
    "bits": (
        ("Tb", 1099511627776), ("Gb", 1073741824.0), ("Mb", 1048576.0), ("Kb", 1024.0),
    ),
    "bps": (
        ("Gbps", 1000000000.0), ("Mbps", 1000000.0), ("Kbps", 1000.0),
    ),
    "short": (
        ("Tri", 1000000000000.0), ("Bil", 1000000000.0), ("Mil", 1000000.0), ("K",   1000.0),
    ),
    "s": (
        ("y", 31536000.0),
        ("M", 2592000.0),
        ("w", 604800.0),
        ("d", 86400.0),
        ("h", 3600.0),
        ("m", 60.0),
        ("s", 1.0),
        ("ms", 0.001),
    ),
Exemple #24
0
class ConventionChecker:
    """Checker for PEP 257, NumPy and Google conventions.

    D10x: Missing docstrings
    D20x: Whitespace issues
    D30x: Docstring formatting
    D40x: Docstring content issues

    """

    # Section headers recognised in NumPy-style docstrings.
    NUMPY_SECTION_NAMES = (
        'Short Summary',
        'Extended Summary',
        'Parameters',
        'Returns',
        'Yields',
        'Other Parameters',
        'Raises',
        'See Also',
        'Notes',
        'References',
        'Examples',
        'Attributes',
        'Methods'
    )

    # Section headers recognised in Google-style docstrings.
    GOOGLE_SECTION_NAMES = (
        'Args',
        'Arguments',
        'Attention',
        'Attributes',
        'Caution',
        'Danger',
        'Error',
        'Example',
        'Examples',
        'Hint',
        'Important',
        'Keyword Args',
        'Keyword Arguments',
        'Methods',
        'Note',
        'Notes',
        'Other Parameters',
        'Parameters',
        'Return',
        'Returns',
        'Raises',
        'References',
        'See Also',
        'Tip',
        'Todo',
        'Warning',
        'Warnings',
        'Warns',
        'Yield',
        'Yields',
    )

    # Examples that will be matched -
    # "     random: Test" where random will be captured as the param
    # " random         : test" where random will be captured as the param
    # "  random_t (Test) : test  " where random_t will be captured as the param
    GOOGLE_ARGS_REGEX = re(
                        # Matches anything that fulfills all the following conditions:
        r"^\s*"         # Begins with 0 or more whitespace characters
        r"(\w+)"        # Followed by 1 or more unicode chars, numbers or underscores
                        # The above is captured as the first group as this is the paramater name.
        r"\s*"          # Followed by 0 or more whitespace characters
        r"\(?(.*?)\)?"  # Matches patterns contained within round brackets.
                        # The `(.*?)` is the second capturing group which matches any sequence of
                        # characters in a non-greedy way (denoted by the `*?`)
        r"\s*"          # Followed by 0 or more whitespace chars
        r":"            # Followed by a colon
        ".+"            # Followed by 1 or more characters - which is the docstring for the parameter
    )

    def check_source(self, source, filename, ignore_decorators=None):
        """Parse `source` and yield every violation found.

        Runs each registered check (terminal checks first, see `checks`)
        over every definition parsed from `source`.  A terminal check
        that reports an error stops further checking of that definition.
        """
        module = parse(StringIO(source), filename)
        for definition in module:
            for this_check in self.checks:
                terminate = False
                if isinstance(definition, this_check._check_for):
                    # A definition whose skipped_error_codes is 'all'
                    # disables every check for it.
                    skipping_all = (definition.skipped_error_codes == 'all')
                    # Skip definitions carrying a decorator matched by the
                    # ignore_decorators regex.
                    decorator_skip = ignore_decorators is not None and any(
                        len(ignore_decorators.findall(dec.name)) > 0
                        for dec in definition.decorators)
                    if not skipping_all and not decorator_skip:
                        error = this_check(self, definition,
                                           definition.docstring)
                    else:
                        error = None
                    # A check may return a single error, an iterable of
                    # errors, or None.
                    errors = error if hasattr(error, '__iter__') else [error]
                    for error in errors:
                        if error is not None and error.code not in \
                                definition.skipped_error_codes:
                            # The check's docstring doubles as the error's
                            # message (first sentence) and explanation.
                            partition = this_check.__doc__.partition('.\n')
                            message, _, explanation = partition
                            error.set_context(explanation=explanation,
                                              definition=definition)
                            yield error
                            if this_check._terminal:
                                terminate = True
                                break
                if terminate:
                    break

    @property
    def checks(self):
        """Return all check methods of this class, terminal checks first.

        A check is any class attribute registered through the @check_for
        decorator (which sets the `_check_for` attribute on it).
        """
        # Renamed from `all`, which shadowed the builtin of the same name.
        found = [this_check for this_check in vars(type(self)).values()
                 if hasattr(this_check, '_check_for')]
        return sorted(found, key=lambda this_check: not this_check._terminal)

    @check_for(Definition, terminal=True)
    def check_docstring_missing(self, definition, docstring):
        """D10{0,1,2,3}: Public definitions should have docstrings.

        All modules should normally have docstrings.  [...] all functions and
        classes exported by a module should also have docstrings. Public
        methods (including the __init__ constructor) should also have
        docstrings.

        Note: Public (exported) definitions are either those with names listed
              in __all__ variable (if present), or those that do not start
              with a single underscore.

        """
        # Fire when a public definition has no docstring at all, or has a
        # docstring whose evaluated text is blank.
        if (not docstring and definition.is_public or
                docstring and is_blank(ast.literal_eval(docstring))):
            # Dispatch on the *exact* definition type (not isinstance);
            # methods additionally distinguish magic methods (D105) and
            # __init__ (D107) from regular methods (D102).
            codes = {Module: violations.D100,
                     Class: violations.D101,
                     NestedClass: violations.D106,
                     Method: (lambda: violations.D105() if definition.is_magic
                              else (violations.D107() if definition.is_init
                              else violations.D102())),
                     Function: violations.D103,
                     NestedFunction: violations.D103,
                     Package: violations.D104}
            return codes[type(definition)]()

    @check_for(Definition)
    def check_one_liners(self, definition, docstring):
        """D200: One-liner docstrings should fit on one line with quotes.

        The closing quotes are on the same line as the opening quotes.
        This looks better for one-liners.

        """
        if not docstring:
            return
        lines = ast.literal_eval(docstring).split('\n')
        if len(lines) <= 1:
            return
        # Flag docstrings whose text fits on one line but which are spread
        # over several physical lines.
        content_lines = [line for line in lines if not is_blank(line)]
        if len(content_lines) == 1:
            return violations.D200(len(lines))

    @check_for(Function)
    def check_no_blank_before(self, function, docstring):  # def
        """D20{1,2}: No blank lines allowed around function/method docstring.

        There's no blank line either before or after the docstring.

        """
        if not docstring:
            return
        before, _, after = function.source.partition(docstring)
        blanks_before = [is_blank(line) for line in before.split('\n')[:-1]]
        blanks_after = [is_blank(line) for line in after.split('\n')[1:]]
        n_before = sum(takewhile(bool, reversed(blanks_before)))
        n_after = sum(takewhile(bool, blanks_after))
        if n_before != 0:
            yield violations.D201(n_before)
        # When every line after the docstring is blank there is no body to
        # be separated from, so D202 is not reported.
        if n_after != 0 and not all(blanks_after):
            yield violations.D202(n_after)

    @check_for(Class)
    def check_blank_before_after_class(self, class_, docstring):
        """D20{3,4}: Class docstring should have 1 blank line around them.

        Insert a blank line before and after all docstrings (one-line or
        multi-line) that document a class -- generally speaking, the class's
        methods are separated from each other by a single blank line, and the
        docstring needs to be offset from the first method by a blank line;
        for symmetry, put a blank line between the class header and the
        docstring.

        """
        # NOTE: this gives false-positive in this case
        # class Foo:
        #
        #     """Docstring."""
        #
        #
        # # comment here
        # def foo(): pass
        if not docstring:
            return
        before, _, after = class_.source.partition(docstring)
        blanks_before = [is_blank(line) for line in before.split('\n')[:-1]]
        blanks_after = [is_blank(line) for line in after.split('\n')[1:]]
        n_before = sum(takewhile(bool, reversed(blanks_before)))
        n_after = sum(takewhile(bool, blanks_after))
        # D211 (!= 0) and D203 (!= 1) can never both be satisfied; at least
        # one of them is always reported here.
        if n_before != 0:
            yield violations.D211(n_before)
        if n_before != 1:
            yield violations.D203(n_before)
        if not all(blanks_after) and n_after != 1:
            yield violations.D204(n_after)

    @check_for(Definition)
    def check_blank_after_summary(self, definition, docstring):
        """D205: Put one blank line between summary line and description.

        Multi-line docstrings consist of a summary line just like a one-line
        docstring, followed by a blank line, followed by a more elaborate
        description. The summary line may be used by automatic indexing tools;
        it is important that it fits on one line and is separated from the
        rest of the docstring by a blank line.

        """
        if not docstring:
            return
        lines = ast.literal_eval(docstring).strip().split('\n')
        if len(lines) <= 1:
            return
        # Count the blank lines immediately following the summary line.
        blanks = sum(takewhile(bool, (is_blank(line) for line in lines[1:])))
        if blanks != 1:
            return violations.D205(blanks)

    @staticmethod
    def _get_docstring_indent(definition, docstring):
        """Return the indentation of the docstring's opening quotes."""
        before_docstring, _, _ = definition.source.partition(docstring)
        _, _, indent = before_docstring.rpartition('\n')
        return indent

    @check_for(Definition)
    def check_indent(self, definition, docstring):
        """D20{6,7,8}: The entire docstring should be indented same as code.

        The entire docstring is indented the same as the quotes at its
        first line.

        """
        if docstring:
            indent = self._get_docstring_indent(definition, docstring)
            lines = docstring.split('\n')
            if len(lines) > 1:
                lines = lines[1:]  # First line does not need indent.
                indents = [leading_space(l) for l in lines if not is_blank(l)]
                # D206: both spaces and tabs appear across the indents.
                if set(' \t') == set(''.join(indents) + indent):
                    yield violations.D206()
                # D208: over-indentation.  NOTE(review): the `<`/`>` string
                # comparisons assume a single consistent whitespace char.
                if (len(indents) > 1 and min(indents[:-1]) > indent or
                        indents[-1] > indent):
                    yield violations.D208()
                # D207: some line is under-indented.
                if min(indents) < indent:
                    yield violations.D207()

    @check_for(Definition)
    def check_newline_after_last_paragraph(self, definition, docstring):
        """D209: Put multi-line docstring closing quotes on separate line.

        Unless the entire docstring fits on a line, place the closing
        quotes on a line by themselves.

        """
        if not docstring:
            return
        content = [line for line in ast.literal_eval(docstring).split('\n')
                   if not is_blank(line)]
        if len(content) <= 1:
            return
        # The last physical line must consist of the closing quotes only.
        closing_line = docstring.split("\n")[-1].strip()
        if closing_line not in ('"""', "'''"):
            return violations.D209()

    @check_for(Definition)
    def check_surrounding_whitespaces(self, definition, docstring):
        """D210: No whitespaces allowed surrounding docstring text."""
        if not docstring:
            return
        lines = ast.literal_eval(docstring).split('\n')
        starts_with_space = lines[0].startswith(' ')
        # Trailing space is only checked for one-line docstrings.
        ends_with_space = len(lines) == 1 and lines[0].endswith(' ')
        if starts_with_space or ends_with_space:
            return violations.D210()

    @check_for(Definition)
    def check_multi_line_summary_start(self, definition, docstring):
        """D21{2,3}: Multi-line docstring summary style check.

        A multi-line docstring summary should start either at the first,
        or separately at the second line of a docstring.

        """
        if not docstring:
            return
        lines = ast.literal_eval(docstring).split('\n')
        if len(lines) <= 1:
            return
        # Every opening-quote form a docstring may start with
        # (optional u/r prefix plus a triple quote).
        openings = ('"""', "'''",
                    'u"""', "u'''",
                    'r"""', "r'''",
                    'ur"""', "ur'''")
        first_physical = docstring.split("\n")[0].strip().lower()
        if first_physical in openings:
            # First line holds the quotes only.
            return violations.D212()
        return violations.D213()

    @check_for(Definition)
    def check_triple_double_quotes(self, definition, docstring):
        r'''D300: Use """triple double quotes""".

        For consistency, always use """triple double quotes""" around
        docstrings. Use r"""raw triple double quotes""" if you use any
        backslashes in your docstrings. For Unicode docstrings, use
        u"""Unicode triple-quoted strings""".

        Note: Exception to this is made if the docstring contains
              """ quotes in its body.

        '''
        if not docstring:
            return
        # A body containing """ can only be written with ''' quotes, so
        # accept those in that case (this exception is not in PEP 257).
        if '"""' in ast.literal_eval(docstring):
            expected = re(r"[uU]?[rR]?'''[^'].*")
        else:
            expected = re(r'[uU]?[rR]?"""[^"].*')
        if expected.match(docstring):
            return
        opening_quotes = re(r"""[uU]?[rR]?("+|'+).*""").match(docstring).group(1)
        return violations.D300(opening_quotes)

    @check_for(Definition)
    def check_backslashes(self, definition, docstring):
        r'''D301: Use r""" if any backslashes in a docstring.

        Use r"""raw triple double quotes""" if you use any backslashes
        (\) in your docstrings.

        '''
        # Only raw-ness is checked here; check_triple_double_quotes takes
        # care of the quote style itself.
        if not docstring or '\\' not in docstring:
            return
        if not docstring.startswith(('r', 'ur')):
            return violations.D301()

    @check_for(Definition)
    def check_unicode_docstring(self, definition, docstring):
        r'''D302: Use u""" for docstrings with Unicode.

        For Unicode docstrings, use u"""Unicode triple-quoted strings""".

        '''
        # With `unicode_literals` imported, every docstring is already
        # unicode, so there is nothing to check.
        if 'unicode_literals' in definition.module.future_imports:
            return

        # Only meaningful on Python 2; check_triple_double_quotes ensures
        # the correct quotes themselves.
        if docstring and sys.version_info[0] <= 2:
            has_unicode = not is_ascii(docstring)
            if has_unicode and not docstring.startswith(('u', 'ur')):
                return violations.D302()

    @staticmethod
    def _check_ends_with(docstring, chars, violation):
        """Return `violation` unless the first line ends with one of `chars`.

        `chars` may be a single string or a tuple of strings, exactly as
        accepted by `str.endswith`. Returns None when the docstring is
        missing or its summary line already ends correctly.
        """
        if not docstring:
            return
        first_line = ast.literal_eval(docstring).strip().split('\n')[0]
        if not first_line.endswith(chars):
            return violation(first_line[-1])

    @check_for(Definition)
    def check_ends_with_period(self, definition, docstring):
        """D400: First line should end with a period.

        The [first line of a] docstring is a phrase ending in a period.

        """
        # Delegate to the shared first-line suffix check.
        return self._check_ends_with(
            docstring, '.', violations.D400)

    @check_for(Definition)
    def check_ends_with_punctuation(self, definition, docstring):
        """D415: should end with proper punctuation.

        The [first line of a] docstring is a phrase ending in a period,
        question mark, or exclamation point

        """
        # Delegate to the shared first-line suffix check.
        return self._check_ends_with(
            docstring, ('.', '!', '?'), violations.D415)

    @check_for(Function)
    def check_imperative_mood(self, function, docstring):  # def context
        """D401: First line should be in imperative mood: 'Do', not 'Does'.

        [Docstring] prescribes the function or method's effect as a command:
        ("Do this", "Return that"), not as a description; e.g. don't write
        "Returns the pathname ...".

        """
        if not docstring or function.is_test:
            return

        body = ast.literal_eval(docstring).strip()
        if not body:
            return

        first_word = body.split()[0]
        normalized = first_word.lower()

        if normalized in IMPERATIVE_BLACKLIST:
            return violations.D401b(first_word)

        try:
            correct_forms = IMPERATIVE_VERBS.get(stem(normalized))
        except UnicodeDecodeError:
            # Raised when the first word holds unicode characters but the
            # docstring is not a unicode string; D302 reports that case.
            return

        if not correct_forms or normalized in correct_forms:
            return

        # Suggest the imperative form sharing the longest prefix with the
        # word actually used.
        best = max(correct_forms,
                   key=lambda form: common_prefix_length(normalized, form))
        return violations.D401(best.capitalize(), first_word)

    @check_for(Function)
    def check_no_signature(self, function, docstring):  # def context
        """D402: First line should not be function's or method's "signature".

        The one-line docstring should NOT be a "signature" reiterating the
        function/method parameters (which can be obtained by introspection).

        """
        if not docstring:
            return
        summary = ast.literal_eval(docstring).strip().split('\n')[0]
        # Drop spaces so "name (a, b)" is caught as well as "name(a, b)".
        if function.name + '(' in summary.replace(' ', ''):
            return violations.D402()

    @check_for(Function)
    def check_capitalized(self, function, docstring):
        """D403: First word of the first line should be properly capitalized.

        The [first line of a] docstring is a phrase ending in a period.

        """
        if not docstring:
            return
        first_word = ast.literal_eval(docstring).split()[0]
        # All-caps words (e.g. acronyms) are acceptable as-is.
        if first_word == first_word.upper():
            return
        # Skip words containing anything besides ASCII letters and
        # apostrophes (identifiers, code snippets, numbers, ...).
        if any(c not in string.ascii_letters and c != "'"
               for c in first_word):
            return
        if first_word != first_word.capitalize():
            return violations.D403(first_word.capitalize(), first_word)

    @check_for(Definition)
    def check_starts_with_this(self, function, docstring):
        """D404: First word of the docstring should not be `This`.

        Docstrings should use short, simple language. They should not begin
        with "This class is [..]" or "This module contains [..]".

        """
        if not docstring:
            return
        leading_word = ast.literal_eval(docstring).split()[0]
        if leading_word.lower() == 'this':
            return violations.D404()

    @staticmethod
    def _is_docstring_section(context):
        """Check if the suspected context is really a section header.

        Lets have a look at the following example docstring:
            '''Title.

            Some part of the docstring that specifies what the function
            returns. <----- Not a real section name. It has a suffix and the
                            previous line is not empty and does not end with
                            a punctuation sign.

            This is another line in the docstring. It describes stuff,
            but we forgot to add a blank line between it and the section name.
            Parameters  <-- A real section name. The previous line ends with
            ----------      a period, therefore it is in a new
                            grammatical context.
            param : int
            examples : list  <------- Not a section - previous line doesn't end
                A list of examples.   with punctuation.
            notes : list  <---------- Not a section - there's text after the
                A list of notes.      colon.

            Notes:  <--- Suspected as a context because there's a suffix to the
            -----        section, but it's a colon so it's probably a mistake.
            Bla.

            '''

        To make sure this is really a section we check these conditions:
            * There's no suffix to the section name or it's just a colon AND
            * The previous line is empty OR it ends with punctuation.

        If one of the conditions is true, we will consider the line as
        a section name.
        """
        stripped_line = context.line.strip()
        section_name = context.section_name.strip()
        # BUG FIX: str.lstrip() removes a *character set*, not a prefix, so
        # `lstrip(section_name)` could also eat leading suffix characters
        # that happen to appear in the section name (e.g. "Returnss" would
        # yield an empty suffix). The section name is derived from the
        # leading words of this very line, so slicing off the exact prefix
        # is safe; the lstrip() fallback is kept purely defensively.
        if stripped_line.startswith(section_name):
            section_name_suffix = stripped_line[len(section_name):].strip()
        else:
            section_name_suffix = stripped_line.lstrip(section_name).strip()

        section_suffix_is_only_colon = section_name_suffix == ':'

        punctuation = [',', ';', '.', '-', '\\', '/', ']', '}', ')']
        prev_line_ends_with_punctuation = \
            any(context.previous_line.strip().endswith(x) for x in punctuation)

        this_line_looks_like_a_section_name = \
            is_blank(section_name_suffix) or section_suffix_is_only_colon

        prev_line_looks_like_end_of_paragraph = \
            prev_line_ends_with_punctuation or is_blank(context.previous_line)

        return (this_line_looks_like_a_section_name and
                prev_line_looks_like_end_of_paragraph)

    @classmethod
    def _check_blanks_and_section_underline(cls, section_name, context, indentation):
        """D4{07,08,09,12,14}, D215: Section underline checks.

        Check for correct formatting for docstring sections. Checks that:
            * The line that follows the section name contains
              dashes (D40{7,8}).
            * The amount of dashes is equal to the length of the section
              name (D409).
            * The section's content does not begin in the line that follows
              the section header (D412).
            * The section has no content (D414).
            * The indentation of the dashed line is equal to the docstring's
              indentation (D215).
        """
        blank_lines_after_header = 0

        # Count blank lines between the section header and the first
        # non-blank line that follows it.
        for line in context.following_lines:
            if not is_blank(line):
                break
            blank_lines_after_header += 1
        else:
            # There are only blank lines after the header.
            yield violations.D407(section_name)
            yield violations.D414(section_name)
            return

        non_empty_line = context.following_lines[blank_lines_after_header]
        # A line made up solely of dashes collapses to {'-'} when its
        # stripped characters are deduplicated.
        dash_line_found = ''.join(set(non_empty_line.strip())) == '-'

        if not dash_line_found:
            yield violations.D407(section_name)
            if blank_lines_after_header > 0:
                yield violations.D412(section_name)
        else:
            # No blank lines are allowed between the header and its underline.
            if blank_lines_after_header > 0:
                yield violations.D408(section_name)

            # The underline must be exactly as long as the section name.
            if non_empty_line.strip() != "-" * len(section_name):
                yield violations.D409(len(section_name),
                                      section_name,
                                      len(non_empty_line.strip()))

            # The underline must not be indented deeper than the docstring.
            if leading_space(non_empty_line) > indentation:
                yield violations.D215(section_name)

            line_after_dashes_index = blank_lines_after_header + 1
            # If the line index after the dashes is in range (perhaps we have
            # a header + underline followed by another section header).
            if line_after_dashes_index < len(context.following_lines):
                line_after_dashes = \
                    context.following_lines[line_after_dashes_index]
                if is_blank(line_after_dashes):
                    rest_of_lines = \
                        context.following_lines[line_after_dashes_index:]
                    if not is_blank(''.join(rest_of_lines)):
                        # Content exists but is separated from the underline
                        # by blank line(s).
                        yield violations.D412(section_name)
                    else:
                        # Nothing but blanks below the underline: empty section.
                        yield violations.D414(section_name)
            else:
                yield violations.D414(section_name)

    @classmethod
    def _check_common_section(cls, docstring, definition, context, valid_section_names):
        """D4{05,10,11,13}, D214: Section name checks.

        Check for valid section names. Checks that:
            * The section name is properly capitalized (D405).
            * The section is not over-indented (D214).
            * There's a blank line after the section (D410, D413).
            * There's a blank line before the section (D411).

        Also yields all the errors from `_check_blanks_and_section_underline`.
        """
        indentation = cls._get_docstring_indent(definition, docstring)
        capitalized_section = context.section_name.title()

        # Wrong capitalization: the name is only valid once title-cased.
        known_as_is = context.section_name in valid_section_names
        if not known_as_is and capitalized_section in valid_section_names:
            yield violations.D405(capitalized_section, context.section_name)

        if leading_space(context.line) > indentation:
            yield violations.D214(capitalized_section)

        # A section must be followed by a blank line; the last section gets
        # its own dedicated code.
        following = context.following_lines
        if not following or not is_blank(following[-1]):
            missing_blank = (violations.D413 if context.is_last_section
                             else violations.D410)
            yield missing_blank(capitalized_section)

        if not is_blank(context.previous_line):
            yield violations.D411(capitalized_section)

        yield from cls._check_blanks_and_section_underline(
            capitalized_section, context, indentation)

    @classmethod
    def _check_numpy_section(cls, docstring, definition, context):
        """D406: NumPy-style section name checks.

        Check for valid section names. Checks that:
            * The section name has no superfluous suffix to it (D406).

        Additionally, also yield all violations from `_check_common_section`
        which are style-agnostic section checks.
        """
        capitalized_section = context.section_name.title()
        yield from cls._check_common_section(docstring,
                                             definition,
                                             context,
                                             cls.NUMPY_SECTION_NAMES)
        # BUG FIX: str.lstrip() strips a character *set*, not a prefix, so
        # a trailing character shared with the section name would be lost
        # from the suffix (e.g. "Returnss" -> ""). Slice the exact prefix
        # instead; the lstrip() fallback is kept purely defensively.
        stripped_line = context.line.strip()
        if stripped_line.startswith(context.section_name):
            suffix = stripped_line[len(context.section_name):]
        else:
            suffix = stripped_line.lstrip(context.section_name)
        if suffix:
            yield violations.D406(capitalized_section, context.line.strip())

    @staticmethod
    def _check_args_section(docstring, definition, context):
        """D417: `Args` section checks.

        Check for a valid `Args` or `Argument` section. Checks that:
            * The section documents all function arguments (D417)
                except `self` or `cls` if it is a method.

        """
        documented = {
            match.group(1)
            for match in map(ConventionChecker.GOOGLE_ARGS_REGEX.match,
                             context.following_lines)
            if match
        }
        function_args = get_function_args(definition.source)
        # Non-static methods implicitly receive `cls`/`self` as their first
        # positional argument, which need not be documented.
        if definition.kind == 'method' and not definition.is_static:
            function_args = function_args[1:]
        undocumented = set(function_args) - documented
        if undocumented:
            yield violations.D417(", ".join(sorted(undocumented)),
                                  definition.name)


    @classmethod
    def _check_google_section(cls, docstring, definition, context):
        """D416: Google-style section name checks.

        Check for valid section names. Checks that:
            * The section does not contain any blank line between its name
              and content (D412).
            * The section is not empty (D414).
            * The section name has semicolon as a suffix (D416).

        Additionally, also yield all violations from `_check_common_section`
        which are style-agnostic section checks.
        """
        capitalized_section = context.section_name.title()
        yield from cls._check_common_section(docstring,
                                             definition,
                                             context,
                                             cls.GOOGLE_SECTION_NAMES)
        # BUG FIX: str.lstrip() strips a character *set*, not a prefix, and
        # could therefore consume leading suffix characters that appear in
        # the section name. Slice off the exact prefix instead; the lstrip()
        # fallback is kept purely defensively.
        stripped_line = context.line.strip()
        if stripped_line.startswith(context.section_name):
            suffix = stripped_line[len(context.section_name):]
        else:
            suffix = stripped_line.lstrip(context.section_name)
        if suffix != ":":
            yield violations.D416(capitalized_section + ":", context.line.strip())

        if capitalized_section in ("Args", "Arguments"):
            yield from cls._check_args_section(docstring, definition, context)


    @staticmethod
    def _get_section_contexts(lines, valid_section_names):
        """Generate `SectionContext` objects for valid sections.

        Given a list of `valid_section_names`, generate an
        `Iterable[SectionContext]` which provides:
            * Section Name
            * String value of the previous line
            * The section line
            * Following lines till the next section
            * Line index of the beginning of the section in the docstring
            * Boolean indicating whether the section is the last section.
        for each valid section.

        """
        lower_section_names = [name.lower() for name in valid_section_names]

        def _suspected_as_section(candidate):
            # Case-insensitive match on the line's leading words.
            return get_leading_words(candidate.lower()) in lower_section_names

        SectionContext = namedtuple('SectionContext', ('section_name',
                                                       'previous_line',
                                                       'line',
                                                       'following_lines',
                                                       'original_index',
                                                       'is_last_section'))

        # Possible contexts for every suspected line; `following_lines`
        # initially extends to the end of the docstring.
        candidates = [
            SectionContext(get_leading_words(lines[index].strip()),
                           lines[index - 1],
                           lines[index],
                           lines[index + 1:],
                           index,
                           False)
            for index, line in enumerate(lines)
            if _suspected_as_section(line)
        ]

        # Rule out false positives.
        sections = [c for c in candidates
                    if ConventionChecker._is_docstring_section(c)]

        # Trim each `following_lines` to stop at the next section name.
        for current, upcoming in pairwise(sections, None):
            end = -1 if upcoming is None else upcoming.original_index
            yield SectionContext(current.section_name,
                                 current.previous_line,
                                 current.line,
                                 lines[current.original_index + 1:end],
                                 current.original_index,
                                 upcoming is None)


    def _check_numpy_sections(self, lines, definition, docstring):
        """NumPy-style docstring sections checks.

        Check the general format of a sectioned docstring:
            '''This is my one-liner.

            Short Summary
            -------------
            This is my summary.

            Returns
            -------
            None.

            '''

        Section names appear in `NUMPY_SECTION_NAMES`.
        Yields all violation from `_check_numpy_section` for each valid
        Numpy-style section.
        """
        contexts = self._get_section_contexts(lines, self.NUMPY_SECTION_NAMES)
        for context in contexts:
            yield from self._check_numpy_section(docstring, definition,
                                                 context)

    def _check_google_sections(self, lines, definition, docstring):
        """Google-style docstring section checks.

        Check the general format of a sectioned docstring:
            '''This is my one-liner.

            Note:
                This is my summary.

            Returns:
                None.

            '''

        Section names appear in `GOOGLE_SECTION_NAMES`.
        Yields all violation from `_check_google_section` for each valid
        Google-style section.
        """
        contexts = self._get_section_contexts(lines, self.GOOGLE_SECTION_NAMES)
        for context in contexts:
            yield from self._check_google_section(docstring, definition,
                                                  context)

    @check_for(Definition)
    def check_docstring_sections(self, definition, docstring):
        """Check for docstring sections."""
        if not docstring:
            return

        # One-liners cannot contain sections.
        lines = docstring.split("\n")
        if len(lines) >= 2:
            yield from self._check_numpy_sections(lines, definition,
                                                  docstring)
            yield from self._check_google_sections(lines, definition,
                                                   docstring)
Exemple #25
0
 def _get_matches(config):
     """Return the `match` and `match_dir` functions for `config`."""

     def anchored(pattern):
         # Anchor at the end so the pattern must cover the whole name.
         return re(pattern + '$').match

     return anchored(config.match), anchored(config.match_dir)
Exemple #26
0
def leading_space(string):
    """Return the leading whitespace of `string` (may be empty)."""
    # Raw string: the non-raw '\s' is an invalid escape sequence and emits
    # a DeprecationWarning (a SyntaxWarning on newer CPython).
    return re(r'\s*').match(string).group()
def kafka_system_metric(match):
    """Build a metric descriptor from a `(system)-(metric)` regex match."""
    system, metric = match.group(1), match.group(2)
    return {
        'name': 'kafka-' + metric,
        'labels': {
            'system': system
        }
    }

"""
Simple metrics are encoded as strings.
Metrics that need a regex to extract labels are encoded as a tuple (regex, parser).
"""
metrics = {
    'org.apache.samza.system.kafka.KafkaSystemProducerMetrics': [
        (re('(.*)-(producer-send-failed)'), kafka_system_metric),
        (re('(.*)-(producer-send-success)'), kafka_system_metric),
        (re('(.*)-(producer-sends)'), kafka_system_metric),
        (re('(.*)-(producer-retries)'), kafka_system_metric),
        (re('(.*)-(flush-ms)'), kafka_system_metric),
        (re('(.*)-(flush-failed)'), kafka_system_metric),
        (re('(.*)-(flushes)'), kafka_system_metric),
        (re('(.*)-(flush-ns)'), kafka_system_metric),
        'serialization error',
    ],
    'org.apache.samza.system.kafka.KafkaSystemConsumerMetrics': {
        (re('(.*)-(\d+)-(bytes-read)'), topic_partition_metric),
        (re('(.*)-(\d+)-(high-watermark)'), topic_partition_metric),
        (re('(.*)-(\d+)-(messages-read)'), topic_partition_metric),
        (re('(.*)-(\d+)-(offset-change)'), topic_partition_metric),
        (re('(.*)-(\d+)-(messages-behind-high-watermark)'), topic_partition_metric),
class BrowserGenerator(ServiceDialog):
    """
    Provides a dialog for generating many media files to multiple cards
    from the card browser.
    """

    # Description and slug used by the base dialog's "Help" link.
    HELP_USAGE_DESC = "Adding audio to multiple notes"

    HELP_USAGE_SLUG = 'browser'

    # Collapses whitespace runs when normalizing exception messages so that
    # identical errors can be grouped and counted together.
    _RE_WHITESPACE = re(r'\s+')

    __slots__ = [
        '_browser',  # reference to the current Anki browser window
        '_notes',  # list of Note objects selected when window opened
        '_process',  # state during processing; see accept() method below
    ]

    def __init__(self, browser, *args, **kwargs):
        """
        Store the browser reference and set the dialog title.
        """

        self._browser = browser
        self._notes = None  # populated in show()
        self._process = None  # populated in accept()

        super(BrowserGenerator, self).__init__(
            title="Add TTS Audio to Selected Notes", *args, **kwargs)

    # UI Construction ########################################################

    def _ui_control(self):
        """
        Returns the superclass's text and preview buttons, adding our
        inputs to control the mass generation process, and then the base
        class's cancel/OK buttons.
        """

        caption = Label("Fields and Handling")
        caption.setFont(self._FONT_HEADER)

        blurb = Note()  # its text is filled in by show()
        blurb.setObjectName('intro')

        layout = super(BrowserGenerator, self)._ui_control()
        layout.addWidget(caption)
        layout.addWidget(blurb)
        layout.addStretch()
        layout.addLayout(self._ui_control_fields())
        layout.addWidget(self._ui_control_handling())
        layout.addStretch()
        layout.addWidget(self._ui_buttons())
        return layout

    def _ui_control_fields(self):
        """
        Returns a grid layout with the source and destination fields.

        Note that populating the field dropdowns is deferred to the
        show() event handler because the available fields might change
        from call to call.
        """

        grid = QtWidgets.QGridLayout()

        # One labeled dropdown per row: source on top, destination below.
        for row, (caption, object_name) in enumerate([
                ("Source Field:", 'source'),
                ("Destination Field:", 'dest')]):
            label = Label(caption)
            label.setFont(self._FONT_LABEL)

            dropdown = QtWidgets.QComboBox()
            dropdown.setObjectName(object_name)

            grid.addWidget(label, row, 0)
            grid.addWidget(dropdown, row, 1)

        return grid

    def _ui_control_handling(self):
        """
        Return the append/overwrite radio buttons and behavior checkbox.
        """

        append = QtWidgets.QRadioButton(
            "&Append [sound:xxx] Tag onto Destination Field")
        append.setObjectName('append')
        append.toggled.connect(self._on_handling_toggled)

        overwrite = QtWidgets.QRadioButton(
            "Over&write the Destination Field w/ Media Filename")
        overwrite.setObjectName('overwrite')
        overwrite.toggled.connect(self._on_handling_toggled)

        behavior = Checkbox(object_name='behavior')
        # The signal's state argument is ignored; the handler re-reads
        # the checkbox itself.
        behavior.stateChanged.connect(
            lambda status: self._on_behavior_changed())

        stack = QtWidgets.QVBoxLayout()
        stack.addWidget(append)
        stack.addWidget(overwrite)
        stack.addSpacing(self._SPACING)
        stack.addWidget(behavior)

        container = QtWidgets.QWidget()
        container.setLayout(stack)
        return container

    def _ui_buttons(self):
        """
        Adjust title of the OK button.
        """

        buttons = super(BrowserGenerator, self)._ui_buttons()
        okay = buttons.findChild(QtWidgets.QAbstractButton, 'okay')
        okay.setText("&Generate")
        return buttons

    # Events #################################################################

    def show(self, *args, **kwargs):
        """
        Populate the source and destination dropdowns, recall the
        handling and behavior inputs, and focus the source dropdown.

        Note that the fields are dumped and repopulated each time,
        because the list of fields might change between displays of the
        window.
        """

        self._notes = [
            self._browser.mw.col.getNote(note_id)
            for note_id in self._browser.selectedNotes()
        ]

        # Reflect the selection count in the intro label that was created
        # by _ui_control().
        self.findChild(Note, 'intro').setText(
            '%d note%s selected. Click "Help" for usage hints.' %
            (len(self._notes), "s" if len(self._notes) != 1 else ""))

        # Sorted union of all field names across the selected notes.
        fields = sorted(
            {field
             for note in self._notes for field in note.keys()})

        config = self._addon.config

        source = self.findChild(QtWidgets.QComboBox, 'source')
        source.clear()
        for field in fields:
            source.addItem(field, field)
        # findData() returns -1 when the remembered field no longer exists;
        # max(..., 0) falls back to the first entry in that case.
        source.setCurrentIndex(
            max(source.findData(config['last_mass_source']), 0))

        dest = self.findChild(QtWidgets.QComboBox, 'dest')
        dest.clear()
        for field in fields:
            dest.addItem(field, field)
        dest.setCurrentIndex(max(dest.findData(config['last_mass_dest']), 0))

        # Restore the previously used append/overwrite handling mode.
        self.findChild(
            QtWidgets.QRadioButton,
            'append' if config['last_mass_append'] else 'overwrite',
        ).setChecked(True)

        self.findChild(Checkbox, 'behavior') \
            .setChecked(config['last_mass_behavior'])

        super(BrowserGenerator, self).show(*args, **kwargs)

        source.setFocus()

    def accept(self):
        """
        Check to make sure that we have at least one note, pull the
        service options, and kick off the processing.
        """

        now = self._get_all()
        source = now['last_mass_source']
        dest = now['last_mass_dest']
        append = now['last_mass_append']
        behavior = now['last_mass_behavior']

        # Only notes carrying BOTH the source and destination fields can
        # be processed.
        eligible_notes = [
            note for note in self._notes if source in note and dest in note
        ]

        if not eligible_notes:
            self._alerts(
                f"Of the {len(self._notes)} notes selected in the browser, "
                f"none have both '{source}' and '{dest}' fields."
                if len(self._notes) > 1 else
                f"The selected note does not have both "
                f"'{source}' and '{dest}' fields.",
                self,
            )
            return

        self._disable_inputs()

        svc_id = now['last_service']
        # A service group has no single options dict; the router resolves
        # the concrete service (and its options) per call in that case.
        options = (None if svc_id.startswith('group:') else
                   now['last_options'][now['last_service']])

        # Shared mutable state consumed/updated by _accept_next(),
        # _accept_throttled(), _accept_update(), and _accept_done().
        self._process = {
            'all':
            now,
            'aborted':
            False,
            'progress':
            _Progress(
                maximum=len(eligible_notes),
                on_cancel=self._accept_abort,
                title="Generating MP3s",
                addon=self._addon,
                parent=self,
            ),
            'service': {
                'id': svc_id,
                'options': options,
            },
            'fields': {
                'source': source,
                'dest': dest,
            },
            'handling': {
                'append': append,
                'behavior': behavior,
            },
            'queue':
            eligible_notes,
            'counts': {
                'total': len(self._notes),
                'elig': len(eligible_notes),
                'skip': len(self._notes) - len(eligible_notes),
                'done': 0,  # all notes processed
                'okay': 0,  # calls which resulted in a successful MP3
                'fail': 0,  # calls which resulted in an exception
            },
            'exceptions': {},
            'throttling': {
                'calls': {},  # unthrottled download calls made per service
                'sleep': self._addon.config['throttle_sleep'],
                'threshold': self._addon.config['throttle_threshold'],
            },
        }

        # Single undo checkpoint for the whole batch.
        self._browser.mw.checkpoint("AwesomeTTS Batch Update")
        self._process['progress'].show()
        self._browser.model.beginReset()

        self._accept_next()

    def _accept_abort(self):
        """
        Flag that the user has asked for processing to stop; the main
        loop checks this flag before each note.
        """
        self._process['aborted'] = True

    def _accept_next(self):
        """
        Pop the next note off the queue, if not throttled, and process.
        """

        self._accept_update()

        proc = self._process
        throttling = proc['throttling']

        if proc['aborted'] or not proc['queue']:
            self._accept_done()
            return

        if throttling['calls'] and \
           max(throttling['calls'].values()) >= throttling['threshold']:
            # at least one service needs a break

            # _accept_throttled() fires once per second, decrementing the
            # countdown and resuming processing when it reaches zero.
            timer = QtCore.QTimer()
            throttling['timer'] = timer
            throttling['countdown'] = throttling['sleep']

            timer.timeout.connect(self._accept_throttled)
            timer.setInterval(1000)
            timer.start()
            return

        note = proc['queue'].pop(0)
        phrase = note[proc['fields']['source']]
        phrase = self._addon.strip.from_note(phrase)
        self._accept_update(phrase)

        # The closures below capture `proc`, `note`, and `throttling`, and
        # are invoked by the router as the call progresses.

        def done():
            """Count the processed note."""

            proc['counts']['done'] += 1

        def okay(path):
            """Count the success and update the note."""

            filename = self._browser.mw.col.media.addFile(path)
            dest = proc['fields']['dest']
            note[dest] = self._accept_next_output(note[dest], filename)
            proc['counts']['okay'] += 1
            note.flush()

        def fail(exception):
            """Count the failure and the unique message."""

            proc['counts']['fail'] += 1

            message = exception.message
            if isinstance(message, str):
                # Collapse whitespace so identical errors group together.
                message = self._RE_WHITESPACE.sub(' ', message).strip()

            try:
                proc['exceptions'][message] += 1
            except KeyError:
                proc['exceptions'][message] = 1

        def miss(svc_id, count):
            """Count the cache miss."""

            try:
                throttling['calls'][svc_id] += count
            except KeyError:
                throttling['calls'][svc_id] = count

        callbacks = dict(
            done=done,
            okay=okay,
            fail=fail,
            miss=miss,

            # The call to _accept_next() is done via a single-shot QTimer for
            # a few reasons: keep the UI responsive, avoid a "maximum
            # recursion depth exceeded" exception if we hit a string of cached
            # files, and allow time to respond to a "cancel".
            then=lambda: QtCore.QTimer.singleShot(0, self._accept_next),
        )

        svc_id = proc['service']['id']
        want_human = (self._addon.config['filenames_human'] or '{{text}}'
                      if self._addon.config['filenames'] == 'human' else False)

        # Service groups are dispatched through router.group(); a single
        # service goes straight through the router.
        if svc_id.startswith('group:'):
            config = self._addon.config
            self._addon.router.group(text=phrase,
                                     group=config['groups'][svc_id[6:]],
                                     presets=config['presets'],
                                     callbacks=callbacks,
                                     want_human=want_human,
                                     note=note)
        else:
            self._addon.router(svc_id=svc_id,
                               text=phrase,
                               options=proc['service']['options'],
                               callbacks=callbacks,
                               want_human=want_human,
                               note=note)

    def _accept_next_output(self, old_value, filename):
        """Build the new field value for a processed note.

        Combines the note's previous value with *filename* according to
        the user's append/overwrite and behavior settings.
        """

        handling = self._process['handling']

        if not handling['append']:
            # Overwrite mode: either a [sound:] tag or the bare filename.
            return ('[sound:%s]' % filename if handling['behavior']
                    else filename)

        if handling['behavior']:
            # Append after stripping any existing sound tags.
            stripped = self._addon.strip.sounds.univ(old_value).strip()
            return stripped + ' [sound:%s]' % filename

        if filename in old_value:
            # Already referenced; leave the field untouched.
            return old_value

        return old_value + ' [sound:%s]' % filename

    def _accept_throttled(self):
        """Handle one tick of the throttle timer.

        Stops cleanly if the user aborted; otherwise counts down and
        resumes batch processing once the countdown reaches zero.
        """

        process = self._process
        throttling = process['throttling']

        if process['aborted']:
            throttling['timer'].stop()
            self._accept_done()
            return

        throttling['countdown'] -= 1
        self._accept_update()

        if throttling['countdown'] > 0:
            return

        # Countdown elapsed: tear down the timer state, reset the
        # per-service call tally, and continue with the next note.
        throttling['timer'].stop()
        del throttling['countdown']
        del throttling['timer']
        throttling['calls'] = {}
        self._accept_next()

    def _accept_update(self, detail=None):
        """Refresh the progress dialog's label, value, and detail text."""

        proc = self._process
        counts = proc['counts']
        throttling = proc['throttling']

        skip_suffix = (" (%d skipped)" % counts['skip']
                       if counts['skip'] else "")

        if throttling and 'countdown' in throttling:
            remaining = throttling['countdown']
            status = "sleeping for %d second%s" % (
                remaining, "s" if remaining != 1 else "")
        else:
            status = " "

        proc['progress'].update(
            label="finished %d of %d%s\n"
                  "%d successful, %d failed\n"
                  "\n"
                  "%s" % (counts['done'], counts['elig'], skip_suffix,
                          counts['okay'], counts['fail'], status),
            value=counts['done'],
            detail=detail,
        )

    def _accept_done(self):
        """
        Display statistics and close out the dialog.
        """

        self._browser.model.endReset()

        proc = self._process
        proc['progress'].accept()

        # Opening sentence(s): how many notes were processed / skipped.
        messages = [
            "The %d note%s you selected %s been processed. " % (
                proc['counts']['total'],
                "s" if proc['counts']['total'] != 1 else "",
                "have" if proc['counts']['total'] != 1 else "has",
            ) if proc['counts']['done'] == proc['counts']['total'] else
            "%d of the %d note%s you selected %s processed. " % (
                proc['counts']['done'],
                proc['counts']['total'],
                "s" if proc['counts']['total'] != 1 else "",
                "were" if proc['counts']['done'] != 1 else "was",
            ),
            "%d note%s skipped for not having both the source and "
            "destination fields. Of those remaining, " % (
                proc['counts']['skip'],
                "s were" if proc['counts']['skip'] != 1 else " was",
            ) if proc['counts']['skip'] else "During processing, "
        ]

        if proc['counts']['fail']:
            # Summarize success/failure counts, then list each unique
            # error message with how many times it occurred.
            if proc['counts']['okay']:
                messages.append(
                    "%d note%s successfully updated, but "
                    "%d note%s failed while processing." % (
                        proc['counts']['okay'],
                        "s were" if proc['counts']['okay'] != 1 else " was",
                        proc['counts']['fail'],
                        "s" if proc['counts']['fail'] != 1 else "",
                    ))
            else:
                messages.append("no notes were successfully updated.")

            messages.append("\n\n")

            if len(proc['exceptions']) == 1:
                messages.append("The following problem was encountered:")
                messages += [
                    "\n%s (%d time%s)" %
                    (message, count, "s" if count != 1 else "")
                    for message, count in proc['exceptions'].items()
                ]
            else:
                messages.append("The following problems were encountered:")
                messages += [
                    "\n- %s (%d time%s)" %
                    (message, count, "s" if count != 1 else "")
                    for message, count in proc['exceptions'].items()
                ]

        else:
            # NOTE(review): on an error-free run the alert callable is
            # rerouted to aqt's modal showInfo — presumably so the summary
            # is always shown; confirm this rerouting is intentional.
            from aqt.utils import showInfo
            self._alerts = showInfo
            messages.append("there were no errors.")

        if proc['aborted']:
            messages.append("\n\n")
            messages.append(
                "You aborted processing. If you want to rollback the changes "
                "to the notes that were already processed, use the Undo "
                "AwesomeTTS Batch Update option from the Edit menu.")

        # Persist form state and tear down the batch-processing context.
        self._addon.config.update(proc['all'])
        self._disable_inputs(False)
        self._notes = None
        self._process = None

        super(BrowserGenerator, self).accept()

        # this alert is done by way of a singleShot() callback to avoid random
        # crashes on Mac OS X, which happen <5% of the time if called directly
        QtCore.QTimer.singleShot(
            0,
            lambda: self._alerts("".join(messages), self._browser),
        )

    def _get_all(self):
        """Return saved-config updates, including fields and behavior."""

        source, dest, append, behavior = self._get_field_values()

        # Start from the parent's settings and overlay our own keys.
        updates = dict(super(BrowserGenerator, self)._get_all())
        updates.update(
            last_mass_append=append,
            last_mass_behavior=behavior,
            last_mass_dest=dest,
            last_mass_source=source,
        )
        return updates

    def _get_field_values(self):
        """Return (source, dest, append?, behavior?) from the form widgets."""

        source_box = self.findChild(QtWidgets.QComboBox, 'source')
        dest_box = self.findChild(QtWidgets.QComboBox, 'dest')
        append_radio = self.findChild(QtWidgets.QRadioButton, 'append')
        behavior_check = self.findChild(Checkbox, 'behavior')

        return (source_box.currentText(), dest_box.currentText(),
                append_radio.isChecked(), behavior_check.isChecked())

    def _on_handling_toggled(self):
        """Relabel and re-check the behavior checkbox whenever the
        append/overwrite radio selection changes."""

        behavior = self.findChild(Checkbox, 'behavior')

        if self.findChild(QtWidgets.QRadioButton, 'append').isChecked():
            label = "Remove Existing [sound:xxx] Tag(s)"
        else:
            label = "Wrap the Filename in [sound:xxx] Tag"

        behavior.setText(label)
        behavior.setChecked(True)

    def _on_behavior_changed(self):
        """
        Display a warning about bare filenames if user selects the
        override option and disables wrapping the field with a [sound]
        tag.
        """

        if not self.isVisible():
            return

        append = self.findChild(QtWidgets.QRadioButton, 'append')
        behavior = self.findChild(Checkbox, 'behavior')

        # Only the overwrite-with-bare-filename combination needs a warning.
        if append.isChecked() or behavior.isChecked():
            return

        self._alerts(
            'Please note that if you use bare filenames, the "Check '
            'Media" feature in Anki will not detect those audio '
            "files as in-use, even if you insert the field into your "
            "templates.",
            self,
        )
Exemple #29
0
# -*- coding: UTF-8 -*-

import Image
from tiles import HEIGHT, WIDTH
from locations import locations as get_locations, SCALE
from regions import regions2 as get_regions
from glob import glob
from re import compile as re
from os import stat
from os.path import isfile
from utils import makedirs
from scale import scale_command
from darken import darken
import sys

# Filenames look like "archive/labels/<name>-<xy>-normalized.png" where <xy>
# is a two-letter language/alphabet code (see the dicts below).
normalized_labels_re = re(r'archive/labels/(.*)\-(..)-normalized\.png')
# Bug fix: the trailing dot must be escaped so ".png" is matched literally
# (previously "." matched any character), consistent with the pattern above.
abnormal_labels_re = re(r'archive/labels/(.*)\-([seku][lt])\.png')
# First letter of the code: language.
languages = {
    "s": "Sindarin",
    "e": "English",
    "k": "Khuzdul",
    "u": "Unknown",
}
# Second letter of the code: alphabet.
alphabets = {
    "l": "Latin",
    "t": "Tengwar",
}

def normalized_labels():
    for file_name in glob("archive/labels/*-normalized.png"):
        parts = normalized_labels_re.match(file_name).groups()
Exemple #30
0
import json
import os
import sys
from re import compile as re
from re import M

import yaml
from tornado import ioloop, log

from .alerts import BaseAlert
from .handlers import registry
from .units import MILLISECOND, TimeUnit

LOGGER = log.gen_log

# Strips trailing "// ..." comments (per line, via the MULTILINE flag) so the
# config text can be parsed as plain JSON/YAML.
COMMENT_RE = re(r'//\s+.*$', M)


class Reactor(object):
    """ Class description. """

    defaults = {
        'auth_password': None,
        'auth_username': None,
        'config': None,
        'critical_handlers': ['log', 'smtp'],
        'debug': False,
        'format': 'short',
        'graphite_url': 'http://localhost',
        'history_size': '1day',
        'interval': '10minute',
Exemple #31
0
    tuple. If not, returns a tuple with the old path and None.
    """

    old_path = environ.get('PATH_INFO')

    if old_path:
        new_path = '/' + '/'.join(component for component in [
            component.strip('-.')
            for component in get_paths.re_excessive_any.sub(
                '.',
                get_paths.re_excessive_dashes.sub(
                    '-',
                    get_paths.re_excessive_periods.sub(
                        '.',
                        get_paths.re_filter_characters.sub(
                            '', get_paths.re_filter_encoding.sub(
                                '', old_path))))).lower().split('/')
        ] if component)

        if new_path != old_path:
            return old_path, new_path

    return old_path, None


# Precompiled patterns attached to get_paths so they are built only once.
get_paths.re_excessive_any = re(r'[-.]{2,}')  # runs mixing dashes/periods
get_paths.re_excessive_dashes = re(r'-{2,}')  # runs of dashes
get_paths.re_excessive_periods = re(r'\.{2,}')  # runs of periods
get_paths.re_filter_characters = re(r'[^-./A-Za-z0-9]')  # non-path-safe chars
get_paths.re_filter_encoding = re(r'%[0-9A-Fa-f]{2}')  # percent-encoded bytes
Exemple #32
0
def leading_space(string):
    """Return any leading space from `string`."""
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning today, a SyntaxError in future Python versions).
    return re(r'\s*').match(string).group()
Exemple #33
0
class Router(object):
    """ Control migrations. """

    # Migration modules must be named like "001_label.py".
    filemask = re(r"[\d]{3}_[^\.]+\.py")
    # Shared database proxy; bound to the real database in __init__.
    proxy = Proxy()

    def __init__(self, migrate_dir, **options):
        # Configure logging first so warnings below are visible; extra
        # options may also be merged in from <migrate_dir>/conf.py.

        LOGGER.setLevel(options.get('LOGGING', 'WARNING'))

        # Create the migrations directory on first use.
        if not op.exists(migrate_dir):
            LOGGER.warn('Migration directory: %s does not exists.',
                        migrate_dir)
            md(migrate_dir)

        # Execute conf.py (if present) and copy its public names into options.
        config = {}
        if op.exists(op.join(migrate_dir, 'conf.py')):
            with open(op.join(migrate_dir, 'conf.py')) as f:
                exec_in(f.read(), config, config)
            for key in config:
                if not key.startswith('_'):
                    options[key] = config[key]
        else:
            LOGGER.warn(
                'Configuration file `conf.py` didnt found in migration directory'
            )

        self.migrate_dir = migrate_dir

        # DATABASE may be a ready connection object or a URL-style string.
        self.db = options.get('DATABASE')
        if not isinstance(
                self.db,
            (SqliteDatabase, MySQLDatabase, PostgresqlDatabase)) and self.db:
            self.db = connect(self.db)

        try:
            assert self.db
            self.proxy.initialize(self.db)
            assert self.proxy.database
            MigrateHistory.create_table()
        except (AttributeError, AssertionError):
            LOGGER.error("Invalid database: %s", self.db)
            sys.exit(1)
        except Exception:
            # Best-effort: create_table() may fail when the history table
            # already exists; deliberately ignored.
            pass

    @property
    def fs_migrations(self):
        """Migration names found on the file system, sorted."""
        return sorted(''.join(f[:-3]) for f in ls(self.migrate_dir)
                      if self.filemask.match(f))

    @property
    def db_migrations(self):
        """Migration names already recorded in the database."""
        return [mm.name for mm in MigrateHistory.select()]

    @property
    def diff(self):
        """File-system migrations not yet applied to the database."""
        db = set(self.db_migrations)
        return [name for name in self.fs_migrations if name not in db]

    def run(self, name=None):
        """ Run migrations. """

        LOGGER.info('Start migrations')

        migrator = Migrator(self.db)
        # A specific name runs just that migration; otherwise run all pending.
        if name:
            return self.run_one(name, migrator)

        diff = self.diff
        for name in diff:
            self.run_one(name, migrator)

        if not diff:
            LOGGER.info('Nothing to migrate')

    def run_one(self, name, migrator):
        """ Run a migration. """

        LOGGER.info('Run "%s"', name)

        try:
            # Execute the migration module inside a transaction and record
            # it in the history table on success.
            with open(op.join(self.migrate_dir, name + '.py')) as f:
                with self.db.transaction():
                    code = f.read()
                    scope = {}
                    exec_in(code, scope)
                    migrate = scope.get('migrate', lambda m: None)
                    logging.info('Start migration %s', name)
                    migrate(migrator, self.db)
                    MigrateHistory.create(name=name)
                    logging.info('Migrated %s', name)

        except Exception as exc:
            # Roll back the failed transaction and log; processing continues.
            self.db.rollback()
            LOGGER.error(exc)

    def create(self, name):
        """ Create a migration. """

        LOGGER.info('Create a migration "%s"', name)

        # Sequence number is simply the current count of migration files.
        num = len(self.fs_migrations)
        prefix = '{:03}_'.format(num)
        name = prefix + name + '.py'
        copy(MIGRATE_TEMPLATE, op.join(self.migrate_dir, name))

        LOGGER.info('Migration has created %s', name)
Exemple #34
0
def humanize(string):
    """Insert a space before runs of capitals and lowercase the result."""
    spaced = re(r'(.)([A-Z]+)').sub(r'\1 \2', string)
    return spaced.lower()
Exemple #35
0

# Which linters to run; value is parsed into (name, linter) pairs.
PARSER.add_argument(
    "--linters", "-l", default=_Default(','.join(DEFAULT_LINTERS)),
    type=parse_linters, help=(
        "Select linters. (comma-separated). Choices are %s."
        % ','.join(s for s in LINTERS.keys())
    ))

PARSER.add_argument(
    "--ignore", "-i", default=_Default(''), type=split_csp_str,
    help="Ignore errors and warnings. (comma-separated)")

# Shell-style masks are translated to regexes via fnmatch.translate.
PARSER.add_argument(
    "--skip", default=_Default(''),
    type=lambda s: [re(fnmatch.translate(p)) for p in s.split(',') if p],
    help="Skip files by masks (comma-separated, Ex. */messages.py)")

PARSER.add_argument("--report", "-r", help="Send report to file [REPORT]")
PARSER.add_argument(
    "--hook", action="store_true", help="Install Git (Mercurial) hook.")

PARSER.add_argument(
    "--async", action="store_true",
    help="Enable async mode. Usefull for checking a lot of files. "
    "Dont supported with pylint.")

PARSER.add_argument(
    "--options", "-o", default="",
    help="Select configuration file. By default is '<CURDIR>/pylama.ini'")
Exemple #36
0
def leading_space(string):
    """Return any leading whitespace from `string`."""
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning today, a SyntaxError in future Python versions).
    return re(r'\s*').match(string).group()
Exemple #37
0
""" Code runnning support. """
import sys
from re import compile as re

from ._compat import StringIO
from .environment import env


encoding = re(r'#.*coding[:=]\s*([-\w.]+)')


def run_code():
    """ Run python code in current buffer.

    :returns: None

    """
    errors, err = [], ''
    line1, line2 = env.var('a:line1'), env.var('a:line2')
    lines = __prepare_lines(line1, line2)
    if encoding.match(lines[0]):
        lines.pop(0)
        if encoding.match(lines[0]):
            lines.pop(0)
    elif encoding.match(lines[1]):
        lines.pop(1)

    context = dict(
        __name__='__main__',
        __file__=env.var('expand("%:p")'),
        input=env.user_input,
Exemple #38
0
def humanize(string):
    """Separate capitalized runs with a space and return lowercase text."""
    pattern = re(r"(.)([A-Z]+)")
    return pattern.sub(r"\1 \2", string).lower()
Exemple #39
0
 def __init__(self) -> None:
     """Match ".php" / ".phps" paths (at end of path or before a slash)."""
     pattern = re(r'\.phps?($|/)')
     super().__init__(pattern)
Exemple #40
0
def get_parser():
    """ Make command parser for pylama.

    :return ArgumentParser:

    """
    # def instead of a lambda assignment (PEP 8 E731) and so the helper
    # can carry a docstring.
    def split_csp_str(s):
        """Split a comma-separated string into a list of unique items."""
        return list(set(i for i in s.strip().split(',') if i))

    parser = ArgumentParser(description="Code audit tool for python.")
    parser.add_argument(
        "path", nargs='?', default=_Default(CURDIR),
        help="Path on file or directory.")

    parser.add_argument(
        "--verbose", "-v", action='store_true', help="Verbose mode.")

    parser.add_argument('--version', action='version',
                        version='%(prog)s ' + version)

    parser.add_argument(
        "--format", "-f", default=_Default('pep8'), choices=['pep8', 'pylint'],
        help="Error format.")

    parser.add_argument(
        "--select", "-s", default=_Default(''), type=split_csp_str,
        help="Select errors and warnings. (comma-separated)")

    def parse_linters(csp_str):
        """Map linter names to (name, linter) pairs, skipping unknown ones."""
        result = list()
        for name in split_csp_str(csp_str):
            linter = LINTERS.get(name)
            if linter:
                result.append((name, linter))
        return result

    parser.add_argument(
        "--linters", "-l", default=_Default(','.join(DEFAULT_LINTERS)),
        type=parse_linters, help=(
            "Select linters. (comma-separated). Choices are %s."
            % ','.join(s for s in LINTERS.keys())
        ))

    parser.add_argument(
        "--ignore", "-i", default=_Default(''), type=split_csp_str,
        help="Ignore errors and warnings. (comma-separated)")

    # Shell-style masks are translated to regexes via fnmatch.translate.
    parser.add_argument(
        "--skip", default=_Default(''),
        type=lambda s: [re(fnmatch.translate(p)) for p in s.split(',') if p],
        help="Skip files by masks (comma-separated, Ex. */messages.py)")

    parser.add_argument(
        "--complexity", "-c", default=_Default(DEFAULT_COMPLEXITY), type=int,
        help="Set mccabe complexity.")

    parser.add_argument("--report", "-r", help="Filename for report.")
    parser.add_argument(
        "--hook", action="store_true", help="Install Git (Mercurial) hook.")

    # Typos fixed in the help text ("Usefull" / "Dont supported").
    parser.add_argument(
        "--async", action="store_true",
        help="Enable async mode. Useful for checking a lot of files. "
        "Not supported with pylint.")

    parser.add_argument(
        "--options", "-o", default=_Default(DEFAULT_INI_PATH),
        help="Select configuration file. By default is '<CURDIR>/pylama.ini'")

    return parser
Exemple #41
0
    def check_def_documents_parameteres(self, function, docstring):
        """Definition must document parameters in docstrings.

        All functions and methods must document each of the parameters in the
        NumPy format.

        https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt

        Parameters
        ----------
        function: str
            The string text of the function code
        docstring: str
            The docstring for the function, or None

        """
        if (not isinstance(function, Method) and
                not isinstance(function, Function)):
            return

        # If there is no docstring there is nothing to cross-check.
        if not docstring:
            return

        # I use ___ to prefix locals here, because we had a naming conflict
        # with a function called name and a local variable called name, so I
        # want to avoid all naming conflicts.
        ___fail = False

        # In case of multline function definition with long parameters, we need
        # to reconstruct the definition from source.
        function_source = function.source.split("\n")
        function_def = ''
        while not function_def.endswith(":"):
            function_def += function_source.pop(0)
        function_def = function_def.strip()
        ___split = function_def.split("(")
        ___name = ___split[0].split()[1]

        params = ___split[1].split("):")[0]
        # We also need to clean up any extra spaces.
        params = re(r', +').split(params)
        params = [entry.split("=")[0] for entry in params]

        # Empty string is for when there are no params.
        IGNORED_PARAMETERS = ["self", "request", "*args", "**kwargs", ""]

        # Just drop all ignored parameters. This is a lot cleaner (and makes
        # the per-parameter ignore check below unnecessary).
        params = list(set(params) - set(IGNORED_PARAMETERS))

        # Nothing left to document (e.g. only "self").
        if not params:
            return

        # Raw strings throughout: '\s' / '\w' in plain literals are invalid
        # escape sequences. print() calls work on both Python 2 and 3.
        if not re(r'Parameters\n\s+----------\n').search(docstring):
            print("[Method %s] Forgot to define parameters section." % (
                ___name))
            ___fail = True

        for p in params:
            # Expected NumPy style:
            # name: type
            #    description
            reg = escape(p) + r': \w+\n\s+\w+'
            if not re(reg).search(docstring):
                print("[Method %s] Forgot to document parameter %s" % (
                    ___name, p))
                ___fail = True

        if ___fail:
            return D410()
Exemple #42
0
""" Code runnning support. """

try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO

import sys

from .environment import env
from re import compile as re


encoding = re(r'#[^\w]+coding:\s+utf.*$')


def run_code():
    """ Run python code in current buffer.

    :returns: None

    """
    errors, err = [], ''
    line1, line2 = env.var('a:line1'), env.var('a:line2')
    lines = __prepare_lines(line1, line2)
    for ix in (0, 1):
        if encoding.match(lines[ix]):
            lines.pop(ix)

    context = dict(
        __name__='__main__',
Exemple #43
0
from re import compile as re
from iterkit import all

# Tokenizes an identifier into underscore runs, digit runs, acronyms
# (capitals not followed by lowercase), TitleCase words, and lowercase runs.
part_finder = re(r'(_+|\d+|[A-Z]+(?![a-z])|[A-Z][a-z]+|[A-Z]+|[a-z]+)')

class CaseString(object):

    def __init__(
        self,
        string = None,
        parts = None,
        prefix = None,
        suffix = None
    ):

        # UPPER_CASE
        # lower_case
        # TitleCase
        # camelCase
        # TitleCase1_2

        self.string = string

        if parts is not None:
            self.parts = parts
            self.prefix = ''
            self.suffix = ''
        else:

            subparts = part_finder.findall(string)
Simple metrics are encoded as strings.
Metrics that need a regex to extract labels are encoded as a tuple (regex, parser).
"""
metrics = {
    'org.apache.samza.system.kafka.KafkaSystemProducerMetrics': [
        'kafka-producer-send-failed',
        'kafka-producer-send-success',
        'kafka-producer-sends',
        'kafka-producer-retries',
        'kafka-flush-ms',
        'kafka-flush-failed',
        'kafka-flushes',
        'serialization error',
    ],
    'org.apache.samza.system.kafka.KafkaSystemConsumerMetrics': {
        (re('(.*)-(\d+)-(bytes-read)'), topic_partition_metric),
        (re('(.*)-(\d+)-(high-watermark)'), topic_partition_metric),
        (re('(.*)-(\d+)-(messages-read)'), topic_partition_metric),
        (re('(.*)-(\d+)-(offset-change)'), topic_partition_metric),
        (re('(.*)-(\d+)-(messages-behind-high-watermark)'), topic_partition_metric),
        (re('(.*)-(reconnects)'), system_metric),
        (re('(.*)-(skipped-fetch-requests)'), system_metric),
        (re('(.*)-(topic-partitions)'), system_metric),
        (re('(.*)-SystemStreamPartition \[(.*), (.*), (.*)\]'), kafka_system_stream_partition_metric),
        'poll-count',
    },
    'org.apache.samza.system.SystemProducersMetrics': {
        'flushes',
        'sends',
        'serialization error',
        (re('(.*)-(flushes)'), source_metric),
Exemple #45
0
from re import compile as re, IGNORECASE, escape

# Matches a quoted span ('...' or "..."); group 1 is the quote character and
# group 2 the enquoted text (an apostrophe followed by non-space is allowed).
QUOTE_RE = re(r"""(['"])((?:'\S|[^'])*)\1""")

def gen_underbars():
    """Yield alternating opening/closing <em> tags forever."""
    while True:
        for tag in ('<em>', '</em>'):
            yield tag

def replace_underbars(text, next):
    """Replace each '_' in *text* with successive values from *next*()."""
    pieces = []
    rest = text
    while True:
        at = rest.find('_')
        if at < 0:
            pieces.append(rest)
            return ''.join(pieces)
        pieces.append(rest[:at])
        pieces.append(next())
        rest = rest[at + 1:]

def highlighter(text, terms):
    """Wrap every occurrence of *terms* in a highlight span, ignoring case."""
    if not terms:
        return text
    alternation = '|'.join(escape(term) for term in terms)
    pattern = re(r'(%s)' % alternation, IGNORECASE)
    return pattern.sub(
        lambda match: '<span class="highlight">%s</span>' % match.groups(0)[0],
        text)

def markup(text, highlight = None):
    if highlight is None: highlight = []
    next_underbar = gen_underbars().next
    processed = ""
    while QUOTE_RE.search(text):
        match = QUOTE_RE.search(text)
        quote, enquoted = match.groups()
Exemple #46
0
    # Postprocess options
    opts = dict(options.__dict__.items())
    for name, value in opts.items():
        if isinstance(value, _Default):
            action = actions.get(name)
            if action and callable(action.type):
                value.value = action.type(value.value)

            setattr(options, name, value.value)

    # Parse file related options
    options.file_params = dict()
    for k, s in config.sections.items():
        if k != config.default_section:
            mask = re(fnmatch.translate(k))
            options.file_params[mask] = dict(s)
            options.file_params[mask]['lint'] = int(
                options.file_params[mask].get('lint', 1)
            )

    return options


def setup_logger(options):
    """ Setup logger with options. """

    # Verbose mode lowers the threshold from WARN to INFO.
    LOGGER.setLevel(logging.INFO if options.verbose else logging.WARN)
    if options.report:
        # Reporting to a file: drop the console handler and rewrite the
        # report file from scratch (mode='w').
        LOGGER.removeHandler(STREAM)
        LOGGER.addHandler(logging.FileHandler(options.report, mode='w'))
Exemple #47
0
    # Postprocess options
    opts = dict(options.__dict__.items())
    for name, value in opts.items():
        if isinstance(value, _Default):
            action = actions.get(name)
            if action and callable(action.type):
                value.value = action.type(value.value)

            setattr(options, name, value.value)

    # Parse file related options
    options.file_params = dict()
    for k, s in config.sections.items():
        if k != config.default_section:
            mask = re(fnmatch.translate(k))
            options.file_params[mask] = dict(s)
            options.file_params[mask]['lint'] = int(
                options.file_params[mask].get('lint', 1)
            )

    return options


def setup_logger(options):
    """ Setup logger with options. """

    # Verbose mode lowers the threshold from WARN to INFO.
    LOGGER.setLevel(logging.INFO if options.verbose else logging.WARN)
    if options.report:
        # Reporting to a file: drop the console handler and rewrite the
        # report file from scratch (mode='w').
        LOGGER.removeHandler(STREAM)
        LOGGER.addHandler(logging.FileHandler(options.report, mode='w'))
Exemple #48
0
def highlighter(text, terms):
    """Mark each case-insensitive occurrence of *terms* with a highlight span."""
    if not terms:
        return text

    def highlight(match):
        return '<span class="highlight">%s</span>' % match.groups(0)[0]

    joined = '|'.join(escape(term) for term in terms)
    return re(r'(%s)' % joined, IGNORECASE).sub(highlight, text)
Exemple #49
0
import os
from re import compile as re, M

import json
from tornado import ioloop, log

from .alerts import BaseAlert
from .utils import parse_interval
from .handlers import registry


LOGGER = log.gen_log

# Strips trailing "// ..." comments per line (MULTILINE flag). Raw string:
# '\s' in a non-raw literal is an invalid escape sequence (DeprecationWarning
# today, an error in future Python versions).
COMMENT_RE = re(r'//\s+.*$', M)


class Reactor(object):

    """ Class description. """

    defaults = {
        'auth_password': None,
        'auth_username': None,
        'config': 'config.json',
        'critical_handlers': ['log', 'smtp'],
        'debug': False,
        'format': 'short',
        'graphite_url': 'http://localhost',
        'history_size': '1day',
        'interval': '10minute',
        'logging': 'info',
Exemple #50
0
class Router(object):

    """Control migrations."""

    # Migration modules must be named like "001_label.py".
    filemask = re(r"[\d]{3}_[^\.]+\.py")

    def __init__(self, plugin):
        """Initialize the router."""
        self.app = plugin.app
        self.database = plugin.database
        self.migrate_dir = plugin.cfg.migrations_path

    @cached_property
    def model(self):
        """Ensure that migrations has prepared to run."""
        # Initialize MigrationHistory model
        MigrateHistory._meta.database = self.app.plugins.peewee.database
        try:
            MigrateHistory.create_table()
        except pw.DatabaseError:
            # Table probably exists already; undo the failed DDL statement.
            self.database.rollback()
        return MigrateHistory

    @property
    def fs_migrations(self):
        """Scan migrations in file system."""
        # Create the migrations directory on first use.
        if not op.exists(self.migrate_dir):
            self.app.logger.warn('Migration directory: %s does not exists.', self.migrate_dir)
            md(self.migrate_dir)
        return sorted(''.join(f[:-3]) for f in ls(self.migrate_dir) if self.filemask.match(f))

    @property
    def db_migrations(self):
        """Scan migrations in database."""
        return [mm.name for mm in self.model.select()]

    @property
    def diff(self):
        """Calculate difference between fs and db."""
        dbms = set(self.db_migrations)
        return [name for name in self.fs_migrations if name not in dbms]

    def create(self, name='auto'):
        """Create a migration."""
        self.app.logger.info('Create a migration "%s"', name)

        # Sequence number is simply the current count of migration files.
        num = len(self.fs_migrations)
        prefix = '{:03}_'.format(num)
        name = prefix + name + '.py'
        path = copy(MIGRATE_TEMPLATE, op.join(self.migrate_dir, name))

        self.app.logger.info('Migration has created %s', path)
        return path

    def run(self, name=None):
        """Run migrations."""
        self.app.logger.info('Start migrations')

        migrator = Migrator(self.database, app=self.app)
        diff = self.diff

        if not diff:
            self.app.logger.info('Nothing to migrate')
            return None

        # Replay already-applied migrations as "fake" to rebuild migrator
        # state, then really run the outstanding ones (stopping after `name`
        # when one was requested).
        for mname in self.fs_migrations:
            self.run_one(mname, migrator, mname not in diff)
            if name and name == mname:
                break

    def run_one(self, name, migrator, fake=True):
        """Run a migration."""
        if not fake:
            self.app.logger.info('Run "%s"', name)

        try:
            with open(op.join(self.migrate_dir, name + '.py')) as f:
                code = f.read()
                scope = {}
                exec_in(code, scope)
                migrate = scope.get('migrate', lambda m: None)
                migrate(migrator, self.database, app=self.app)
                if fake:
                    # Fake run: collect migrator state only; do not touch
                    # the schema or the history table.
                    migrator.clean()
                    return migrator

                self.app.logger.info('Start migration %s', name)
                with self.database.transaction():
                    migrator.run()
                    self.model.create(name=name)
                self.app.logger.info('Migrated %s', name)

        except Exception as exc:
            # Any failure rolls back and is re-raised to abort the run.
            self.database.rollback()
            self.app.logger.exception(exc)
            raise