Example #1
        def parse_title(heading_line):
            # WARNING: this regular expression fails when the heading contains
            # a tag but at most one other word!
            m = REGEX_HEADING.match(heading_line)
            if m:
                r = m.groupdict()
                level = len(r[u'level'])
                todo = None
                title = u''
                tags = filter(test_not_empty,
                              r[u'tags'].split(u':')) if r[u'tags'] else []
                tags = list(tags)

                # if there is just one or no word in the heading, redo the parsing
                mt = REGEX_TAG.match(r[u'title'])
                if not tags and mt:
                    r = mt.groupdict()
                    tags = filter(
                        test_not_empty,
                        r[u'tags'].split(u':')) if r[u'tags'] else []
                    tags = list(tags)
                if r[u'title'] is not None:
                    _todo_title = [
                        i.strip() for i in r[u'title'].split(None, 1)
                    ]
                    if _todo_title and _todo_title[0] in allowed_todo_states:
                        todo = _todo_title[0]
                        if len(_todo_title) > 1:
                            title = _todo_title[1]
                    else:
                        title = r[u'title'].strip()

                return (level, todo, title, tags)
            raise ValueError(u'Data doesn\'t start with a heading definition.')
Example #3
def main():
    """Main function for setup.py; this actually does the installation"""
    assert path.isfile(resource_filename(package_name, "docopt_c.py"))
    with open(
        resource_filename(package_name, "docopt_c{extsep}py".format(extsep=extsep))) as f:
        parsed_init = parse(f.read())

    __author__, __version__, __description__ = map(
        lambda node: node.value if isinstance(node, Constant) else node.s,
        filter(
            lambda node: isinstance(node, (Constant, Str)),
            map(
                attrgetter("value"),
                filter(
                    lambda node: isinstance(node, Assign)
                    and any(filter(
                        lambda target: target.id in frozenset(
                            ("__author__", "__version__", "__description__")),
                        node.targets)),
                    parsed_init.body,
                ),
            ),
        ),
    )

    setup(
        name=package_name,
        author=__author__,
        author_email="*****@*****.**",
        version=__version__,
        description=__description__,
        long_description=long_description,
        long_description_content_type="text/markdown",
        py_modules=["docopt_c"],
        scripts=["docopt.py", "docopt_c.py"],
        classifiers=[
            "Development Status :: 3 - Alpha",
            "Environment :: Console",
            "Intended Audience :: Developers",
            "License :: OSI Approved",
            "License :: OSI Approved :: MIT License",
            "Natural Language :: English",
            "Operating System :: OS Independent",
            "Programming Language :: Python :: 2.7",
            "Programming Language :: Python :: 3.7",
            "Programming Language :: Python :: 3.8",
            "Programming Language :: Python :: 3.9",
            "Programming Language :: Python :: Implementation",
            "Topic :: Scientific/Engineering :: Interface Engine/Protocol Translator",
            "Topic :: Software Development",
            "Topic :: Software Development :: Build Tools",
            "Topic :: Software Development :: Code Generators",
            "Topic :: Software Development :: Compilers",
            "Topic :: Software Development :: Pre-processors"
        ],
        url="https://github.com/docopt/docopt.c"
    )
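A self-contained sketch of the same AST walk over an inline source string, with hypothetical names and values (ast.Constant requires Python 3.8+):

import ast

source = '__author__ = "Jane Doe"\n__version__ = "0.1.0"\n__description__ = "demo"'
wanted = frozenset(("__author__", "__version__", "__description__"))

# keep only assignments to the three dunder names, then pull the string values
found = {
    target.id: node.value.value
    for node in ast.parse(source).body
    if isinstance(node, ast.Assign) and isinstance(node.value, ast.Constant)
    for target in node.targets
    if isinstance(target, ast.Name) and target.id in wanted
}
print(found)  # {'__author__': 'Jane Doe', '__version__': '0.1.0', '__description__': 'demo'}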
Example #4
    def process_IGMP(self, eth_src, ip_src, igmp_msg):
        if igmp_msg.protocol_name == 'igmpv3_report':
            records = igmp_msg.records
            for record in records:
                address = record.address
                if record.type_ in (igmp.CHANGE_TO_INCLUDE_MODE,
                                    igmp.CHANGE_TO_EXCLUDE_MODE):
                    mode = record.type_ == igmp.CHANGE_TO_INCLUDE_MODE
                    self.log('Record change for group ' + address)
                    if address not in self.group_manager.subscribers:
                        self.group_manager.subscribers[address] = {}
                    subscribers = self.group_manager.subscribers[address]
                    subscribers[eth_src] = [mode, record.srcs]
                    if address in self.group_manager.group_2_sources:
                        group = self.group_manager.group_2_sources[address]
                        # note: the builtin filter is meant here; itertools has
                        # no `filter` (Python 2 had itertools.ifilter)
                        others = filter(lambda ip: ip != ip_src, group)
                        if mode:
                            for src_ip in others:
                                if src_ip in record.srcs:
                                    self.group_manager.add_subscriber(
                                        address, src_ip, eth_src)
                                else:
                                    self.group_manager.remove_subscriber(
                                        address, src_ip, eth_src)
                        else:
                            for src_ip in others:
                                if src_ip in record.srcs:
                                    self.group_manager.remove_subscriber(
                                        address, src_ip, eth_src)
                                else:
                                    self.group_manager.add_subscriber(
                                        address, src_ip, eth_src)
Example #5
 def removePackage(self, package):
     xbmc.log('XBian-config : Remove package %s from category %s' %
              (package, self.name), xbmc.LOGDEBUG)
     # filter() is lazy on Python 3, so materialise before indexing:
     # [x for x in self.packageList[:self.initialiseIndex] if x.getName() == package][0]
     list(filter(lambda x: x.getName() == package,
                 self.packageList[:self.initialiseIndex]))[0].disable()
     self.flagRemove = True
     self.installed -= 1
     # refresh category label
     self.LabelPackageControl.setLabel(
         '%s [COLOR lightblue](%d/%d)[/COLOR]' % (self.name, self.installed, self.available))
     self.enableGetMore()
Example #6
 def points_in_radius(self, point, radius=1, include_self=False):
     """ return a set of points inside radius `radius' of `point'."""
     logging.debug("Returning points in radius %i for (%i, %i)", radius, *point)
     matrix = self.matrix
     min_x = min(map(len, self.matrix))
     # keep only the points inside the matrix.
     keep = lambda x: 0 <= x[0] < min_x and 0 <= x[1] < len(matrix)
     # the cartesian product of [-radius, radius]
     diff = range(-radius, radius + 1)
     points = ((point[0] + x, point[1] + y) for x, y in product(diff, diff))
     points = filter(keep, points)
     if not include_self:
         points = filter(lambda x: x != point, points)
     return points
Example #8
def update_contacts(contacts):
    contacts = map(_transform_contact_data, contacts)

    # Filter contact data using whitelist
    if settings.EMARSYS_RECIPIENT_WHITELIST is not None:
        contacts = filter(lambda contact: contact[3]  # 3=email
                          in settings.EMARSYS_RECIPIENT_WHITELIST, contacts)

    contacts = list(contacts)

    assert len(contacts) <= BATCH_SIZE

    if not contacts:
        return 0, [], []

    num_successful, errors = _update_contacts(contacts)

    missing_contacts = [email
                        for email, error_dict in errors.items()
                        if '2008' in error_dict]
    failed_contacts = [(email, error_dict)
                       for email, error_dict in errors.items()
                       if '2008' not in error_dict]

    return num_successful, missing_contacts, failed_contacts
Example #9
def parse(requirements):
    """
    Parses given requirements line-by-line.
    """
    transformer = RTransformer()
    return map(transformer.transform,
               filter(None, map(_parse, requirements.splitlines())))
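On Python 3 the map/filter chain above is lazy; a sketch of the same shape with hypothetical stand-ins for _parse and the transformer:

_parse = lambda line: line.strip() or None   # e.g. drop blank lines
transform = str.upper                        # stand-in for RTransformer.transform

requirements = "requests\n\nflask\n"
parsed = map(transform, filter(None, map(_parse, requirements.splitlines())))
print(list(parsed))  # ['REQUESTS', 'FLASK']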
Example #10
def erat(n):
    """Return a list of primes up to and including n.

    This is a fixed-size version of the Sieve of Eratosthenes, using an
    adaptation of the traditional algorithm.

    >>> erat(30)
    [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
    >>> erat(10000) == list(primes_below(10000))
    True

    """
    _validate_int(n)
    # Generate a fixed array of integers.
    arr = list(range(n+1))
    # Cross out 0 and 1 since they aren't prime.
    arr[0] = arr[1] = None
    i = 2
    while i*i <= n:
        # Cross out all the multiples of i starting from i**2.
        for p in range(i*i, n+1, i):
            arr[p] = None
        # Advance to the next number not crossed off.
        i += 1
        while i <= n and arr[i] is None:
            i += 1
    return list(filter(None, arr))
Example #11
 def get_all_specs(self):
     dotspack = os.path.join(self.root,
                             spack.store.layout.metadata_dir)
     if os.path.exists(dotspack):
         return list(filter(None, map(self.get_spec, os.listdir(dotspack))))
     else:
         return []
Example #12
def main():
    cmt.init()
    cmt.CUDAMatrix.init_random()
    if HEATUP:
        print("heating up for %g seconds..." % HEATUP, end=' ')
        sys.stdout.flush()
        heatup(HEATUP)
        print("done.")
    print("small matrix shape:", XS_SHAPE)
    print("large matrix shape:", XL_SHAPE)
    for funcname, func in filter(lambda f: f[0].startswith('bench_'),
                                 getmembers(getmodule(main), isfunction)):
        print("%-15s" % funcname[len('bench_'):], end=' ')
        sys.stdout.flush()
        for size, shape, factor in (('small', XS_SHAPE, 10),
                                    ('large', XL_SHAPE, 1)):
            repeat = NUM_REPEATS * getattr(func, 'repeats', 1)
            time = min(timeit.repeat(
                    setup="from __main__ import setup, %s\nmats = setup(%s)" % (funcname, shape),
                    stmt="%s(*mats)" % funcname, repeat=repeat,
                    number=NUM_ITER * factor)) / (NUM_ITER * factor)
            print("%.3es (%s) " % (time, size), end=' ')
            sys.stdout.flush()
        print()
    cmt.shutdown()
Example #13
def execute_sql(sql, user, password, host, execute=True):
    if not execute:
        return sql

    with settings(
        prompts={
            "Enter password: ": password,
            "mysql> ": ";\n{}\q".format(sql.replace(";", ";\n")),
        }
    ):
        return run(
            "mysql {}".format(
                " ".join(
                    filter(
                        None,
                        (
                            "-h {}".format(ensure_quoted(host)) if host else None,
                            "-u {}".format(ensure_quoted(user)) if user else None,
                            "-p" if password else None,
                            # "<<< {}".format(sql),  # -e
                        ),
                    ),
                )
            ),
            quiet=True,
        )
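The filter(None, (...)) idiom drops whichever flags are falsy before joining; the same pattern in isolation, with hypothetical connection values (the ensure_quoted helper is omitted here):

host, user, password = "db.example.com", "admin", "secret"
flags = " ".join(filter(None, (
    "-h {}".format(host) if host else None,
    "-u {}".format(user) if user else None,
    "-p" if password else None,
)))
print("mysql {}".format(flags))  # mysql -h db.example.com -u admin -p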
Example #14
    def print_status(self, *specs, **kwargs):
        if kwargs.get("with_dependencies", False):
            specs = set(get_dependencies(specs))

        specs = sorted(specs, key=lambda s: s.name)
        in_view = list(map(self.get_spec, specs))

        for s, v in zip(specs, in_view):
            if not v:
                tty.error(self._croot +
                          'Package not linked: %s' % s.name)
            elif s != v:
                self.print_conflict(v, s, level="warn")

        in_view = list(filter(None, in_view))

        if len(specs) > 0:
            tty.msg("Packages linked in %s:" % self._croot[:-1])

            # avoid circular dependency
            import spack.cmd
            spack.cmd.display_specs(in_view, flags=True, variants=True,
                                    long=self.verbose)
        else:
            tty.warn(self._croot + "No packages found.")
Example #15
def clear(strlist):
    """Remove empty strings and spaces from sequence.

    >>> clear(['123', '12', '', '2', '1', ''])
    ['123', '12', '2', '1']
    """
    return list(filter(None, map(lambda x: x.strip(), strlist)))
Example #16
    def exclude_valid_certs(domain):
        cert_details = c.sudo(
            "certbot certificates --cert-name {domain}".format(
                domain=domain)).stdout

        if "Expiry Date" not in cert_details:
            return domain
        elif "(VALID" not in cert_details:
            return domain

        cert_expiry = next(
            map(
                lambda s: s.partition(":")[2].rpartition("(")[0].strip(),
                filter(
                    lambda s: s.lstrip().startswith("Expiry Date"),
                    cert_details.split("\n"),
                ),
            ),
            None,
        )
        if cert_expiry is None:
            return domain
        cert_expiry = datetime.strptime(cert_expiry, "%Y-%m-%d %H:%M:%S+00:00")
        if (cert_expiry - datetime.now()).days < 30:
            return domain
        return None
Example #17
	def packets_to_seqevents(self, packets):
		seqevents = []
		supported_packets = filter(lambda packet: has_layer(packet, 'sip'), packets)
		for packet in supported_packets:
			for seqevent in self.packet_to_seqevents(packet):
				seqevents.append(seqevent)
		return seqevents
Example #18
def any(seq, pred=None):
    """
    Returns True if pred(x) is true for at least one element in the iterable.
    """
    for elem in filter(pred, seq):
        return True
    return False
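Usage mirrors the builtin; with pred=None, filter tests the elements themselves for truth:

# exercising the definition above (it shadows the builtin of the same name)
print(any([0, '', 3]))                   # True  (3 is truthy)
print(any([2, 4, 6], lambda x: x % 2))   # False (no odd element)
print(any([2, 4, 5], lambda x: x % 2))   # True  (5 is odd)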
Example #19
def distill_query(letters):
    '''Break input into (required, optional, blanks).
    Uppercase letters are required.
    Lowercase letters are optional.
    A single numeric digit indicates the number of blanks.
    >>> distill_query('aDbEdF2')
    ('DEF', 'ABD', 2)
    >>> distill_query('rates1')
    ('', 'AERST', 1)
    '''
    required = filter(lambda x: x in string.ascii_uppercase, letters)
    optional = filter(lambda x: x in string.ascii_lowercase, letters)
    # optional = optional.upper()
    numbers = filter(lambda x: x in string.digits, letters)
    blanks = int('0' + ''.join(numbers))
    return (''.join(sorted(required)), ''.join(sorted(x.upper() for x in optional)), blanks)
Example #21
def turner():
    """Yield prime numbers very slowly using Euler's sieve.

    The function is named for David Turner, who developed this implementation
    in a paper in 1975. Due to its simplicity, it has become very popular,
    particularly in Haskell circles where it is usually implemented as some
    variation of:

        primes = sieve [2..]
        sieve (p : xs) = p : sieve [x | x <- xs, x `mod` p > 0]

    This algorithm is often wrongly described as the Sieve of Eratosthenes,
    but it is not. Although simple, it is slow and inefficient, with
    asymptotic behaviour of O(N**2/(log N)**2), which is even worse than
    trial_division, and only marginally better than naive_primes. O'Neill
    calls this the "Sleight on Eratosthenes".
    """
    # References:
    #   http://en.wikipedia.org/wiki/Sieve_of_Eratosthenes
    #   http://en.literateprograms.org/Sieve_of_Eratosthenes_(Haskell)
    #   http://www.cs.hmc.edu/~oneill/papers/Sieve-JFP.pdf
    #   http://www.haskell.org/haskellwiki/Prime_numbers
    nums = itertools.count(2)
    while True:
        prime = next(nums)
        yield prime
        nums = filter(lambda v, p=prime: (v % p) != 0, nums)
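Note the p=prime default argument: it pins the current prime inside each stacked filter, which matters on Python 3, where the lambdas would otherwise all close over the same variable late. Taking the first few values:

from itertools import islice
print(list(islice(turner(), 10)))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]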
Example #22
    def run(self):
        """
        Thread's main loop
        """
        self._add_event_to_loop(self.wakeup_evt,
                                lambda *args: self._wake_up_event_cb(*args))

        # We have to monitor the main thread exit. This is the simplest way to
        # let the main thread handle the signals while still being able to
        # perform some cleanup before the process exit. If we don't monitor the
        # main thread, this thread will hang the process when the process
        # receive SIGINT (or any other non fatal signal).
        main_thread = next(
            filter(lambda t: t.name == "MainThread", threading.enumerate()))
        try:
            while self.running and main_thread.is_alive():
                try:
                    self._wait_and_process()
                except RuntimeError as e:
                    self.logger.error("Exception caught: {}".format(e))

                self._run_task_list(self.async_pomp_task)
                self._run_task_list(self.deferred_pomp_task)
        finally:
            self.running = False
            # Perform some cleanup before this thread dies
            self._cleanup()
            self.destroy()
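On Python 3.4+ the main-thread lookup can skip the filter over threading.enumerate() entirely:

import threading

main_thread = threading.main_thread()  # equivalent, and always present
print(main_thread.name)                # MainThread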
Example #23
 def getUserValue(self):
     load = dialogWait(self.DIALOGHEADER, _('Loading volumes...'))
     load.show()
     volumeList = xbianConfig('listvol', '--exclude=%s' % self.EXCLUDE, cmd=['sudo', 'btrfs-auto-snapshot'])
     load.close()
     have_to_stop = False
     dialog = xbmcgui.Dialog()
     while not have_to_stop:
         volId = dialog.select(_('Btrfs volume'), volumeList)
         if volId == -1:
             have_to_stop = True
         else:
             load = dialogWait(self.DIALOGHEADER, _('Please wait...'))
             load.show()
             snapshotList = xbianConfig(
                 'list', volumeList[volId], cmd=['sudo', 'btrfs-auto-snapshot'])
             #snapshotList = [x for x in snapshotList if x.split('@')[1]]
             snapshotList = list(filter(lambda x: x.split('@')[1], snapshotList))
             load.close()
             #snapId = dialog.select('Snapshot', [x.split('@')[1] for x in snapshotList])
             snapId = dialog.select('Snapshot', list(map(lambda x: x.split('@')[1], snapshotList)))
             if snapId != -1 and self.askConfirmation():
                 try:
                     dlg = dialogWait(self.DIALOGHEADER, self.PROGRESSTEXT)
                     dlg.show()
                     self.runCmd(volumeList[volId], snapshotList[snapId])
                 except Exception:
                     print('error running btrfs-auto-snapshot command %s %s' % (volumeList[volId], snapshotList[snapId]))
                 finally:
                     have_to_stop = True
                     dlg.close()
     return ''
Example #24
    def _make_unique(self, endpoints):

        len_endpoints = len(endpoints)

        def func_filter(endpoint_tuple):
            idx, endpoint = endpoint_tuple

            # compare against later entries only; starting at idx would
            # immediately match the endpoint itself and filter everything out
            for idx_loop in range(idx + 1, len_endpoints):
                current = endpoints[idx_loop]
                if current == endpoint:
                    return False

                current_name = current.get('name')
                looped_name = endpoint.get('name')

                if not current_name and not looped_name:
                    if current.get('source') != endpoint.get('source'):
                        continue

                elif current_name != looped_name:
                    continue

                if current.get('target') != endpoint.get('target'):
                    return False

            return True

        filtered = filter(func_filter,
                          [(idx, endpoint)
                           for idx, endpoint in enumerate(endpoints)])
        return list(filtered)  # todo
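A simplified standalone version of the same keep-the-last-conflict idea, keyed on name only (data hypothetical; note the method above returns (index, endpoint) tuples, hence the trailing # todo):

endpoints = [{"name": "a", "target": 1},
             {"name": "b", "target": 2},
             {"name": "a", "target": 3}]

def keep(pair):
    idx, endpoint = pair
    # keep an entry only if no later entry reuses its name
    return all(other["name"] != endpoint["name"]
               for other in endpoints[idx + 1:])

print([ep for _, ep in filter(keep, enumerate(endpoints))])
# [{'name': 'b', 'target': 2}, {'name': 'a', 'target': 3}]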
Example #25
 def getUserValue(self):
     load = dialogWait(self.DIALOGHEADER, _('Loading volumes...'))
     load.show()
     mountList = xbianConfig('mount', '--helper', cmd=['sudo', 'btrfs-auto-snapshot'])
     sep = xbianConfig('fstype', cmd=['sudo', 'btrfs-auto-snapshot'])[1]
     load.close()
     have_to_stop = False
     dialog = xbmcgui.Dialog()
     while not have_to_stop:
         #volId = dialog.select(_('Btrfs volume'), [x.split(sep)[0] for x in mountList])
         volId = dialog.select(_('Btrfs volume'), list(map(lambda x: x.split(sep)[0], mountList)))
         if volId == -1:
             have_to_stop = True
         else:
             #selectedVolume = [x.split(sep)[0] for x in mountList][volId]
             #mountItems = [x for x in mountList if selectedVolume + sep in x]
             #snapId = dialog.select('Snapshot', [x.split(sep)[1] for x in mountItems])
             selectedVolume = list(map(lambda x: x.split(sep)[0], mountList))[volId]
             mountItems = list(filter(lambda x: selectedVolume + sep in x, mountList))
             snapId = dialog.select('Snapshot', list(map(lambda x: x.split(sep)[1], mountItems)))
             if snapId != -1 and self.askConfirmation():
                 try:
                     dlg = dialogWait(self.DIALOGHEADER, self.PROGRESSTEXT)
                     dlg.show()
                     self.runCmd(selectedVolume, mountItems[snapId])
                 except Exception:
                     print('error running btrfs-auto-snapshot command %s' % (mountItems[snapId]))
                 finally:
                     have_to_stop = True
                     dlg.close()
     return ''
Example #27
def main():
    """Module entry point."""
    import argparse
    parser = argparse.ArgumentParser(
        description="""filters out words starting with an uppercase letters or
                       containing an apostrophe from a list of words""")
    # Cannot import the version from the package when running this module as a
    # script
    try:
        from . import __version__
        parser.add_argument('--version',
                            action='version',
                            version='%(prog)s ' + __version__)
    except SystemError:
        pass
    parser.add_argument('source',
                        type=argparse.FileType('rb'),
                        help='a file containing a list of words')
    parser.add_argument('destination',
                        type=argparse.FileType('w'),
                        help='destination of the filtered list of words')
    args = parser.parse_args()
    # Decode the list of words as UTF-8 and remove the trailing \n
    words = (word.strip() for word in codecs.iterdecode(args.source, 'utf-8'))
    for word in filter(is_valid, words):
        print(word, file=args.destination)
Example #30
    def get_queryset(self, query_params=None, *args, **kwargs):
        if query_params is None:
            query_params = self.request.QUERY_PARAMS

        location = parse_location(self.kwargs.get('location_slug', None))
        time_range = parse_time_range(
            query_params.get('start', None),
            query_params.get('end', None)
        )

        try:
            min_tide_level = float(self.request.QUERY_PARAMS['tide_level'])
        except KeyError:
            raise MissingParameterException(
                'Missing required query parameter `tide_level`')

        extended_time_range = TimeRange(
            start=time_range.start - ONE_DAY,
            end=time_range.end + ONE_DAY)

        predictions = get_queryset(location, extended_time_range).filter(
            tide_level__gte=min_tide_level)

        return filter(None, map(
            partial(transform_time_window, time_range, extended_time_range),
            make_tide_time_windows(predictions)))
Example #31
def cmd_parser():
    parser = OptionParser()
    parser.add_option('--id',
                      type='string',
                      dest='id',
                      action='store',
                      help='comic ids set, e.g. 23552')
    parser.add_option(
        '--vol',
        type='string',
        dest='vol',
        action='store',
        help='comic vols set, e.g. 1,2,3, or "all" if you want :-)')
    parser.add_option('--threads',
                      '-t',
                      type='int',
                      dest='threads',
                      action='store',
                      default=4,
                      help='thread count for downloading comic')
    parser.add_option('--timeout',
                      '-T',
                      type='int',
                      dest='timeout',
                      action='store',
                      default=30,
                      help='timeout for downloading comic')
    try:
        sys.argv = list(
            map(lambda x: unicode(x.decode(sys.stdin.encoding)), sys.argv))
    except (NameError, TypeError):
        pass
    except UnicodeDecodeError:
        exit(0)

    args, _ = parser.parse_args(sys.argv[1:])

    if args.id:
        _ = map(lambda id: id.strip(), args.id.split(','))
        args.id = set(map(int, filter(lambda id_: id_.isdigit(), _)))

    if args.vol:
        _ = map(lambda vol: vol.strip(), args.vol.split(','))
        args.vol = set(map(int, filter(lambda vol_: vol_.isdigit(), _)))

    return args
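The --id/--vol post-processing reduces to strip, keep digits, convert; for example:

raw = "23552, 100,, x7 "
parts = map(lambda s: s.strip(), raw.split(','))
ids = set(map(int, filter(lambda s: s.isdigit(), parts)))
print(sorted(ids))  # [100, 23552]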
Example #32
def llcompare(lltest, llref, ignore_empty=False, _recurse=[]):
    '''Compare a list/iterator of lists/iterators of tokens recursively.

    Raise an error if there are intolerable differences.
    The reference tokens in lref should already be of the correct type.
    If ignore_empty is true, empty lines are not included in the comparison.
    The _recurse argument is only used internally.
    '''
    if ignore_empty:
        lltest = filter(None, lltest)
        llref = filter(None, llref)
    i = 0
    for ltest, lref in zip_longest(lltest, llref, fillvalue=False):
        i += 1
        if ltest is False:
            raise error('List comparison failed: Fewer entries than expected'
                        ' (%d  !=  >= %d)' % (i, i + 1))
        if lref is False:
            raise error('List comparison failed: More entries than expected'
                        ' (>= %d  !=  %d)' % (i + 1, i))
        if lref and not isinstance(lref, _strtypes):

            if hasattr(lref, '__getitem__'):
                rfirst = lref[0]
            elif hasattr(lref, 'next') or hasattr(lref, '__next__'):
                rfirst = next(lref)  # "peek" at first
                lref = chain([rfirst], lref)  # "push" back
            else:
                rfirst = None
            if isinstance(rfirst, _strtypes):
                rfirst = None
            if hasattr(rfirst, '__iter__') or isinstance(
                    rfirst, (list, tuple)):
                llcompare(ltest,
                          lref,
                          _recurse=_recurse + [i],
                          ignore_empty=ignore_empty)
        try:
            lcompare(ltest, lref)
            #		except TypeError:
            #			print(ltest, lref)
            #			raise
        except error as e:
            if _recurse:
                raise error('%s (line %s)' % (str(e), _recurse + [i + 1]))
            else:
                raise error('%s (line %d)' % (str(e), i + 1))
Example #33
 def _files(tree, filters=None):
     """ get all file elements that have a path attribute (apply filters) """
     elems = tree.findall(".//file[@path]")
     if filters:
         for filt in filters:
             elems = filter(filt, elems)
     for elem in elems:
         yield elem
Example #34
    def _find_same(self, item, items=None, keys=('name',)):
        """Find all items with matching values for given set of keys."""
        items = items if items else self.parse()

        def same(this, item=item, keys=keys):
            return all([this.get(key) == item.get(key) for key in keys])

        return filter(same, items)
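With the tuple fix in the signature, same() compares whole key values rather than iterating the characters of 'name'; standalone, with hypothetical items:

item = {"name": "eth0", "mtu": 1500}
items = [{"name": "eth0", "mtu": 9000}, {"name": "eth1", "mtu": 1500}]

keys = ('name',)  # ('name') without the comma is just the string 'name'
same = lambda this: all(this.get(key) == item.get(key) for key in keys)
print(list(filter(same, items)))  # [{'name': 'eth0', 'mtu': 9000}]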
Example #35
from itertools import tee, filterfalse

def partition(pred, iterable):
    """Use a predicate to partition entries into false entries and true entries
    partition(is_odd, range(10)) --> 0 2 4 6 8   and  1 3 5 7 9

    http://docs.python.org/3.4/library/itertools.html#itertools-recipes
    """
    t1, t2 = tee(iterable)
    return filterfalse(pred, t1), filter(pred, t2)
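Usage; both halves stay lazy until consumed:

evens, odds = partition(lambda n: n % 2, range(10))
print(list(evens), list(odds))  # [0, 2, 4, 6, 8] [1, 3, 5, 7, 9]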
Example #36
    def get_all_handleable(self):
        """
        Enumerate all handleable devices currently known to udisks.

        NOTE: returns only devices that are still valid. This protects from
        race conditions inside udiskie.

        """
        return filter(self.is_handleable, self._udisks)
Example #37
 def create_profiler(cls, host, executable_path, output_dir, profiler_name=None, identifier=None):
     profilers = cls.profilers_for_platform(host.platform)
     if not profilers:
         return None
     profiler_name = profiler_name or cls.default_profiler_name(host.platform)
     profiler_class = next(filter(lambda profiler: profiler.name == profiler_name, profilers), None)
     if not profiler_class:
         return None
     # instantiate the class we just resolved rather than always profilers[0]
     return profiler_class(host, executable_path, output_dir, identifier)
Example #38
 def packets_to_seqevents(self, packets, protocols, traceName):
     seqevents = []
     # earlier revisions filtered on a single layer only:
     #   supported_packets = filter(lambda packet: has_layer(packet, 'http2'), packets)
     #   supported_packets = filter(lambda packet: has_layer_http2(packet), packets)
     # now a packet is kept if it carries any of the requested protocols:
     supported_packets = filter(lambda packet: has_layer_lists(packet, protocols), packets)
     for packet in supported_packets:
         for seqevent in self.packet_to_seqevents(packet, traceName):
             seqevents.append(seqevent)
     return seqevents
Example #39
def save_dump(d):
    preferred_dirs = [os.path.abspath('.'), tempfile.gettempdir()]
    tempdir = next(filter(writable_p, chain(preferred_dirs, d['writable_dirs'])))
    outfile = os.path.join(tempdir, 'dump.gz')

    with gzip_open(outfile, 'wb') as out:
        pickle.dump(d, out)

    atexit.register(print, 'the dictionary has been saved in', outfile)
Example #41
 def mount(self,
           fstype=None,
           options=None,
           auth_no_user_interaction=None):
     """Mount filesystem."""
     options = list(filter(None, (options or '').split(','))) + filter_opt({
         'auth_no_user_interaction': auth_no_user_interaction
     })
     return self.method.FilesystemMount(fstype or self.id_type, options)
Example #42
 def getXbianValue(self):
     with open(self.cfgfile, 'r') as f:
         mat = list(
             filter(lambda x: re.match('%s=.*' % self.setting, x),
                    f.readlines()))
     if mat:
         self.exist = True
         return re.search('[01]', mat[0]).group()[0]
     return 0
Example #43
 def get_wait(self, uriparts):
     # Stop as soon as one is available to avoid initiating the rest
     for i in self.queue:
         if not i.busy and i.get_wait(uriparts) == 0:
             return 0
     # If none is available, see how long we have to wait
     available = filter(lambda x: not x.busy, self.queue)
     diff = min(worker.get_wait(uriparts) for worker in available)
     return diff
Example #44
 def _get_callback(self, request):
     filterf = lambda t:t[0] in (request.method, b('ALL'))
     path_to_check = getattr(request, '_remaining_path', request.path)
     for m, r, cb in filter(filterf, self._registry):
         result = r.search(path_to_check)
         if result:
             request._remaining_path = path_to_check[result.span()[1]:]
             return cb, result.groupdict()
     return None, None
Example #45
    def children_of(self, category, categories=None):
        if categories is None:
            categories = list(self.all())

        children = list(filter(lambda c: c.parent == category, categories))
        for child in children:
            children.extend(self.children_of(child, categories))

        return children
Example #46
import re

def filter_exclude(specs, exclude):
    "Filter specs given sequence of exclude regex"
    to_exclude = [re.compile(e) for e in exclude]

    def keep(spec):
        for e in to_exclude:
            if e.match(spec.name):
                return False
        return True
    return filter(keep, specs)
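A quick check with a hypothetical stand-in for the spec objects (only a .name attribute is needed):

from collections import namedtuple

Spec = namedtuple('Spec', 'name')
specs = [Spec('python'), Spec('py-numpy'), Spec('zlib')]
print([s.name for s in filter_exclude(specs, ['py-'])])  # ['python', 'zlib']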
Example #47
def data_generator(input_file):
    # sort by the first column (hgvs id returned from Mutalyzer)
    os.system("sort -k1 -n %s > %s.sorted" % (input_file, input_file))
    open_file = open("%s.sorted" % (input_file))
    emv = csv.reader(open_file, delimiter=",")
    # Skip header (reader.next() was Python 2 only)
    next(emv)
    emv = filter(lambda x: x[0], emv)
    json_rows = map(_map_line_to_json, emv)
    row_groups = (it for (key, it) in groupby(json_rows, lambda row: row["_id"]))
    return (merge_duplicate_rows(rg, "emv") for rg in row_groups)
Example #48
def main():
    import sys
    import argparse
    import os
    import os.path
    import errno

    parser = argparse.ArgumentParser(description=info,
                formatter_class=argparse.RawTextHelpFormatter)
    parser.add_argument('-d', '--dict', action='store', dest='dict',
                default='OWL14', help='Choose dictionary.')
    parser.add_argument('--min', type=int, action='store', dest='min', default=None)
    parser.add_argument('--max', type=int, action='store', dest='max', default=None)
    parser.add_argument('-a', '--all', action='store_true', dest='all', 
                default=False, help='Return all anagrams of length 3 or more.')
    parser.add_argument('letters', nargs='?', type=str, 
            help='Letters to anagram. Use _ for blanks.')
    results = parser.parse_args()

    # Abort if dictionary is not valid
    if results.dict not in DICT:
        print('Error: Invalid dictionary', file=sys.stderr)
        exit(-1)

    # Abort if no letters were provided
    if results.letters is None:
        parser.print_help()
        exit(-1)

    query = distill_query(results.letters)
    f = query_filter(query)

    # Find dictionary location based on script location
    head, tail = os.path.split(__file__)
    dictfile = os.path.join(head, '..', 'anagram', 'data', 'OWL14.txt')
    # dictfile = os.path.join(head, '..', 'share', 'OWL14.txt')
    # dictfile = os.path.join(__file__, 'data', 'OWL14.txt')

    with open(dictfile, 'rt') as infile:
        words = (line.strip() for line in infile)
        for word in filter(f, words):
            print(word)
    exit(0)

    # NOTE: everything below is unreachable (exit(0) above) and refers to
    # names (letters, blanks) that are never defined in this function
    L = {letter: letters.count(letter) for letter in letters}
    if results.min is None:
        results.min = len(letters) + blanks
    if results.max is None:
        results.max = len(letters) + blanks
    if results.max < results.min:
        parser.print_help()
        exit(-1)
    if results.all:
        results.min = 3
Example #49
def extract():
    predicates = get_predicates('_p')
    file_predicates = get_predicates('_filep')
    file_predicates.update(predicates)
    d = defaultdict(set)

    for current, dirs, files in os.walk('/'):
        if current == '/':
            del dirs[dirs.index('sys')]
            del dirs[dirs.index('dev')]
            del dirs[dirs.index('proc')]
        localpath = partial(os.path.join, current)

        for pred_name, predicate in file_predicates.items():
            d[pred_name + '_files'].update(filter(predicate, map(localpath, files)))
        for pred_name, predicate in predicates.items():
            d[pred_name + '_dirs'].update(filter(predicate, map(localpath, dirs)))

    save_dump(d)
    return d
Example #50
def get_podcast_types(episodes):
    """Return the types of a podcast.

    A podcast is considered to be of a given type if the ratio of episodes
    that are of that type meets TYPE_THRESHOLD.
    """
    has_mimetype = lambda e: e.mimetypes
    episodes = filter(has_mimetype, episodes)
    types = defaultdict()
    for e in episodes:
        for mimetype in e.mimetypes:
            t = get_type(mimetype)
            if not t:
                continue
            types[t] = types.get(t, 0) + 1

    max_episodes = sum(types.itervalues())
    l = list(types.iteritems())
    l.sort(key=lambda x: x[1], reverse=True)

    return [x[0] for x in filter(lambda x: max_episodes / float(x[1]) >= TYPE_THRESHOLD, l)]
Example #51
def N_stat(lengths, N):
    '''
    maximum positive integer L such that the total number of nucleotides
    of all contigs having length >= L is at least N% of the sum of contig lengths.
    :type lengths: list[N],N>0
    :type N: float,>0
    :rtype int'''
    def is_candidate(L):
        return (sum(filter(lambda x: x >= L, lengths)) / float(sum(lengths))) >= N
    candidates = filter(is_candidate, xrange(0, sum(lengths)))
    return max(candidates)
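The brute-force candidate scan above is quadratic in the total length; the usual direct computation sorts once. A sketch (here N is a fraction, e.g. 0.5 for the N50 statistic):

def n_stat_fast(lengths, fraction=0.5):
    # largest L such that contigs of length >= L cover >= fraction of the total
    total = sum(lengths)
    running = 0
    for length in sorted(lengths, reverse=True):
        running += length
        if running >= fraction * total:
            return length

print(n_stat_fast([2, 2, 2, 3, 3, 4, 8, 8], 0.5))  # 8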
Example #52
 def namespaces(self):
     """
     A property holding only effective @namespace rules in
     self.parentStyleSheets.
     """
     namespaces = {}
     for rule in filter(lambda r: r.type == r.NAMESPACE_RULE,
                        reversed(self.parentStyleSheet.cssRules)):
         if rule.namespaceURI not in as_list(namespaces.values()):
             namespaces[rule.prefix] = rule.namespaceURI
     return namespaces
Example #53
 def children(self, is_computer=True):
     """ Return an iterator with all the possible boards for a player. """
     logging.debug("Generating children matrices:")
     piece = self.__piece(is_computer)
     points = filter(lambda x: x[2] == piece, self)
     for src in map(lambda x: x[:2], points):
         for dst in self.__moves_for_point(src):
             b = Board(matrix=self.matrix)
             b.move(src, dst)
             logging.debug("Board:\n %s\nMove: %s", b, (src, dst))
             yield b, (src, dst)
Example #54
def setup_reviewfolder_roles(folder):
    site = getSite()
    acl = site['acl_users']['ldap-plugin']['acl_users']

    with getUtility(ILDAPQuery)(acl, paged=True) as q_ldap:
        q_groups = q_ldap.query_groups(QUERY_LDAP_ROLES, ('cn',))

    groups = [r[1]['cn'][0] for r in q_groups]

    grant = chain(
        product([ROLE_RP1], filter(f_start_sr, groups)),
        product([ROLE_QE], filter(f_start_qe, groups)),
        product([ROLE_RP2], filter(f_start_re, groups)),
        product([ROLE_LR], filter(f_start_lr, groups)),
        product([ROLE_MSA], filter(f_start_msa, groups)),
    )

    for role, g_name in grant:
        folder.manage_setLocalRoles(g_name, [role])

    return folder
Example #55
def first_true(iterable, default=False, pred=None):
    u"""Returns the first true value in the iterable.

        If no true value is found, returns *default*

        If *pred* is not None, returns the first item
        for which pred(item) is true.

        
        first_true([a,b,c], x) --> a or b or c or x
        first_true([a,b], x, f) --> a if f(a) else b if f(b) else x """
    return next(filter(pred, iterable), default)
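Usage (this is the recipe from the itertools documentation):

print(first_true([0, '', 42, 7]))                               # 42
print(first_true([2, 4, 6], default=-1, pred=lambda x: x % 2))  # -1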
Example #57
 def newJoin(self, rightWME, newJoinVariables):
     """
     >>> aNode1 = AlphaNode((Variable('P1'),RDF.type,URIRef('urn:uuid:Prop1')))
     >>> aNode2 = AlphaNode((Variable('P2'),RDF.type,URIRef('urn:uuid:Prop1')))
     >>> aNode3 = AlphaNode((Variable('P1'),Variable('P2'),RDFS.Class))
     >>> token1 = ReteToken((RDFS.domain,RDFS.domain,RDFS.Class))
     >>> token2 = ReteToken((RDFS.domain,RDF.type,URIRef('urn:uuid:Prop1')))
     >>> token3 = ReteToken((RDFS.range,RDF.type,URIRef('urn:uuid:Prop1')))
     >>> token4 = ReteToken((RDFS.range,RDFS.domain,RDFS.Class))
     >>> token5 = ReteToken((RDFS.domain,RDF.type,URIRef('urn:uuid:Prop1'))).bindVariables(aNode2)
     >>> inst = PartialInstantiation([token2.bindVariables(aNode1),token3.bindVariables(aNode2),token5])
     >>> pprint(list(inst.tokens))
     [<ReteToken: P2->http://www.w3.org/2000/01/rdf-schema#range>,
      <ReteToken: P1->http://www.w3.org/2000/01/rdf-schema#domain>,
      <ReteToken: P2->http://www.w3.org/2000/01/rdf-schema#domain>]
     >>> newInst = inst.newJoin(token1.bindVariables(aNode3),[Variable('P2')])
     >>> token1
     <ReteToken: P1->http://www.w3.org/2000/01/rdf-schema#domain,P2->http://www.w3.org/2000/01/rdf-schema#domain>
     >>> newInst
     <PartialInstantiation (joined on ?P2): {<ReteToken: P1->http://www.w3.org/2000/01/rdf-schema#domain,P2->http://www.w3.org/2000/01/rdf-schema#domain>, <ReteToken: P1->http://www.w3.org/2000/01/rdf-schema#domain>, <ReteToken: P2->http://www.w3.org/2000/01/rdf-schema#domain>}>
     >>> pprint(list(newInst.tokens))
     [<ReteToken: P1->http://www.w3.org/2000/01/rdf-schema#domain,P2->http://www.w3.org/2000/01/rdf-schema#domain>,
      <ReteToken: P1->http://www.w3.org/2000/01/rdf-schema#domain>,
      <ReteToken: P2->http://www.w3.org/2000/01/rdf-schema#domain>]
     """
     newJoinDict = self.joinedBindings.copy()
     if newJoinVariables:
         # only a subset of the tokens in this partial instantiation will be
         # 'merged' with the new token - joined on the new join variables
         newJoinDict.update(project(rightWME.bindingDict, newJoinVariables))
         newPInst = PartialInstantiation([], consistentBindings=newJoinDict)
         for token in self.tokens:
             commonVars = False
             for newVar in filter(
                 lambda x:
                     x in token.bindingDict
                 and rightWME.bindingDict[x] == token.bindingDict[x],
                     newJoinVariables):
                 # consistent token
                 commonVars = True
                 newPInst.add(token, noPostProcessing=True)
             if not commonVars:
                 # there are no common variables, no need to check
                 newPInst.add(token, noPostProcessing=True)
     else:
         # all of the tokens in this partial instantiation are already
         # bound consistently with respect to the new token
         newPInst = PartialInstantiation([], consistentBindings=newJoinDict)
         for token in self.tokens:
             newPInst.add(token, noPostProcessing=True)
     newPInst.add(rightWME)
     return newPInst
Example #58
    def remove_specs(self, *specs, **kwargs):
        assert all((s.concrete for s in specs))
        with_dependents = kwargs.get("with_dependents", True)
        with_dependencies = kwargs.get("with_dependencies", False)

        specs = set(specs)

        if with_dependencies:
            specs = get_dependencies(specs)

        if kwargs.get("exclude", None):
            specs = set(filter_exclude(specs, kwargs["exclude"]))

        all_specs = set(self.get_all_specs())

        to_deactivate = specs
        to_keep = all_specs - to_deactivate

        dependents = find_dependents(to_keep, to_deactivate)

        if with_dependents:
            # remove all packages depending on the ones to remove
            if len(dependents) > 0:
                tty.warn(self._croot +
                         "The following dependents will be removed: %s"
                         % ", ".join((s.name for s in dependents)))
                to_deactivate.update(dependents)
        elif len(dependents) > 0:
            tty.warn(self._croot +
                     "The following packages will be unusable: %s"
                     % ", ".join((s.name for s in dependents)))

        extensions = set(filter(lambda s: s.package.is_extension,
                         to_deactivate))
        standalones = to_deactivate - extensions

        # Please note that a traversal of the DAG in post-order and then
        # forcibly removing each package should remove the need to specify
        # with_dependents for deactivating extensions/allow removal without
        # additional checks (force=True). If removal performance becomes
        # unbearable for whatever reason, this should be the first point of
        # attack.
        #
        # see: https://github.com/spack/spack/pull/3227#discussion_r117147475
        remove_extension = ft.partial(self.remove_extension,
                                      with_dependents=with_dependents)

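        # wrapping in set() forces evaluation of the lazy maps on Python 3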
        set(map(remove_extension, extensions))
        set(map(self.remove_standalone, standalones))

        self.purge_empty_directories()
Example #59
def taxonomy( article_generator ):
    # called after articles have been read, before calculating tags & categories
    
    self = article_generator
    
    for article in self.articles:
        if hasattr( article, 'type' ):
            # enable usage of {type} in CATEGORY_SAVE_AS/CATEGORY_URL
            # category comparison is done by name, so categories of different types would be merged in a dictionary
            # but we separate category dictionaries by type so that's no problem
            article.category.type = article.type
            if hasattr( article, 'tags' ):
                for tag in article.tags:
                    tag.type = article.type
    
    self.types = {
        type: ArticleType( type, list( articles ), self.settings )
        for type, articles in group_by(
            attrgetter( 'type' ),
            # only take articles that have the 'type' metadata
            filter(
                lambda article: hasattr( article, 'type' ),
                self.articles
            )
        )
    }
    
    # { AuthorName: { TypeName: [ Article ] } }
    self.articles_by_type_by_author = defaultdict( partial( defaultdict, list ) )
    
    for type, info in self.types.items():
        for article in info.articles:
            # add to authors' (type-partitioned) article lists
            for author in getattr( article, 'authors', [] ):
                self.articles_by_type_by_author[ author ][ type ].append( article )
    
    for author, types in self.articles_by_type_by_author.items():
        for type, articles in types.items():
            articles.sort( key = attrgetter('date'), reverse = True )
    
    self.mrw_all_articles = self.articles
    
    # we overwrite the normal taxonomy phase, so we empty the list used there
    # the self.categories dict wouldn't distinguish categories of different types
    self.articles = []
    # TODO: support translations?
    self.translations = []
    
    self._update_context( [ 'articles_by_type_by_author' ] )
    self.context[ 'types' ] = self.types