Example #1
def test_count():
    assert count((1, 2, 3)) == 3
    assert count([]) == 0
    assert count(iter((1, 2, 3, 4))) == 4

    assert count('hello') == 5
    assert count(iter('hello')) == 5
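A minimal sketch (not part of the test above) of what count gives you here: it behaves like the built-in len but also accepts lazy iterables, consuming them as it counts. It assumes count is imported from toolz.

from toolz import count

gen = (x * x for x in range(4))
# len(gen) would raise TypeError; count iterates and returns 4.
assert count(gen) == 4
# Counting consumed the generator, so a second count finds nothing left.
assert count(gen) == 0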
Example #2
def get_expected(employee_data, years):
    """
    Get the expected percentage of employees per company who are a specified
    number of years older than the company-wide average age.

    :param employee_data: Hypothesis generated employee database source data.
    :param years: Find percentage of employees this number of years older
                    than the company-wide average.

    :return: Generator which when iterated yields an instance of ``Expected``
             which represents the expected result from get_employees_per_company.
    """
    for company, company_employees in get_company_employees(employee_data):

        percentage_older = 0

        def calc_age(employee):
            return (today - employee.date_of_birth).days / 365.25

        employee_ages = list(map(calc_age, company_employees))
        average_employee_age = statistics.mean(employee_ages)

        def is_older(employee_age):
            return employee_age > (average_employee_age + years)

        n_employees_older = count(filter(is_older, employee_ages))

        if n_employees_older:
            n_employees = float(len(company_employees))
            percentage_older = n_employees_older / n_employees * 100

        yield Expected(company.company_id, company.company_name,
                       average_employee_age, percentage_older)
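A hedged, self-contained sketch of the count(filter(...)) pattern used above, with made-up sample ages and count imported from toolz:

import statistics
from toolz import count

ages = [23.5, 31.0, 47.2, 52.8, 29.9]
average = statistics.mean(ages)        # 36.88
years = 5

# Count how many ages exceed the average by more than `years`.
n_older = count(filter(lambda age: age > average + years, ages))
percentage_older = n_older / len(ages) * 100
assert n_older == 2                    # 47.2 and 52.8 qualify
assert percentage_older == 40.0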
Example #3
def test_print_table(labels, title):
    table = cli_inference.print_table(labels, title, print=False)
    assert isinstance(table, rich.table.Table)
    assert table.title == title
    unique = itertoolz.count(itertoolz.unique(labels))
    assert table.row_count == unique + 1
    assert all(label in getattr(itertoolz.first(table.columns), "_cells")
               for label in labels)
    table = cli_inference.print_table(labels, title, print=True)
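A small sketch of the count(unique(...)) idiom from the test above, i.e. counting distinct values without materialising an intermediate set; it assumes both functions come from toolz.itertoolz:

from toolz import itertoolz

labels = ["cat", "dog", "cat", "bird", "dog"]
assert itertoolz.count(itertoolz.unique(labels)) == 3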
Example #4
    def special_issue(self):
        """
        Checks whether the paper has been published in a Special Issue.
        """
        def si_dict():
            brain = first(paper_view.getSpecialIssues())
            return {'title': brain.Title, 'url': brain.getURL}

        paper_view = getMultiAdapter((self.context, self.request),
                                     name="paperView")
        return count(paper_view.getSpecialIssues()) > 0 and si_dict() or False
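The trailing "and ... or" construct above predates conditional expressions; a hedged, hypothetical rewrite of that return statement (count(...) > 0 simply tests that the query returned anything), assuming count comes from toolz as in the other examples:

from toolz import count

def special_issue_equivalent(paper_view, si_dict):
    # Hypothetical standalone form of the return above, using a conditional
    # expression instead of the "and ... or" idiom.
    return si_dict() if count(paper_view.getSpecialIssues()) > 0 else False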
Example #5
    def add_async(self, *items):
        """ Add additional items to the asynchronous processing queue.

        Args:
            items (list(Any)): list of items that need processing. Each item is
                applied one at a time to an available driver from the pool.

        Raises:
            DriverPoolValueError: when no items are supplied.
        """
        if len(items) == 1 and isinstance(items[0], list):
            # Unwrap a single list argument; keeping it as a list (rather than
            # wrapping it in iter()) means count() below does not exhaust it
            # before the enqueue loop runs.
            items = items[0]
        if not items:
            raise DriverPoolValueError(
                'cannot add items with value: %s' % str(items))
        item_count = count(items)
        self.logger.debug('adding %d additional items to tasks', item_count)
        for o in items:
            self._tasks.put(o)
Example #6
def compress(data: Iterable[T], key: Optional[Callable[[T], Any]] = None) -> Iterable[Tuple[T, int]]:
    for k, g in itertools.groupby(data, key=key):
        yield (k, itertoolz.count(g))
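A hedged usage sketch for compress: with the default key it amounts to run-length encoding over consecutive equal items.

assert list(compress("aaabbc")) == [("a", 3), ("b", 2), ("c", 1)]
assert list(compress([1, 1, 2, 1])) == [(1, 2), (2, 1), (1, 1)]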
Example #7
def matchingfrequencies(*seqs: Iterable[T],
                        key=None) -> Iterable[Tuple[T, int]]:
    for k, g in groupby(merge(*[unique(seq, key=key) for seq in seqs],
                              key=key)):
        yield (k, count(g))
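A hedged usage sketch: assuming each input sequence is sorted (merge expects sorted iterables), the result pairs every distinct element with the number of sequences it occurs in.

assert list(matchingfrequencies([1, 2, 3], [2, 3, 4])) == [(1, 1), (2, 2), (3, 2), (4, 1)]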
Example #8
def get_expected(employee_data, location, min_percentage):
    """
    Calculate the percentage of a company's employees at a particular location,
    filtered by a minimum percentage of employees.

    :param employee_data: Hypothesis generated employee database source data.
    :param location: The location to get employee percentage for. This is a / separated list of
                     any of the following combinations:

                      - continent/country/state/city
                      - continent/country/state
                      - continent/country
                      - continent

    :param min_percentage: The minimum percentage of employees a location should
                           have to be included in the output.

    :return: Generator which when iterated yields an instance ``Expected`` which represents the
             expected result from get_employees_percentage_by_location.
    """
    location_fields = ['city', 'state', 'country', 'continent']

    continent, country, state, city = (x for x, y in itertools.zip_longest(
        location.split('/'), location_fields))

    params = Bunch(continent=continent,
                   country=country,
                   state=state,
                   city=city)

    def get_location(employee):
        return employee_data.locations[employee.location_id - 1]

    def are_all_attrs_equal(lhs, rhs, attrs):
        return all(getattr(lhs, attr) == getattr(rhs, attr) for attr in attrs)

    def get_location_and_where(n):
        """
        Get the location to include in the result and a where function to
        filter the company employees for inclusion when calculating the
        percentage at a location.
        """
        fields = location_fields[n:]
        filter_func = partial(are_all_attrs_equal, params, attrs=fields)
        employee_location = next(filter(filter_func, employee_data.locations))

        location = '/'.join(
            getattr(employee_location, x) for x in reversed(fields))

        def where(employee):
            return are_all_attrs_equal(get_location(employee), params, fields)

        return location, where

    for company, company_employees in get_company_employees(employee_data):

        if city:
            location, where = get_location_and_where(0)
        elif state:
            location, where = get_location_and_where(1)
        elif country:
            location, where = get_location_and_where(2)
        else:
            location, where = get_location_and_where(3)

        n_employees_in_location = count(filter(where, company_employees))
        n_employees = float(len(company_employees))
        percentage = n_employees_in_location / n_employees * 100

        if percentage > 0 and percentage > min_percentage:
            yield Expected(company.company_id, company.company_name, location,
                           percentage)
Example #9
def count_files_with_ext(directory: Path, ext: str) -> int:
    """Counts files matching extension in directory"""
    return itertoolz.count(Path(directory).rglob(f"*{ext}"))
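A hedged usage sketch with a hypothetical "src" directory; rglob matches recursively, so files in subdirectories are counted as well.

from pathlib import Path

n_python_files = count_files_with_ext(Path("src"), ".py")
print(n_python_files)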
Example #10
File: core.py Project: jakirkham/yail
def disperse(seq):
    """
        Iterates over the given sequence in a dispersed order: it recursively
        proceeds through the sequence in such a way that values that follow
        each other in the output are preferably not only non-sequential, but
        fairly far apart. This does not always work with small ranges, but
        works nicely with large ranges.

        Args:
            seq(iterable):       the sequence (e.g. a range) to disperse

        Returns:
            result(generator):   a generator that can be used to iterate
                                 through the sequence.

        Examples:

            >>> list(disperse(range(10)))
            [0, 5, 8, 3, 9, 4, 6, 1, 7, 2]
    """

    try:
        len_seq = len(seq)
    except TypeError:
        seq, len_seq = itertools.tee(seq)
        len_seq = count(len_seq)

    def disperse_helper(b, part_seq_1):
        if b != 0:
            half_diff = float(b) / 2.0

            mid_1 = int(math.floor(half_diff))
            mid_2 = int(math.ceil(half_diff))

            if 0 < mid_1 and b > mid_2:
                part_seq_1, part_seq_2 = itertools.tee(part_seq_1)

                front_mid_1_seq, mid_1_val, _ = split(mid_1, part_seq_1)
                _, mid_2_val, back_mid_2_seq = split(mid_2, part_seq_2)
                del _

                mid_2_val = itertools.tee(mid_2_val)
                back_mid_2_seq = concat([mid_2_val[0], back_mid_2_seq])
                mid_2_val = mid_2_val[1]

                yield (first(mid_2_val))

                for _1, _2 in zip(disperse_helper(mid_1 - 0, front_mid_1_seq),
                                  disperse_helper(b - mid_2, back_mid_2_seq)):
                    yield (_2)
                    yield (_1)

                if mid_1 != mid_2:
                    yield (first(mid_1_val))

    if len_seq == 0:
        return

    val, seq = peek(seq)
    yield (val)

    for each in disperse_helper(len_seq, seq):
        yield (each)
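A standalone sketch of the tee/count trick used above for measuring the length of an iterator that has no len(), while keeping a usable copy (assumes count from toolz; note that tee buffers whatever the counting copy consumes):

import itertools
from toolz import count

def length_and_copy(iterable):
    # Split the iterator; exhaust one copy to count it, hand back the other.
    probe, copy = itertools.tee(iterable)
    return count(probe), copy

n, items = length_and_copy(c for c in "abc")
assert n == 3
assert list(items) == ["a", "b", "c"]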