Example #1
def parse_packet(binary: Iterator[Binary]) -> Packet:
    version = binary_to_int(take(3, binary))
    type_id = binary_to_int(take(3, binary))

    if type_id == 4:  # literal value
        value = 0
        for c in chunked(binary, 5):
            for b in c[1:]:
                value <<= 1
                value |= b
            if c[0] == 0:
                break
        return LiteralValuePacket(version, type_id, value)

    # operator packet
    length_type_id = next(binary)

    children = []
    if length_type_id == 0:
        # next 15 bits are a number that represents the
        # total length in bits of the sub-packets contained by this packet
        total_length_in_bits = binary_to_int(take(15, binary))
        bits = peekable(take(total_length_in_bits, binary))
        while bits:
            children.append(parse_packet(bits))
    else:
        # next 11 bits are a number that represents the
        # number of sub-packets immediately contained by this packet
        number_of_sub_packets = binary_to_int(take(11, binary))
        for _ in range(number_of_sub_packets):
            children.append(parse_packet(binary))

    return OperatorPacket(version, type_id, children)
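For context: take pulls a fixed number of bits off a shared iterator, so each successive header read continues where the previous one stopped. binary_to_int is this codebase's own helper, not part of more_itertools; a minimal sketch of it, assuming MSB-first bits:

from typing import Iterable

def binary_to_int(bits: Iterable[int]) -> int:
    # Fold an iterable of 0/1 bits (most significant bit first) into an int.
    value = 0
    for bit in bits:
        value = (value << 1) | bit
    return value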
Example #2
def test_incompatible_profile(lidar_stream: client.PacketSource) -> None:
    """Test batching packets into a LidarScan with an incompatible profile."""

    info = lidar_stream.metadata
    assert info.format.udp_profile_lidar == client.UDPProfileLidar.PROFILE_LIDAR_LEGACY

    packets_per_frame = (info.format.columns_per_frame //
                         info.format.columns_per_packet)

    batch = ScanBatcher(lidar_stream.metadata)

    ls = client.LidarScan(info.format.pixels_per_column,
                          info.format.columns_per_frame,
                          client.UDPProfileLidar.PROFILE_LIDAR_RNG19_RFL8_SIG16_NIR16_DUAL)

    # Try to decode a legacy packet into a dual returns scan
    # TODO change exception thrown on the cpp side
    with pytest.raises(IndexError):
        for p in take(packets_per_frame, lidar_stream):
            batch(p._data, ls)

    batch = ScanBatcher(lidar_stream.metadata)

    fields = {
        client.ChanField.RANGE: np.uint8,
    }
    ls = client.LidarScan(info.format.pixels_per_column,
                          info.format.columns_per_frame,
                          fields)

    # Test for decoding scans to a bad dest buffer type
    with pytest.raises(ValueError):
        for p in take(packets_per_frame, lidar_stream):
            batch(p._data, ls)
Example #3
    def test_loop_to_seqc_cluster_handling(self):
        """Test handling of clusters"""
        with self.assertRaises(AssertionError):
            loop_to_seqc(Loop(repetition_count=12, children=[Loop()]),
                         min_repetitions_for_for_loop=3,
                         min_repetitions_for_shared_wf=2,
                         waveform_to_bin=make_binary_waveform)

        loop_to_seqc_kwargs = dict(min_repetitions_for_for_loop=3,
                                   min_repetitions_for_shared_wf=4,
                                   waveform_to_bin=make_binary_waveform)

        wf_same = map(WaveformPlayback,
                      map(make_binary_waveform, get_unique_wfs(100000, 32)))
        wf_sep, = map(WaveformPlayback,
                      map(make_binary_waveform, get_unique_wfs(1, 64)))

        node_clusters = [
            take(2, wf_same), [wf_sep],
            take(3, wf_same), [wf_sep],
            take(4, wf_same),
            take(4, wf_same)
        ]
        root = Loop(
            repetition_count=12,
            children=[Loop() for _ in range(2 + 1 + 3 + 1 + 4 + 1 + 4)])

        expected = Repeat(
            12,
            Scope([
                *node_clusters[0], wf_sep,
                SteppingRepeat(node_clusters[2]), wf_sep,
                SteppingRepeat(node_clusters[4]),
                SteppingRepeat(node_clusters[5])
            ]))

        def dummy_find_sharable_waveforms(cluster):
            if cluster is node_clusters[4]:
                return [True]
            else:
                return None

        p1 = mock.patch('qupulse._program.seqc.to_node_clusters',
                        return_value=node_clusters)
        p2 = mock.patch('qupulse._program.seqc.find_sharable_waveforms',
                        wraps=dummy_find_sharable_waveforms)
        p3 = mock.patch('qupulse._program.seqc.mark_sharable_waveforms')

        with p1 as to_node_clusters_mock, p2 as find_share_mock, p3 as mark_share_mock:
            result = loop_to_seqc(root, **loop_to_seqc_kwargs)
            self.assertEqual(expected, result)

            to_node_clusters_mock.assert_called_once_with(
                root, loop_to_seqc_kwargs)
            self.assertEqual(
                find_share_mock.mock_calls,
                [mock.call(node_clusters[4]),
                 mock.call(node_clusters[5])])
            mark_share_mock.assert_called_once_with(node_clusters[4], [True])
Example #4
 def _get_emoji(s='', limit=0, offset=0):
     s = s.lower()
     limited = list
     if limit > 0:
         limited = partial(take, limit)
     emoji = (f'{e} {name}' for name, e in _emoji.items()
              if s in name.lower())
     take(offset, emoji)
     return limited(emoji)
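The offset/limit pattern above uses take twice: once to discard the skipped page, and once (via partial) to cap the result. A quick illustration on a plain iterator:

from functools import partial
from more_itertools import take

it = iter(range(10))
take(3, it)              # discard the first 3 items (the offset)
page = partial(take, 4)  # cap the result at 4 items (the limit)
print(page(it))          # [3, 4, 5, 6]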
Example #5
 def __init__(self, n=4, h=4, canonic=True):
     if canonic:
         x, y = sym.symbols('x, y')
         a = tuple(take(n, sym.numbered_symbols('a', start=1)))
     else:
         x, y = sym.symbols('z_2, z_1', real=True)
         a = tuple(-r for r in take(
             n, sym.numbered_symbols('r', start=1, real=True)))
     super().__init__(x, y, a, h)
Example #6
 def test_delay_loop_change_state(self):
     transform = SentenceDelayTransform(buffer_size=2, looping=True)
     sentences = list(self.analyzer.analyze_text('Eka lause tää on. Tämä on toka. Kolmatta viedään. Neljäskin löytyi.'))
     sentences2 = list(self.analyzer.analyze_text('Viides lause menossa. Kuudes tulossa. Seitsemäs päättää.'))
     delayed = list(take(3, transform.transform_stream(sentences)))
     delayed2 = list(take(7, transform.transform_stream(sentences2)))
     delayed = delayed + delayed2
     self.assertEqual(delayed[:2], [Sentence(), Sentence()])
     self.assertEqual(delayed[2:5], sentences[:3])
     self.assertEqual(delayed[5:8], sentences2)
     self.assertEqual(delayed[8:], sentences2[-2:])
Example #7
def if_only_down_or_right(grid):
    total_risk_grid = np.copy(grid)
    rows, cols = grid.shape
    coords = product(range(rows - 1, -1, -1), range(cols - 1, -1, -1))
    take(1, coords)  # omit bottom-right corner
    for i, j in coords:
        choices = []
        if i != rows - 1:
            choices.append(total_risk_grid[i + 1, j])
        if j != cols - 1:
            choices.append(total_risk_grid[i, j + 1])
        total_risk_grid[i, j] += min(choices)
    return total_risk_grid[0, 0] - grid[0, 0]
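take(1, coords) here simply advances the iterator past the bottom-right corner, whose accumulated risk is already correct. A small worked check on a made-up 2x2 grid:

import numpy as np

grid = np.array([[1, 2],
                 [3, 4]])
# Cheapest down/right path is 1 -> 2 -> 4; the entry cell is not counted,
# so the expected total risk is 2 + 4 = 6.
assert if_only_down_or_right(grid) == 6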
Example #8
    def select_snapshots(self, count: int) -> Optional[list[Subvolume]]:
        if self.has_snapshots():
            snapshots = none_throws(self.snapshots)

            return take(count, sorted(snapshots, reverse=True))

        return None
Example #9
 def process_record(iterable):
     num_child_nodes, num_metadata_entries = next(iterable), next(iterable)
     result = []
     for i in range(num_child_nodes):
         result += process_record(iterable)
     metadata_entries = take(num_metadata_entries, iterable)
     return [metadata_entries] + result
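Because the iterator is shared across the recursive calls, each next and take resumes exactly where the child record ended. Assuming the classic header format (child count, then metadata count), the familiar sample tree's metadata sums to 138:

from itertools import chain

data = iter([2, 3, 0, 3, 10, 11, 12, 1, 1, 0, 1, 99, 2, 1, 1, 2])
records = process_record(data)
assert sum(chain.from_iterable(records)) == 138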
Example #10
def show_pulls(labels=None, show_comments=False, state="open", since=None, org=False):
    issues = get_pulls("edx/edx-platform", labels, state, since, org, pull_details="all")

    category = None
    for index, issue in enumerate(issues):
        if issue.get("org") != category:
            # new category! print category header
            category = issue["org"]
            print("-- {category} ----".format(category=category))

        if 0:
            import pprint
            pprint.pprint(issue.obj)
        print(issue.format(ISSUE_FMT))

        if show_comments:
            comments_url = URLObject(issue['comments_url'])
            comments_url = comments_url.set_query_param("sort", "created")
            comments_url = comments_url.set_query_param("direction", "desc")
            comments = paginated_get(comments_url)
            last_five_comments = reversed(more_itertools.take(5, comments))
            for comment in last_five_comments:
                print(comment.format(COMMENT_FMT))

    # index is the zero-based index of the last pull request, so index+1 is the total
    print()
    print("{num} pull requests".format(num=index+1))
Example #11
def sampled(
    situation_template: Phase1SituationTemplate,
    *,
    ontology: Ontology,
    chooser: SequenceChooser,
    max_to_sample: int,
    default_addressee_node: OntologyNode = LEARNER,
    block_multiple_of_the_same_type: bool,
) -> Iterable[HighLevelSemanticsSituation]:
    """
    Gets *max_to_sample* instantiations of *situation_template* with *ontology*
    """
    check_arg(max_to_sample >= 0)
    return list(
        take(
            max_to_sample,
            _Phase1SituationTemplateGenerator(
                ontology=ontology,
                variable_assigner=_SamplingVariableAssigner(),
                block_multiple_objects_of_the_same_type=
                block_multiple_of_the_same_type,
            ).generate_situations(
                situation_template,
                chooser=chooser,
                default_addressee_node=default_addressee_node,
            ),
        ))
Example #12
def sort_n(
    stream: Iterator[Any],
    num: Optional[int],
    key: Optional[Callable] = None,
    reverse: bool = False,
    unique: bool = True,
) -> Sequence[Any]:
    """
    Sort a stream. Processes the whole stream, but loads only num*2 elements in memory.
    :param stream: iterator to sort
    :param num: batch size; also the maximum number of results kept
    :param key: sort key, as for sorted()
    :param reverse: sort in descending order
    :param unique: drop duplicates within each batch
    :return: the num smallest (or largest, when reverse) elements seen
    """

    results = []
    while True:
        buffer = take(num, stream)
        if not buffer:
            return results
        if unique:
            buffer = set(buffer)
        results.extend(buffer)
        results = sorted(results, key=key, reverse=reverse)[:num]
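A quick sketch of how this behaves on a small finite stream (illustrative values; note that unique only deduplicates within each batch of num elements):

stream = iter([7, 3, 9, 1, 8, 2, 6])
print(sort_n(stream, num=3))  # [1, 2, 3] -- never holds more than ~2*num items at once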
Example #13
 def _most_recent_convictions(recent_convictions) -> Optional[Charge]:
     recent_convictions.sort(key=lambda charge: charge.disposition.date, reverse=True)
     newer, older = take(2, padnone(recent_convictions))
     if newer and "violation" in newer.level.lower():
         return older
     else:
         return newer
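padnone (pad_none in newer more_itertools releases) appends an endless run of None, which is what makes the two-element unpack safe even when fewer than two convictions exist:

from more_itertools import padnone, take

print(take(2, padnone(['only-conviction'])))  # ['only-conviction', None]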
Example #14
def tsp1(nodes, dist):
    """
    Traveling salesman problem.
    Input:
        nodes: list of nodes (coordinates, when dist is not given)
        dist: dict keyed by (i, j) with the distance as value
    Output:
        total distance and the list of node indices
    """
    from more_itertools import iterate, take

    n = len(nodes)
    df = pd.DataFrame(
        [(i, j, dist[i, j]) for i in range(n) for j in range(n) if i != j],
        columns=["NodeI", "NodeJ", "Dist"],
    )
    m = LpProblem()
    df["VarIJ"] = addbinvars(len(df))
    df["VarJI"] = df.sort_values(["NodeJ", "NodeI"]).VarIJ.values
    u = [0] + addvars(n - 1)
    m += lpDot(df.Dist, df.VarIJ)
    for _, v in df.groupby("NodeI"):
        m += lpSum(v.VarIJ) == 1  # out-degree constraint
        m += lpSum(v.VarJI) == 1  # in-degree constraint
    for i, j, _, vij, vji in df.query("NodeI!=0 & NodeJ!=0").itertuples(False):
        m += u[i] + 1 - (n - 1) * (1 - vij) + (n - 3) * vji <= u[j]  # lifted potential (MTZ) constraint
    for _, j, _, v0j, vj0 in df.query("NodeI==0").itertuples(False):
        m += 1 + (1 - v0j) + (n - 3) * vj0 <= u[j]  # lifted lower-bound constraint
    for i, _, _, vi0, v0i in df.query("NodeJ==0").itertuples(False):
        m += u[i] <= (n - 1) - (1 - vi0) - (n - 3) * v0i  # lifted upper-bound constraint
    m.solve()
    df["ValIJ"] = df.VarIJ.apply(value)
    dc = df[df.ValIJ > 0.5].set_index("NodeI").NodeJ.to_dict()
    return value(m.objective), list(take(n, iterate(lambda k: dc[k], 0)))
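The final line rebuilds the tour by walking the successor dict: iterate yields 0, dc[0], dc[dc[0]], and so on, and take(n, ...) stops after all n nodes have been visited. In miniature:

from more_itertools import iterate, take

succ = {0: 2, 2: 1, 1: 0}  # toy successor dict for a 3-node tour
print(take(3, iterate(lambda k: succ[k], 0)))  # [0, 2, 1]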
Example #15
    def test_take_too_much(self):
        """Taking more than an iterator has remaining should return what the
        iterator has remaining.

        """
        t = mi.take(10, range(5))
        self.assertEqual(t, [0, 1, 2, 3, 4])
Example #16
def test_batch_custom_fields(lidar_stream: client.PacketSource) -> None:
    """Test batching of a LidarScan with custom fields set."""

    info = lidar_stream.metadata

    packets_per_frame = (info.format.columns_per_frame //
                         info.format.columns_per_packet)

    batch = ScanBatcher(lidar_stream.metadata)

    # create LidarScan with only 2 fields
    fields: Dict[client.ChanField, client.FieldDType] = {
        client.ChanField.RANGE: np.uint32,
        client.ChanField.SIGNAL: np.uint16
    }

    ls = client.LidarScan(info.format.pixels_per_column,
                          info.format.columns_per_frame, fields)

    # we expect zero initialized fields
    for f in ls.fields:
        assert np.count_nonzero(ls.field(f)) == 0

    # do batching into ls with a fields subset
    for p in take(packets_per_frame, lidar_stream):
        batch(p._data, ls)

    # it should contain the same num fields as we've added
    assert len(list(ls.fields)) == len(fields)

    # and the content shouldn't be zero after batching
    for f in ls.fields:
        assert np.count_nonzero(ls.field(f)) > 0
Example #18
def query_recent(nt: Type[NamedTuple], count: int) -> List[NamedTuple]:
    """query the module for recent entries (based on datetime) from a namedtuple"""
    items: List[NamedTuple] = more_itertools.take(
        count,
        sorted(glob_namedtuple(nt), key=_extract_dt_from(nt), reverse=True),
    )
    return items
Example #19
def show_pulls(labels=None,
               show_comments=False,
               state="open",
               since=None,
               org=False):
    issues = get_pulls("edx/edx-platform", labels, state, since, org)

    category = None
    for index, issue in enumerate(issues):
        issue.load_pull_details()
        if issue.get("org") != category:
            # new category! print category header
            category = issue["org"]
            print("-- {category} ----".format(category=category))

        if 0:
            import pprint
            pprint.pprint(issue.obj)
        print(issue.format(ISSUE_FMT))

        if show_comments:
            comments_url = URLObject(issue['comments_url'])
            comments_url = comments_url.set_query_param("sort", "created")
            comments_url = comments_url.set_query_param("direction", "desc")
            comments = paginated_get(comments_url)
            last_five_comments = reversed(more_itertools.take(5, comments))
            for comment in last_five_comments:
                print(comment.format(COMMENT_FMT))

    # index is the zero-based index of the last pull request, so index+1 is the total
    print()
    print("{num} pull requests".format(num=index + 1))
Example #20
def stat(func: Callable[[], Iterable[C]]) -> Dict[str, Any]:
    from more_itertools import ilen, take, first

    # todo not sure if there is something in more_itertools to compute this?
    errors = 0

    def funcit():
        nonlocal errors
        for x in func():
            if isinstance(x, Exception):
                errors += 1
            yield x

    it = iter(funcit())
    res: Any
    if QUICK_STATS:
        initial = take(100, it)
        res = len(initial)
        if first(it, None) is not None:  # todo can actually be none...
            # haven't exhausted
            res = f'{res}+'
    else:
        res = ilen(it)

    if errors > 0:
        # todo not sure, but for now ok
        res = f'{res} ({errors} errors)'

    return {
        func.__name__: res,
    }
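A rough feel for the quick path, assuming the QUICK_STATS flag is enabled: take(100, it) grabs at most 100 items, and first(it, None) probes whether anything remains, producing a '100+' marker instead of an exact count:

from typing import Iterable

def lots() -> Iterable[int]:
    yield from range(250)

print(stat(lots))  # with QUICK_STATS enabled: {'lots': '100+'}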
Example #21
def rings(sizes: Iterable[int] | int,
          atoms: Iterable[str] | str = "C",
          strict: bool = False) -> Generator[str, None, None]:
    """
    Generate rings of various sizes

    >>> list(rings([2], ['C']))
    []
    >>> list(rings(4, 'C'))
    ['C1CCC1']
    >>> list(rings([4, 5], ['C', 'N']))
    ['C1NCN1', 'C1NCNC1']
    """
    sizes = [sizes] if isinstance(sizes, int) else sizes
    atoms = itertools.repeat(atoms) if isinstance(
        atoms, str) else itertools.cycle(atoms)

    for size in sizes:
        if size < 3:
            if strict:
                raise ValueError(f"Cannot make ring with {size=}")
            continue

        it = iter(take(size, atoms))
        yield f"{next(it)}1" + "".join(atom for atom in it) + "1"
Example #22
def main():
    merged_count_of_arg_dic = get_merged_count_of_arg()
    sum_all = merged_count_of_arg_dic["SUM"]

    pred_file_name_dic = get_pred_dic()
    pred_file_dir = '/local/tsakaki/pa.count.all'

    # Read a banned predicate and its immediately preceding case argument from stdin,
    # one pair per line, e.g.: 古傷/ふるきず:ヲ格 抉る/えぐる:動
    for ban_arg_pred_line in sys.stdin:
        ban_arg_pred_line = ban_arg_pred_line.rstrip()
        lst = ban_arg_pred_line.split(' ')
        input_arg = lst[0]
        input_arg_case = input_arg.split(':')[1]
        pred = lst[1]

        pred_file_name = pred_file_name_dic[pred]
        sum_v = -1

        # The file contents look like:
        # 373755 SUM 撃つ/うつ:動
        with open(pred_file_dir + '/' + pred_file_name) as pred_file:
            ans_log_PMI_dict = {}  # keyed by arg; value is (log_pmi, (count_arg_given_pred, sum_v, sum_all, count_arg))

            for count_pred_line in pred_file:
                count_pred_line = count_pred_line.rstrip()
                count_pred_lst = count_pred_line.split(' ')
                count_arg_given_pred = int(count_pred_lst[0])
                arg = count_pred_lst[1]
                # pred = count_pred_lst[2]  # same as the pred already assigned above

                if arg == "SUM":  # assumes the first line of pred_file is the "SUM" entry
                    sum_v = count_arg_given_pred
                elif arg.split(':')[1] == input_arg_case:  # only consider args with the same case as the input
                    try:
                        log_PMI = math.log10(count_arg_given_pred) - math.log10(sum_v) + math.log10(sum_all) - math.log10(merged_count_of_arg_dic[arg])
                    except:
                        raise Exception("%s %s %s" % (arg, pred, pred_file_name))
                    ans_log_PMI_dict[arg] = (log_PMI, (count_arg_given_pred, sum_v, sum_all, merged_count_of_arg_dic[arg]))

                    # print "count_arg_given_pred: %d" % count_arg_given_pred
                    # print "sum_v: %d" % sum_v
                    # print "sum_all: %d" % sum_all
                    # print "count_arg: %d" % merged_count_of_arg_dic[arg]


            # Sort ans_log_PMI_dict and print the top entries.
            # The lambda is a little involved: items() yields (key, value) tuples, x[1] picks the value,
            # and value[0] is the log PMI, which is used as the sort key.

            take_num = 1000 #20
            freq_th = 100 # only keep (arg, predicate) pairs whose count is at least this threshold
            ans_items = [(k,v) for k, v in ans_log_PMI_dict.items() if v[1][0] >= freq_th]
            ranked = sorted(ans_items, key=lambda x:x[1][0], reverse=True)
            ranked = more_itertools.take(take_num, ranked)
            for k_arg, v_tpl in ranked:
                log_PMI = v_tpl[0]
                count_tpl = v_tpl[1]

                print "%f %d %d %d %d %s %s" % (log_PMI, count_tpl[0], count_tpl[1], count_tpl[2], count_tpl[3], k_arg, pred)
Example #23
def test_empty_examples():
    with pytest.raises(ValueError):
        next(find_dfas(accepting=[], rejecting=[]))

    dfas = take(4, find_dfas(accepting=[], rejecting=[], alphabet={'x', 'y'}))
    assert len(dfas) == 2
    for i, dfa in enumerate(dfas):
        assert dfa.label(()) != (i & 1)
Example #24
    def from_bytes(index_bytes: bytearray):

        _index_type, index_bytes = uvarint.cut(1, index_bytes).integers[0], uvarint.cut(1, index_bytes).rest

        if _index_type != 0:
            raise Exception("This is not an EliasFano index!")

        _n, _lower_bits, _upper_bits, inferiors_byte_count, superiors_byte_count = uvarint.cut(5, index_bytes).integers

        bytes_iter = iter(uvarint.cut(5, index_bytes).rest)

        if inferiors_byte_count:
            inferiors = ("{0:0%db}" % (_n * _lower_bits)).format(
                int.from_bytes(take(inferiors_byte_count, bytes_iter), 'little', signed=False))

            _inferiors = list(map(lambda inf: int("".join(inf), 2),
                                  windowed(iter(inferiors), _lower_bits,
                                           step=_lower_bits)))
        else:
            _inferiors = []

        if superiors_byte_count:
            # superiors contains exactly '2**(upper_bits)' 0s and exactly 'n' 1s
            superiors = ("{0:0%db}" % (_n + 2 ** _upper_bits)).format(
                int.from_bytes(take(superiors_byte_count, bytes_iter), 'little', signed=False))

            _superiors = list(map(lambda x: len(x),
                                  split_at(iter(superiors),
                                           lambda v: v == '0', keep_separator=False)))[0:-1]

            _superiors_prefixSums = list(accumulate(_superiors))
        else:
            _superiors = []
            _superiors_prefixSums = []

        # TODO: implement appropriate constructor
        ef_index = EliasFano([0])
        ef_index._n = _n
        ef_index._u = 2 ** max(1, _lower_bits + _upper_bits)
        ef_index._lower_bits = _lower_bits
        ef_index._upper_bits = _upper_bits
        ef_index._inferiors = _inferiors
        ef_index._superiors = _superiors
        ef_index._superiors_prefixSums = _superiors_prefixSums

        return ef_index
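The decode works because bytes_iter is consumed strictly in order: each take(count, bytes_iter) slices off the next section of the serialized index. In miniature:

from more_itertools import take

bytes_iter = iter(b'\x01\x02\x03\x04')
inferiors = take(2, bytes_iter)  # [1, 2] -- first section
superiors = take(2, bytes_iter)  # [3, 4] -- resumes where the last take stopped
print(int.from_bytes(inferiors, 'little', signed=False))  # 513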
Example #25
 def test_delay_loop(self):
     transform = SentenceDelayTransform(buffer_size=2, looping=True)
     sentences = list(self.analyzer.analyze_text('Eka lause tää on. Tämä on toka. Kolmatta viedään. Neljäskin löytyi.'))
     delayed = list(take(10, transform.transform_stream(sentences)))
     self.assertEqual(delayed[:2], [Sentence(), Sentence()])
     self.assertEqual(delayed[2:6], sentences)
     self.assertEqual(delayed[6:8], sentences[-2:])
     self.assertEqual(delayed[8:10], sentences[-2:])
Example #26
def isort(iterable, bufsize=1024, key=None):
    """
    Partially sorts a (potentially infinite) iterable in a best-effort way
    with fixed memory usage by maintaining a priority queue of `bufsize`,
    essentially acting as a "lookahead".

    In other words, will always yield the smallest value within the lookahead
    window next.

    Increasing the bufsize will lead to better results.

        >>> a = [1, 4, 9, 2, 5, 3, 7, 8, 0, 6]  # randomly ordered
        >>> list(isort(a, bufsize=3))
        [1, 2, 3, 4, 5, 0, 6, 7, 8, 9]

        >>> list(isort(a, bufsize=6))
        [1, 2, 0, 3, 4, 5, 6, 7, 8, 9]

        >>> list(isort(a, bufsize=8))
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

        >>> diff_from_three = lambda n: abs(3 - n)
        >>> list(isort(a, bufsize=5, key=diff_from_three))
        [3, 2, 4, 5, 1, 6, 0, 7, 8, 9]
    """
    it = iter(iterable)

    if key is None:
        buf = take(bufsize, it)
        heapify(buf)
        for item in it:
            yield heappushpop(buf, item)

        while buf:
            yield heappop(buf)
    else:
        i1, i2 = tee(it)
        it = zip(map(key, i1), count(0, -1), i2)  # decorate
        buf = take(bufsize, it)
        heapify(buf)
        for item in it:
            yield heappushpop(buf, item)[2]  # undecorate

        while buf:
            yield heappop(buf)[2]  # undecorate
Example #27
def result(request):
    url = request.POST["url"]
    frequency_list = {}
    db_data = ''
    if not Url.objects.filter(url=url):
        html = urlopen(url).read()
        soup = BeautifulSoup(html, features="html.parser")

        for script in soup(["script", "style"]):
            script.extract()

        text = soup.get_text()
        # break into lines and remove leading and trailing space on each
        lines = (line.strip() for line in text.splitlines())
        # break multi-headlines into a line each
        chunks = (phrase.strip() for line in lines
                  for phrase in line.split("  "))
        # drop blank lines
        text = '\n'.join(chunk for chunk in chunks if chunk)

        text = text.lower()
        text = text.replace('\n', ' ')
        text = text.split(' ')
        text = sorted(text)
        #print(text)

        text = [word for word in text if word not in stopwords.words()]

        frequency = {}
        for word in text:
            if word in frequency:
                frequency[word] += 1
            else:
                frequency[word] = 1

        sorted_frequency = {}
        sorted_keys = sorted(frequency, key=frequency.get, reverse=True)

        for w in sorted_keys:
            sorted_frequency[w] = frequency[w]

        frequency_list = more_itertools.take(10, sorted_frequency.items())

        #saving data in database
        saverecord = Url()
        saverecord.url = url
        saverecord.frequency_list = json.dumps(frequency_list)
        saverecord.save()
    else:
        db_data = Url.objects.filter(url=url).values_list('frequency_list')
        #db_data = serializers.serialize('json',list(db_data))
        db_data = json.loads(db_data[0][0])

    return render(request, 'result.html', {
        "frequency": frequency_list,
        "db_data": db_data
    })
Example #28
def test_clone(mock_makedirs, mock_run_commands):
    assert [1, 2, 3] == take(3, clone(['commands'], 'root_dir'))
    assert [call('root_dir', exist_ok=True)] == mock_makedirs.mock_calls
    assert [
        call([('commands', 'root_dir')],
             check=False,
             max_processes=25,
             timeout=60)
    ] == mock_run_commands.mock_calls
Example #29
    def fromFile(self, file):
        f = fileinput.input(file)
        header = take(2, f)
        numVertices = int(header[0])
        numEdges = int(header[1])
        graph = Graph(numVertices)
        lines = take(numEdges, f)
        linesList = list(lines)
        linesList.reverse()
        for line in linesList:
            vw = map_except(int, line, ValueError, TypeError)
            vwList = list(vw)
            v = vwList[0]
            w = vwList[1]
            graph.addEdge(v, w)

        fileinput.close()
        return graph
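Note the map_except trick: it maps int over the characters of the line and silently drops the separator characters that raise ValueError, which only works while vertex ids are single digits. For example:

from more_itertools import map_except

print(list(map_except(int, '3 7\n', ValueError, TypeError)))  # [3, 7]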
Example #30
 def _most_recent_convictions(recent_convictions):
     recent_convictions.sort(key=lambda charge: charge.disposition.date,
                             reverse=True)
     first, second, third = take(3, padnone(recent_convictions))
     if first and "violation" in first.level.lower():
         return second, third
     elif second and "violation" in second.level.lower():
         return first, third
     else:
         return first, second
Example #31
 def next_batch() -> List[LidarScan]:
     with closing(
             Sensor(hostname,
                    lidar_port,
                    metadata=metadata,
                    buf_size=n * 128,
                    _flush_before_read=False)) as source:
         source.flush(full=True)
         scans = cls(source, timeout=1.0, complete=True, _max_latency=0)
         return take(n, scans)
Example #32
def parse_theme(div):
    info = " ".join(RE_INFO.findall(div.contents[1]))
    if not info:
        raise ValueError(f"theme with no info: {div}")

    ps = div.find_all("p")
    qas = mit.take(THEME_SIZE, mit.chunked(ps, 2))

    questions = [parse_question(q, a) for q, a in qas]
    return Theme(info=info, questions=questions)
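mit.chunked groups the <p> tags into (question, answer) pairs, and mit.take keeps only the first THEME_SIZE of them; on plain data:

import more_itertools as mit

print(mit.take(2, mit.chunked(range(6), 2)))  # [[0, 1], [2, 3]] -- the first two pairs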
Example #33
def show_pulls(labels=None, show_comments=False, state="open", since=None,
               org=False, intext=None, merged=False):
    """
    `labels`: Filters PRs by labels (all are shown if None is specified)
    `show_comments`: shows the last 5 comments on each PR, if True
    `state`: Filter PRs by this state (either 'open' or 'closed')
    `since`: a datetime representing the earliest time from which to pull information.
             All PRs regardless of time are shown if None is specified.
    `org`: If True, sorts by PR author affiliation
    `intext`: specify 'int' (internal) or 'ext' (external) pull request
    `merged`: If True and state="closed", shows only PRs that were merged.
    """
    num = 0
    adds = 0
    deletes = 0
    repos = [ r for r in Repo.from_yaml() if r.track_pulls ]
    for repo in repos:
        issues = get_pulls(repo.name, labels, state, since, org=org or intext, pull_details="all")

        category = None
        for issue in issues:
            issue.repo = repo.nick
            if intext is not None:
                if issue.intext != intext:
                    continue
            if state == 'closed' and merged and issue.combinedstate != 'merged':
                # If we're filtering on closed PRs, and only want those that are merged,
                # skip ones that were closed without merge.
                continue
            if state == 'closed' and since:
                # If this PR was closed prior to the last `since` interval of days, continue on
                # (it may have been *updated* - that is, referenced or commented on - more recently,
                #  but we just want to see what's been merged or closed in the past "since" days)
                if issue.closed_at < since:
                    continue

            if org and issue.org != category:
                # new category! print category header
                category = issue.org
                print("-- {category} ----".format(category=category))

            print(fformat(ISSUE_FMT, issue))
            num += 1
            adds += issue.additions
            deletes += issue.deletions

            if show_comments:
                comments = get_comments(issue)
                last_five_comments = reversed(more_itertools.take(5, comments))
                for comment in last_five_comments:
                    print(fformat(COMMENT_FMT, comment))

    print()
    print("{num} pull requests; {adds}+ {deletes}-".format(num=num, adds=adds, deletes=deletes))
Example #34
def pretty_print_head(dict_, count=10):
    '''
    Pretty print some items of a dict
    
    For an unordered dict, `count` arbitrary items will be printed. 
    
    Parameters
    ----------
    dict_ : dict
        Dict to print from
    count : int, optional
        Number of items to print.
        
    Raises
    ------
    ValueError
        When ``count < 1``
    '''
    if count < 1:
        raise ValueError('`count` must be at least 1')
    pprint(dict(take(count, dict_.items())))
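For instance:

pretty_print_head({i: i * i for i in range(100)}, count=3)
# prints {0: 0, 1: 1, 2: 4}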
Example #35
def show_pulls(labels=None, show_comments=False, state="open", since=None,
               org=False, intext=None, merged=False):
    """
    `labels`: Filters PRs by labels (all are shown if None is specified)
    `show_comments`: shows the last 5 comments on each PR, if True
    `state`: Filter PRs by this state (either 'open' or 'closed')
    `since`: a datetime representing the earliest time from which to pull information.
             All PRs regardless of time are shown if None is specified.
    `org`: If True, sorts by PR author affiliation
    `intext`: specify 'int' (internal) or 'ext' (external) pull request
    `merged`: If True and state="closed", shows only PRs that were merged.
    """
    num = 0
    adds = 0
    deletes = 0
    repos = [ r for r in Repo.from_yaml() if r.track_pulls ]
    for repo in repos:
        issues = get_pulls(repo.name, labels, state, since, org=org or intext, pull_details="all")

        category = None
        for issue in issues:
            issue["repo"] = repo.nick
            if intext is not None:
                if issue["intext"] != intext:
                    continue
            if state == 'closed' and merged and issue['combinedstate'] != 'merged':
                # If we're filtering on closed PRs, and only want those that are merged,
                # skip ones that were closed without merge.
                continue
            if state == 'closed' and since:
                # If this PR was closed prior to the last `since` interval of days, continue on
                # (it may have been *updated* - that is, referenced or commented on - more recently,
                #  but we just want to see what's been merged or closed in the past "since" days)
                closed_at = dateutil.parser.parse(issue["closed_at"][:-1])  # Remove TZ information
                if closed_at < since:
                    continue

            if org and issue.get("org") != category:
                # new category! print category header
                category = issue["org"]
                print("-- {category} ----".format(category=category))

            if 0:
                import pprint
                pprint.pprint(issue.obj)
            print(issue.format(ISSUE_FMT))
            num += 1
            adds += issue['pull']['additions']
            deletes += issue['pull']['deletions']

            if show_comments:
                comments_url = URLObject(issue['comments_url'])
                comments_url = comments_url.set_query_param("sort", "created")
                comments_url = comments_url.set_query_param("direction", "desc")
                comments = paginated_get(comments_url)
                last_five_comments = reversed(more_itertools.take(5, comments))
                for comment in last_five_comments:
                    print(comment.format(COMMENT_FMT))

    print()
    print("{num} pull requests; {adds}+ {deletes}-".format(num=num, adds=adds, deletes=deletes))
Example #36
 def test_simple_take(self):
     """Test basic usage"""
     t = mi.take(5, range(10))
     self.assertEqual(t, [0, 1, 2, 3, 4])
Example #37
 def test_null_take(self):
     """Check the null case"""
     t = mi.take(0, range(10))
     self.assertEqual(t, [])
Example #38
 def test_negative_take(self):
     """Make sure taking negative items results in a ValueError"""
     self.assertRaises(ValueError, lambda: mi.take(-3, range(10)))