Example #1
    def check_recall(self):
        c = self._cursor()
        print("Loading questions and guesses")
        raw_questions = seq(qdb.QuestionDatabase(QB_QUESTION_DB).all_questions().values())
        guesses = seq(list(
            c.execute('SELECT * FROM guesses WHERE guesser="deep" AND fold = "devtest"'))) \
            .map(lambda g: Guess(*g)).cache()

        positions = guesses.map(lambda g: (g.question, g.sentence)) \
            .reduce_by_key(max).to_dict()

        guess_lookup = guesses.filter(lambda g: g.sentence == positions[g.question]) \
            .group_by(lambda x: x.question) \
            .map(lambda g: (g[0], seq(g[1]).map(lambda x: x.page).set())).to_dict()

        questions = raw_questions. \
            filter(lambda q: q.qnum in guess_lookup and q.fold != 'train').cache()

        correct = 0
        total = 0
        wrong = []

        print("Computing DAN recall")
        for q in questions:
            if q.page in guess_lookup[q.qnum]:
                correct += 1
            else:
                wrong.append(q)
            total += 1
        return correct / total, total, wrong
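
The two pipelines above do the heavy lifting: reduce_by_key(max) keeps the deepest sentence position seen per question, and group_by collects the set of pages guessed at that final position. A minimal sketch of the same pattern on toy (question, sentence, page) tuples, independent of the question database:

from functional import seq

# toy guesses as (question_id, sentence_position, guessed_page)
guesses = seq([(1, 0, 'A'), (1, 1, 'B'), (1, 1, 'C'), (2, 0, 'A')])

# deepest sentence position seen for each question
positions = guesses.map(lambda g: (g[0], g[1])).reduce_by_key(max).to_dict()
# {1: 1, 2: 0}

# set of pages guessed at that final position, keyed by question
guess_lookup = guesses.filter(lambda g: g[1] == positions[g[0]]) \
    .group_by(lambda g: g[0]) \
    .map(lambda kv: (kv[0], seq(kv[1]).map(lambda g: g[2]).set())).to_dict()
# {1: {'B', 'C'}, 2: {'A'}}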
Example #2
    def submit(self):
        print("Buzzed questions")
        print(self.buzzed_questions)
        print("Non-buzzed questions (should be empty)")
        print(self.questions)
        print("Submitting answers")
        for q in self.buzzed_questions:
            url = 'http://{domain}/qb-api/v1/answer/{q_id}'.format(
                domain=self.domain, q_id=q.id)
            response = requests.post(url, data={
                'user_id': self.user_id,
                'api_key': self.api_key,
                'guess': q.guess
            })
            if response.status_code == 200:
                print("Question submitted")
            else:
                print("Error on question submission")

        print("Printing statistics")
        guess_set = set()
        saved_guesses = []
        for q in self.buzzed_questions:
            for g in q.all_guesses:
                guess_set.add(g[1])
            saved_guesses.append((q.id, q.guess, q.all_guesses))
            print('qid: {0} guess: {1} position: {2} text: {3} all_guesses: {4}'.format(
                q.id, q.guess, q.position, q.text, q.all_guesses))
        print("All guesses")
        print(len(guess_set))
        print(guess_set)
        seq(saved_guesses).to_json('/tmp/stream_results.json')
Example #3
 def test_for_each(self):
     l = [1, 2, 3, "abc", {1: 2}, {1, 2, 3}]
     result = []
     def f(e):
         result.append(e)
     seq(l).for_each(f)
     self.assertEqual(result, l)
Example #4
    def test_to_sqlite3_namedtuple(self):
        elements = [(1, 'Tom'), (2, 'Jack'), (3, 'Jane'), (4, 'Stephan')]

        # test namedtuple with the same order as column
        with sqlite3.connect(':memory:') as conn:
            user = collections.namedtuple('user', ['id', 'name'])

            conn.execute('CREATE TABLE user (id INT, name TEXT);')
            conn.commit()

            table_name = 'user'
            seq(elements).map(lambda u: user(u[0], u[1])).to_sqlite3(conn, table_name)
            result = seq.sqlite3(conn, 'SELECT id, name FROM user;').to_list()
            self.assertListEqual(elements, result)

        # test namedtuple with different order
        with sqlite3.connect(':memory:') as conn:
            user = collections.namedtuple('user', ['name', 'id'])

            conn.execute('CREATE TABLE user (id INT, name TEXT);')
            conn.commit()

            table_name = 'user'
            seq(elements).map(lambda u: user(u[1], u[0])).to_sqlite3(conn, table_name)
            result = seq.sqlite3(conn, 'SELECT id, name FROM user;').to_list()
            self.assertListEqual(elements, result)
Example #5
 def test_add(self):
     l0 = seq([1, 2, 3]).map(lambda x: x)
     l1 = seq([4, 5, 6])
     l2 = [4, 5, 6]
     expect = [1, 2, 3, 4, 5, 6]
     self.assertEqual(l0 + l1, expect)
     self.assertEqual(l0 + l2, expect)
Example #6
 def test_list(self):
     l = [1, 2, 3, "abc", {1: 2}, {1, 2, 3}]
     result = seq(l).list()
     self.assertEqual(result, l)
     self.assertTrue(isinstance(result, list))
     result = seq(iter([0, 1, 2])).to_list()
     self.assertIsInstance(result, list)
Example #7
 def test_join(self):
     A = [('a', 11), ('b', 12), ('c', 13)]
     B = [('b', 22), ('c', 23), ('d', 24)]
     seq(A).inner_join(B).sorted()
     assert seq(A).join(B, 'inner').sorted() == seq(A).inner_join(B).sorted()
     assert seq(A).join(B, 'left').sorted() == seq(A).left_join(B).sorted()
     assert seq(A).join(B, 'outer').sorted() == seq(A).outer_join(B).sorted()
     assert seq(A).join(B, 'right').sorted() == seq(A).right_join(B).sorted()
Example #8
def compute_statistics(questions: Dict[int, Answer]) -> Sequence:
    n_questions = len(questions)
    empty_set = [(a, 0) for a in Answer]
    results = seq(questions.values())\
        .map(lambda x: (x, 1))
    results = (results + seq(empty_set)).reduce_by_key(lambda x, y: x + y)\
        .map(lambda kv: (str(kv[0]), kv[1] / n_questions if kv[1] > 0 else 0))
    return results
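
Appending empty_set before reduce_by_key is what guarantees every Answer value appears in the result, even with zero occurrences. A self-contained sketch of the zero-fill pattern, using a hypothetical three-member Answer enum and plain counts instead of proportions:

from enum import Enum
from functional import seq

class Answer(Enum):  # hypothetical stand-in for the real enum
    YES = 1
    NO = 2
    SKIP = 3

answers = [Answer.YES, Answer.YES, Answer.NO]
empty_set = [(a, 0) for a in Answer]

counts = (seq(answers).map(lambda x: (x, 1)) + seq(empty_set)) \
    .reduce_by_key(lambda x, y: x + y).to_dict()
# {Answer.YES: 2, Answer.NO: 1, Answer.SKIP: 0}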
Example #9
 def test_accumulate(self):
     f = lambda x, y: x + y
     l_char = ["a", "b", "c"]
     expect_char = ["a", "ab", "abc"]
     l_num = [1, 2, 3]
     expect_num = [1, 3, 6]
     self.assertEqual(seq(l_char).accumulate(), expect_char)
     self.assertEqual(seq(l_num).accumulate(), expect_num)
Example #10
 def test_to_file_without_delim(self):
     path = 'test-wo-delim.txt'
     seq(('a', 'b', 'c'), (11, 12, 13), (21, 22, 23)).to_file(path)
     exp = _normalize('''
         [('a', 'b', 'c'), (11, 12, 13), (21, 22, 23)]
     ''')
     act = _normalize(_read_file(path))
     assert exp == act
Example #11
 def test_to_json_array(self):
     path = 'test-array.json'
     seq(('a', 1), ('b', 2), ('c', 3)).to_json(path, root_array=True)
     exp = _normalize('''
         [["a", 1], ["b", 2], ["c", 3]]
     ''')
     act = _normalize(_read_file(path))
     assert exp == act
Example #12
    def _log_msg(cls, f, elapsed_time, *args, **kwargs):
        arg_string = (seq(args).map(repr) +
                      seq(kwargs.items()).map(
                          lambda kv: '{!r}={!r}'.format(kv[0], kv[1]))
                      ).make_string(', ')

        log_msg = "{}({}) - execution time: {:.2f} ".format(f.__name__, arg_string, elapsed_time)
        return log_msg
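
The concatenated seq joins the repr of each positional argument with the key=value pairs into one comma-separated string. A sketch of the arg_string construction alone, with toy values:

from functional import seq

args, kwargs = (1, 'a'), {'x': 2}
arg_string = (seq(args).map(repr) +
              seq(kwargs.items()).map(lambda kv: '{!r}={!r}'.format(kv[0], kv[1]))
              ).make_string(', ')
print(arg_string)  # 1, 'a', 'x'=2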
Example #13
    def test_to_dict(self):
        res = seq([('a', 1), ('b', 2)]).to_dict()
        assert res == {'a': 1, 'b': 2}
        assert res.get(10) is None

        res = seq([('a', 1), ('b', 2)]).to_dict(5)
        assert res == {'a': 1, 'b': 2}
        assert res[10] == 5
Example #14
 def test_take(self):
     s = seq([1, 2, 3, 4, 5, 6])
     expect = [1, 2, 3, 4]
     result = s.take(4)
     self.assertIteratorEqual(result, expect)
     self.assert_type(result)
     self.assertIteratorEqual(s.take(0), seq([]))
     self.assertIteratorEqual(s.take(-1), seq([]))
Example #15
 def test_last_option(self):
     l = seq([1, 2, 3]).map(lambda x: x)
     self.assertEqual(l.last_option(), 3)
     l = seq([1, 2, [3, 4]]).map(lambda x: x)
     self.assertEqual(l.last_option(), [3, 4])
     self.assert_type(l.last_option())
     l = seq([])
     self.assertIsNone(l.last_option())
Example #16
 def test_head_option(self):
     l = seq([1, 2, 3]).map(lambda x: x)
     self.assertEqual(l.head_option(), 1)
     l = seq([[1, 2], 3, 4]).map(lambda x: x)
     self.assertEqual(l.head_option(), [1, 2])
     self.assert_type(l.head_option())
     l = seq([])
     self.assertIsNone(l.head_option())
Example #17
 def test_sliding(self):
     l = seq([1, 2, 3, 4, 5, 6, 7])
     expect = [[1, 2], [2, 3], [3, 4], [4, 5], [5, 6], [6, 7]]
     self.assertIteratorEqual(l.sliding(2), expect)
     l = seq([1, 2, 3])
     expect = [[1, 2], [3]]
     self.assertIteratorEqual(l.sliding(2, 2), expect)
     expect = [[1, 2]]
     self.assertIteratorEqual(l.sliding(2, 3), expect)
Example #18
 def test_first(self):
     l = seq([1, 2, 3]).map(lambda x: x)
     self.assertEqual(l.first(), 1)
     l = seq([[1, 2], 3, 4]).map(lambda x: x)
     self.assertEqual(l.first(), [1, 2])
     self.assert_type(l.first())
     l = seq([])
     with self.assertRaises(IndexError):
         l.head()
Example #19
def consume_all(fileName):
    def foldHelp(acum, v):
        acum[v['title']] = v['text']
        return acum

    with open("datafiles/fmeasure.json", "w+") as out:
        out.write(jjson.dumps(
            seq(feedparser.parse(fileName).entries).map(process_text).fold_left({}, foldHelp),
            indent=1))

    with open("datafiles/inorder.txt", "w+") as out:
        seq(feedparser.parse(fileName).entries).map(lambda e: out.write("%s\n" % e.title)).to_list()
Example #20
    def test_to_sqlite3_typerror(self):
        elements = [1, 2, 3]
        with sqlite3.connect(':memory:') as conn:
            conn.execute('CREATE TABLE user (id INT, name TEXT);')
            conn.commit()

            table_name = 'user'
            with self.assertRaises(TypeError):
                seq(elements).to_sqlite3(conn, table_name)
Example #21
def remove_children_of(trie: Trie, keys):
    def delete_key(key):
        del trie[key]

    seq(keys) \
        .filter(lambda key: key in trie) \
        .flat_map(lambda key: trie.keys(prefix=key)) \
        .filter(lambda key: key not in keys) \
        .for_each(delete_key)
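
The first filter skips keys that are not in the trie (so keys(prefix=...) cannot fail), and the second protects the listed roots themselves from deletion. A usage sketch, assuming a pygtrie-style trie that supports keys(prefix=...):

import pygtrie

trie = pygtrie.CharTrie()
for k in ('foo', 'foobar', 'foobaz', 'qux'):
    trie[k] = True

remove_children_of(trie, {'foo'})
# 'foobar' and 'foobaz' are deleted; 'foo' itself and 'qux' survive
print(sorted(trie.keys()))  # ['foo', 'qux']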
Example #22
    def test_zip(self):
        res = seq(1, 2, 3).zip([4, 5, 6])
        assert res == [(1, 4), (2, 5), (3, 6)]

        res = seq(1, 2, 3).zip([4, 5])
        assert res == [(1, 4), (2, 5)]

        res = seq(1, 2).zip([4, 5, 6])
        assert res == [(1, 4), (2, 5)]
Example #23
 def test_product(self):
     l = [2, 2, 3]
     self.assertEqual(12, seq(l).product())
     self.assertEqual(96, seq(l).product(lambda x: x * 2))
     s = seq([])
     self.assertEqual(1, s.product())
     self.assertEqual(2, s.product(lambda x: x * 2))
     s = seq([5])
     self.assertEqual(5, s.product())
     self.assertEqual(10, s.product(lambda x: x * 2))
Example #24
 def test_make_string(self):
     l = [1, 2, 3]
     expect1 = "123"
     expect2 = "1:2:3"
     s = seq(l)
     self.assertEqual(expect1, s.make_string(""))
     self.assertEqual(expect2, s.make_string(":"))
     s = seq([])
     self.assertEqual("", s.make_string(""))
     self.assertEqual("", s.make_string(":"))
Example #25
 def test_to_jsonl(self):
     path = 'test.jsonl'
     seq(('a', 1), ('b', 2), ('c', 3)).to_jsonl(path)
     exp = _normalize('''
         ["a", 1]
         ["b", 2]
         ["c", 3]
     ''')
     act = _normalize(_read_file(path))
     assert exp == act
Example #26
    def test_to_sqlite3_tuples(self):
        conn = sqlite3.connect(':memory:')
        c = conn.cursor()
        c.execute('''CREATE TABLE demo (color TEXT, value REAL)''')
        conn.commit()

        seq(('red', 1), ('green', 2), ('blue', 3)).to_sqlite3(conn, 'INSERT INTO demo (color, value) VALUES (?, ?)')

        res = seq.sqlite3(conn, 'SELECT * FROM demo')
        assert res == [('red', 1), ('green', 2), ('blue', 3)]
Example #27
 def test_to_file_with_delim(self):
     path = 'test-w-delim.txt'
     seq(('a', 'b', 'c'), (11, 12, 13), (21, 22, 23)).to_file(path, delimiter='\n')
     exp = _normalize('''
         ('a', 'b', 'c')
         (11, 12, 13)
         (21, 22, 23)
     ''')
     act = _normalize(_read_file(path))
     assert exp == act
Example #28
 def test_cache(self):
     calls = []
     func = lambda x: calls.append(x)
     result = seq(1, 2, 3).map(func).cache().map(lambda x: x).to_list()
     self.assertEqual(len(calls), 3)
     self.assertEqual(result, [None, None, None])
     result = seq(1, 2, 3).map(lambda x: x).cache()
     self.assertEqual(repr(result._lineage), 'Lineage: sequence -> map(<lambda>) -> cache')
     result = seq(1, 2, 3).map(lambda x: x).cache(delete_lineage=True)
     self.assertEqual(repr(result._lineage), 'Lineage: sequence')
Example #29
 def test_to_csv(self):
     path = 'test.csv'
     seq(('a', 'b', 'c'), (11, 12, 13), (21, 22, 23)).to_csv(path)
     exp = _normalize('''
         a,b,c
         11,12,13
         21,22,23
     ''')
     act = _normalize(_read_file(path))
     assert exp == act
Example #30
 def test_zip_with_index(self):
     l = [2, 3, 4]
     e = [(2, 0), (3, 1), (4, 2)]
     result = seq(l).zip_with_index()
     self.assertIteratorEqual(result, e)
     self.assert_type(result)
     e = [(2, 5), (3, 6), (4, 7)]
     result = seq(l).zip_with_index(5)
     self.assertIteratorEqual(result, e)
     self.assert_type(result)
Example #31
 def get_best(self, fit_too=False):
     fit, op = seq(self.pop.values()).map(lambda op: (op.best_fitness, op)).max_by(lambda tp: tp[0])
     return (fit, op) if fit_too else op
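
Mapping each operator to a (best_fitness, operator) tuple first keeps both values available after max_by picks the pair with the largest fitness. A toy sketch of the same pattern, with a hypothetical fitness-per-operator dict:

from functional import seq

pop = {'a': 0.3, 'b': 0.9, 'c': 0.5}  # hypothetical best_fitness per operator
fit, op = seq(pop.items()).map(lambda kv: (kv[1], kv[0])).max_by(lambda tp: tp[0])
print(fit, op)  # 0.9 b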
Example #32
    (65, 17, 82),
    (66, 74, 67),
    (67, 10, 68),
    (68, 48, 19),
    (69, 83, 86),
    (70, 84, 94),
]
clusters = seq("""
1 1 15 29 43 57 -1
2 2 16 30 44 58 -1
3 3 17 31 45 59 -1
4 4 18 32 46 60 -1
5 5 19 33 47 61 -1
6 6 20 34 48 62 -1
7 7 21 35 49 63 -1
8 8 22 36 50 64 -1
9 9 23 37 51 65 -1
10 10 24 38 52 66 -1
11 11 25 39 53 67 -1
12 12 26 40 54 68 -1
13 13 27 41 55 69 -1
14 14 28 42 56 70 -1
""".strip().split('\n')).map(lambda row: seq(row.split()[1:-1]).map(int).map(
    lambda n: n - 1).to_set()).to_list()
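
Each cluster row drops the leading cluster id and the trailing -1 terminator, converts the remaining tokens to ints, and shifts them to 0-based node indices. For the first row:

from functional import seq

row = '1 1 15 29 43 57 -1'
print(seq(row.split()[1:-1]).map(int).map(lambda n: n - 1).to_set())
# {0, 14, 28, 42, 56}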

G = nx.Graph()
G.add_nodes_from(range(len(node_cords)), weight=1)
for u in range(len(node_cords)):
    for v in range(len(node_cords)):
        if u >= v:
            continue
Example #33
def get_frame_with_lines(im: np.ndarray) -> Frame:
    scale = min(MAX_SIDE / im.shape[0], MAX_SIDE / im.shape[1])
    resized = cv2.resize(im, (0, 0), fx=scale, fy=scale)

    if SHOW_OUTPUT:
        plt.imshow(cv2.cvtColor(resized, cv2.COLOR_BGR2RGB))
        plt.show()

    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)

    if SHOW_OUTPUT:
        plt.imshow(gray, 'gray')
        plt.show()

    gray = cv2.GaussianBlur(gray, ksize=(5, 5), sigmaX=0)

    # compute the horizontal and vertical partial derivatives (Sobel)
    sobelx = cv2.Sobel(gray, cv2.CV_16S, 1, 0, ksize=3)
    sobely = cv2.Sobel(gray, cv2.CV_16S, 0, 1, ksize=3)
    abs_gradientx = cv2.convertScaleAbs(sobelx)
    abs_gradienty = cv2.convertScaleAbs(sobely)
    # combine the two in equal proportions
    gray = cv2.addWeighted(abs_gradientx, 0.5, abs_gradienty, 0.5, 0)

    gray = cv2.GaussianBlur(gray, ksize=(5, 5), sigmaX=0)

    gray = cv2.Canny(gray, threshold1=0, threshold2=50, apertureSize=3)

    if SHOW_OUTPUT:
        plt.imshow(gray, 'gray')
        plt.show()

    lines = cv2.HoughLinesP(gray, rho=1, theta=np.pi / 180, threshold=80, minLineLength=100, maxLineGap=10)

    imres = None
    if SHOW_OUTPUT:
        im2 = resized.copy()
        for line in lines:
            cv2.line(im2, (line[0][0], line[0][1]), (line[0][2], line[0][3]), (0, 0, 255), 3)
        plt.imshow(cv2.cvtColor(im2, cv2.COLOR_BGR2RGB))
        plt.show()

        imres = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)

    intersections = get_intersections(lines)
    if SHOW_OUTPUT:
        im2 = resized.copy()
        for p in intersections:
            cv2.circle(im2, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

        plt.imshow(cv2.cvtColor(im2, cv2.COLOR_BGR2RGB))
        plt.show()

    intersections = simplify_intersections(intersections)
    if SHOW_OUTPUT:
        im2 = resized.copy()
        for p in intersections:
            cv2.circle(im2, (int(p[0]), int(p[1])), 3, (0, 0, 255), -1)

        plt.imshow(cv2.cvtColor(im2, cv2.COLOR_BGR2RGB))
        plt.show()

    points = [(0., 0.), (0., 0.), (0., 0.), (0., 0.)]
    angle = 0
    if len(intersections) > 4:
        points, angle = magic(intersections, im.shape[0] * scale, im.shape[1] * scale)

        if SHOW_OUTPUT:
            for p in points:
                cv2.circle(imres, (int(p[0]), int(p[1])), 3, (255, 0, 0), thickness=-1)

    if SHOW_OUTPUT:
        plt.imshow(imres)
        plt.show()

    # Undo scale
    points = (seq(points)
              .map(lambda point: (int(point[0] / scale), int(point[1] / scale)))
              .to_list())

    return Frame(points, angle)
Example #34
        if j == 0:
            y = max(y, 0)
        elif j == current_m.shape[1] - 1:
            y = min(y, 0)

        if current_m[i + x, j + y] == -1:
            if next_m[i + x, j + y] == -1:
                next_m[i + x, j + y] = current_m[i, j]
            elif next_m[i + x, j + y] != current_m[i, j]:
                next_m[i + x, j + y] = -2


coords = []
with open('input.txt', 'r') as f:
    for line in f:
        coords.append(seq(line.strip().split(',')).map(int).to_list())

tlx = seq(coords).map(lambda c: c[0]).min()
tly = seq(coords).map(lambda c: c[1]).min()
brx = seq(coords).map(lambda c: c[0]).max()
bry = seq(coords).map(lambda c: c[1]).max()

width = brx - tlx + 1
height = bry - tly + 1

matrix = np.full((width, height), -1)

for pos, coord in enumerate(coords):
    matrix[coord[0] - tlx, coord[1] - tly] = pos

sol = grow_loop(matrix)
Example #35
 def get_fitlessness(self):
     """
     Returns a list of all chromosomes (solutions) in the generation that do not have fitness values yet
     :return:
     """
     return seq(self.pop.values()).flat_map(lambda op: op.get_fitlessness()).to_list()
Example #36
def showDataset(dataset, title, cols=10, figsize=(15, 15), y=0.6):
    showImages(
        seq(dataset).select(lambda data: data.image).to_list(), title, cols,
        figsize, y)
Example #37
 def has_pos(self, obj):
     return (seq(self.fields)
             .map(partial(getattr, obj))
             .filter(self.has_pos_filter))
Example #38
def total_tx_in_values(transaction, a_unspent_tx_outs):
    return seq(transaction['tx_ins'])\
            .map(lambda tx_in : get_tx_in_amount(tx_in, a_unspent_tx_outs))\
            .reduce(lambda a, b : (a + b), 0)
Example #39
def has_valid_tx_ins(transaction, a_unspent_tx_outs):
    return seq(transaction['tx_ins'])\
            .map(lambda tx_in : validate_tx_in(tx_in, transaction, a_unspent_tx_outs))\
            .reduce(lambda a, b : a and b, True)
Example #40
def get_consumed_tx_outs(new_transactions):
    return seq(new_transactions)\
            .map(lambda t : t['tx_ins'])\
            .reduce(lambda a, b : a + b, [])\
            .map(lambda tx_in : new_unspent_tx_out(tx_in['tx_out_id'], tx_in['tx_out_index'], '',0))
Example #41
def solve_cds_christofides(G, cds_fn, start=None, all_paths=None):
    cds = cds_fn(G)
    if not nx.is_dominating_set(G, cds) or \
            not g_utils.is_connected_subset(G, cds):
        raise Exception('CDS fn did not return a valid CDS')

    # christofides
    # all pairs shortest dist graph (of original)
    if all_paths:
        pred, dist, path = all_paths
    else:
        pred, dist, path = g_utils.floyd_warshall_all(G)

    # construct CDS tree
    G_tree = g_utils.minimum_connected_subset_spanning_tree(G, cds)

    # get odd degree nodes in trees
    odd = seq(G_tree.nodes()) \
            .filter(lambda n: G_tree.degree(n) % 2 == 1) \
            .to_list()

    G_matching = nx.Graph()
    G_matching.add_nodes_from(odd)
    max_dist = 0
    for a in odd:
        for b in odd:
            max_dist = max(max_dist, dist[a][b])
    for a in odd:
        for b in odd:
            # invert weights to find *minimal* matching (+1 for nonzero)
            G_matching.add_edge(a, b, weight=1 + max_dist - dist[a][b])
    match_edges = nx.max_weight_matching(G_matching)

    # eulerian multigraph
    G_euler = nx.MultiGraph(G_tree)
    for a, b in match_edges:
        G_euler.add_edge(a, b, weight=dist[a][b])
    if not nx.is_eulerian(G_euler):
        raise Exception('New G_euler not eulerian')

    # All vertices now have even degree. We construct a eulerian tour.
    # This doesn't contain all the "bridge" vertices we need for the
    # added "indices". Hence it is called "stops" as in "tour stops"
    # rather than "tour".
    if len(cds) > 1:
        stops = seq(nx.eulerian_circuit(G_euler)) \
                .map(lambda e: e[0]) \
                .to_list()
    else:
        stops = list(cds) # singleton or empty

    # Add the starting point if it is not added already
    stops = g_utils.insert_start_into_stops(stops, dist, path, start)

    # TODO smart cutting short
    stops = g_utils.remove_dupes(stops)

    # remove extra from cds (connected dominating set) to make ds (dominating set)
    ds = set(cds)
    while True:
        can_remove = seq(ds) \
                .filter(lambda n: n != start and nx.is_dominating_set(G, ds - set([ n ]))) \
                .to_list()
        if not can_remove:
            break
        to_remove = seq(can_remove) \
                .max_by(lambda n: G.nodes[n]['weight'] \
                        + dist[stops[stops.index(n) - 1]][n] + dist[n][stops[(stops.index(n) + 1) % len(stops)]] \
                        - dist[stops[stops.index(n) - 1]][stops[(stops.index(n) + 1) % len(stops)]])
        # print('removing ' + str(to_remove))
        ds.remove(to_remove)
        stops.remove(to_remove)

    if len(stops) > 1:
        tour = seq(pairwise(stops + [ stops[0] ])) \
                .flat_map(lambda e: path(e[0], e[1])) \
                .to_list()
    else:
        tour = stops[:]

    return tour, ds
Example #42
 def cache_enum_type(self, enum_class):
     items = enum_class.__members__.items()
     self.cached_types[enum_class] = seq(items) \
         .map(lambda t0: (t0[1], seq(items).map(lambda t1: 1 if t1[1] == t0[1] else 0).to_list())) \
         .to_dict()
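
The nested seq builds a one-hot vector per enum member: 1 at the member's own position, 0 everywhere else. A sketch of the resulting mapping for a hypothetical three-member enum:

from enum import Enum
from functional import seq

class Color(Enum):  # hypothetical enum
    RED = 0
    GREEN = 1
    BLUE = 2

items = Color.__members__.items()
one_hot = seq(items) \
    .map(lambda t0: (t0[1], seq(items).map(lambda t1: 1 if t1[1] == t0[1] else 0).to_list())) \
    .to_dict()
# {Color.RED: [1, 0, 0], Color.GREEN: [0, 1, 0], Color.BLUE: [0, 0, 1]}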
Example #43
 def _get_hash(cls, tokens, n):
     return HashFactory.get_hash(
         seq(tokens).take(n).order_by(lambda token: token.index).map(
             lambda token: token.token))
Example #44
 def run(self, obj):
     if not self.has_pos(obj):
         return
     print(obj.question_id)
     seq(self.fields).for_each(self.setfields(obj))
     obj.save()
Example #45
def assign_scaleset_role(onefuzz_instance_name: str, scaleset_name: str):
    """ Allows the nodes in the scaleset to access the service by assigning their managed identity to the ManagedNode Role """

    onefuzz_service_appId = query_microsoft_graph(
        method="GET",
        resource="applications",
        params={
            "$filter": "displayName eq '%s'" % onefuzz_instance_name,
            "$select": "appId",
        },
    )

    if len(onefuzz_service_appId["value"]) == 0:
        raise Exception("onefuzz app registration not found")
    appId = onefuzz_service_appId["value"][0]["appId"]

    onefuzz_service_principals = query_microsoft_graph(
        method="GET",
        resource="servicePrincipals",
        params={"$filter": "appId eq '%s'" % appId},
    )

    if len(onefuzz_service_principals["value"]) == 0:
        raise Exception("onefuzz app service principal not found")
    onefuzz_service_principal = onefuzz_service_principals["value"][0]

    scaleset_service_principals = query_microsoft_graph(
        method="GET",
        resource="servicePrincipals",
        params={"$filter": "displayName eq '%s'" % scaleset_name},
    )
    if len(scaleset_service_principals["value"]) == 0:
        raise Exception("scaleset service principal not found")
    scaleset_service_principal = scaleset_service_principals["value"][0]

    managed_node_role = (
        seq(onefuzz_service_principal["appRoles"])
        .filter(lambda x: x["value"] == OnefuzzAppRole.ManagedNode.value)
        .head_option()
    )

    if not managed_node_role:
        raise Exception(
            "ManagedNode role not found int the onefuzz application registration. Please redeploy the instance"
        )

    assignments = query_microsoft_graph(
        method="GET",
        resource="servicePrincipals/%s/appRoleAssignments" %
        scaleset_service_principal["id"],
    )

    # check if the role is already assigned
    role_assigned = seq(assignments["value"]).find(
        lambda assignment: assignment["appRoleId"] == managed_node_role["id"])
    if not role_assigned:
        query_microsoft_graph(
            method="POST",
            resource="servicePrincipals/%s/appRoleAssignedTo" %
            scaleset_service_principal["id"],
            body={
                "principalId": scaleset_service_principal["id"],
                "resourceId": onefuzz_service_principal["id"],
                "appRoleId": managed_node_role["id"],
            },
        )
Example #46
def groupDataset(dataset):
    groups = seq(dataset).group_by(lambda data: data.label)
    return groups
Example #47
 def __init__(self, video_path: str):
     self.video_path = video_path
     self.files = (seq(os.listdir(video_path))
                   .filter(lambda f: f.endswith('.jpg'))
                   .map(lambda p: os.path.join(video_path, p))
                   .sorted()
                   .to_list())
Example #48
                alone.remove(claimId)
    else:
        innerMap[row] = {claim.claimId}


def appendClaimToClaimMap(claimMap, alone, claim):
    for c in range(0, claim.colLen):
        for r in range(0, claim.rowLen):
            appendPositionToClaimMap(claimMap, alone, claim, claim.col + c,
                                     claim.row + r)


claims = (
    seq(sys.stdin)
    .map(lambda line: line.rstrip())  # strip the newline that each stdin line keeps
    .filter(lambda line: claimRegex.match(line))  # keep only well-formed claim lines
    .map(lambda line: claimFromEncodedStr(line))
    # save to a list, because sys.stdin cannot be iterated more than once
    .list()
)

claimMap = {}
alone = set()
for claim in claims:
    alone.add(claim.claimId)

for claim in claims:
    appendClaimToClaimMap(claimMap, alone, claim)

numContestedClaims = 0
for c in claimMap:
    innerMap = claimMap[c]
Example #49
def fed_forward(input_val: Sequence, network: Network) -> Sequence:
    init_feed = seq([(input_val, network.head())])
    return network \
        .tail() \
        .fold_left(init_feed, lambda acc, x: fed_forward_acc_f(acc, x))
Example #50
def tag_positions(tag_count: int):
    """Generates tag positions in QUIC message buffer."""
    return seq(range(0, tag_count)).map(lambda tag_nr: 8 + tag_nr * 8)
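
Each tag slot is 8 bytes wide and the first sits at offset 8 (presumably past a fixed 8-byte header). A quick usage sketch:

print(tag_positions(3).to_list())  # [8, 16, 24]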
Example #51
 def test_tabulate_namedtuple(self):
     sequence_tabulated = seq([Data(1, 2), Data(6, 7)]).tabulate()
     self.assertEqual(sequence_tabulated,
                      '  x    y\n---  ---\n  1    2\n  6    7')
Example #52
def process_ocr_receipts(ocr_receipts):
    return seq(ocr_receipts) \
        .map(lambda ocr_receipt: process_single_text_receipt(ocr_receipt)) \
        .filter(None) \
        .to_list()
Example #53
# Customs questions
from common import INPUT_DIR
from functional import seq

data = []
with open(f"{INPUT_DIR}\\6.txt") as f:
    data = f.read().strip()

n = len(data)
res = 0
for l in data.split("\n\n"):
    res += len(
        seq(l.split("\n")).map(lambda x: set(x)).reduce(
            lambda x, y: x.union(y)))

print(res)
Example #54
def deserialized(network_serialized: NetworkSerialized) -> Network:
    return seq(network_serialized).map(lambda x: layer.deserialized(x))
Example #55
def total_tx_out_values(transaction):
    return seq(transaction['tx_outs'])\
            .map(lambda tx_out : tx_out['amount'])\
            .reduce(lambda a, b : (a + b), 0)
Example #56
        '1_shift_22050_-20',
        '1_shift_22050_40',
        '2',
        '2_noise_4',
        '2_noise_0.25',
        '2_shift_22050_-20',
        '2_shift_22050_40',
        '3',
        '3_noise_4',
        '3_noise_0.25',
        '3_shift_22050_-20',
        '3_shift_22050_40',
    ]

    data = {
        'mfcc': seq(filenames).map(load_file).map(get_mfcc),
        'denoised_mfcc': seq(filenames).map(load_file).map(denoise).map(get_mfcc),
        'recording': seq(filenames).map(get_recording),
        'denoised_recording': seq(filenames).map(denoise_dejavu).map(get_recording)
    }
    print('3')

    for input_type, inputs in data.items():
        pairs = get_combinations(inputs)

        # NB! only select metrics compatible with the data
        result_lists = {
            f'{input_type}_cosine': seq(pairs).map(partial(apply_metric, cosine_dist)),
            f'{input_type}_dtw': seq(pairs).map(partial(apply_metric, dtw_dist)),
            f'{input_type}_split_dtw': seq(pairs).map(partial(apply_metric, split_dtw_dist)),
            f'{input_type}_dejavu': seq(pairs).map(partial(apply_metric, dejavu_similarity))
Example #57
def _get_idf_values(movies_containing_word: Dict[str, int]) -> Dict[str, float]:
    print('_get_idf_values')
    movies = len(movies_containing_word)
    return seq(movies_containing_word.items()) \
        .map(lambda item: (item[0], _get_idf_value(item[1], movies)))
Example #58
def _load_words_by_movies(movies_loader) -> List[Set[str]]:
    print('_load_words_by_movies')
    return seq(movies_loader).map(_get_words_from_lines)
Example #59
    def run(self):

        ardProducts = {}

        with self.input().open('r') as CheckArdProductsFile:
            ardProducts = json.load(CheckArdProductsFile)
    
        # filesToConvert = list(filter(lambda x: os.path.splitext(x)[1] == '.kea', processRawToArdInfo['files']))

        cogTasks = []
        for p in ardProducts["products"]:
            cogTasks.append(CreateCOG(paths=self.paths, 
                product=p, 
                maxCogProcesses=self.maxCogProcesses,
                testProcessing=self.testProcessing))
        
        yield cogTasks
        
        cogProducts = []
        for task in cogTasks:
            with task.output().open('r') as cogInfo:
                cogProducts.append(json.load(cogInfo))

        numFilesToConvert = seq(ardProducts["products"]) \
                        .map(lambda x: x["files"]) \
                        .flatten() \
                        .count(lambda x: os.path.splitext(x)[1] == '.kea')

        numCogProducts = seq(cogProducts) \
                        .map(lambda x: x["files"]) \
                        .flatten() \
                        .count(lambda x: os.path.splitext(x)[1] == '.tif')

        if numFilesToConvert != numCogProducts:
            log.error("""The number of .kea files to convert does not match the number of converted .tif files;
                Expected:
                {}
                Found:
                {}
                Missing:
                {}""".format(numFilesToConvert,
                    numCogProducts,
                    (numFilesToConvert - numCogProducts)
                )
            )
            raise RuntimeError("Not all files were converted from kea to tif files")

        if self.validateCogs:
            validateCogTasks = []
            for p in cogProducts:
                validateCogTasks.append(ValidateCOG(paths=self.paths, 
                    product=p, 
                    maxCogProcesses=self.maxCogProcesses,
                    validateCogScriptDir=self.validateCogScriptDir,
                    testProcessing=self.testProcessing))
            
            yield validateCogTasks

        output = {
            "outputDir": ardProducts["outputDir"],
            "products": cogProducts
            }

        with self.output().open('w') as o:
            json.dump(output, o, indent=4)
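
The two counts above use the same flatten-then-count(predicate) idiom to compare the number of .kea inputs against the number of converted .tif outputs. A toy sketch of the pattern:

from functional import seq

products = [{'files': ['a.kea', 'b.kea']}, {'files': ['c.tif']}]
n_kea = seq(products).map(lambda x: x['files']).flatten() \
    .count(lambda f: f.endswith('.kea'))  # equivalent to the splitext check above
print(n_kea)  # 2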
Example #60
 def sum_binaries(binaries: str):
     return (seq(iter(binaries))
             .filter(lambda e: e == '1')
             .len())  # yapf: disable
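
sum_binaries simply counts the '1' characters in a binary string (a popcount). Assuming it is called as a plain function:

print(sum_binaries('10110'))  # 3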