Example #1
def test_multiset_permutations():
    ans = ['abby', 'abyb', 'aybb', 'baby', 'bayb', 'bbay', 'bbya', 'byab',
           'byba', 'yabb', 'ybab', 'ybba']
    assert [''.join(i) for i in multiset_permutations('baby')] == ans
    assert [''.join(i) for i in multiset_permutations(multiset('baby'))] == ans
    assert list(multiset_permutations([0, 0, 0], 2)) == [[0, 0]]
    assert list(multiset_permutations([0, 2, 1], 2)) == [
        [0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]]

    def test():
        for i in range(1, 7):
            print(i)
            for p in multiset_permutations([0, 0, 1, 0, 1], i):
                print(p)
    assert capture(lambda: test()) == dedent('''\
        1
        [0]
        [1]
        2
        [0, 0]
        [0, 1]
        [1, 0]
        [1, 1]
        3
        [0, 0, 0]
        [0, 0, 1]
        [0, 1, 0]
        [0, 1, 1]
        [1, 0, 0]
        [1, 0, 1]
        [1, 1, 0]
        4
        [0, 0, 0, 1]
        [0, 0, 1, 0]
        [0, 0, 1, 1]
        [0, 1, 0, 0]
        [0, 1, 0, 1]
        [0, 1, 1, 0]
        [1, 0, 0, 0]
        [1, 0, 0, 1]
        [1, 0, 1, 0]
        [1, 1, 0, 0]
        5
        [0, 0, 0, 1, 1]
        [0, 0, 1, 0, 1]
        [0, 0, 1, 1, 0]
        [0, 1, 0, 0, 1]
        [0, 1, 0, 1, 0]
        [0, 1, 1, 0, 0]
        [1, 0, 0, 0, 1]
        [1, 0, 0, 1, 0]
        [1, 0, 1, 0, 0]
        [1, 1, 0, 0, 0]
        6\n''')
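
For reference, multiset_permutations yields each distinct arrangement exactly once, in sorted order, which is exactly what the ans list above encodes. A minimal sketch of that property, contrasting it with itertools.permutations (which repeats arrangements when elements are duplicated):

from itertools import permutations
from sympy.utilities.iterables import multiset_permutations

word = 'baby'
# itertools treats the two b's as distinct, so it yields 4! = 24 tuples
assert len(list(permutations(word))) == 24
# multiset_permutations collapses the duplicates: 4!/2! = 12 arrangements
assert len(list(multiset_permutations(word))) == 12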
Example #2
def test_multiset_permutations():
    ans = ["abby", "abyb", "aybb", "baby", "bayb", "bbay", "bbya", "byab", "byba", "yabb", "ybab", "ybba"]
    assert ["".join(i) for i in multiset_permutations("baby")] == ans
    assert ["".join(i) for i in multiset_permutations(multiset("baby"))] == ans
    assert list(multiset_permutations([0, 0, 0], 2)) == [[0, 0]]
    assert list(multiset_permutations([0, 2, 1], 2)) == [[0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]]
    assert len(list(multiset_permutations("a", 0))) == 1
    assert len(list(multiset_permutations("a", 3))) == 0

    def test():
        for i in range(1, 7):
            print(i)
            for p in multiset_permutations([0, 0, 1, 0, 1], i):
                print(p)

    assert capture(lambda: test()) == dedent(
        """\
        1
        [0]
        [1]
        2
        [0, 0]
        [0, 1]
        [1, 0]
        [1, 1]
        3
        [0, 0, 0]
        [0, 0, 1]
        [0, 1, 0]
        [0, 1, 1]
        [1, 0, 0]
        [1, 0, 1]
        [1, 1, 0]
        4
        [0, 0, 0, 1]
        [0, 0, 1, 0]
        [0, 0, 1, 1]
        [0, 1, 0, 0]
        [0, 1, 0, 1]
        [0, 1, 1, 0]
        [1, 0, 0, 0]
        [1, 0, 0, 1]
        [1, 0, 1, 0]
        [1, 1, 0, 0]
        5
        [0, 0, 0, 1, 1]
        [0, 0, 1, 0, 1]
        [0, 0, 1, 1, 0]
        [0, 1, 0, 0, 1]
        [0, 1, 0, 1, 0]
        [0, 1, 1, 0, 0]
        [1, 0, 0, 0, 1]
        [1, 0, 0, 1, 0]
        [1, 0, 1, 0, 0]
        [1, 1, 0, 0, 0]
        6\n"""
    )
Example #3
def test_multiset_permutations():
    assert ["".join(i) for i in multiset_permutations("baby")] == [
        "abby",
        "abyb",
        "aybb",
        "baby",
        "bayb",
        "bbay",
        "bbya",
        "byab",
        "byba",
        "yabb",
        "ybab",
        "ybba",
    ]
Example #4
def test_nC_nP_nT():
    from sympy.utilities.iterables import (multiset_permutations,
                                           multiset_combinations,
                                           multiset_partitions, partitions,
                                           subsets, permutations)
    from sympy.functions.combinatorial.numbers import (nP, nC, nT, stirling,
                                                       _multiset_histogram,
                                                       _AOP_product)
    from sympy.combinatorics.permutations import Permutation
    from sympy.core.numbers import oo
    from random import choice

    c = string.ascii_lowercase
    for i in range(100):
        s = ''.join(choice(c) for i in range(7))
        u = len(s) == len(set(s))
        try:
            tot = 0
            for i in range(8):
                check = nP(s, i)
                tot += check
                assert len(list(multiset_permutations(s, i))) == check
                if u:
                    assert nP(len(s), i) == check
            assert nP(s) == tot
        except AssertionError:
            print(s, i, 'failed perm test')
            raise ValueError()

    for i in range(100):
        s = ''.join(choice(c) for i in range(7))
        u = len(s) == len(set(s))
        try:
            tot = 0
            for i in range(8):
                check = nC(s, i)
                tot += check
                assert len(list(multiset_combinations(s, i))) == check
                if u:
                    assert nC(len(s), i) == check
            assert nC(s) == tot
            if u:
                assert nC(len(s)) == tot
        except AssertionError:
            print(s, i, 'failed combo test')
            raise ValueError()

    for i in range(1, 10):
        tot = 0
        for j in range(1, i + 2):
            check = nT(i, j)
            tot += check
            assert sum(1 for p in partitions(i, j, size=True)
                       if p[0] == j) == check
        assert nT(i) == tot

    for i in range(1, 10):
        tot = 0
        for j in range(1, i + 2):
            check = nT(range(i), j)
            tot += check
            assert len(list(multiset_partitions(list(range(i)), j))) == check
        assert nT(range(i)) == tot

    for i in range(100):
        s = ''.join(choice(c) for i in range(7))
        u = len(s) == len(set(s))
        try:
            tot = 0
            for i in range(1, 8):
                check = nT(s, i)
                tot += check
                assert len(list(multiset_partitions(s, i))) == check
                if u:
                    assert nT(range(len(s)), i) == check
            if u:
                assert nT(range(len(s))) == tot
            assert nT(s) == tot
        except AssertionError:
            print(s, i, 'failed partition test')
            raise ValueError()

    # tests for Stirling numbers of the first kind that are not tested in the
    # above
    assert [stirling(9, i, kind=1) for i in range(11)
            ] == [0, 40320, 109584, 118124, 67284, 22449, 4536, 546, 36, 1, 0]
    perms = list(permutations(range(4)))
    assert [
        sum(1 for p in perms if Permutation(p).cycles == i) for i in range(5)
    ] == [0, 6, 11, 6, 1] == [stirling(4, i, kind=1) for i in range(5)]
    # http://oeis.org/A008275
    assert [
        stirling(n, k, signed=1) for n in range(10) for k in range(1, n + 1)
    ] == [
        1, -1, 1, 2, -3, 1, -6, 11, -6, 1, 24, -50, 35, -10, 1, -120, 274,
        -225, 85, -15, 1, 720, -1764, 1624, -735, 175, -21, 1, -5040, 13068,
        -13132, 6769, -1960, 322, -28, 1, 40320, -109584, 118124, -67284,
        22449, -4536, 546, -36, 1
    ]
    # http://en.wikipedia.org/wiki/Stirling_numbers_of_the_first_kind
    assert [stirling(n, k, kind=1) for n in range(10)
            for k in range(n + 1)] == [
                1, 0, 1, 0, 1, 1, 0, 2, 3, 1, 0, 6, 11, 6, 1, 0, 24, 50, 35,
                10, 1, 0, 120, 274, 225, 85, 15, 1, 0, 720, 1764, 1624, 735,
                175, 21, 1, 0, 5040, 13068, 13132, 6769, 1960, 322, 28, 1, 0,
                40320, 109584, 118124, 67284, 22449, 4536, 546, 36, 1
            ]
    # http://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
    assert [stirling(n, k, kind=2) for n in range(10)
            for k in range(n + 1)] == [
                1, 0, 1, 0, 1, 1, 0, 1, 3, 1, 0, 1, 7, 6, 1, 0, 1, 15, 25, 10,
                1, 0, 1, 31, 90, 65, 15, 1, 0, 1, 63, 301, 350, 140, 21, 1, 0,
                1, 127, 966, 1701, 1050, 266, 28, 1, 0, 1, 255, 3025, 7770,
                6951, 2646, 462, 36, 1
            ]
    assert stirling(3, 4, kind=1) == stirling(3, 4, kind=2) == 0
    raises(ValueError, lambda: stirling(-2, 2))

    def delta(p):
        if len(p) == 1:
            return oo
        return min(abs(i[0] - i[1]) for i in subsets(p, 2))

    parts = multiset_partitions(range(5), 3)
    d = 2
    assert (sum(1 for p in parts if all(delta(i) >= d for i in p))
            == stirling(5, 3, d=d) == 7)

    # other coverage tests
    assert nC('abb', 2) == nC('aab', 2) == 2
    assert nP(3, 3, replacement=True) == nP('aabc', 3, replacement=True) == 27
    assert nP(3, 4) == 0
    assert nP('aabc', 5) == 0
    assert nC(4, 2, replacement=True) == nC('abcdd', 2, replacement=True) == \
        len(list(multiset_combinations('aabbccdd', 2))) == 10
    assert nC('abcdd') == sum(nC('abcdd', i) for i in range(6)) == 24
    assert nC(list('abcdd'), 4) == 4
    assert nT('aaaa') == nT(4) == len(list(partitions(4))) == 5
    assert nT('aaab') == len(list(multiset_partitions('aaab'))) == 7
    assert nC('aabb' * 3, 3) == 4  # aaa, bbb, abb, baa
    assert dict(_AOP_product((4, 1, 1, 1))) == {
        0: 1,
        1: 4,
        2: 7,
        3: 8,
        4: 8,
        5: 7,
        6: 4,
        7: 1
    }
    # the following was the first t that showed a problem in a previous form of
    # the function, so it's not as random as it may appear
    t = (3, 9, 4, 6, 6, 5, 5, 2, 10, 4)
    assert sum(_AOP_product(t)[i] for i in range(55)) == 58212000
    raises(ValueError, lambda: _multiset_histogram({1: 'a'}))
Example #5
def computeExprTreeActionFixed(cur_strat, opp_action, ordered_groups, N):
    """
    Compute the expression of a strategy against another strategy, knowing that the opponent's action is fixed
    :param cur_strat: current strategy
    :param opp_action: opponent action
    :param ordered_groups: combination of all possible groups of size (N-1) of cur_strat and other_strat
    :param N: size of the group
    :return: array of expressions for each combination of the groups
    """
    nb_states = len(cur_strat[2:])

    expr_all_opponent_nb = []
    print(len(ordered_groups))
    for i in range(N - 1):
        # From 0 up to N-2 co-players with the same strat (the rest of the group plays the opponent strat)
        print("loop ", i)
        opponents = i * [0] + (N - 1 - i) * [1]
        groups_comb = list(multiset_permutations(opponents))
        print(len(groups_comb))
        strat_loop = time.time()
        expr_groups = 0.
        for group in groups_comb:

            expr_all_states = 0.
            for state in range(nb_states):
                root = Tree(state, cur_strat, 1)
                if group[0] == 1:  #If first element has the opponent strat
                    root.buildExpr(opp_action)
                    root.addChildren(opp_action)
                else:
                    root.buildExprVariableActionsStrat(cur_strat)
                    root.addChildrenVariableAction(cur_strat)
                current_level = root.getChildren()

                expr_state_config = root.getExpr()
                for j in range(1, N - 1):
                    if group[j] == 1:
                        next_level = []
                        for child in current_level:
                            child.buildExpr(opp_action)
                            child.addChildren(opp_action)
                            expr_state_config += child.getExpr()
                            for new_child in child.getChildren():
                                next_level.append(new_child)
                        current_level = list(next_level)
                    else:
                        next_level = []
                        for child in current_level:
                            child.buildExprVariableActionsStrat(cur_strat)
                            child.addChildrenVariableAction(cur_strat)
                            expr_state_config += child.getExpr()
                            for new_child in child.getChildren():
                                next_level.append(new_child)
                        current_level = list(next_level)
                expr_all_states += expr_state_config
            expr_all_states /= nb_states
            expr_groups += expr_all_states
        expr_groups /= len(groups_comb)
        # Avg expression of strategy A against a group of A/B's of size N-1 with i B's in it
        print(expr_groups)
        expr_all_opponent_nb.append(expr_groups)
        print("Loop computeExprTreeActionFixed ", i, " of ", N - 1,
              " computed in --- %s seconds --- " % (time.time() - strat_loop))
    print(len(expr_all_opponent_nb))
    return expr_all_opponent_nb
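
The enumeration step above is multiset_permutations(i * [0] + (N - 1 - i) * [1]), which lists every ordering of a group containing i co-players of the focal strategy (0) and N - 1 - i opponents (1). A standalone sketch of just that step (the Tree machinery is project-specific and omitted):

from sympy.utilities.iterables import multiset_permutations

N = 4
for i in range(N - 1):
    opponents = i * [0] + (N - 1 - i) * [1]
    groups_comb = list(multiset_permutations(opponents))
    print(i, groups_comb)
# i=0 -> [[1, 1, 1]]
# i=1 -> [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
# i=2 -> [[0, 0, 1], [0, 1, 0], [1, 0, 0]]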
Example #6
def permutations(points):
    return multiset_permutations(points)
Example #7
def test_multiset_permutations():
    ans = [
        'abby', 'abyb', 'aybb', 'baby', 'bayb', 'bbay', 'bbya', 'byab', 'byba',
        'yabb', 'ybab', 'ybba'
    ]
    assert [''.join(i) for i in multiset_permutations('baby')] == ans
    assert [''.join(i) for i in multiset_permutations(multiset('baby'))] == ans
    assert list(multiset_permutations([0, 0, 0], 2)) == [[0, 0]]
    assert list(multiset_permutations([0, 2, 1], 2)) == [
        [0, 1], [0, 2], [1, 0], [1, 2], [2, 0], [2, 1]]
    assert len(list(multiset_permutations('a', 0))) == 1
    assert len(list(multiset_permutations('a', 3))) == 0
    for nul in ([], {}, ''):
        assert list(multiset_permutations(nul)) == [[]]
        assert list(multiset_permutations(nul, 0)) == [[]]
        # impossible requests give no result
        assert list(multiset_permutations(nul, 1)) == []
        assert list(multiset_permutations(nul, -1)) == []

    def test():
        for i in range(1, 7):
            print(i)
            for p in multiset_permutations([0, 0, 1, 0, 1], i):
                print(p)

    assert capture(lambda: test()) == dedent('''\
        1
        [0]
        [1]
        2
        [0, 0]
        [0, 1]
        [1, 0]
        [1, 1]
        3
        [0, 0, 0]
        [0, 0, 1]
        [0, 1, 0]
        [0, 1, 1]
        [1, 0, 0]
        [1, 0, 1]
        [1, 1, 0]
        4
        [0, 0, 0, 1]
        [0, 0, 1, 0]
        [0, 0, 1, 1]
        [0, 1, 0, 0]
        [0, 1, 0, 1]
        [0, 1, 1, 0]
        [1, 0, 0, 0]
        [1, 0, 0, 1]
        [1, 0, 1, 0]
        [1, 1, 0, 0]
        5
        [0, 0, 0, 1, 1]
        [0, 0, 1, 0, 1]
        [0, 0, 1, 1, 0]
        [0, 1, 0, 0, 1]
        [0, 1, 0, 1, 0]
        [0, 1, 1, 0, 0]
        [1, 0, 0, 0, 1]
        [1, 0, 0, 1, 0]
        [1, 0, 1, 0, 0]
        [1, 1, 0, 0, 0]
        6\n''')
    raises(ValueError, lambda: list(multiset_permutations({0: 3, 1: -1})))
Example #8
def playMusicPuzzle(save_txt=True, save_csv=True, save_mp3=True):
    # all target audio
    fs = sorted(glob('data/*.mp3'))

    # extract mel-spectrogram and name
    target_name, target_fea = [], []
    for f in fs:
        S = read_audio(f)
        target_name.append(f.split('/')[-1][:-4])
        target_fea.append(S)

    # overall possible permutations (brute-force method)
    n = len(fs)
    ps = [p for p in multiset_permutations(np.arange(n))]
    ps = np.array(ps)

    # calculate pairwise-similarity
    with tf.Session() as sess:
        model = SEN(is_train=False)
        sess.run(tf.global_variables_initializer())
        model.saver.restore(sess, 'model/model')
        score = np.zeros((n, n))
        for i in range(n):
            for j in range(n):
                if i != j:
                    batch = Batch()
                    batch.x1 = [target_fea[i]]
                    batch.x2 = [target_fea[j]]
                    batch.y = [[0, 0]]
                    score[i, j] = model.calculate(sess, batch)[0][1]

    # find the best permutation
    output = []
    for p in ps:
        temp = 0
        for z in range(len(p) - 1):
            temp += score[p[z], p[z + 1]]
        output.append(temp)
    best_p = ps[np.argmax(output)]

    # save best permutation (txt)
    if save_txt:
        with open('output/best_permutation.txt', mode='w',
                  encoding='utf-8') as file:
            temp = [target_name[index] for index in best_p]
            file.write('%s' % ('\t'.join(temp)))

    # save pair-wise similarity (csv)
    if save_csv:
        with open('output/output.csv', mode='w',
                  encoding='utf_8_sig') as csvfile:
            writer = csv.writer(csvfile)
            writer.writerow([''] + target_name)
            for n, s in zip(target_name, score):
                temp = [str(ss) for ss in s]
                temp = [n] + temp
                writer.writerow(temp)

    # save concatenated audio based on the best permutation (mp3)
    if save_mp3:
        result = ''
        for index in best_p:
            audio = AudioSegment.from_file(fs[index], format='mp3')
            if result == '':
                result = audio
            else:
                result += audio
        # export once, after all segments have been concatenated
        result.export('output/output.mp3', format='mp3')
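
The best-permutation search above is independent of the audio handling, so it can be exercised on its own. A minimal sketch of the same brute-force step, assuming only a pairwise score matrix is available (best_order is a hypothetical helper, not part of the original script):

import numpy as np
from sympy.utilities.iterables import multiset_permutations

def best_order(score):
    """Return the ordering that maximizes the sum of consecutive
    pairwise scores, by exhaustive search over all permutations."""
    n = score.shape[0]
    best_p, best_val = None, -np.inf
    for p in multiset_permutations(list(range(n))):
        val = sum(score[p[z], p[z + 1]] for z in range(n - 1))
        if val > best_val:
            best_p, best_val = p, val
    return best_p

score = np.array([[0.0, 0.9, 0.1],
                  [0.2, 0.0, 0.8],
                  [0.7, 0.3, 0.0]])
print(best_order(score))  # [0, 1, 2]: 0.9 + 0.8 is the highest path score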
Example #9
def combos(new_grid, blocks):
    '''
    This gives all the possible block location combinations.
    '''

    # This extracts all the 'o' in the original grid, and appends
    # them into p_list. 'o' is the position we could put blocks.
    p_list = []
    block_list = bk_list(blocks)
    for row in new_grid:
        for item in row:
            if item == 'o':
                p_list.append('o')
            else:
                continue

    # This uses blocks in the block list to replace the first 'o's
    # in the p_list.
    block_num = sum(blocks.values())

    for num in range(block_num):
        # num must stay a valid index; the original >= comparison allowed
        # num == len(block_list), which would raise an IndexError
        if num < len(block_list):
            p_list[num] = block_list[num]

    # This gives all the permutations of blocks and 'o's in the p_list.
    Combo_list = list(multiset_permutations(p_list))

    # This puts all the blocks and 'o's in p_list back to the new_grid.
    possible_grid_list = []

    for number in range(len(Combo_list)):
        # This makes a copy of the new_grid
        possible_grid = [['n' for x in range(len(new_grid[0]))]
                         for y in range(len(new_grid))]
        for i in range(len(new_grid)):
            for j in range(len(new_grid[0])):
                possible_grid[i][j] = new_grid[i][j]

        # This puts blocks and 'o's into the copied grid.
        for y in range(len(possible_grid)):
            for x in range(len(possible_grid[0])):
                if possible_grid[y][x] == 'o':
                    possible_grid[y][x] = Combo_list[number].pop(0)
                else:
                    continue

        # All the possible grids are appended into possible_grid_list.
        possible_grid_list.append(possible_grid)

    # From all possible grids, this extracts the (x,y) coordinates of
    # blocks A, B, and C, including the fixed blocks.
    block_A = []
    block_B = []
    block_C = []
    for n in range(len(possible_grid_list)):
        block_A.append([])
        block_B.append([])
        block_C.append([])
        for i in range(len(new_grid)):
            for j in range(len(new_grid[0])):
                if possible_grid_list[n][i][j] == 'A':
                    block_A[n].append((int(j), int(i)))
                elif possible_grid_list[n][i][j] == 'B':
                    block_B[n].append((int(j), int(i)))
                elif possible_grid_list[n][i][j] == 'C':
                    block_C[n].append((int(j), int(i)))
                else:
                    continue

    return block_A, block_B, block_C
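
The heart of combos is the call multiset_permutations(p_list): because repeated blocks of the same letter are interchangeable, it enumerates each distinct placement exactly once. A small sketch under the same convention ('o' marks an empty position):

from sympy.utilities.iterables import multiset_permutations

# two 'A' blocks over four cells: 4!/(2!*2!) = 6 distinct placements
p_list = ['A', 'A', 'o', 'o']
for combo in multiset_permutations(p_list):
    print(combo)
# ['A', 'A', 'o', 'o'], ['A', 'o', 'A', 'o'], ['A', 'o', 'o', 'A'],
# ['o', 'A', 'A', 'o'], ['o', 'A', 'o', 'A'], ['o', 'o', 'A', 'A']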
Example #10
        print(*args)

DEBUG = False
HISTORY_THRESHOLD = 1000

Ntot = 20
N4 = 14
N3 = 2
N2 = 2
N1 = 2

startingUnified = [4]*N4 + [3]*N3 + [2]*N2 + [1]*N1
startingUnified = np.array(startingUnified, np.int8)
iterations = 0
max_moves = 0
for currentSet in multiset_permutations(startingUnified):
    iterations = iterations+1
    starting0 = currentSet[:Ntot//2]
    starting1 = currentSet[Ntot//2:]
    if iterations % 10000 == 0:
        print("ITERATION:", iterations, starting0, starting1, max_moves)
        max_moves = 0

    #starting0 = [1, 3, 4, 2, 4, 2, 4, 4, 4, 4]
    #starting1 = [4, 4, 3, 4, 1, 4, 4, 4, 4, 4]

    #Start playing!
    #0 always starts first
    s0 = list(currentSet[:Ntot//2])
    s1 = list(currentSet[Ntot//2:])
    s = [s0, s1]
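
For reference, the outer loop above visits every distinct arrangement of the 20-tile multiset exactly once, so the number of iterations is the multinomial coefficient 20!/(14!*2!*2!*2!). A quick check of that count, using the same N* values:

from math import factorial

Ntot, N4, N3, N2, N1 = 20, 14, 2, 2, 2
count = factorial(Ntot) // (factorial(N4) * factorial(N3)
                            * factorial(N2) * factorial(N1))
print(count)  # 3488400 distinct multiset permutations to play through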
Example #11
    def blocks(self, filename):
        '''
        Depending upon the data read from the bff file (board, number of
        A, B, and C blocks, lazor list, holes), this method generates all
        possible boards by permuting the list of movable blocks, i.e. the
        o's, A's, B's, and C's.
        Each board so generated is first converted into a grid and then
        checked with the lazor solver function to see whether it solves the
        given board. The maximum number of iterations possible and the
        iterations actually performed are printed.
        ** Parameters **
        self - consists of all data (board, A_blocks, B_blocks, C_blocks .. )
        ** Returns **
        Nothing!
        '''
        movable_blocks = []
        for x in self.board:
            for y in x:
                if y == 'o':
                    movable_blocks.append(y)

        for i in range(self.A):
            movable_blocks[i] = 'A'
        for i in range(self.A, (self.A + self.B)):
            movable_blocks[i] = 'B'
        for i in range((self.A + self.B), (self.A + self.B + self.C)):
            movable_blocks[i] = 'C'
        ITER_B = 0
        b = "Loading"
        print(b, end="\r")
        t1 = time.time()
        permutations = list(multiset_permutations(movable_blocks))
        t2 = time.time()
        print("Maximum possible iterations: ", len(permutations))
        print("Time for generating grids: ", t2 - t1)
        x = 0
        b = "Loading"
        print(b, end="\r")
        t1 = time.time()
        for permut in permutations:
            sinks = copy.deepcopy(self.H)
            actual_board = copy.deepcopy(self.grid)
            possible_grid = create_grid(actual_board, permut)
            ITER_B += 1
            if lazor_path(possible_grid, self.L, sinks):
                print("Congratulations!! Board Solved")
                length = int((len(possible_grid) - 1) / 2)
                width = int((len(possible_grid[0]) - 1) / 2)
                for i in range(length):
                    for j in range(width):
                        print(possible_grid[2 * i + 1][2 * j + 1], end=' ')
                    print()
                print(
                    "This is the solution grid! OR just check the pngimage so created!"
                )
                fname1 = filename.split(".bff")[0]
                fname = fname1 + "_solution_textfile.txt"
                f = open(fname, "w+")
                f.write("The solution to your board is: \n")
                for i in range(length):
                    for j in range(width):
                        f.write(possible_grid[2 * i + 1][2 * j + 1])
                        f.write(" ")
                    f.write("\n")
                f.write("A is the reflect block, B is the absorb ")
                f.write("block and C is the refract block.\nThe o should ")
                f.write("be empty. Try not to cheat next time :)")
                f.close()
                break
            t2 = time.time()
            if t2 - t1 >= 5:
                t1 = time.time()
                b = "Loading" + "." * x
                print(b, end="\r")
                if x == 3:
                    x = 0
                x += 1
        print("Iterations taken to solve: ", ITER_B)
Example #12
#-*-coding:utf8-*-

from sympy.utilities.iterables import multiset_permutations
import datetime
import numpy as np

a = np.array(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j'])

pat_list = []
start = datetime.datetime.now()
for i in range(1, len(a) + 1):
    print(i)
    for p in multiset_permutations(a, i):
        pat_list.append(p)

end = datetime.datetime.now()
print((end - start).total_seconds())
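
Since the ten entries of a are all distinct, the number of partial permutations enumerated above has a closed form: the sum of P(10, i) for i = 1..10. A quick cross-check with math.perm (Python 3.8+):

from math import perm

total = sum(perm(10, i) for i in range(1, 11))
print(total)  # 9864100, the number of lists appended to pat_list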
Example #13
    def convexConcaveOptimization(self,x,Y,sample_weight,samp_counts):
        random.seed()
        self.counts = numpy.zeros((x.shape[0],))
        if x.shape[0] > 0:
            sample_idx = sample_weight > 0
            sample_idx_ran = asarray(range(x.shape[0]))[sample_idx.reshape(-1)]
            Y_tmp = Y[sample_idx.reshape(-1)]
            x_tmp = csr_matrix(x[sample_idx.reshape(-1)],dtype=np.float32)

            #sample X and Y
            if self.sample_ratio*x.shape[0] > 10:
                #idxs =  random.permutation(x_tmp.shape[0])[:int(x_tmp.shape[0]*self.sample_ratio)]            
                idxs = randint(0, x_tmp.shape[0], int(x_tmp.shape[0]*self.sample_ratio)) #bootstrap
                  
                to_add_cnt = numpy.unique(sample_idx_ran[idxs]) 
                x_ = csr_matrix(x_tmp[idxs],dtype=np.float32)
                Y_ = Y_tmp[idxs]
                    
                diff_y = unique(Y_)
                if diff_y.shape[0] > 1:
                    x_tmp = x_
                    Y_tmp = Y_
                    #print ("sampling shape:",diff_y.shape[0])
            else:
                to_add_cnt = sample_idx_ran

            if not (samp_counts is None): 
                self.counts[to_add_cnt] += 1
            
            def nu(arr):
                return asarray([1 + unique(arr[:,i].data,return_counts=True)[1].shape[0] for i in range(arr.shape[1])])
            
            #nonzero_idxs = unique(x_tmp.nonzero()[1]) 
            counts_p = nu(csc_matrix(x_tmp))
            pos_idx = where(counts_p > 1)[0]

            fw_size = int(x_tmp.shape[1] * self.feature_ratio)
            if fw_size > pos_idx.shape[0]:
                fw_size = pos_idx.shape[0]
            #fw_size = int(pos_idx.shape[0] * self.feature_ratio)

            self.features_weight = random.permutation(pos_idx)[:fw_size]
            
            if fw_size == 0:
                return 0.

            x_tmp = csr_matrix(x_tmp[:,self.features_weight],dtype=np.float32)
            
            #print (x_tmp.shape,fw_size)

            H = zeros(shape = (1,Y_tmp.shape[0]))        
            
            gini_res = 0    
    
            class_counts = unique(Y_tmp, return_counts=True)
            class_counts = numpy.asarray(list(zip(class_counts[0],class_counts[1])))

            class2side = {}
            class2count = {}
            side2count = {}

            min_gini = self.max_criteria
            min_p = []
            
            if len(class_counts) > 13:
                # Greedy local search from random initial sign assignments
                for _ in range(len(class_counts)*len(class_counts)*15):
                    lmin_gini = self.max_criteria
                    lmin_p = []

                    next = True
                    elements = [-1,+1]
                    probabilities = [0.5, 0.5]
                    p = numpy.random.choice(elements,len(class_counts) , p=probabilities)

                    zc = 0 
                    while next:
                        next = False
                        zc += 1  
                        for i in range(p.shape[0]):
                            p[i] = - p[i]
                            left_counts = class_counts[p < 0, 1]
                            right_counts = class_counts[p > 0, 1]

                            lcs = left_counts.sum()
                            rcs = right_counts.sum()  
                            den = lcs + rcs 

                            PL = float(lcs)/ den
                            PR = float(rcs)/ den
            
                            gini_l = self.criteria_row(left_counts / lcs)
                            gini_r = self.criteria_row(right_counts / rcs)

                            gini =  PL*gini_l + PR* gini_r
                            if gini < lmin_gini:
                                lmin_p = deepcopy(p)
                                lmin_gini = gini
                                next = True
                        p = lmin_p

                    if  lmin_gini < min_gini:
                        min_p = deepcopy(lmin_p)
                        min_gini = lmin_gini
 
            else:
                for zc in range(1,len(class_counts),1):
                    a = numpy.hstack([-numpy.ones((zc,)),numpy.ones((len(class_counts) - zc,))])
                    for p in multiset_permutations(a):
                        p = numpy.asarray(p)
                        left_counts = class_counts[p < 0, 1]
                        right_counts = class_counts[p > 0, 1]

                        lcs = left_counts.sum()
                        rcs = right_counts.sum()  
                        den = lcs + rcs 

                        PL = float(lcs)/ den
                        PR = float(rcs)/ den
            
                        gini_l = self.criteria_row(left_counts / lcs)
                        gini_r = self.criteria_row(right_counts / rcs)

                        gini =  PL*gini_l + PR* gini_r

                        if gini < min_gini:
                            min_p = p
                            min_gini = gini

            left_counts = numpy.asarray([c[1] for c in class_counts[min_p < 0]])
            right_counts = numpy.asarray([c[1] for c in class_counts[min_p > 0]])
            side2count[-1] = left_counts.sum()
            side2count[1] = right_counts.sum()               
            for i,(cl,cnt) in enumerate(class_counts):
                class2side[cl] = min_p[i]
                H[0,Y_tmp == cl] = min_p[i]     
                class2count[cl] = cnt

            gini_best = 0
            gini_old = 0
            for class_id, count_ in class_counts:
                p = float(count_) / side2count[class2side[class_id]]
                p2 = float(count_) / (side2count[-1] + side2count[1])

                gini_old += self.criteria(p2)
                gini_best +=  (float(side2count[class2side[class_id]])/ (side2count[-1] + side2count[1]))*self.criteria(p)

            Hsize, IH,IY, gini_old_wise = self.getDeltaParams(H,Y_tmp, self.criteria)

            gini_best = gini_old - gini_best

            deltas = zeros(shape=(H.shape[1]))
            #deltas = ones(shape=(H.shape[1])) 
            for i in range(H.shape[1]):
                gini_i = self.delta_wise(Hsize, IH,IY,Y_tmp[i],-H[0,i],self.criteria)
                deltas[i] = float(gini_i - gini_old_wise)  

                if self.balance:
                    deltas[i] = deltas[i] * float(H.reshape(-1).shape[0]) / (2*side2count[H[0,i]])

            #deltas = deltas - deltas.min()

            ratio = 1

            dm = deltas.max()
            if deltas.max() == 0:
                deltas = ones(shape=(H.shape[1]))  
            else:
                deltas = (deltas / dm)*ratio 

            #print ("deltas:",deltas.min(),deltas.max())

            #start_time = time.time()

            if self.noise > 0.:
                #x_maxis = asarray(abs(x_tmp).max(axis=0).todense()).flatten()
                gauss_noise = random.normal(ones((x_tmp.shape[1],),dtype=float),self.noise,(1,x_tmp.shape[1]))
                x_tmp = csr_matrix(x_tmp.multiply(gauss_noise),dtype=np.float32)

            #numpy.save("x_tmp",numpy.asarray(x_tmp.todense()))
            #numpy.save("H",H.reshape(-1))
            #numpy.save("deltas",deltas)
            #time.sleep(10.0)
            #return
            try:
                if self.kernel == 'linear':
                    if not self.dual:
                        self.model = SGDClassifier(n_iter_no_change=5,loss='squared_hinge', alpha=1. / (100*self.C), fit_intercept=True, max_iter=self.max_iter, tol=self.tol, eta0=0.5,shuffle=True, learning_rate='adaptive')
                        #self.model = LinearSVC(penalty='l2',dual=self.dual,tol=self.tol,C = self.C,max_iter=self.max_iter)
                        self.model.fit(x_tmp,H.reshape(-1),sample_weight=deltas)
                    else:  
                        self.model = LinearSVC(penalty='l2',dual=self.dual,tol=self.tol,C = self.C,max_iter=self.max_iter)
                        self.model.fit(x_tmp,H.reshape(-1),sample_weight=deltas)
                    
                #else:
                if self.kernel == 'polynomial':
                    self.model = SVC(kernel='poly',tol=self.tol,C = self.C,max_iter=self.max_iter,degree=4,gamma=self.gamma)
                    self.model.fit(x_tmp,H.reshape(-1),sample_weight=deltas)
                else:
                    if self.kernel == 'gaussian':
                        self.model = SVC(kernel='rbf',tol=self.tol,C = self.C,max_iter=self.max_iter,gamma=self.gamma)#,gpu_id=1,max_mem_size=128)
                        self.model.fit(x_tmp,H.reshape(-1),sample_weight=deltas)
                        #numpy.save("vertexDataX",x_tmp.data)
                        #numpy.save("vertexIndX",x_tmp.indices)
                        #numpy.save("vertexPtrX",x_tmp.indptr)
                        #numpy.save("DataH",H)                             
                        #numpy.save("deltas",deltas)
                        #self.classifier_id = str(uuid.uuid4()) 

                        #with open('shape.pickle', 'wb') as f:
                        #    pickle.dump(x_tmp.shape, f)
                        #if self.kernel == 'gaussian':
                        #    kernel_ =  'rbf'     
                        #else:
                        #    kernel_ = 'linear'

                        #p = subprocess.Popen("python train.py " + self.classifier_id + " " + kernel_, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
                        #p.wait()
                        #self.model = SVC(kernel='rbf',tol=self.tol,C = self.C,max_iter=self.max_iter,gamma=self.gamma,gpu_id=1)
                        #self.model.load_from_file(".model")
                            
            except Exception as exp:
                print (str(exp))
                return 0.            
                        #print(1,unique(H),unique(Y_tmp,return_counts = True),accuracy_score(self.model.predict(x_tmp),H.reshape(-1)))

            #end_time = time.time()

            #print ("Fit 1") #, end_time - start_time,"div:",side2count,"delta_uniques:",unique(deltas)) 

            if self.dropout_low > 0 or self.dropout_high < 1.:
                #coeffs = np.abs(self.model.coef_).flatten()
                
                #max_coeff = coeffs.max()
                #if max_coeff > 0:
                #    coeffs = coeffs / max_coeff
                
                #den = np.exp(self.dropout_low*coeffs)
                #p = den / den.sum()
                #remain_idxs = np.random.choice(len(coeffs), size=len(coeffs), replace=True, p=p)                
                max_coef = np.abs(self.model.coef_).max()
                remain_idxs = ((np.abs(self.model.coef_) >= max_coef * self.dropout_low) & (np.abs(self.model.coef_) <= max_coef * self.dropout_high)).flatten() 
            
                #print ("old feat size: ", self.features_weight.shape[0])
                #if self.features_weight[remain_idxs].shape[0] < 1:
                #    remain_idxs = np.asarray([np.argmax(np.abs(self.model.coef_.flatten()), axis = 0)])  
               
            
                self.features_weight = self.features_weight[remain_idxs]
                #print ("new feat size: ", self.features_weight.shape[0]) 
                x_tmp = x_tmp[:,remain_idxs]

                if self.kernel == 'linear':
                    if not self.dual:
                        self.model = SGDClassifier(n_iter_no_change=5,loss='squared_hinge', alpha=1. / (100*self.C), fit_intercept=True, max_iter=self.max_iter, tol=self.tol, eta0=0.5,shuffle=True, learning_rate='adaptive')
                        #self.model = LinearSVC(penalty='l2',dual=self.dual,tol=self.tol,C = self.C,max_iter=self.max_iter)
                        self.model.fit(x_tmp,H.reshape(-1),sample_weight=deltas)
                        #coef_tmp = self.model.coef_[0,remain_idxs].reshape(1,-1)
                        #self.model.coef_ = coef_tmp 
                    else:
                        self.model = LinearSVC(penalty='l2',dual=self.dual,tol=self.tol,C = self.C,max_iter=self.max_iter)
                        self.model.fit(x_tmp,H.reshape(-1),sample_weight=deltas)

                else:
                    if self.kernel == 'polynomial':
                        self.model = SVC(kernel='poly',tol=self.tol,C = self.C,max_iter=self.max_iter,degree=3,gamma=self.gamma)
                        self.model.fit(x_tmp,H.reshape(-1),sample_weight=deltas)
                    else:
                        if self.kernel == 'gaussian':
                            self.model = SVC(kernel='rbf',tol=self.tol,C = self.C,max_iter=self.max_iter,gamma=self.gamma)
                            self.model.fit(x_tmp,H.reshape(-1),sample_weight=deltas)

            #print ("Fit 2")
            gini_res = self.calcGini(x,Y)
            
            #print (gini_res,"|",gini_best)

            self.estimateTetas(x_tmp, Y_tmp) 

            self.p0 = zeros(shape=(self.class_max + 1))
            self.p1 = zeros(shape=(self.class_max + 1))

            sum_t0 = self.Teta0.sum()
            sum_t1 = self.Teta1.sum()

            if sum_t0 > 0: 
                p0_ = multiply(self.Teta0, 1. / sum_t0)                

                for i in range(len(p0_)):
                    self.p0[self.class_map_inv[i]] = p0_[i]

                #exp_0 = exp(self.p0)
                #self.p0 = exp_0 / exp_0.sum()

            if sum_t1 > 0:       
                p1_ = multiply(self.Teta1, 1. / sum_t1)

                for i in range(len(p1_)):
                    self.p1[self.class_map_inv[i]] = p1_[i]  

                #exp_1 = exp(self.p1)
                #self.p1 = exp_1 / exp_1.sum()
            self.counts = numpy.hstack([samp_counts,self.counts])
            return gini_res    
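
In the exhaustive branch above (13 classes or fewer), each class is assigned to a -1/+1 side by enumerating, for every count zc of negative entries, all sign vectors via multiset_permutations. A stripped-down sketch of just that enumeration, without the Gini bookkeeping:

import numpy
from sympy.utilities.iterables import multiset_permutations

n_classes = 3
for zc in range(1, n_classes):
    # sign vector with zc entries of -1 and the rest +1
    a = numpy.hstack([-numpy.ones((zc,)), numpy.ones((n_classes - zc,))])
    for p in multiset_permutations(a):
        print(zc, p)
# zc=1 -> [-1.0, 1.0, 1.0], [1.0, -1.0, 1.0], [1.0, 1.0, -1.0]
# zc=2 -> [-1.0, -1.0, 1.0], [-1.0, 1.0, -1.0], [1.0, -1.0, -1.0]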
Example #14
File: eq2pc.py  Project: ruher/EQ2PC
def Ceq_search(
    d,
    nev,
    b=True,
    ref=True,
    f=False,
    spec=False,
    tol=1e-13,
        info=False):
    # In 3d, always keep the reflection relation enabled
    if len(d) == 3:
        ref = True

    # Generate indicator arrays
    print('...Generating indicator arrays')
    Ss = gS(d, nev, sh=False).flatten()[1:]
    Ss = np.array(list(multiset_permutations(Ss)))
    nSs = len(Ss)
    Ss = np.hstack([
        np.ones([nSs, 1], dtype=int), Ss
    ])
    Ss = Ss.reshape([nSs] + list(d))

    # Compute correlations
    print('...Computing correlations')
    temp = rCs(Ss[0], f=f, spec=spec)
    C = np.zeros([nSs] + list(temp.shape))
    C[0] = temp
    for i in range(1, nSs):
        if info:
            print('...Computing correlations - %i / %i = %.4f' %
                  (i + 1, nSs, (i + 1) / nSs))
        C[i] = rCs(Ss[i], f=f, spec=spec)

    # Search for cases
    print('...Searching for cases')
    i = 0
    out = []
    def unrelloc(i1, i2): return not rel(Ss[i1], Ss[i2], ref)
    while i < nSs:
        if info:
            print('...Searching for cases - %i / %i = %.4f' %
                  (i + 1, nSs, (i + 1) / nSs))
        Ci = C[i]
        if f:
            cand = np.where([np.linalg.norm(Ci - CC) < tol for CC in C])[0]
        else:
            cand = np.array(
                [j for j in range(i + 1, nSs) if np.all(Ci == C[j])])
        if len(cand) > 0:
            Si = Ss[i]
            check = [not rel(Si, Ss[c], ref) for c in cand]
            cand = cand[check]
            if len(cand) > 0:
                cand = unrelmax(np.array(cand), unrelloc)
                event = np.zeros([1 + len(cand)] + d, dtype=int)
                event[0] = Si
                for j in range(len(cand)):
                    event[j + 1] = Ss[cand[j]]
                out.append(event)
                if isinstance(b, type(True)):
                    if b:
                        break
                if isinstance(b, type(1)):
                    if len(out) == b:
                        break
        i += 1
    if len(out) > 1:
        def unrelloc(i1, i2): return not rel(out[i1][0], out[i2][0], ref)
        cand = unrelmax(np.array(list(range(len(out)))), unrelloc)
        for j in range(1, len(out[0])):
            cand = np.array([c for c in cand if not rel(out[0][j], out[c][0])])
        if len(cand) > 1:
            out = [out[c] for c in cand]
        else:
            out = [out[0]]
    print('...Number of structure sets: %i' % len(out))
    return out
Example #15
def get_best_perm(Ca, Ce, Ua, Ue, N):
    clone_idxs = list(range(N))
    perms = list(multiset_permutations(clone_idxs))
    perms = get_perms_best_C_score(Ca, Ce, perms)
    perm = get_perm_best_U_score(Ua, Ue, perms)
    return perm
Example #16
def test_nC_nP_nT():
    from sympy.utilities.iterables import (
        multiset_permutations, multiset_combinations, multiset_partitions,
        partitions, subsets, permutations)
    from sympy.functions.combinatorial.numbers import (
        nP, nC, nT, stirling, _multiset_histogram, _AOP_product)
    from sympy.combinatorics.permutations import Permutation
    from sympy.core.numbers import oo
    from random import choice

    c = string.ascii_lowercase
    for i in range(100):
        s = ''.join(choice(c) for i in range(7))
        u = len(s) == len(set(s))
        try:
            tot = 0
            for i in range(8):
                check = nP(s, i)
                tot += check
                assert len(list(multiset_permutations(s, i))) == check
                if u:
                    assert nP(len(s), i) == check
            assert nP(s) == tot
        except AssertionError:
            print(s, i, 'failed perm test')
            raise ValueError()

    for i in range(100):
        s = ''.join(choice(c) for i in range(7))
        u = len(s) == len(set(s))
        try:
            tot = 0
            for i in range(8):
                check = nC(s, i)
                tot += check
                assert len(list(multiset_combinations(s, i))) == check
                if u:
                    assert nC(len(s), i) == check
            assert nC(s) == tot
            if u:
                assert nC(len(s)) == tot
        except AssertionError:
            print(s, i, 'failed combo test')
            raise ValueError()

    for i in range(1, 10):
        tot = 0
        for j in range(1, i + 2):
            check = nT(i, j)
            tot += check
            assert sum(1 for p in partitions(i, j, size=True) if p[0] == j) == check
        assert nT(i) == tot

    for i in range(1, 10):
        tot = 0
        for j in range(1, i + 2):
            check = nT(range(i), j)
            tot += check
            assert len(list(multiset_partitions(range(i), j))) == check
        assert nT(range(i)) == tot

    for i in range(100):
        s = ''.join(choice(c) for i in range(7))
        u = len(s) == len(set(s))
        try:
            tot = 0
            for i in range(1, 8):
                check = nT(s, i)
                tot += check
                assert len(list(multiset_partitions(s, i))) == check
                if u:
                    assert nT(range(len(s)), i) == check
            if u:
                assert nT(range(len(s))) == tot
            assert nT(s) == tot
        except AssertionError:
            print(s, i, 'failed partition test')
            raise ValueError()

    # tests for Stirling numbers of the first kind that are not tested in the
    # above
    assert [stirling(9, i, kind=1) for i in range(11)] == [
        0, 40320, 109584, 118124, 67284, 22449, 4536, 546, 36, 1, 0]
    perms = list(permutations(range(4)))
    assert [sum(1 for p in perms if Permutation(p).cycles == i)
            for i in range(5)] == [0, 6, 11, 6, 1] == [
            stirling(4, i, kind=1) for i in range(5)]
    # http://oeis.org/A008275
    assert [stirling(n, k, signed=1)
        for n in range(10) for k in range(1, n + 1)] == [
            1, -1,
            1, 2, -3,
            1, -6, 11, -6,
            1, 24, -50, 35, -10,
            1, -120, 274, -225, 85, -15,
            1, 720, -1764, 1624, -735, 175, -21,
            1, -5040, 13068, -13132, 6769, -1960, 322, -28,
            1, 40320, -109584, 118124, -67284, 22449, -4536, 546, -36, 1]
    # http://en.wikipedia.org/wiki/Stirling_numbers_of_the_first_kind
    assert [stirling(n, k, kind=1)
        for n in range(10) for k in range(n+1)] == [
            1,
            0, 1,
            0, 1, 1,
            0, 2, 3, 1,
            0, 6, 11, 6, 1,
            0, 24, 50, 35, 10, 1,
            0, 120, 274, 225, 85, 15, 1,
            0, 720, 1764, 1624, 735, 175, 21, 1,
            0, 5040, 13068, 13132, 6769, 1960, 322, 28, 1,
            0, 40320, 109584, 118124, 67284, 22449, 4536, 546, 36, 1]
    # http://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
    assert [stirling(n, k, kind=2)
        for n in range(10) for k in range(n+1)] == [
            1,
            0, 1,
            0, 1, 1,
            0, 1, 3, 1,
            0, 1, 7, 6, 1,
            0, 1, 15, 25, 10, 1,
            0, 1, 31, 90, 65, 15, 1,
            0, 1, 63, 301, 350, 140, 21, 1,
            0, 1, 127, 966, 1701, 1050, 266, 28, 1,
            0, 1, 255, 3025, 7770, 6951, 2646, 462, 36, 1]
    assert stirling(3, 4, kind=1) == stirling(3, 4, kind=2) == 0
    raises(ValueError, lambda: stirling(-2, 2))

    def delta(p):
        if len(p) == 1:
            return oo
        return min(abs(i[0] - i[1]) for i in subsets(p, 2))
    parts = multiset_partitions(range(5), 3)
    d = 2
    assert (sum(1 for p in parts if all(delta(i) >= d for i in p)) ==
            stirling(5, 3, d=d) == 7)

    # other coverage tests
    assert nC('abb', 2) == nC('aab', 2) == 2
    assert nP(3, 3, replacement=True) == nP('aabc', 3, replacement=True) == 27
    assert nP(3, 4) == 0
    assert nP('aabc', 5) == 0
    assert nC(4, 2, replacement=True) == nC('abcdd', 2, replacement=True) == \
        len(list(multiset_combinations('aabbccdd', 2))) == 10
    assert nC('abcdd') == sum(nC('abcdd', i) for i in range(6)) == 24
    assert nC(list('abcdd'), 4) == 4
    assert nT('aaaa') == nT(4) == len(list(partitions(4))) == 5
    assert nT('aaab') == len(list(multiset_partitions('aaab'))) == 7
    assert nC('aabb'*3, 3) == 4  # aaa, bbb, abb, baa
    assert dict(_AOP_product((4,1,1,1))) == {
        0: 1, 1: 4, 2: 7, 3: 8, 4: 8, 5: 7, 6: 4, 7: 1}
    # the following was the first t that showed a problem in a previous form of
    # the function, so it's not as random as it may appear
    t = (3, 9, 4, 6, 6, 5, 5, 2, 10, 4)
    assert sum(_AOP_product(t)[i] for i in range(55)) == 58212000
    raises(ValueError, lambda: _multiset_histogram({1:'a'}))
Example #17
    def get_full_basis(self):
        assert (self.nspin <= 20), "basis too large, use rand_basis instead"
        ref_state = np.concatenate([np.ones(self.num_spinup), -np.ones(self.num_spindown)]).astype(int)
        np_output = np.array(list(multiset_permutations(ref_state)))
        return th.tensor(np_output, dtype=self.dtype, device=self.device)
Example #18
File: labeler.py  Project: zaprice/csm-py
def m_perms(l: List):
    if l:
        return multiset_permutations(l)
    else:
        return [()]
Example #19
                                [2, 3, 0, -1, 0], [2, 3, 1, -1, 0],
                                [2, 0, 0, 0, 0], [2, 0, 0, -1, 0],
                                [2, 0, 0, 0, -1], [2, 0, 0, -1, -1],
                                [2, 1, 0, 0, 0], [2, 1, 1, 0, 0],
                                [2, 1, 0, 0, -1], [2, 1, 1, 0, -1],
                                [3, 2, 0, 0, 0], [3, 2, -1, 0, 0],
                                [3, 2, 0, 1, 0], [3, 2, -1, 1, 0],
                                [3, 1, 0, 0, 0], [3, 1, 0, 1, 0],
                                [3, 1, 0, 0, -1], [3, 1, 0, 1, -1],
                                [3, 0, 0, 0, 0], [3, 0, -1, 0, 0],
                                [3, 0, 0, 0, -1], [3, 0, -1, 0, -1]])

# occupancies (there will be 70^4 = 24.01 million of these!)

c0 = np.array([0, 0, 0, 0, 1, 1, 1, 1])
pc0 = np.array(list(multiset_permutations(c0))).reshape(70, 2, 2, 2)

c1 = np.array([2, 2, 2, 2, 3, 3, 3, 3])
pc1 = np.array(list(multiset_permutations(c1))).reshape(70, 2, 2, 2)

c2 = np.array([4, 4, 4, 4, 5, 5, 5, 5])
pc2 = np.array(list(multiset_permutations(c2))).reshape(70, 2, 2, 2)

c3 = np.array([6, 6, 6, 6, 7, 7, 7, 7])
pc3 = np.array(list(multiset_permutations(c3))).reshape(70, 2, 2, 2)
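
Each c* vector holds four copies of one label and four of another, so multiset_permutations yields C(8, 4) = 70 distinct arrangements, which is why every pc* result reshapes cleanly to (70, 2, 2, 2). A one-line sanity check (math.comb needs Python 3.8+):

from math import comb

assert comb(8, 4) == 70  # arrangements of 4 + 4 indistinguishable labels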

bond_energies = np.array([[0., 1., 2., 3., 4., 6., 7., 5.],
                          [0., 0., 7., 7., 4., 4., 3., 4.],
                          [0., 0., 0., 1., 1., 6., 3., 6.],
                          [0., 0., 0., 0., 2., 7., 7., 8.],
                          [0., 0., 0., 0., 0., 2., 5., 3.],
Example #20
def lazors_cheat(filename):
    """
    Takes a .bff file for a Lazor puzzle (containing only block types reflect,
    opaque, and refract) and solves it. Then creates a .png solution that shows
    where to put each block

    **Parameters**
        filename: *string*
            The .bff file that contains all the Lazor information.
            Can contain .bff or not

    **Returns**
        None
    """

    # Checks for if the file name contains '.bff'' or not
    if ".bff" in filename:
        filename = filename.split(".bff")[0]

    # Read the .bff file and extract the information
    information = laser_board_reader(filename=filename + '.bff')
    grid = information[0]
    [A, B, C] = information[1]
    laser_origin = information[2]
    targetPos = information[3]

    # Setting up block locations

    # 1. Pull out block locations from grid
    blockspots = []
    for y in grid:
        for x in y:
            if x == 'o':
                blockspots.append(x)

    # 2. Assign types of block to list of grid locations
    for i in range(A):
        blockspots[i] = 'A'
    for i in range(A, (A + B)):
        blockspots[i] = 'B'
    for i in range((A + B), (A + B + C)):
        blockspots[i] = 'C'

    # 3. Get all permutations of block locations for a given grid and number of each
    # type of block
    permutations = list(multiset_permutations(blockspots))
    length = len(grid)
    width = len(grid[0])

    # Algorithm for solving: Create a list of all possible combinations of the
    # lists containing possible block positions and run them individually until
    # finding a solution
    print("%i possible solutions." % len(permutations))
    print("Solving...")
    SOLUTION_FOUND = False
    for possibility in permutations:

        # Create a working grid that reads the information of the block location
        # inside each possibility of the permutations by looping through the array
        # replacing 'o's with the blocks
        workinggrid = copy.deepcopy(grid)
        for l in range(length):
            for w in range(width):
                if workinggrid[l][w] == 'o':
                    workinggrid[l][w] = possibility.pop(0)

        if laser_runner(workinggrid, laser_origin, targetPos):
            print("Solution found!")
            save_grid(workinggrid, name="%s_solution.png" % filename)
            SOLUTION_FOUND = True
            break

    if not SOLUTION_FOUND:
        print(
            '''Solution not found. Please double check .bff file is correct''')
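
Because equal block letters are interchangeable, multiset_permutations(blockspots) enumerates each distinct board exactly once, which is what keeps the search space manageable. A small sketch of the saving versus plain itertools.permutations, with a hypothetical blockspots list:

from itertools import permutations
from sympy.utilities.iterables import multiset_permutations

blockspots = ['A', 'A', 'B', 'o', 'o', 'o']
# itertools produces 6! = 720 orderings, most of them duplicate boards
assert len(set(permutations(blockspots))) == 60
# multiset_permutations goes straight to the 6!/(2!*1!*3!) = 60 distinct boards
assert len(list(multiset_permutations(blockspots))) == 60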
Example #21
    "е": "e",
    "о": "o",
    "а": "a",
    "с": "c",
    "А": "A",
    "О": "O",
    "Е": "E",
    "С": "C",
}

string_dict = {}
all_symbols = np.array([])
string = "1. Физические явления и процессы в об.. 11:50-13:20\nНизамов А.Ж."

all_combs_list = []
for i in range(len(string)):
    if string[i] in d:
        all_symbols = np.append(all_symbols, i)

i = 0
for p in multiset_permutations(all_symbols.astype(int)):
    i += 1
    #buf_string = list(string)
    #for element in p:
    #    buf_string[element] = d[buf_string[element]]
    #    string_dict["".join(buf_string)] = None

print(i)
#print(len(string_dict))
#for key in string_dict:
#   print(key)
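
Note that the loop above only counts permutations, and because all_symbols holds distinct character positions, that count is simply n!. A direct computation avoids enumerating them at all (positions is a hypothetical stand-in for all_symbols):

from math import factorial

positions = [3, 7, 12, 20]  # distinct indices, as in all_symbols
print(factorial(len(positions)))  # 24 orderings, with no enumeration needed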
Example #22
    def permutate(self, l):
        # You might have to make this a generator.
        # Take in the necessary objects and their corresponding probabilities
        # via zip, create permutations (this could possibly be the first
        # step), then use the build functions above to create one-hot vectors.
        return [i for i in multiset_permutations(l)]
Example #23
def compute_elementary_contractions_half_cumulant(pure_ops_list, max_cu):
    """
    Generate all possible combinations of pure creation and annihilation operators for cumulant contractions.
    :param pure_ops_list: a list of pure creation or annihilation indices for each input operator
    :param max_cu: the max level of cumulants
    :return: {cumulant level: [[n_cumulant of chosen indices (op index, relative index)], ...]}

    Consider the following list of pure cre/ann operators: pure_ops_list = [[a, b], [c, d], [e, f, g]]
    and we want to obtain 2, 3, 4 cumulants, i.e., max_cu = 4.
    Note that there are three macro operators: [a, b], [c, d], [e, f, g]
    and the first macro operator has two micro operators: a, b

    This function will do the following:
        (1) generate all unique integer partitions of 2, 3, and 4, i.e., partition the legs for the k-cumulant
            2 = 1 + 1                       # note a)
            3 = 2 + 1 = 1 + 1 + 1           # note b)
            4 = 3 + 1 = 2 + 2 = 2 + 1 + 1   # note c)
            Note:
                a) single partitions are included, e.g., [2] is valid
                b) only unique partitions, e.g., 1 + 2 is ignored
                c) len(pure_ops_list) = 3 => number of parts must be <= 3
        (2) generate all possible sub-indices for each macro operator in pure_ops_list
            and return [{n_leg: [relative indices of the current string of cre/ann operators]}, ...]
            for the example, we should get:
                [{1: [(0,), (1,)], 2: [(0, 1)]},
                 {1: [(0,), (1,)], 2: [(0, 1)]},
                 {1: [(0,), (1,), (2,)], 2: [(0, 1), (0, 2), (1, 2)], 3: [(0, 1, 2)]}]
        (3) loop over integer partitions and choose the corresponding sub-indices partitions
            for example,
                => 2 + 1, i.e, select 2 legs from one macro operator and 1 leg from one of the others
                => consider multiset permutations, 2 + 1 = 1 + 2. another example: 2 + 1 + 1 = 1 + 2 + 1 = 1 + 1 + 2
                => choose 2 macro operators from the given three
                => say we choose the first two macro operators, where the first contributes two legs:
                    that is, [(0, 1)] from the first macro operator, [(0,), (1,)] from the second macro operator
                => generate all possible selections by a nested loop
                    => cartesian product between [(0, 1)] and [(0,), (1,)], i.e., a nested loop
                    => combine with macro operator index and get [(0, 0), (0, 1), (1, 0)], [(0, 0), (0, 1), (1, 1)]
    """
    results = {i: [] for i in range(2, max_cu + 1)}

    # generate all possible unique partitions for k cre/ann legs for k cumulant
    macro_size = len(pure_ops_list)
    unique_partitions = [
        part for k in range(2, max_cu + 1) for part in integer_partition(k)
        if len(part) <= macro_size
    ]

    # generate all possible sub-indices for each macro operator
    sub_indices = [{
        n_leg: [ele_ops for ele_ops in combinations(range(ops.size), n_leg)]
        for n_leg in range(1,
                           min(max_cu, ops.size) + 1)
    } for ops in pure_ops_list]

    for unique_partition in unique_partitions:
        n_macro = len(unique_partition)
        cu_level = sum(unique_partition)

        for leg_part in multiset_permutations(unique_partition):

            # choose n_macro from ops_list
            for macro_ops in combinations(range(macro_size), n_macro):

                # check if this partition is valid on these chosen macro operators
                if any([
                        len(pure_ops_list[i]) < n_leg
                        for i, n_leg in zip(macro_ops, leg_part)
                ]):
                    continue

                # generate all possibilities
                for micro_ops_pro in product(*[
                        sub_indices[i][n_leg]
                        for i, n_leg in zip(macro_ops, leg_part)
                ]):
                    results[cu_level].append([
                        (i_macro, i_micro)
                        for i_macro, micro_ops in zip(macro_ops, micro_ops_pro)
                        for i_micro in micro_ops
                    ])

    return results
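
Step (3) above relies on multiset_permutations to expand one unique integer partition into all of its ordered variants before macro operators are chosen. A minimal sketch of that single step:

from sympy.utilities.iterables import multiset_permutations

unique_partition = [2, 1, 1]  # one partition of 4 into at most 3 parts
for leg_part in multiset_permutations(unique_partition):
    print(leg_part)
# [1, 1, 2], [1, 2, 1], [2, 1, 1]: every assignment of leg counts
# to the macro operators that will be chosen next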
Example #24
            else:
                memory[paramThree] = 0

        if jumped == False:
            pc = pc + length
            instructionRead = int(memory[pc])

    return prgOutput


file = open('input.txt', 'r')

for line in file:
    origPrg = line.split(',')

phaseSettings = list(multiset_permutations([0, 1, 2, 3, 4]))
maxSignal = 0

for phaseSetting in phaseSettings:
    prg = origPrg.copy()
    output = intcodeVM(prg, phaseSetting[0], 0)
    prg = origPrg.copy()
    output = intcodeVM(prg, phaseSetting[1], output)
    prg = origPrg.copy()
    output = intcodeVM(prg, phaseSetting[2], output)
    prg = origPrg.copy()
    output = intcodeVM(prg, phaseSetting[3], output)
    prg = origPrg.copy()
    output = intcodeVM(prg, phaseSetting[4], output)

    if output > maxSignal:
        maxSignal = output
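Since the five phase values are all distinct, multiset_permutations reduces to ordinary permutations here; a quick sanity check of the 120 settings (a sketch, separate from the solution above):

from itertools import permutations
from sympy.utilities.iterables import multiset_permutations

settings = list(multiset_permutations([0, 1, 2, 3, 4]))
assert len(settings) == 120  # 5! orderings, since all phases are distinct
assert settings == [list(p) for p in permutations([0, 1, 2, 3, 4])]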
Example #25
0
def test():
    for i in range(1, 7):
        print(i)
        for p in multiset_permutations([0, 0, 1, 0, 1], i):
            print(p)
Example #26
0
# -*- coding: utf-8 -*-
def computeExprTreeComplete(cur_strat, other_strat, ordered_groups, N):
    """
    Compute the expression of a strategy against another strategy, knowing that their actions vary over time
    :param cur_strat: current strategy
    :param other_strat: opponent strategy
    :param ordered_groups: combination of all possible groups of size (N-1) of cur_strat and other_strat
    :param N: size of the group
    :return: array of expressions for each combination of the groups
    """
    nb_states = len(cur_strat[2:])
    expr_all_opponent_nb = []
    # only against the same strategy up to N-1 times in a group of N containing the opponent strategy
    for i in range(N - 1):
        #print("loop ", i)
        strat_loop = time.time()
        opponents = i * [0] + (N - 1 - i) * [1]
        groups_comb = list(multiset_permutations(opponents))
        #print(len(groups_comb))
        #groups = ordered_groups[i]
        expr_groups = 0.
        for group in groups_comb:
            expr_all_states = 0.
            for state in range(nb_states):
                root = Tree(state, cur_strat, 1)
                if group[0] == 1:  #If first element has the opponent strat
                    root.buildExprVariableActionsStrat(other_strat)
                    root.addChildrenVariableAction(other_strat)
                else:
                    root.buildExprVariableActionsStrat(cur_strat)
                    root.addChildrenVariableAction(cur_strat)
                current_level = root.getChildren()

                expr_state_config = root.getExpr()
                for j in range(1, N - 1):
                    if group[j] == 1:
                        next_level = []
                        for child in current_level:
                            child.buildExprVariableActionsStrat(other_strat)
                            child.addChildrenVariableAction(other_strat)
                            expr_state_config += child.getExpr()
                            for new_child in child.getChildren():
                                next_level.append(new_child)
                        current_level = list(next_level)
                    else:
                        next_level = []
                        for child in current_level:
                            child.buildExprVariableActionsStrat(cur_strat)
                            child.addChildrenVariableAction(cur_strat)
                            expr_state_config += child.getExpr()
                            for new_child in child.getChildren():
                                next_level.append(new_child)
                        current_level = list(next_level)

                # expr_state_config /= N-1       #in comment because I want the total payoff expression after N rounds
                expr_all_states += expr_state_config
            expr_all_states /= nb_states
            expr_groups += expr_all_states

        # average expression of strategy A against a group of A/B's of size N-1 with i B's in it
        expr_groups /= len(groups_comb)
        expr_all_opponent_nb.append(expr_groups)
        print("Loop computeExprTreeComplete ", i, " of ", N - 1,
              " computed in --- %s seconds --- " % (time.time() - strat_loop))
    return expr_all_opponent_nb
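groups_comb above contains each distinct opponent ordering exactly once. A small worked case, assuming N = 4 and i = 1:

from math import comb
from sympy.utilities.iterables import multiset_permutations

N, i = 4, 1
opponents = i * [0] + (N - 1 - i) * [1]    # one 0, two 1s
groups_comb = list(multiset_permutations(opponents))
print(groups_comb)                         # [[0, 1, 1], [1, 0, 1], [1, 1, 0]]
assert len(groups_comb) == comb(N - 1, i)  # C(3, 1) = 3, not 3! = 6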
Example #28
0
def q_instability_psi(n_qubits, n_ones):
    # from Phys. Rev. A 79, 022302 , eq. 4
    def S(k, l):
        return (np.sqrt(2 / (n_qubits + 1))
                * np.sin(np.pi * (k + 1) * (l + 1) / (n_qubits + 1)))

    ## generate unnormalized dicke state with (n_qubits, k).
    n_zeros = n_qubits - n_ones
    # initialize vector with n_zeros of zeros and n_ones of ones
    zeros_int = [0 for _ in range(n_zeros)]
    ones_int = [1 for _ in range(n_ones)]
    statevector_init_int = zeros_int + ones_int

    # generate all permutations
    statevect_permutations_int = multiset_permutations(statevector_init_int)

    # generate a list of combinations of flipped spin locations for a given number of spins
    ls = [list(np.nonzero(row)[0]) for row in list(statevect_permutations_int)]
    ls = np.array(ls)

    # generate a permutation group for number of spins
    #ks = list(multiset_permutations(list(range(n_ones))))
    ks = list(generate_bell(n_ones))
    ks = np.array(ks)

    # construct pairs of ks and ls
    clist = []
    for llist in ls:
        # llist goes in the subscript of C (so C_(llist))
        c = 0
        for j, klist in enumerate(ks):
            # j = permutation number
            # klist = ks[j] = member of the permutation group of k
            #       = P_j(ks)
            cterm = (-1)**(j + 1)
            for i in range(len(klist)):
                k, l = klist[i], llist[i]

                cterm *= S(k, l)
            c += cterm

        clist += [c]

    zeros = ["0" for _ in range(n_zeros)]
    ones = ["1" for _ in range(n_ones)]
    statevector_init = zeros + ones

    # generate all permutations
    statevect_permutations = multiset_permutations(statevector_init)

    # join the string of 0s and 1s, convert it to an integer from base 2,
    # these will be the locations of nonzero probabilities
    positiveproblocations = [
        int("".join(vect_permutation), 2)
        for vect_permutation in statevect_permutations
    ]

    # generate XX state
    q_instability_state = [0.] * 2**n_qubits
    for i, loc in enumerate(positiveproblocations):
        q_instability_state[loc] = clist[i]

    # normalize
    q_instability_state = np.array(q_instability_state)
    q_instability_state /= norm(q_instability_state)
    return q_instability_state
Example #29
0
            down = lst[i + L]
            if right == x:
                total_pen += 1
            if up == x:
                total_pen += 1
            if left == x:
                total_pen += 1
            if down == x:
                total_pen += 1
    return total_pen


colors = ['R'] * 13 + ['B'] * 12
L = 5

combi_list = multiset_permutations(colors, L * L)
best_combi, best_penalty = None, L * L * 4
for combi in combi_list:
    pen = calc_penalty(combi)
    if pen < best_penalty:
        best_combi = combi
        best_penalty = pen
        print("Best: {}, pen: {}".format(best_combi, best_penalty))
        if best_penalty == 0:
            break

square_grid = pd.DataFrame('-', index=list(range(L)), columns=list(range(L)))
for i in range(len(best_combi)):
    color = best_combi[i]
    col = i % L
    row = i // L
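The brute force above stays tractable because multiset_permutations only visits distinct grids; a back-of-the-envelope count of the search space (a sketch, assuming the same 13 R / 12 B split):

from math import comb

# distinct 5x5 grids = 25! / (13! * 12!), far fewer than the 25! raw orderings
assert comb(25, 13) == 5200300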
Example #30
0
def test_multiset_permutations():
    assert [''.join(i) for i in (list(multiset_permutations('baby')))] == [
        'abby', 'abyb', 'aybb', 'baby', 'bayb', 'bbay', 'bbya', 'byab',
        'byba', 'yabb', 'ybab', 'ybba']
Example #31
0
    #print(RSS(res.x,TOL))
    #print(RSS(err1,TOL))

    return err1, errE, x0, BS


#%%
################################################################################

from sympy.utilities.iterables import multiset_permutations
l = list(multiset_permutations([1, 1, 1, 1, 0, 0, 0, 0]))
print(l)

original = np.zeros(8)
results = np.zeros(8)
cn = 0
for p in l:
    print("%2.f%%\r" % (cn / len(l) * 100))
    err1, errE, x0, BS = all(TOL, p)
    count = 0
    arr = np.zeros(8)
    ori = np.zeros(8)

    for i in range(len(p)):
        if p[i] == 0:
            arr[i] = errE[count]
Example #32
0
    for p in p_set:
        # Hamming distances from p to every other permutation in P_
        D_i = torch.stack([(p != p_prime) for p_prime in P_[1:, :].t()],
                          dim=1).sum(dim=0).view(1, -1).type(
                              torch.FloatTensor).to(device=device)
        D = torch.cat((D, D_i)).to(device=device)

    D = D.sum(dim=0).view(1, -1).to(device=device)
    # the first row of P_ holds the permutation index; pick the one with max distance
    j = P_[0, torch.argmax(D).item()].item()

    return int(j)


set_p = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])
P_ = torch.stack([torch.FloatTensor(p) for p in multiset_permutations(set_p)],
                 dim=1).to(device=device)
_, perm_size = P_.shape

idx = torch.arange(perm_size).type(torch.FloatTensor).view(
    1, -1).to(device=device)
P_ = torch.cat((idx, P_))

p_set = torch.Tensor().to(device=device)
j = np.random.choice(perm_size)

i = 0
checkpoint = 1
cardinality = 110

while i <= cardinality:
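P_ above collects one FloatTensor column per permutation of the nine distinct indices in set_p; a quick count check (a sketch, using a plain Python list in place of the numpy array):

from math import factorial
from sympy.utilities.iterables import multiset_permutations

perms = list(multiset_permutations(list(range(9))))
assert len(perms) == factorial(9)  # 362880: with distinct entries this is plain 9!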
Example #33
0
def main(experiment_path, load_path, data_path, loss_type, freqloss, optimizer,
         upsampling_mode, model_type, blocktype, space, weight_init,
         init_criterion, learning_rate, weight_decay, momentum, alpha, eps,
         beta1, beta2, clip, stochastic_drop, dropcopy, real_penalty,
         imag_penalty, efficient, bottleneck, schedule, nonesterov,
         rmspop_momentum, nopil, avg_copies, stochdrop_schedule, nb_resblocks,
         growth_rate, start_fmaps, epochs, batch_size, n_sources,
         print_interval, seed, nb_copies, **kwargs):
    # First, check whether we should load a preexisting model.
    if load_path:
        load_path = os.path.join(experiment_path, load_path)

    chkptFilename = os.path.join(experiment_path, 'checkpoint.pth.tar')
    isResuming = os.path.isfile(chkptFilename) or load_path
    if isResuming and (not load_path):
        load_path = chkptFilename
    # If experiment path is not created, we create it.
    if not os.path.isdir(experiment_path):
        os.mkdir(experiment_path)

    print('...preparing dataset')
    trainset = WSJ2MReader(data_path, 'train', random_seed=seed)
    devset = WSJ2MReader(data_path, 'dev', random_seed=seed)
    print('...number of training utterances {}'.format(trainset.n_examples))
    print('...number of dev utterances {}'.format(devset.n_examples))
    bptt_len = None
    train_stream = trainset.read(batch=batch_size,
                                 sortseq=False,
                                 normalize=False,
                                 bptt_len=bptt_len)
    dev_stream = devset.read(batch=batch_size,
                             sortseq=False,
                             normalize=False,
                             bptt_len=bptt_len)
    print('...building model')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if model_type == 'complexunet':
        model = ComplexUNet(in_channels=1,
                            nb_speakers=2,
                            mode=upsampling_mode,
                            nb_residual_blocks=nb_resblocks,
                            start_fmaps=start_fmaps,
                            blocktype=blocktype,
                            growth_rate=growth_rate,
                            space=space,
                            seed=seed,
                            weight_init=weight_init,
                            init_criterion=init_criterion,
                            efficient=efficient,
                            bottleneck=bottleneck,
                            nb_copies=nb_copies,
                            avg_copies=avg_copies,
                            stochastic_drop=stochastic_drop,
                            stochdrop_schedule=stochdrop_schedule,
                            dropcopy=dropcopy).to(device)
    print(model)
    if torch.cuda.device_count() > 1:
        print('...multi-gpu training')
        model = DataParallel(model)
    # loss function
    print('...define loss function')
    if freqloss is None:
        if loss_type == 'specl2loss':
            maskloss = SequenceLoss(nb_speakers=n_sources, pil=nopil)
        elif loss_type == 'speccosloss':
            maskloss = CosLoss(real_penalty=real_penalty,
                               imag_penalty=imag_penalty,
                               nb_speakers=n_sources,
                               pil=nopil)
        elif loss_type in {'istftl2loss', 'istftcosloss'}:
            maskloss = ISTFTLoss(nb_speakers=n_sources,
                                 pil=nopil,
                                 loss_type=loss_type)
    else:
        print("USING TIME FREQUENCY LOSS!")
        if (loss_type in {'istftl2loss', 'istftcosloss'}
                and freqloss in {'specl2loss', 'speccosloss'}):
            maskloss = ISTFTLoss(nb_speakers=n_sources,
                                 pil=nopil,
                                 loss_type=loss_type,
                                 frequencyloss=freqloss,
                                 real_penalty=real_penalty,
                                 imag_penalty=imag_penalty)
        else:
            raise Exception(
                "When a frequency loss is used it should be used along with a loss on the temporal signal."
                + "Found freqloss == " + str(freqloss) + " and loss_type == " +
                str(loss_type) + ".")
    maskloss = maskloss.to(device)

    # optimizer
    print('...define optimizer')
    if optimizer == 'adam':
        optimizer = Adam(filter(lambda p: p.requires_grad, model.parameters()),
                         lr=learning_rate,
                         weight_decay=weight_decay,
                         betas=(beta1, beta2),
                         eps=eps)
    elif optimizer == 'sgd':
        optimizer = SGD(filter(lambda p: p.requires_grad, model.parameters()),
                        lr=learning_rate,
                        momentum=momentum,
                        weight_decay=weight_decay,
                        nesterov=nonesterov)
    elif optimizer == 'rmsprop':
        rmsmom = momentum if rmspop_momentum else 0
        optimizer = RMSprop(filter(lambda p: p.requires_grad,
                                   model.parameters()),
                            lr=learning_rate,
                            alpha=alpha,
                            eps=eps,
                            weight_decay=weight_decay,
                            momentum=rmsmom)
    # initialize training arguments
    start_epoch = 0
    start_i = 0
    bestloss = np.inf
    best_epoch = 0
    best_iter = 0
    training_losses = []
    validation_losses = []
    trainCost = []
    devSDR, devSIR, devSAR = [], [], []
    params_num = 0
    best_devSDR = 0.
    # perform the loading if we need to do it
    if load_path:
        if os.path.isfile(load_path):
            print("=> loading checkpoint '{}'".format(load_path))
            checkpoint = torch.load(load_path)
            for k in checkpoint.keys():
                if k not in [
                        'optimizer', 'training_losses', 'validation_losses',
                        'train_reader', 'model_state_dict'
                ]:
                    print("    " + str(k) + ": " + str(checkpoint[k]))
            start_epoch = checkpoint['start_epoch']
            clip = checkpoint['clip']
            schedule = checkpoint['schedule']
            epochs = checkpoint['epochs']
            batch_size = checkpoint['batch_size']
            print_interval = checkpoint['print_interval']
            best_devSDR = checkpoint['bestSDR']
            best_epoch = checkpoint['best_epoch']
            trainCost = checkpoint['training_losses']
            devSDR = checkpoint['devSDR']
            devSIR = checkpoint['devSIR']
            devSAR = checkpoint['devSAR']
            trainset = checkpoint['train_reader']
            devset = checkpoint['dev_reader']
            maskloss = checkpoint['maskloss']
            optimizer.load_state_dict(checkpoint['optimizer'])
            model.load_state_dict(checkpoint['model_state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(load_path))

    m = model.module if isinstance(model, DataParallel) else model

    print("    loss_type:              " + str(maskloss.loss_type))
    if maskloss.loss_type == 'speccosloss':
        print("    real_penalty:           " + str(maskloss.real_penalty))
        print("    imag_penalty:           " + str(maskloss.imag_penalty))
    print("    PIL:                    " + str(maskloss.pil))
    print("    maskloss:               " + str(maskloss.modules))
    print("    optimizer:              " + str(optimizer.__module__))
    print("    upsampling_mode         " + str(m.mode))
    print("    model_type              " + str(model_type))
    print("    weight_init             " + str(m.weight_init))
    print("    init_criterion          " + str(m.init_criterion))
    if isinstance(m, ComplexUNet):
        print("    space                   " + str(m.space))
        print("    blocktype               " + str(m.blocktype))
        print("    nb_copies               " + str(m.nb_copies))
        print("    avg_copies              " + str(m.avg_copies))
        print("    stochastic_drop         " + str(m.stochastic_drop))
        print("    stochdrop_schedule      " + str(m.stochdrop_schedule))
        print("    nb_residual_blocks      " + str(m.nb_residual_blocks))
        print("    dropcopy                " + str(m.dropcopy))
        if m.blocktype in {'dense'}:
            print("    growth_rate             " + str(m.growth_rate))
            if m.space == 'real':
                print("    efficient               " + str(m.efficient))
                print("    bottleneck              " + str(m.bottleneck))
    print("    start_fmaps             " + str(m.start_fmaps))
    print("    learning_rate           " +
          str(optimizer.param_groups[0]['lr']))
    print("    weight_decay            " +
          str(optimizer.param_groups[0]['weight_decay']))
    if isinstance(optimizer, SGD):
        print("    nesterov                " +
              str(optimizer.param_groups[0]['nesterov']))
        print("    momentum                " +
              str(optimizer.param_groups[0]['momentum']))
    elif isinstance(optimizer, RMSprop):
        print("    alpha                   " +
              str(optimizer.param_groups[0]['alpha']))
        print("    rms_momentum            " +
              str(optimizer.param_groups[0]['momentum']))
        print("    eps                     " +
              str(optimizer.param_groups[0]['eps']))
    elif isinstance(optimizer, Adam):
        print("    beta1                   " +
              str(optimizer.param_groups[0]['betas'][0]))
        print("    beta2                   " +
              str(optimizer.param_groups[0]['betas'][1]))
        print("    eps                     " +
              str(optimizer.param_groups[0]['eps']))
    print("    clip:                   " + str(clip))
    print("    schedule                " + str(schedule))
    print("    start_epoch:            " + str(start_epoch))
    print("    epochs:                 " + str(epochs))
    print("    print_interval:         " + str(print_interval))
    print("    batch_size:             " + str(batch_size))
    print("    nb_speakers:            " + str(maskloss.nb_speakers))
    print("    trainset_seed:          " + str(trainset.random_seed))
    print("    model_seed:             " + str(m.seed))

    for param in model.parameters():
        if param.requires_grad:
            params_num += np.prod(param.size())
    print("number of parameters: {}".format(params_num))

    print('...start training')
    for epoch in range(start_epoch, epochs):
        model.train()
        if schedule:
            adjust_learning_rate(optimizer, epoch)
        train_cost, total_train_cost = 0, 0
        print("------ Epoch {} ------".format(epoch))
        train_iters = 0
        for data in train_stream:
            source, target, mask, _ = (torch.FloatTensor(_data).to(device)
                                       for _data in data)
            optimizer.zero_grad()
            loss = 0
            # indexing with None adds a new axis, so source[:, None] has shape
            # (batch_size, 1, feature_size, nb_spectrums)
            output = model(source[:, None])
            #if torch.cuda.is_available():
            #    torch.cuda.empty_cache()
            if not maskloss.pil:
                for i in range(n_sources):
                    if nb_copies is None:
                        loss += maskloss(
                            pred=complex_product(
                                source, output[i], input_type='convolution'
                            ),  #complex_mul(source, output[i]),
                            truth=target[:, i],
                            mask=mask)
                    else:
                        loss += maskloss(pred=output[i],
                                         truth=target[:, i],
                                         mask=mask)
                loss.backward()
            else:
                list_losses = []
                speakers_indices = range(n_sources)
                for speakers_idx in multiset_permutations(speakers_indices):
                    loss = 0
                    j = 0
                    for i in speakers_idx:
                        if nb_copies is None:
                            loss += maskloss(
                                pred=complex_product(
                                    source,
                                    output[j],
                                    input_type='convolution'
                                ),  # complex_mul(source, output[j]),
                                truth=target[:, i],
                                mask=mask)
                        else:
                            loss += maskloss(pred=output[j],
                                             truth=target[:, i],
                                             mask=mask)
                        j += 1
                    list_losses.append(loss)
                loss = torch.min(torch.stack(list_losses))
                loss.backward()
            # gradient clipping
            torch.nn.utils.clip_grad_norm_(
                filter(lambda p: p.requires_grad, model.parameters()), clip)
            optimizer.step()
            #if torch.cuda.is_available():
            #    torch.cuda.empty_cache()
            train_cost += loss.item()
            train_iters += 1
            if train_iters % print_interval == 0:
                print("train loss {} at batch \
                      {}".format(train_cost / print_interval, train_iters))
                train_cost = 0

            total_train_cost += loss.item()

        trainCost.append(total_train_cost / train_iters)
        train_stream = trainset.read(batch=batch_size,
                                     sortseq=False,
                                     normalize=False,
                                     bptt_len=bptt_len)

        print('...start evaluation')
        dev_sdr, dev_sir, dev_sar = 0, 0, 0
        dev_iters = 0
        model.eval()

        with torch.no_grad():
            for idx, data in enumerate(dev_stream):
                source, target, mask, sourcelen = (
                    torch.FloatTensor(_data).to(device) for _data in data)
                output = model(source[:, None])

                #if torch.cuda.is_available():
                #    torch.cuda.empty_cache()
                output_pred = []
                for i in range(n_sources):
                    if nb_copies is None:
                        output_pred.append(
                            complex_product(
                                source, output[i],
                                input_type='convolution').cpu().data.numpy()
                        )  # complex_mul(source, output[i]).cpu().data.numpy())
                    else:
                        output_pred.append(output[i].cpu().data.numpy())

                if (idx + 1) % 10 == 0:
                    sdr, sir, sar = eval_sources(source.cpu().data.numpy(),
                                                 target.cpu().data.numpy(),
                                                 output_pred,
                                                 mask.cpu().data.numpy(),
                                                 sourcelen.cpu().data.numpy())
                    dev_sdr += sdr
                    dev_sir += sir
                    dev_sar += sar
                    print(sdr, sir, sar)
                dev_iters += 1
                #if torch.cuda.is_available():
                #    torch.cuda.empty_cache()

        devSDR.append(dev_sdr / (dev_iters // 10))
        devSIR.append(dev_sir / (dev_iters // 10))
        devSAR.append(dev_sar / (dev_iters // 10))

        print("SDR on valid set {}".format(devSDR[-1]))
        print("SIR on valid set {}".format(devSIR[-1]))
        print("SAR on valid set {}".format(devSAR[-1]))
        print("------- End -------")

        dev_stream = devset.read(batch=batch_size,
                                 sortseq=False,
                                 normalize=False,
                                 bptt_len=bptt_len)
        if len(devSDR) == 1 or devSDR[-1] > best_devSDR:
            is_best = True
            best_devSDR = devSDR[-1]
            best_epoch = epoch
        else:
            is_best = False
        state = {
            'start_epoch': epoch + 1,
            'model_state_dict': model.state_dict(),
            'bestSDR': best_devSDR,
            'optimizer': optimizer.state_dict(),
            'clip': clip,
            'schedule': schedule,
            'epochs': epochs,
            'batch_size': batch_size,
            'print_interval': print_interval,
            'best_epoch': best_epoch,
            'maskloss': maskloss,
            'training_losses': trainCost,
            'devSDR': devSDR,
            'devSIR': devSIR,
            'devSAR': devSAR,
            'train_reader': trainset,
            'dev_reader': devset
        }
        if is_best:
            print("\rSaving Best Model.\r")
            save_checkpoint(state, is_best, save_path=experiment_path)
        else:
            print("\rSaving last Model.\r")
            save_checkpoint(state, False, save_path=experiment_path)
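The PIL branch above computes the loss for every assignment of network outputs to target speakers and backpropagates only the cheapest one. A minimal self-contained sketch of that idea, with a toy mean-squared loss standing in for maskloss (pil_loss and the toy shapes are illustrative, not the script's API):

import torch
from sympy.utilities.iterables import multiset_permutations

def pil_loss(preds, targets):
    # try every assignment of predictions to targets, keep the cheapest
    losses = []
    for perm in multiset_permutations(list(range(len(preds)))):
        losses.append(sum(((preds[j] - targets[i]) ** 2).mean()
                          for j, i in enumerate(perm)))
    return torch.min(torch.stack(losses))

preds = [torch.randn(4), torch.randn(4)]
targets = [preds[1].clone(), preds[0].clone()]  # deliberately swapped
assert pil_loss(preds, targets).item() == 0.0   # the swap is found and matched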
Example #34
0
                                [3, 2, 0, 0, 0], [3, 2, -1, 0, 0],
                                [3, 2, 0, 1, 0], [3, 2, -1, 1, 0],
                                [3, 1, 0, 0, 0], [3, 1, 0, 1, 0],
                                [3, 1, 0, 0, -1], [3, 1, 0, 1, -1],
                                [3, 0, 0, 0, 0], [3, 0, -1, 0, 0],
                                [3, 0, 0, 0, -1], [3, 0, -1, 0, -1]])

# occupancies (there will be 70^4 = 24.01 million of these!)

lengths = [2, 2, 2]

bond_energies = np.array([[0., 1., 3., 5.], [1., 0., 6., 8.], [3., 6., 0., 2.],
                          [5., 8., 2., 0.]])

c0 = np.array([0, 0, 1, 1, 2, 2, 3, 3])
pc0 = np.array(list(multiset_permutations(c0)))
pc0 = pc0.reshape(len(pc0), *lengths)

xc = [list(range(l)) for l in lengths]
clusters = np.array(list(product(xc[0], xc[1], xc[2])))

cls = product(clusters, bond_connectivities)
bonds = []
for cl in cls:
    bonds.append([[cl[1][0], cl[0][0], cl[0][1], cl[0][2]],
                  [
                      cl[1][1], (cl[0][0] + cl[1][2]) % lengths[0],
                      (cl[0][1] + cl[1][3]) % lengths[1],
                      (cl[0][2] + cl[1][4]) % lengths[2]
                  ]])
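multiset_permutations visits each distinct occupancy of c0 exactly once; the expected count is the multinomial coefficient (a quick check, assuming the same eight-site c0):

from math import factorial
from sympy.utilities.iterables import multiset_permutations

n = len(list(multiset_permutations([0, 0, 1, 1, 2, 2, 3, 3])))
assert n == factorial(8) // factorial(2) ** 4  # 8! / (2!)^4 = 2520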
Example #35
0
def test():
    for i in range(1, 7):
        print i
        for p in multiset_permutations([0, 0, 1, 0, 1], i):
            print p
Example #36
0
def test_multiset_permutations():
    assert [''.join(i) for i in (list(multiset_permutations('baby')))] == [
        'abby', 'abyb', 'aybb', 'baby', 'bayb', 'bbay', 'bbya', 'byab',
        'byba', 'yabb', 'ybab', 'ybba']