Example 1
 def play(self, login, password):
     game_controller = GameController()
     game_controller.load_board()
     board = self.get_board()
     # self.board.show()
     solver = Solver(board)
     solver.solve()
Example 2
    def __init__(self, dim, integrator_type, h0, eps, k, g1, g2, alpha, beta,
                 gamma=1.4, xsph_eps=0,
                 kernel=base.CubicSplineKernel, hks=True):

        # solver dimension
        self.dim = dim

        # Hernquist and Katz normalization
        self.hks = hks

        # the SPH kernel to use
        self.kernel = kernel(dim)

        self.defaults = dict(alpha=alpha,
                             beta=beta,
                             gamma=gamma,
                             adke_eps=eps,
                             adke_k=k,
                             adke_h0=h0,
                             g1=g1,
                             g2=g2,
                             xsph_eps=xsph_eps)

        # base class constructor
        Solver.__init__(self, dim, integrator_type)
Example 3
def test_solve_ordering_between_variables():
    solver = Solver()
    x = variable('x')
    y = variable('y')
    t = solver.solve(x < y)
    assert t['x'] is False
    assert t['y'] is True
Example 4
    def compare_text(self, files):
        """
        Compares the original plain text with the decrypted one
        """

        plain_txt = CipherText(files['plain'])
        cipher_txt = CipherText(files['encoded'])
        corpus = CorpusStats(self.corpus_file)
        solver = Solver(cipher_txt, corpus)

        best_solution = solver.solve(corpus)

        def clean(w_list):
            """
            Cleans up white spaces
            """
            return [w.strip(' \r\t') for w in w_list if w.strip(' \r\t')]

        w_enc = clean(cipher_txt.words)
        w_dec = clean(plain_txt.words)

        n_solved = 0.0
        for encoded, actual in zip(w_enc, w_dec):
            decoded = textutil.decode_word(encoded, best_solution)
            if decoded == actual:
                n_solved += 1.0
            else:
                print "Mismatch! Expected: ", actual, "but got: ", decoded

        return n_solved, len(cipher_txt.words)
Example 5
 def setUp(self):
     self.solver_txt = Solver("juego.txt")
     self.solver_csv = Solver("juego.csv")
     self.solver_invalidtxt = Solver("juego1.txt")
     self.solver_invalidcsv = Solver("juego1.csv")
     self.solver_other = Solver("juego.png")
     self.resultpuzzle = '483921657967345821251876493548132976729564138136798245372689514814253769695417382'
Example 6
 def test_is_valid(self):
     puzzle = [
                 [
                     Square(1,1,1,1),
                     Square(4,1,2,1),
                     Square(3,1,3,2),
                     Square(2,1,4,2)
                 ],
                 [
                     Square(3,2,1,1),
                     Square(2,2,2,1),
                     Square(4,2,3,2),
                     Square(1,2,4,2)
                 ],
                 [
                     Square(4,3,1,3),
                     Square(1,3,2,3),
                     Square(2,3,3,4),
                     Square(3,3,4,4)
                 ],
                 [
                     Square(2,4,1,3),
                     Square(3,4,2,3),
                     Square(1,4,3,4),
                     Square(4,4,4,4)
                 ]
             ]
     solver = Solver()
     self.assertTrue(solver.is_valid(puzzle))
Example 7
    def test_is_complete_returns_false_with_blanks(self):
        puzzle = [
                    [
                        Square(None,1,1,1),
                        Square(4,1,2,1),
                        Square(2,1,3,2),
                        Square(3,1,4,2)
                    ],
                    [
                        Square(2,2,1,1),
                        Square(None,2,2,1),
                        Square(None,2,3,2),
                        Square(1,2,4,2)
                    ],
                    [
                        Square(None,3,1,3),
                        Square(None,3,2,3),
                        Square(None,3,3,4),
                        Square(None,3,4,4)
                    ],
                    [
                        Square(3,4,1,3),
                        Square(2,4,2,3),
                        Square(None,4,3,4),
                        Square(None,4,4,4)
                    ]
                ]

        solver = Solver()
        self.assertFalse(solver.is_complete(puzzle))
Example 8
def train(param=PARAMS, sv=SOLVE, small=False):

    sv['name'] = __file__[:-3]  # strip the '.py' extension
    input_var = input('Are you testing now? ')
    
    if 'no' in input_var:
        sv.pop('name')
    else:
        sv['name'] += input_var

    out = get(1) 
    from my_layer import LSTM
    sym = LSTM(e_net.l3_4, 64*64, 1, 64, 64)
    sym = list(sym)
    sym[0] = mx.sym.LogisticRegressionOutput(data=sym[0], name='softmax')
    sym = mx.symbol.Group(list(sym))

    param['eval_data'] = out['val'] 
    param['marks'] = param['e_marks'] = out['marks'] 
    param['ctx'] = mu.gpu(1)

    print(out['train'].label[0][1].shape)
  
    s = Solver(sym, out['train'], sv, **param)
    s.train()
    s.predict()
Example 9
 def test_is_valid_return_false_when_two_of_same_number_in_block(self):
     puzzle = [
                 [
                     Square(1,1,1,1),
                     Square(None,1,2,1),
                     Square(None,1,3,2),
                     Square(None,1,4,2)
                 ],
                 [
                     Square(None,2,1,1),
                     Square(1,2,2,1),
                     Square(None,2,3,2),
                     Square(None,2,4,2)
                 ],
                 [
                     Square(None,3,1,3),
                     Square(None,3,2,3),
                     Square(None,3,3,4),
                     Square(None,3,4,4)
                 ],
                 [
                     Square(None,4,1,3),
                     Square(None,4,2,3),
                     Square(None,4,3,4),
                     Square(None,4,4,4)
                 ]
             ]
     solver = Solver()
     self.assertFalse(solver.is_valid(puzzle))
Example 10
 def test_2x1_backtracking(self):
     solver = Solver(""" 2x1:.12
                         . .
                         ---
                         . . """)
     solutions = solver.solve()[0]
     self.assertEqual(2, len(solutions))
Example 11
def csp_sdk(folder, instance):

    fo = open("problems/%d/%d.sd" % (folder, instance), "r")
    sys.setrecursionlimit(1000000)
    sudoku = Sudoku()

    for line in fo:
        l = line.split()
        tmp = []
        for i in l:
            tmp.append(int(i))
        if l:
            sudoku.add_row(tmp)

    fo.close()

    starter = sudoku.rst_cst_var()
    csp = Solver(sudoku, starter)
    result = csp.game()

    for l in result.sudoku:
        print(l)

    del sudoku

    print("\n")
    return csp.steps
Example 12
 def test_3x3_trivial(self):
     solver = Solver("""
                     ..6|..9|47.
                     .8.|.6.|..5
                     .47|..5|..1
                     -----------
                     2..|...|.59
                     ...|9.3|...
                     .7.|...|..3
                     -----------
                     8..|3..|12.
                     7..|.2.|.9.
                     .21|6..|3..
                     """)
     solutions = solver.solve()[0]
     self.assertEqual(1, len(solutions))
     solution = solutions[0]
     expected_lines = [
         "3x3:.123456789",
         "356819472",
         "182764935",
         "947235861",
         "238146759",
         "615973284",
         "479582613",
         "894357126",
         "763421598",
         "521698347"
         ]
     expected_solution = "\n".join(expected_lines)
     self.assertEqual(expected_solution, solution.asText())
Example 13
 def test_2x1_trivial(self):
     solver = Solver(""" 2x1:.12
                         1 .
                         ---
                         . . """)
     solutions = solver.solve()[0]
     self.assertEqual(1, len(solutions))
Example 14
 def on_solve_clicked(self):
     """ Solve in domain tab. """
     domain = self.domains_dict[str(self.domains.currentItem().text())]
     if not (self.mesh is not None and domain.dim == 'FILE'):
         if not self.getMesh() and self.mesh is None:
             return
     dim = self.mesh.topology().dim()
     initsize = self.mesh.size(dim)
     trans = [self.selectedTransforms.item(i).obj
               for i in range(self.selectedTransforms.count())]
     bcs = self.getBCList()
     # create solver and adjust parameters
     wTop, wBottom = self.getWeights()
     solver = Solver(self.mesh, bcs, trans, deg=self.femDegree.value(),
                     bcLast=self.useTransformed.isChecked(),
                     method=str(self.femType.currentText()),
                     wTop=wTop, wBottom=wBottom)
     solver.refineTo(int(self.meshSize.text()),
                     self.meshLimit.currentText() == 'at most',
                     self.refine.currentText() == 'long edge')
     if self.solveType.currentIndex() == 0:
         solver.solveFor(self.solveNumber.value(), None, False)
     else:
         solver.solveFor(self.solveNumber.value(),
                         float(self.targetValue.text()), False)
     # get ready for pickling
     solver.removeMesh()
     longcalc = LongCalculation(solver, [], pickle_solutions, "Solving")
     code = longcalc.exec_()
     if not code:
         # worker failed
         longcalc.cleanUp()
         self.stats.appendPlainText("Solver failed!\n\n")
         return
     results = longcalc.res
     eigv, eigf = results[:2]
     for i in range(len(eigf)):
         u = solver.newFunction()
         u.vector()[:] = eigf[i]
         eigf[i] = u
     finalsize = solver.finalsize
     sol = SolutionTab(dim)
     sol.data = {'geometry': results[2]}
     self.fillTabData(sol.data, trans, bcs, str(initsize), str(finalsize),
                      solver.extraRefine)
     sol.formatData()
     domain = self.domains.currentItem().text()
     self.solutionTabs.addTab(sol, domain)
     for i, [e, u] in enumerate(zip(eigv, eigf)):
         if abs(e) < 1E-9:
             e = 0.0
         new = QListWidgetItem(str(i+1)+': '+str(e))
         new.eigenvalue = e
         new.eigenfunction = u
         sol.eigList.addItem(new)
     self.tabs.tabBar().setCurrentIndex(4)
     self.solutionTabs.tabBar().setCurrentIndex(self.solutionTabs.count()-1)
     sol.setFocus(True)
     self.stats.appendPlainText("Solutions found.\n\n")
Example 15
def test_equalities_give_pseudo_boolean_constraints(ls, m):
    solver = Solver()
    variables = [variable(i) for i in range(len(ls))]
    objective = sum(l * v for l, v in zip(ls, variables))
    constraint = solver.compile(objective == m)
    pseudo_boolean = solver.builder.pseudo_boolean_constraint(
        [(l, solver.compile(v)) for l, v in zip(ls, variables)], m, m)
    assert constraint == pseudo_boolean
Example 16
 def __init__(self, dst_point, vertex_rect):
   Solver.__init__(self, dst_point, vertex_rect)
   self.vertex_rect = self.neighbor_lst
   # init wgt_lst as [0.0, 0.0, 0.0, 0.0]
   self.wgt_lst.append(0.0)
   self.wgt_lst.append(0.0)
   self.wgt_lst.append(0.0)
   self.wgt_lst.append(0.0)
Example 17
def main():
    """
    Entry point into the decryption process
    """

    # If the corpus and encoded file paths are not provided in the
    # command line arguments, pick the default files
    if len(sys.argv) < 3:
        print """
        Assuming default paths for the corpus and encrypted files:
        ./corpus-en.txt' and ./encoded-en.txt.

        If they are located somewhere else, please run the script
        with command-line arguments specifying the file locations.
        For example:

        python ./decode.py ./data/corpus-en.txt ./data/encoded-en.txt
        """

        corpus_file = 'corpus-en.txt'
        encrypted_file = 'encoded-en.txt'
    else:
        corpus_file, encrypted_file = sys.argv[1], sys.argv[2]

    # Read the encoded file and the corpus
    cipher_txt = CipherText(encrypted_file)
    corpus = CorpusStats(corpus_file)
    solver = Solver(cipher_txt, corpus)
    # Compute the solution
    best_solution = solver.solve(corpus)

    print """
    Writing the decrypted text to file:
    ./decoded.txt

    the best solution key to file:
    ./decryption_cipher.txt

    original encryption key to file:
    ./encryption_cipher.txt
    """

    # Write the solutions
    file_decrypt_cipher = open("decryption_cipher.txt", 'w')
    file_encrypt_cipher = open("encryption_cipher.txt", 'w')
    for char in textutil.A2Z:
        if char in best_solution:
            file_decrypt_cipher.write(
                char + ' -> ' + best_solution[char] + '\n')
            file_encrypt_cipher.write(
                best_solution[char] + ' -> ' + char + '\n')
        else:
            file_decrypt_cipher.write(char + ' -> ?' + '\n')
            file_encrypt_cipher.write('? -> ' + char + '\n')

    file_decrypt_cipher.close()
    file_encrypt_cipher.close()
    cipher_txt.decode(best_solution, 'decoded.txt')
Example 18
def main():
    input_file = sys.argv[1] if len(sys.argv) == 2 else 'input.txt'
    s = Solver(input_file)
    winning_state = s.solve()
    solution = [ winning_state ]
    while solution[0].prev_state is not None:
        solution.insert(0, solution[0].prev_state)
    p = Printer()
    p.print_solution(solution, 4)
Example 19
def test_upper_bound_gives_pseudo_boolean_constraint(ls, m):
    solver = Solver()
    variables = [variable(i) for i in range(len(ls))]
    objective = sum(l * v for l, v in zip(ls, variables))
    constraint = solver.compile(objective <= m)
    pseudo_boolean = solver.builder.pseudo_boolean_constraint(
        [(l, solver.compile(v)) for l, v in zip(ls, variables)],
        -sum(map(abs, ls)), m)
    assert constraint == pseudo_boolean
Example 20
def test_intervals_give_pseudo_boolean_constraints(ls, m, n):
    assume(m <= n)
    solver = Solver()
    variables = [variable(i) for i in range(len(ls))]
    objective = sum(l * v for l, v in zip(ls, variables))
    constraint = solver.compile((objective >= m) & (objective <= n))
    pseudo_boolean = solver.builder.pseudo_boolean_constraint(
        [(l, solver.compile(v)) for l, v in zip(ls, variables)], m, n)
    assert constraint == pseudo_boolean
Example 21
def test_linear_comparisons():
    solver = Solver()
    x = variable('x')
    y = variable('y')
    t = solver.solve((x + 9 * y) == 10)
    assert t['x'] is True and t['y'] is True

    s = solver.solve((x + 9 * y) != 10)
    assert not (s['x'] and s['y'])
Example 22
    def new_test():
        S = Solver(parameter, EF, initial_state, 1e-3)
        print('going to simulate', S.total_period(), 'total periods.')
#        S.main_control()
        S.main_control_matrix()
        print(S.matrix_no_field)
        print(S.period_matrix)  # one period
        print(np.linalg.norm(S.matrix_no_field - S.period_matrix))
        return S
Example 23
def solve(sl: solver.Solver):
    """These will run on separate thread"""
    global solution
    s = 0
    while s != -1:
        s = sl.find_next_solution()
        if s != -1:
            print('Solution#', s)
            solution = sl.get_grid()
    print('end')
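
The docstring says solve() is meant to run on its own worker thread. A minimal usage sketch, assuming Solver takes no constructor arguments here (the snippet does not show its construction) and that the caller reads the global `solution` once the worker finishes:

import threading
import solver

sl = solver.Solver()
worker = threading.Thread(target=solve, args=(sl,), daemon=True)
worker.start()
worker.join()    # wait until find_next_solution() returns -1
print(solution)  # last grid stored by the worker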
Example 24
 def get_move(self, state):
     """Returns a valid move from the computer player."""
     solver = Solver(self.label)
     print('Player', self.label, "'s turn.")
     try:
         move, ret_state, score = solver.minimax(state)
         print('Bot plays:', move, 'with value', score, os.linesep)
         return move
     except Exception as e:
         print('Bot Error:', e)
Example 25
def solver(heuri, tab):
    tam_tabuleiro = len(tab[0])
    print('Initial board')
    print(Tabuleiro(tab))
    c1 = cpu_time()
    s = Solver(n=tam_tabuleiro, tabini=tab, heuristica=eval(heuri))
    n_mov, sol = s.magic()
    cf = cpu_time() - c1
    print("Number of moves, time")
    print(n_mov, cf)
Example 26
def main(size, path):
    checker = Checker(size)
    reader = Reader(checker, size)
    data = reader.read("data\\" + path)
    solver = Solver(checker, size)

    try:
        return solver.solve(data)
    except Exception as e:
        return str(e)
Example 27
    def test_2x2_without_backtracking(self):
        generator = self.create_generator(0, "2x2:.1234")
        puzzles, limit_reached = generator.solve()
        self.assertEqual(1, len(puzzles))
        puzzle = puzzles[0]

        solver = Solver(puzzle.asText())
        solutions, limit_reached = solver.solve()
        self.assertEqual(1, len(solutions))
        self.assertEqual(0, solutions[0].backtrack_count)
Example 28
def worker_solve(solveable, mutateable, log):
    s = Solver()
    while True:
        try:
            f = solveable.get(block=False)
            s.solve(f)
            log.put("Solver: solved   %s" % f.__unicode__())
            mutateable.put(f)
        except Empty:
            time.sleep(0.1)
Example 29
    def __init__(self, dim, integrator_type=None):

        self.dim = dim

        if integrator_type is None:
            integrator_type = GSPHIntegrator

        # base class constructor
        Solver.__init__(self, dim, integrator_type)

        self.default_kernel = base.GaussianKernel(dim)
Example 30
def main():
    args = parse_args()
    f = open(args.puzzle)

    g = Grid()
    s = Solver(g)
    s.populate(puzzle_start_input(f.readlines()))
    print(g)
    s.solve()
    print(g)
Example 31
import sys

sys.path.extend(['./'])

import os

os.environ["CUDA_VISIBLE_DEVICES"] = '0'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

import tensorflow as tf

tf.device('/gpu:0')

from solver import Solver

if __name__ == "__main__":
    train_data_path = './dataset/record_data/train_data_record/'
    test_data_path = './dataset/record_data/val_data_record/'
    train_num = 3096
    test_num = 774
    epochs = 10
    batch_size = 32
    N = 1
    solver = Solver()
    solver.train(train_data_path, test_data_path, train_num, test_num, epochs,
                 batch_size, N)
Example 32
small_data = {
    'X_train': data['X_train'][:num_train],
    'y_train': data['y_train'][:num_train],
    'X_val': data['X_val'],
    'y_val': data['y_val'],
}

weight_scale = 1e-2
learning_rate = 1e-4
model = FullyConnectedNet([100, 100],
                          weight_scale=weight_scale,
                          dtype=np.float64)
solver = Solver(model,
                small_data,
                print_every=10,
                num_epochs=20,
                batch_size=25,
                update_rule='sgd',
                optim_config={'learning_rate': learning_rate})
solver.train()

plt.plot(solver.loss_history, 'o')
plt.title('Training loss history')
plt.xlabel('Iteration')
plt.ylabel('Training loss')
plt.show()

# Now try to use a five-layer network with 100 units on each layer to overfit
# 50 training examples. Again you will have to adjust the learning rate and
# weight initialization, but you should be able to achieve 100% training
# accuracy within 20 epochs.

num_train = 50
small_data = {
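    # (The snippet is cut off here. A sketch of the likely continuation,
    # assuming the same cs231n-style FullyConnectedNet/Solver API used above;
    # the weight scale and learning rate are placeholders that need tuning.)
    'X_train': data['X_train'][:num_train],
    'y_train': data['y_train'][:num_train],
    'X_val': data['X_val'],
    'y_val': data['y_val'],
}

weight_scale = 1e-1   # placeholder; tune until the network overfits
learning_rate = 2e-3  # placeholder; tune until the network overfits
model = FullyConnectedNet([100, 100, 100, 100],  # five layers: four hidden + output
                          weight_scale=weight_scale,
                          dtype=np.float64)
solver = Solver(model,
                small_data,
                print_every=10,
                num_epochs=20,
                batch_size=25,
                update_rule='sgd',
                optim_config={'learning_rate': learning_rate})
solver.train()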
Example 33
 def solve_puzzle(self):
     solver = Solver(cube=Cube(self.get_cube_state()))
     solution = solver.solve()
     self.solution.setText(' '.join(solution))
     self.window.repaint()
Example 34
    def forward_analysis(self, node_interested, appended=None):
        nodes_interested = self.backward_slice(node_interested.name, set())
        for node in nodes_interested:
            if "gradient" in node.lower() and "stopgradient" not in node.lower(
            ):
                print("----------Gradients are not interested----------")
                return None

        nodes_interested.sort(
            key=lambda x: self.nodes_in_main_clique_topology[x])
        if appended is not None:
            if "gradient" in appended.name.lower(
            ) and "stopgradient" not in appended.name.lower():
                print("----------Gradients are not interested----------")
                return None
            nodes_interested.append(appended.name)

        pre_check = True
        for son in nodes_interested[:-1]:
            u = self.node_by_name[son]
            try:
                getattr(InferValue, u.op.lower())([], u)
            except AttributeError:
                if u.op.lower() not in ["assert", "nextiteration"]:
                    print(u.op)
                    pre_check = False
            except:
                pass

        if not pre_check:
            raise AttributeError

        for son in nodes_interested[:-1]:
            u = self.node_by_name[son]
            if son in self.node_visited:
                self.write(str(son) + " passed")
                continue

            self.write(son)
            self.node_visited.add(son)
            parents_aps = []
            all_none = True
            for (i, (in_node_name,
                     is_control)) in enumerate(self.graph_backward[son]):
                if not is_control:
                    if in_node_name not in self.node_visited:
                        # there is a loop, and the node is "Merge"
                        assert self.node_by_name[
                            in_node_name].op == "NextIteration"
                        self.node_visited.add(in_node_name)
                        self.node_output[in_node_name].value = Range(
                            name="nextiteration",
                            dtype=self.node_output[in_node_name].dtype)

                    parents_aps.append(self.node_output[in_node_name].index_of(
                        self.edge_index[son][i]))
                    all_none &= parents_aps[-1].has_none()

            temp = None
            temp_array = None
            if all_none and len(parents_aps) > 0:
                warnings.warn("fail to analysis %s due to None" % son,
                              RuntimeWarning)
            else:
                try:
                    temp = getattr(InferValue, u.op.lower())(parents_aps, u)
                except AttributeError:
                    if u.op.lower() in ["assert"]:
                        pass
                    else:
                        raise AttributeError
                except AssertionError:
                    raise AssertionError
                # except:
                #     warnings.warn("fail to analysis %s due to None" % son, RuntimeWarning)
                #     temp = None
                if turn_on_array:
                    try:
                        temp_array = getattr(InferArray,
                                             u.op.lower())(parents_aps, u)
                        flag = True
                        if isinstance(self.node_output[son].dtype, list):
                            for x in self.node_output[son].dtype:
                                if int(x) == 10:
                                    flag = False
                                    break
                        else:
                            flag = int(self.node_output[son].dtype) != 10

                        if not flag:
                            temp_array = None
                    except AttributeError:
                        pass
                    except AssertionError:
                        pass

            if isinstance(temp, tuple):
                self.node_output[son].value = temp[0]
                self.node_output[son].constraints = temp[1]
            else:
                self.node_output[son].value = temp

            if temp_array is not None:
                self.node_output[son].array = temp_array
                temp_constraints = [] if self.node_output[
                    son].constraints is None else [
                        self.node_output[son].constraints
                    ]
                if isinstance(temp_array, list):
                    temp = []
                    for (i, tmp_array) in enumerate(temp_array):
                        if temp_array[i].index_slices is None:
                            temp.append(self.node_output[son].value[i])
                            continue
                        left, right = self.get_left_right(
                            tmp_array.get_possible_values(), son)
                        if left is None:
                            temp.append(self.node_output[son].value[i])
                        elif len(left) == 1:
                            temp.append(Range(left=left[0], right=right[0]))
                        elif len(left) == 2:
                            temp.append(
                                Range(left=z3.If(left[0] < left[1], left[0],
                                                 left[1]),
                                      right=z3.If(right[0] > right[1],
                                                  right[0], right[1])))
                        else:
                            temp.append(
                                Range(name="array_ai",
                                      dtype=self.node_output[son].dtype[i]))
                            temp_constraints += [
                                Solver.min(temp[-1].left, left),
                                Solver.max(temp[-1].right, right)
                            ]
                elif temp_array.index_slices is not None:
                    left, right = self.get_left_right(
                        temp_array.get_possible_values(), son)
                    if left is not None:
                        if len(left) == 1:
                            temp = Range(left=left[0], right=right[0])
                        elif len(left) == 2:
                            temp = Range(left=z3.If(left[0] < left[1], left[0],
                                                    left[1]),
                                         right=z3.If(right[0] > right[1],
                                                     right[0], right[1]))
                        else:
                            temp = Range(name="array_ai",
                                         dtype=self.node_output[son].dtype)
                            temp_constraints += [
                                Solver.min(temp.left, left),
                                Solver.max(temp.right, right)
                            ]

                self.node_output[son].value = temp
                self.node_output[son].constraints = None if len(
                    temp_constraints) == 0 else z3.And(temp_constraints)

            self.write(self.node_output[son])

        ret_constraints = []
        for son in nodes_interested[:-1]:
            if self.node_output[son].constraints is not None:
                ret_constraints.append(self.node_output[son].constraints)
                self.write(self.node_output[son].constraints)
        return z3.And(ret_constraints)
Example 35
from solver import Solver
from architecture.unet import Unet


model = Unet()
solver = Solver(model.build_model(),
                [], [],
                num_train_examples=10,
                num_val_examples=2,
                metric='dice_loss')

print(solver._optim_config)
Example 36
#!pypy3
import sys
import cProfile

from graph import Graph
from grammar import Grammar
from solver import Solver

arg = ['demo/swap.pag.dot', 'demo/VM_Grammar.txt', 'PAG_Matrix', 'Cubic']

for i in range(1, len(sys.argv)):
    arg[i-1] = sys.argv[i]

graph = Graph(arg[0], arg[2])
grammar = Grammar(arg[1])
solver = Solver(arg[3])

cProfile.run('solver.solve(graph, grammar)')
print(f"The result of graph:{arg[0]} for grammar:{arg[1]} in DS:{arg[2]} by {arg[3]} algo has dump in {arg[0]}_solved")

Example 37
    if args.use_wandb:
        import wandb
        wandb.init(project="few-shot-learning", config=args)
        args = wandb.config
        print(args)

    train_dataset = MiniDataset(args.train_csv, args.train_data_dir,
                                args.data_aug)
    val_dataset = MiniDataset(args.val_csv, args.val_data_dir)

    train_loader = DataLoader(train_dataset,
                              num_workers=3,
                              pin_memory=False,
                              worker_init_fn=worker_init_fn,
                              batch_sampler=NShotTaskSampler(
                                  args.train_csv, args.episodes_per_epoch,
                                  args.N_way_train, args.N_shot_train,
                                  args.N_query_train))

    val_loader = DataLoader(val_dataset,
                            batch_size=args.N_way_val *
                            (args.N_query_val + args.N_shot_val),
                            num_workers=3,
                            pin_memory=False,
                            worker_init_fn=worker_init_fn,
                            sampler=GeneratorSampler(args.val_testcase_csv))

    solver = Solver(args, train_loader, val_loader)

    solver.train()
Example 38
        'U_Net', 'R2U_Net', 'AttU_Net', 'R2AttU_Net', 'ResAttU_Net'
]:
    print(
        'ERROR!! model_type should be selected in U_Net/R2U_Net/AttU_Net/R2AttU_Net/ResAttU_Net'
    )
    print('Your input for model_type was %s' % model_type)
    break

# initialize the data loaders
train_loader = get_loader(config, mode='train')

validation_loader = get_loader(config, mode='validation')
test_loader = get_loader(config, mode='test', shuffle=False)

# test model
solver = Solver(config, train_loader, validation_loader, test_loader)
solver.test(which_unet='best', stop_epoch=stop_epoch)

# visualize the prediction
img = config.img_test
GT = config.GT_test
try:
    prediction_path = config.current_prediction_path + 'epoch%d/' % stop_epoch
    prediction = list(np.sort(glob(prediction_path + '*.npy')))
except:
    print('no stop epoch')
    prediction_path = config.current_prediction_path + 'best/'
    prediction = list(np.sort(glob(prediction_path + '*.npy')))

# recover patches
whole_img_predic_path = prediction_path + '/prediction_whole/'
Example 39
from solver import Solver
from graph_builder import GraphBuilder
from node import Node
from generator import Generator  # assumed import: the snippet uses Generator but omits its module

alphas = [0.0005, 0.0004, 0.0003, 0.00025, 0.0005]
mus = list(map(lambda x: x * 30, alphas))

# init states & matrix
g = Generator()
g.init_nodes()
print(len(g.nodes))
matrix = g.get_matrix(alphas + mus)

# show graph with states
b = GraphBuilder(g.nodes)
b.call()

# show system of equations
print('\n'.join(list(g.get_eqs())))

s = Solver(matrix, len(g.nodes))

# export results to csv
# s.export_solution()

# show results in graphic
# s.ps_state(10)

# show reliability rate graphic
s.reliability(10, list(map(lambda n: n.pi, g.true_nodes())))
Example 40
                        default='./models/model_single_sample.pkl')
    parser.add_argument('--test_clf', default=False, action='store_true')
    args = parser.parse_args()
    hps = Hps()
    hps.load(args.hps_path)
    hps_tuple = hps.get_tuple()
    if not args.single:
        dataset = myDataset(args.dataset_path,
                            args.index_path,
                            seg_len=hps_tuple.seg_len)
    else:
        dataset = SingleDataset(args.dataset_path,
                                args.index_path,
                                seg_len=hps_tuple.seg_len,
                                is_h5=args.is_h5)

    data_loader = DataLoader(dataset, 1)

    solver = Solver(hps_tuple, data_loader)

    # if args.test_clf:
    #     solver.probar_clf_bueno('/home/julian/Documentos/PI_JCL/audios/voice_integrador_OLD_JOSE_DANIEL/wav48/p101/p101_001.wav')

    if args.load_model:
        solver.load_model(args.load_model_path)
    if args.train:
        # solver.train(args.output_model_path, args.flag, mode='pretrain_G')
        solver.train(args.output_model_path, args.flag, mode='pretrain_D')
        # solver.train(args.output_model_path, args.flag, mode='train')
        # solver.train(args.output_model_path, args.flag, mode='patchGAN')
Example 41
def main(arglist):
    """
    Test whether the given output file is a valid solution to the given map file.

    This test script uses a 'trapdoor function' approach to comparing your computed values and policy to a reference
    solution without revealing the reference solution - 3 different results are computed based on your values and policy
    and compared to the results computed for the reference solution.

    :param arglist: [map file name]
    """
    if len(arglist) != 1:
        print("Running this file tests whether your code produces an optimal policy for the given map file.")
        print("Usage: tester.py [map file name]")
        return

    input_file = arglist[0]
    game_map = LaserTankMap.process_input_file(input_file)
    solver = Solver(game_map)

    mark = 0

    # do offline computation
    if game_map.method == 'vi':
        if not WINDOWS and not DEBUG_MODE:
            signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(game_map.time_limit + 1)
        try:
            solver.run_value_iteration()
        except TimeOutException:
            print("/!\\ Ran overtime during run_value_iteration( )")
            sys.exit(OVERTIME)
        except:
            traceback.print_exc()
            print("/!\\ Crash occurred during run_value_iteration( )")
            sys.exit(CRASH)
        if not WINDOWS and not DEBUG_MODE:
            signal.alarm(0)
    elif game_map.method == 'pi':
        if not WINDOWS and not DEBUG_MODE:
            signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(game_map.time_limit + 1)
        try:
            solver.run_policy_iteration()
        except TimeOutException:
            print("/!\\ Ran overtime during run_policy_iteration( )")
            sys.exit(OVERTIME)
        except:
            traceback.print_exc()
            print("/!\\ Crash occurred during run_policy_iteration( )")
            sys.exit(CRASH)
        if not WINDOWS and not DEBUG_MODE:
            signal.alarm(0)

    # simulate an episode (using de-randomised transitions) and compare total reward to benchmark
    total_reward = 0
    state = game_map.make_clone()
    seed = game_map.initial_seed
    for i in range(int((game_map.benchmark / game_map.move_cost) * 2)):
        new_seed = seed + 1
        if not WINDOWS and not DEBUG_MODE:
            signal.signal(signal.SIGALRM, timeout_handler)
            if game_map.method == 'mcts':
                signal.alarm(game_map.time_limit + 1)
            else:
                signal.alarm(1)
        try:
            if game_map.method == 'mcts':
                action = solver.get_mcts_policy(state)
            else:
                action = solver.get_offline_policy(state)
        except TimeOutException:
            if game_map.method == 'mcts':
                print("/!\\ Ran overtime during get_mcts_policy( )")
            else:
                print("/!\\ Ran overtime during get_offline_policy( )")
            sys.exit(mark)
        except:
            traceback.print_exc()
            if game_map.method == 'mcts':
                print("/!\\ get_mcts_policy( ) caused crash during evaluation")
            else:
                print("/!\\ get_offline_policy( ) caused crash during evaluation")
            sys.exit(mark)
        r = state.apply_move(action, new_seed)
        total_reward += r
        if r == game_map.goal_reward or r == game_map.game_over_cost:
            break
        seed = new_seed

    # compute score based on how close episode reward is to optimum
    print(f"Episode Reward = {str(total_reward)}, Benchmark = {str(game_map.benchmark)}")
    mark = 10
    below = 0
    for i in range(1, 11):
        if total_reward > (game_map.benchmark * (1 + (i / 20))):
            break
        else:
            mark -= 1
            below += 1

    if below == 0:
        print("Testcase passed, policy optimum")
    elif mark > 0:
        print(f"Testcase passed, {below} points below optimum")
    sys.exit(mark)
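
The 'trapdoor function' approach described in the docstring can be illustrated with a short sketch: only quantities derived from the candidate values and policy are compared, so the reference solution is never exposed. All names below are hypothetical, not the actual tester internals:

import hashlib

def trapdoor_results(values, policy, probe_states):
    # Easy to compute forward from a candidate solution, but the reference
    # values/policy cannot be reconstructed from the published results.
    value_sum = round(sum(values[s] for s in probe_states), 4)
    policy_digest = hashlib.sha256(
        ''.join(str(policy[s]) for s in probe_states).encode()).hexdigest()
    return value_sum, policy_digest

# Grading would compare trapdoor_results(student_values, student_policy, probes)
# against the tuple pre-computed from the reference solution.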
Example 42
from __future__ import print_function
import sys
import numpy as np
from solver import Solver

error_values = []

def grid_search():
	for lambda_ in np.linspace(0.70,0.99,30):
		for alpha in np.linspace(0.01, 0.055,30):
			error = solver.td_lambda(lambda_=lambda_, alpha=alpha)
			if error < 0.0005:
				print('----------------lambda = {}, alpha= {}---------------------'.format(lambda_ ,alpha))
				print(error)

solver = Solver(sys.argv[1], sys.argv[-1])

solver.monte_carlo_learning()

# grid_search()
Example 43
import os
from pathlib import Path
import numpy as np
import tensorflow as tf

tf.device('/gpu:0')

from solver import Solver
from gen_tfrecord_dataset import get_data

if __name__ == '__main__':

    model_path = './pb/stgcn.pb'
    data_path = './dataset/joint_data'

    solver = Solver()
    frame = solver.num_frames

    # Load the dataset
    datac = list()
    if not Path(data_path).exists():
        print('Data file does not exist')
    for root, dirs, files in os.walk(data_path):
        for f in files:
            filename = os.path.join(root, f)
            print(filename)
            data = get_data(filename, frame)
            datac.append(data)
    data = np.array(datac)

    # # txt reading test
Example 44
    def __init__(self):
        """Constructs Arm class.
        """
        servo_info = {}
        servo_info['s1'] = {
            'function': 'waist',
            'default_value': 90.0,
            'min_value': 0.0,
            'max_value': 180.0
        }
        servo_info['s2'] = {
            'function': 'shoulder',
            'default_value': 150.0,
            'min_value': 0.0,
            'max_value': 180.0
        }
        servo_info['s3'] = {
            'function': 'elbow',
            'default_value': 35.0,
            'min_value': 0.0,
            'max_value': 180.0
        }
        servo_info['s4'] = {
            'function': 'wrist_roll',
            'default_value': 140.0,
            'min_value': 0.0,
            'max_value': 180.0
        }
        servo_info['s5'] = {
            'function': 'wrist_pitch',
            'default_value': 85.0,
            'min_value': 0.0,
            'max_value': 180.0
        }
        servo_info['s6'] = {
            'function': 'grip',
            'default_value': 80.0,
            'min_value': 0.0,
            'max_value': 180.0
        }
        self._servo_info = servo_info

        segment_info = {}
        segment_info['seg1'] = {
            'base_servo': 's1',
            'segment_length': 1.0,
            'axis_of_rotation': 'Z'
        }
        segment_info['seg2'] = {
            'base_servo': 's2',
            'segment_length': 120.0,
            'axis_of_rotation': 'Y'
        }
        segment_info['seg3'] = {
            'base_servo': 's3',
            'segment_length': 1.0,
            'axis_of_rotation': 'Y'
        }
        segment_info['seg4'] = {
            'base_servo': 's4',
            'segment_length': 1.0,
            'axis_of_rotation': 'X'
        }
        segment_info['seg5'] = {
            'base_servo': 's5',
            'segment_length': 1.0,
            'axis_of_rotation': 'Y'
        }
        self._segment_info = segment_info

        current_angles = {}
        current_angles['s1'] = 0.0
        current_angles['s2'] = 0.0
        current_angles['s3'] = 0.0
        current_angles['s4'] = 0.0
        current_angles['s5'] = 0.0
        current_angles['s6'] = 0.0
        self._current_angles = current_angles

        self._servo_speed = 1.0
        self._solver = Solver(servo_info, segment_info)
        self._kit = ServoKit(channels=16)
        self.configure_board()
Example 45
class Prog:
    def __init__(self):
        self.board = Board()
        self.matcher = WordMatcher()
        self.solver = Solver()
        self.thread = None
        self.stopping = True
        self.stopped = True
        self.loaded = False

        self.cb_progress = None
        self.cb_options = None
        self.cb_intermediate = None

    def attach_interface(self, cb_progress, cb_options):
        self.cb_progress = cb_progress
        self.cb_options = cb_options

    def launch_load(self):
        self._launch(target=self._load)

    def launch_compute(self, letters):
        if not self.loaded:
            return
        self._launch(target=lambda: self._compute(letters))

    def _launch(self, target):
        if self.thread is not None:
            self.stop()
        self.stopping = False
        self.stopped = False

        def wrapped():
            target()
            self.stopped = True

        self.thread = threading.Thread(target=wrapped)
        self.thread.start()

    def stop(self):
        self.stopping = True
        if self.thread is not None:
            self.thread.join()

    def try_stop_compute(self):
        self.stopping = True
        if self.stopped:
            self.thread.join()
            self.thread = None
            return True
        return False

    def _load(self):
        def cb_stop():
            return self.stopping

        with open("dico.txt") as file:
            W = file.readlines()
        W = [w.strip().lower() for w in W]
        #W = W[:len(W)//8]
        self.matcher.set_dict(W, self.cb_progress, cb_stop)
        if self.cb_progress is not None:
            self.cb_progress(100, 100)
        self.loaded = True
        print('complete')

    def _compute(self, letters):
        if self.cb_options is None:
            return
        if self.cb_progress is not None:
            self.cb_progress(-1, 30)

        def cb_stop():
            return self.stopping

        def cb_intermediate(opt):
            self.cb_intermediate((opt, self.board.get_score(opt)))

        self.cb_options([])

        opts = self.solver.get_options(self.board, self.matcher, letters,
                                       lambda: self.cb_progress(-1, 1),
                                       cb_stop, cb_intermediate)
        pairs = [(op, self.board.get_score(op)) for op in opts]
        self.cb_options(pairs)
        if self.cb_progress is not None:
            self.cb_progress(100, 100)
Example 46
from npuzzle import NPuzzle
from solver import Solver

if __name__ == '__main__':
    npuzzle = NPuzzle(5)
    npuzzle.embaralhar(35)
    npuzzle.print_puzzle()
    Solver(npuzzle, 'BFS').solve()
    Solver(npuzzle, 'IDFS').solve()
    Solver(npuzzle, 'UCS').solve()
    Solver(npuzzle, 'A* 0').solve()  # full_manhattan
    Solver(npuzzle, 'A* 1').solve()  # manhattan
Example 47
class TrainVal:
    def __init__(self, config, fold, train_labels_number):
        """
        Args:
            config: configuration parameters
            fold: int, index of the current fold
            train_labels_number: list, per-class sample counts for this fold,
                i.e. [number_class0, number_class1, ...]
        """
        self.config = config
        self.fold = fold
        self.epoch = config.epoch
        self.num_classes = config.num_classes
        self.lr_scheduler = config.lr_scheduler
        self.cut_mix = config.cut_mix
        self.beta = config.beta
        self.cutmix_prob = config.cutmix_prob
        self.train_url = config.train_url
        self.bucket_name = config.bucket_name

        self.image_size = config.image_size
        self.multi_scale = config.multi_scale
        self.multi_scale_size = config.multi_scale_size
        self.multi_scale_interval = config.multi_scale_interval
        if self.cut_mix:
            print('Using cut mix.')
        if self.multi_scale:
            print('Using multi scale training.')
        print('USE LOSS: {}'.format(config.loss_name))

        # Copy the pre-trained weights
        print("=> using pre-trained model '{}'".format(config.model_type))
        if not mox.file.exists(
                '/home/work/.cache/torch/checkpoints/se_resnext101_32x4d-3b2fe3d8.pth'
        ):
            mox.file.copy(
                os.path.join(self.bucket_name,
                             'model_zoo/se_resnext101_32x4d-3b2fe3d8.pth'),
                '/home/work/.cache/torch/checkpoints/se_resnext101_32x4d-3b2fe3d8.pth'
            )
            print(
                'copy pre-trained model from OBS to: %s success' %
                (os.path.abspath(
                    '/home/work/.cache/torch/checkpoints/se_resnext101_32x4d-3b2fe3d8.pth'
                )))
        else:
            print('use exist pre-trained model at: %s' % (os.path.abspath(
                '/home/work/.cache/torch/checkpoints/se_resnext101_32x4d-3b2fe3d8.pth'
            )))

        # Copy the pre-trained weights
        print("=> using pre-trained model '{}'".format(config.model_type))
        if not mox.file.exists(
                '/home/work/.cache/torch/checkpoints/efficientnet-b5-b6417697.pth'
        ):
            mox.file.copy(
                os.path.join(self.bucket_name,
                             'model_zoo/efficientnet-b5-b6417697.pth'),
                '/home/work/.cache/torch/checkpoints/efficientnet-b5-b6417697.pth'
            )
            print(
                'copy pre-trained model from OBS to: %s success' %
                (os.path.abspath(
                    '/home/work/.cache/torch/checkpoints/efficientnet-b5-b6417697.pth'
                )))
        else:
            print('use exist pre-trained model at: %s' % (os.path.abspath(
                '/home/work/.cache/torch/checkpoints/efficientnet-b5-b6417697.pth'
            )))

        # Build the model
        prepare_model = PrepareModel()
        self.model = prepare_model.create_model(model_type=config.model_type,
                                                classes_num=self.num_classes,
                                                drop_rate=config.drop_rate,
                                                pretrained=True,
                                                bn_to_gn=config.bn_to_gn)
        self.model = torch.nn.DataParallel(self.model).cuda()

        # Create the optimizer
        self.optimizer = prepare_model.create_optimizer(
            config.model_type, self.model, config)

        # Create the learning-rate decay schedule
        self.exp_lr_scheduler = prepare_model.create_lr_scheduler(
            self.lr_scheduler,
            self.optimizer,
            step_size=config.lr_step_size,
            restart_step=config.restart_step,
            multi_step=config.multi_step)

        # Create the loss function
        self.criterion = Loss(config.model_type, config.loss_name,
                              self.num_classes, train_labels_number,
                              config.beta_CB, config.gamma)

        # Instantiate the Solver class that implements the various helper routines
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.solver = Solver(self.model, self.device)
        if config.restore:
            weight_path = os.path.join('checkpoints', config.model_type)
            if config.restore == 'last':
                lists = os.listdir(weight_path)  # all files in the folder
                lists.sort(key=lambda fn: os.path.getmtime(weight_path + '/' +
                                                           fn))  # sort by last modified time
                weight_path = os.path.join(weight_path, lists[-1],
                                           'model_best.pth')
            else:
                weight_path = os.path.join(weight_path, config.restore,
                                           'model_best.pth')
            self.solver.load_checkpoint(weight_path)

        # Initialize logging
        self.writer, self.time_stamp = self.init_log()
        self.model_path = os.path.join(self.config.train_local,
                                       self.config.model_type, self.time_stamp)

        # Initialize the classification metric class
        with open(config.local_data_root + 'label_id_name.json',
                  'r',
                  encoding='utf-8') as json_file:
            self.class_names = list(json.load(json_file).values())
        self.classification_metric = ClassificationMetric(self.class_names,
                                                          self.model_path,
                                                          text_flag=0)

        self.max_accuracy_valid = 0

    def train(self, train_loader, valid_loader):
        """ 完成模型的训练,保存模型与日志
        Args:
            train_loader: 训练数据的DataLoader
            valid_loader: 验证数据的Dataloader
        """
        global_step = 0
        for epoch in range(self.epoch):
            self.model.train()
            epoch += 1
            images_number, epoch_corrects = 0, 0

            tbar = tqdm.tqdm(train_loader)
            image_size = self.image_size
            for i, (_, images, labels) in enumerate(tbar):
                if self.multi_scale:
                    if i % self.multi_scale_interval == 0:
                        image_size = random.choice(self.multi_scale_size)
                    images = multi_scale_transforms(image_size, images)
                if self.cut_mix:
                    # apply CutMix
                    r = np.random.rand(1)
                    if self.beta > 0 and r < self.cutmix_prob:
                        images, labels_a, labels_b, lam = generate_mixed_sample(
                            self.beta, images, labels)
                        labels_predict = self.solver.forward(images)
                        loss = self.solver.cal_loss_cutmix(
                            labels_predict, labels_a, labels_b, lam,
                            self.criterion)
                    else:
                        # forward pass through the network
                        labels_predict = self.solver.forward(images)
                        loss = self.solver.cal_loss(labels_predict, labels,
                                                    self.criterion)
                else:
                    # forward pass through the network
                    labels_predict = self.solver.forward(images)
                    loss = self.solver.cal_loss(labels_predict, labels,
                                                self.criterion)
                self.solver.backword(self.optimizer, loss)

                images_number += images.size(0)
                epoch_corrects += self.model.module.get_classify_result(
                    labels_predict, labels, self.device).sum()
                train_acc_iteration = self.model.module.get_classify_result(
                    labels_predict, labels, self.device).mean()

                # log to TensorBoard, one record per step
                descript = self.criterion.record_loss_iteration(
                    self.writer.add_scalar, global_step + i)
                self.writer.add_scalar('TrainAccIteration',
                                       train_acc_iteration, global_step + i)

                params_groups_lr = str()
                for group_ind, param_group in enumerate(
                        self.optimizer.param_groups):
                    params_groups_lr = params_groups_lr + 'pg_%d' % group_ind + ': %.8f, ' % param_group[
                        'lr']

                descript = '[Train Fold {}][epoch: {}/{}][image_size: {}][Lr :{}][Acc: {:.4f}]'.format(
                    self.fold, epoch, self.epoch, image_size, params_groups_lr,
                    train_acc_iteration) + descript

                # For CyclicLR, step the learning-rate schedule every iteration
                if self.lr_scheduler == 'CyclicLR':
                    self.exp_lr_scheduler.step()
                    self.writer.add_scalar(
                        'Lr', self.optimizer.param_groups[1]['lr'],
                        global_step + i)

                tbar.set_description(desc=descript)

            # write to TensorBoard
            epoch_acc = epoch_corrects / images_number
            self.writer.add_scalar('TrainAccEpoch', epoch_acc, epoch)
            if self.lr_scheduler != 'CyclicLR':
                self.writer.add_scalar('Lr',
                                       self.optimizer.param_groups[1]['lr'],
                                       epoch)
            descript = self.criterion.record_loss_epoch(
                len(train_loader), self.writer.add_scalar, epoch)

            # Print the log info
            print('[Finish epoch: {}/{}][Average Acc: {:.4}]'.format(
                epoch, self.epoch, epoch_acc) + descript)

            # validate the model
            val_accuracy, val_loss, is_best = self.validation(valid_loader)

            # save the parameters
            state = {
                'epoch': epoch,
                'state_dict': self.model.module.state_dict(),
                'max_score': self.max_accuracy_valid
            }
            self.solver.save_checkpoint_online(
                os.path.join(
                    self.model_path,
                    '%s_fold%d.pth' % (self.config.model_type, self.fold)),
                state, is_best, self.bucket_name, config.model_snapshots_name)

            # write to TensorBoard
            self.writer.add_scalar('ValidLoss', val_loss, epoch)
            self.writer.add_scalar('ValidAccuracy', val_accuracy, epoch)

            # apply learning-rate decay after each epoch
            if self.lr_scheduler == 'ReduceLR':
                self.exp_lr_scheduler.step(val_accuracy)
            elif self.lr_scheduler != 'CyclicLR':
                self.exp_lr_scheduler.step()
            global_step += len(train_loader)
        print('BEST ACC:{}'.format(self.max_accuracy_valid))

    def validation(self, valid_loader):
        tbar = tqdm.tqdm(valid_loader)
        self.model.eval()
        labels_predict_all, labels_all = np.empty(shape=(0, )), np.empty(
            shape=(0, ))
        epoch_loss = 0
        with torch.no_grad():
            for i, (_, images, labels) in enumerate(tbar):
                # forward pass through the network
                labels_predict = self.solver.forward(images)
                loss = self.solver.cal_loss(labels_predict, labels,
                                            self.criterion)

                epoch_loss += loss

                # apply softmax, then argmax
                labels_predict = F.softmax(labels_predict, dim=1)
                labels_predict = torch.argmax(labels_predict,
                                              dim=1).detach().cpu().numpy()

                labels_predict_all = np.concatenate(
                    (labels_predict_all, labels_predict))
                labels_all = np.concatenate((labels_all, labels))

                descript = '[Valid][Loss: {:.4f}]'.format(loss)
                tbar.set_description(desc=descript)

            classify_report, my_confusion_matrix, acc_for_each_class, oa, average_accuracy, kappa = \
                self.classification_metric.get_metric(
                    labels_all,
                    labels_predict_all
                )

            if oa > self.max_accuracy_valid:
                is_best = True
                self.max_accuracy_valid = oa
                self.classification_metric.draw_cm_and_save_result(
                    classify_report,
                    my_confusion_matrix,
                    acc_for_each_class,
                    oa,
                    average_accuracy,
                    kappa,
                    font_fname="../font/simhei.ttf")
            else:
                is_best = False

            print('OA:{}, AA:{}, Kappa:{}'.format(oa, average_accuracy, kappa))

            return oa, epoch_loss / len(tbar), is_best

    def init_log(self):
        # Save the config and initialize TensorBoard
        TIMESTAMP = "log-{0:%Y-%m-%dT%H-%M-%S}".format(datetime.datetime.now())
        log_dir = os.path.join(self.config.train_local, self.config.model_type,
                               TIMESTAMP)
        writer = SummaryWriter(log_dir=log_dir)
        with codecs.open(os.path.join(log_dir, 'param.json'), 'w',
                         "utf-8") as json_file:
            json.dump({k: v
                       for k, v in config._get_kwargs()},
                      json_file,
                      ensure_ascii=False)

        seed = int(time.time())
        seed_torch(seed)
        with open(os.path.join(log_dir, 'seed.pkl'), 'wb') as f:
            pickle.dump({'seed': seed}, f, -1)

        return writer, TIMESTAMP
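
cal_loss_cutmix is not shown in this snippet; under the standard CutMix formulation the losses for the two mixed targets are interpolated by the patch-area ratio lam. A sketch under that assumption (the project's actual helper may differ):

def cal_loss_cutmix(self, labels_predict, labels_a, labels_b, lam, criterion):
    # Weight the two target losses by the CutMix mixing ratio `lam`.
    return (lam * criterion(labels_predict, labels_a)
            + (1 - lam) * criterion(labels_predict, labels_b))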
Example 48
def main(config):
    # For fast training
    cudnn.benchmark = True

    # Create directories if not exist
    if not os.path.exists(config.log_path):
        os.makedirs(config.log_path)
    if not os.path.exists(config.model_save_path):
        os.makedirs(config.model_save_path)
    if not os.path.exists(config.sample_path):
        os.makedirs(config.sample_path)
    if not os.path.exists(config.result_path):
        os.makedirs(config.result_path)

    if config.mode == 'train':
        from data_loader import get_loader
        # Data loader
        data_loader = get_loader(config.train_image_src, config.train_image_tgt,
                                 config.batch_size, config.mode, config.mask_w,
                                 config.mask_h)
        data_loader2 = get_loader(config.val_image_src, config.val_image_tgt,
                                  config.batch_size, config.mode, config.mask_w,
                                  config.mask_h)
        # Solver
        solver = Solver(data_loader, data_loader2, config)
        solver.train()
    elif config.mode == 'test':
        from data_loader3 import get_loader
        # Data loader
        data_loader = get_loader(image_src=config.test_image_src,
                                 mode='test',
                                 width=config.mask_w,
                                 height=config.mask_h)
        # Solver
        solver = Solver(data_loader, data_loader, config)
        solver.test()
    elif config.mode == 'evaluate':
        from data_loader4 import get_loader
        # Data loader
        data_loader = get_loader(config.eval_image_src, config.eval_image_tgt,\
                                 mode='evaluate', width=config.mask_w, height=config.mask_h)
        # Solver
        solver = Solver(data_loader, data_loader, config)
        solver.evaluate()
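
# A minimal sketch of an entry point for the `main` above; the flag names
# are assumptions inferred from the attributes accessed in `main` (the data
# path, batch-size and mask-size flags are omitted for brevity):
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, default='train',
                        choices=['train', 'test', 'evaluate'])
    parser.add_argument('--log_path', type=str, default='./logs')
    parser.add_argument('--model_save_path', type=str, default='./models')
    parser.add_argument('--sample_path', type=str, default='./samples')
    parser.add_argument('--result_path', type=str, default='./results')
    config = parser.parse_args()
    main(config)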
Esempio n. 49
0
    def __init__(self, config, fold, train_labels_number):
        """
        Args:
            config: configuration parameters
            fold: int, index of the current fold
            train_labels_number: list, per-class sample counts [number_class0, number_class1, ...] for this fold
        """
        self.config = config
        self.fold = fold
        self.epoch = config.epoch
        self.num_classes = config.num_classes
        self.lr_scheduler = config.lr_scheduler
        self.cut_mix = config.cut_mix
        self.beta = config.beta
        self.cutmix_prob = config.cutmix_prob
        self.train_url = config.train_url
        self.bucket_name = config.bucket_name

        self.image_size = config.image_size
        self.multi_scale = config.multi_scale
        self.multi_scale_size = config.multi_scale_size
        self.multi_scale_interval = config.multi_scale_interval
        if self.cut_mix:
            print('Using cut mix.')
        if self.multi_scale:
            print('Using multi scale training.')
        print('USE LOSS: {}'.format(config.loss_name))

        # Copy the pre-trained weights (SE-ResNeXt101)
        print("=> using pre-trained model '{}'".format(config.model_type))
        if not mox.file.exists(
                '/home/work/.cache/torch/checkpoints/se_resnext101_32x4d-3b2fe3d8.pth'
        ):
            mox.file.copy(
                os.path.join(self.bucket_name,
                             'model_zoo/se_resnext101_32x4d-3b2fe3d8.pth'),
                '/home/work/.cache/torch/checkpoints/se_resnext101_32x4d-3b2fe3d8.pth'
            )
            print(
                'copy pre-trained model from OBS to: %s success' %
                (os.path.abspath(
                    '/home/work/.cache/torch/checkpoints/se_resnext101_32x4d-3b2fe3d8.pth'
                )))
        else:
            print('use exist pre-trained model at: %s' % (os.path.abspath(
                '/home/work/.cache/torch/checkpoints/se_resnext101_32x4d-3b2fe3d8.pth'
            )))

        # Copy the pre-trained weights (EfficientNet-B5)
        print("=> using pre-trained model '{}'".format(config.model_type))
        if not mox.file.exists(
                '/home/work/.cache/torch/checkpoints/efficientnet-b5-b6417697.pth'
        ):
            mox.file.copy(
                os.path.join(self.bucket_name,
                             'model_zoo/efficientnet-b5-b6417697.pth'),
                '/home/work/.cache/torch/checkpoints/efficientnet-b5-b6417697.pth'
            )
            print(
                'copy pre-trained model from OBS to: %s success' %
                (os.path.abspath(
                    '/home/work/.cache/torch/checkpoints/efficientnet-b5-b6417697.pth'
                )))
        else:
            print('use exist pre-trained model at: %s' % (os.path.abspath(
                '/home/work/.cache/torch/checkpoints/efficientnet-b5-b6417697.pth'
            )))

        # Load the model
        prepare_model = PrepareModel()
        self.model = prepare_model.create_model(model_type=config.model_type,
                                                classes_num=self.num_classes,
                                                drop_rate=config.drop_rate,
                                                pretrained=True,
                                                bn_to_gn=config.bn_to_gn)
        self.model = torch.nn.DataParallel(self.model).cuda()

        # Create the optimizer
        self.optimizer = prepare_model.create_optimizer(
            config.model_type, self.model, config)

        # Create the learning-rate decay schedule
        self.exp_lr_scheduler = prepare_model.create_lr_scheduler(
            self.lr_scheduler,
            self.optimizer,
            step_size=config.lr_step_size,
            restart_step=config.restart_step,
            multi_step=config.multi_step)

        # Create the loss function
        self.criterion = Loss(config.model_type, config.loss_name,
                              self.num_classes, train_labels_number,
                              config.beta_CB, config.gamma)

        # Instantiate the Solver class that implements the various helper routines
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.solver = Solver(self.model, self.device)
        if config.restore:
            weight_path = os.path.join('checkpoints', config.model_type)
            if config.restore == 'last':
                lists = os.listdir(weight_path)  # all files in the directory
                lists.sort(key=lambda fn: os.path.getmtime(weight_path + '/' +
                                                           fn))  # sort by last-modified time
                weight_path = os.path.join(weight_path, lists[-1],
                                           'model_best.pth')
            else:
                weight_path = os.path.join(weight_path, config.restore,
                                           'model_best.pth')
            self.solver.load_checkpoint(weight_path)

        # Initialize logging
        self.writer, self.time_stamp = self.init_log()
        self.model_path = os.path.join(self.config.train_local,
                                       self.config.model_type, self.time_stamp)

        # Initialize the classification metric class
        with open(config.local_data_root + 'label_id_name.json',
                  'r',
                  encoding='utf-8') as json_file:
            self.class_names = list(json.load(json_file).values())
        self.classification_metric = ClassificationMetric(self.class_names,
                                                          self.model_path,
                                                          text_flag=0)

        self.max_accuracy_valid = 0
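
# The two weight-copy blocks in the constructor above repeat the same
# OBS-to-local-cache pattern; a small helper like the following (an assumed
# refactoring, not part of the original) makes the pattern explicit:
import os

import moxing as mox  # ModelArts file API, as used above


def ensure_pretrained_weight(bucket_name, weight_name,
                             cache_dir='/home/work/.cache/torch/checkpoints'):
    # Copy a pre-trained weight file from OBS into the local torch cache
    # if it is not already there, and return its local path.
    local_path = os.path.join(cache_dir, weight_name)
    if not mox.file.exists(local_path):
        mox.file.copy(os.path.join(bucket_name, 'model_zoo', weight_name),
                      local_path)
        print('copy pre-trained model from OBS to: %s success' % local_path)
    else:
        print('use exist pre-trained model at: %s' % local_path)
    return local_path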
Esempio n. 50
0
    parser.add_argument('-sampler_label', default='align', type=str)
    parser.add_argument('-processed', action='store_true')
    parser.add_argument('-print_every_step', default=500, type=int)
    parser.add_argument('-valid_every_step', default=10000, type=int)
    parser.add_argument('-save_checkpoints', action='store_true')

    # output options
    parser.add_argument('-pred_dir',
                        default='./pred_dir/',
                        help='prediction dir',
                        dest='pred_dir')
    parser.add_argument('-filename',
                        default='pred.txt',
                        help='prediction file',
                        dest='filename')

    args = parser.parse_args()

    return args


if __name__ == '__main__':
    args = parse()
    solver = Solver(args)

    if args.train:
        solver.train()
    elif args.test:
        solver.test()
Esempio n. 51
0
class TrainVal():
    def __init__(self, config):
        self.model = models.shufflenet_v2_x1_0(pretrained=True)

        # # freeze model parameters
        # for param in self.model.parameters():
        #     param.requires_grad = False

        self.model.fc = nn.Sequential(nn.Linear(1024, config.class_num),
                                      nn.Sigmoid())
        # for param in self.model.fc.parameters():
        #     param.requires_grad = True

        # # model check
        # print(self.model)
        # for name, param in self.model.named_parameters():
        #     if param.requires_grad:
        #         print("requires_grad: True ", name)
        #     else:
        #         print("requires_grad: False ", name)

        self.device = torch.device("cpu")
        if torch.cuda.is_available():
            self.device = torch.device("cuda:%i" % config.device[0])
            self.model = torch.nn.DataParallel(self.model,
                                               device_ids=config.device)
        self.model = self.model.to(self.device)

        self.lr = config.lr
        self.weight_decay = config.weight_decay
        self.epoch = config.epoch
        self.splits = config.n_splits
        self.root = config.root

        self.solver = Solver(self.model, self.device)

        self.criterion = nn.CrossEntropyLoss()

        self.TIME = "{0:%Y-%m-%dT%H-%M-%S}-classify".format(
            datetime.datetime.now())
        self.model_path = os.path.join(config.root, config.save_path,
                                       config.model_name, self.TIME)
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)

        self.max_accuracy_valid = 0
        self.seed = int(time.time())
        # self.seed = 1570421136
        seed_torch(self.seed)

        self.train_transform = transforms.Compose([
            transforms.Resize([256, 256]),
            transforms.RandomCrop(224),
            transforms.RandomRotation(degrees=(-40, 40)),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor()
        ])
        self.test_transform = transforms.Compose(
            [transforms.Resize(224),
             transforms.ToTensor()])

    def train(self, create_data=False):
        if create_data:
            df = pd.read_csv(os.path.join(self.root,
                                          'train info_cwt_coarse.csv'),
                             header=None)
            labels_1dim = np.argmax(np.array(df), axis=1)
            print('<' * 20 + ' Start creating datasets ' + '>' * 20)
            skf = StratifiedKFold(n_splits=self.splits,
                                  shuffle=True,
                                  random_state=55)
            for idx, [train_df_index, val_df_index
                      ] in tqdm(enumerate(skf.split(df, labels_1dim), 1)):
                for i in train_df_index:
                    # Ensure the fold/class directory exists, then copy the image.
                    dst_dir = os.path.join(
                        '/home/Yuanbincheng/project/dislocation_cls/2/4cls',
                        'train_%d/%d' % (idx, labels_1dim[i]))
                    os.makedirs(dst_dir, exist_ok=True)
                    shutil.copy(
                        os.path.join(config.root,
                                     'Ni-coarse-cwt/%s.jpg' % (i + 1)),
                        os.path.join(dst_dir, '%d.jpg' % (i + 1)))
                for i in val_df_index:
                    dst_dir = os.path.join(
                        '/home/Yuanbincheng/project/dislocation_cls/2/4cls',
                        'test_%d/%d' % (idx, labels_1dim[i]))
                    os.makedirs(dst_dir, exist_ok=True)
                    shutil.copy(
                        os.path.join(config.root,
                                     'Ni-coarse-cwt/%s.jpg' % (i + 1)),
                        os.path.join(dst_dir, '%d.jpg' % (i + 1)))
            print('<' * 20 + ' Finish creating datasets ' + '>' * 20)

        optimizer = optim.Adam(filter(lambda p: p.requires_grad,
                                      self.model.module.parameters()),
                               self.lr,
                               weight_decay=self.weight_decay)
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer, 25, gamma=0.99)
        global_step, global_threshold, global_threshold_pop1, global_threshold_pop2, global_threshold_pop3 = 1, 1, 1, 1, 1

        for fold_index in range(self.splits):
            train_dataset = torchvision.datasets.ImageFolder(
                root='4cls/train_%d/' % (fold_index + 1),
                transform=self.train_transform)
            train_loader = DataLoader(train_dataset,
                                      batch_size=config.batch_size,
                                      shuffle=True,
                                      num_workers=config.num_workers)
            val_dataset = torchvision.datasets.ImageFolder(
                root='4cls/test_%d/' % (fold_index + 1),
                transform=self.test_transform)
            val_loader = DataLoader(val_dataset,
                                    batch_size=config.batch_size,
                                    shuffle=False,
                                    num_workers=config.num_workers)
            self.model.train()

            TIMESTAMP = '-fold'.join([self.TIME, str(fold_index)])
            self.writer = SummaryWriter(
                log_dir=os.path.join(self.model_path, TIMESTAMP))
            with codecs.open(
                    os.path.join(self.model_path, TIMESTAMP, TIMESTAMP) +
                    '.json', 'w', "utf-8") as json_file:
                json.dump({k: v
                           for k, v in config._get_kwargs()},
                          json_file,
                          ensure_ascii=False)

            with open(
                    os.path.join(self.model_path, TIMESTAMP, TIMESTAMP) +
                    '.pkl', 'wb') as f:
                pickle.dump({'seed': self.seed}, f, -1)

            for epoch in range(1, self.epoch + 1):
                epoch += self.epoch * fold_index
                epoch_loss, num_correct, num_pred = 0, 0, 0

                tbar = tqdm(train_loader)
                for i, (images, labels) in enumerate(tbar):
                    labels_predict = self.solver.forward(images)
                    loss = self.solver.cal_loss(labels, labels_predict,
                                                self.criterion)
                    epoch_loss += loss.item()
                    self.solver.backword(optimizer, loss)

                    # tmp = (labels_predict > 0.2).float()

                    labels_predictMax, labels_predictIdx = torch.max(
                        labels_predict, 1)
                    labels_predictIdx = labels_predictIdx.cpu()
                    labels_predictMax = labels_predictMax.cpu()
                    correct_idx = labels_predictIdx == labels
                    num_correct += correct_idx.sum().item()
                    num_pred += labels_predictIdx.size(0)
                    # for p, t in zip(labels_predictMax[correct_idx], labels[correct_idx]):
                    for p in labels_predict.cpu()[correct_idx]:
                        self.writer.add_scalar('threshold_pop1', p[0].item(),
                                               global_threshold)
                        self.writer.add_scalar('threshold_pop2', p[1].item(),
                                               global_threshold)
                        self.writer.add_scalar('threshold_pop3', p[2].item(),
                                               global_threshold)
                        self.writer.add_scalar('threshold_pop4', p[3].item(),
                                               global_threshold)
                        global_threshold += 1
                        # if t == 0:
                        #     self.writer.add_scalar('threshold_pop1', p.item(), global_threshold_pop1)
                        #     global_threshold_pop1 += 1
                        # elif t == 1:
                        #     self.writer.add_scalar('threshold_pop2', p.item(), global_threshold_pop2)
                        #     global_threshold_pop2 += 1
                        # elif t == 2:
                        #     self.writer.add_scalar('threshold_pop3', p.item(), global_threshold_pop3)
                        #     global_threshold_pop3 += 1

                    self.writer.add_scalar('train_loss', loss.item(),
                                           global_step + i)
                    params_groups_lr = str()
                    for group_ind, param_group in enumerate(
                            optimizer.param_groups):
                        params_groups_lr = params_groups_lr + 'params_group_%d' % (
                            group_ind) + ': %.12f, ' % (param_group['lr'])
                    descript = "Fold: %d, Train Loss: %.7f, lr: %s" % (
                        fold_index, loss.item(), params_groups_lr)
                    tbar.set_description(desc=descript)

                lr_scheduler.step()
                global_step += len(train_loader)
                precision, recall, f1, val_loss, val_accuracy = self.validation(
                    val_loader)
                print(
                    'Finish Epoch [%d/%d] | Average training Loss: %.7f | Training accuracy: %.4f | Average validation Loss: %.7f | Validation accuracy: %.4f |'
                    % (epoch, self.epoch * config.n_splits,
                       epoch_loss / len(tbar), num_correct / num_pred,
                       val_loss, val_accuracy))

                if val_accuracy > self.max_accuracy_valid:
                    is_best = True
                    self.max_accuracy_valid = val_accuracy
                else:
                    is_best = False

                state = {
                    'epoch': epoch,
                    'state_dict': self.model.module.state_dict(),
                    'max_accuracy_valid': self.max_accuracy_valid,
                }

                self.solver.save_checkpoint(
                    os.path.join(self.model_path, TIMESTAMP,
                                 TIMESTAMP + '.pth'), state, is_best,
                    self.max_accuracy_valid)
                self.writer.add_scalar('valid_loss', val_loss, epoch)
                self.writer.add_scalar('valid_accuracy', val_accuracy, epoch)
                self.writer.add_scalar('valid_class_1_f1', f1[0], epoch)
                self.writer.add_scalar('valid_class_2_f1', f1[1], epoch)
                self.writer.add_scalar('valid_class_3_f1', f1[2], epoch)
                self.writer.add_scalar('valid_class_4_f1', f1[3], epoch)

    def validation(self, valid_loader):
        self.model.eval()
        tbar = tqdm(valid_loader)
        loss_sum, num_correct, num_pred = 0, 0, 0
        y_true, y_pre = [], []

        with torch.no_grad():
            for i, (images, labels) in enumerate(tbar):
                labels_predict = self.solver.forward(images)
                loss = self.solver.cal_loss(labels, labels_predict,
                                            self.criterion)
                loss_sum += loss.item()

                # tmp = (labels_predict > 0.2).float()
                labels_predictIdx = torch.max(labels_predict, 1)[1].cpu()
                num_correct += (labels_predictIdx == labels).sum().item()
                num_pred += labels_predictIdx.size(0)
                y_true.extend(labels.numpy().tolist())
                y_pre.extend(labels_predictIdx.numpy().tolist())

                descript = "Val Loss: {:.7f}".format(loss.item())
                tbar.set_description(desc=descript)
        loss_mean = loss_sum / len(tbar)
        res = confusion_matrix(y_true, y_pre)
        precision = np.array([
            res[i][i] / np.sum(res, axis=0)[i] for i in range(config.class_num)
        ])
        recall = np.array([
            res[i][i] / np.sum(res, axis=1)[i] for i in range(config.class_num)
        ])
        f1 = 2 * precision * recall / (precision + recall)
        for idx, [p, r, f] in enumerate(zip(precision, recall, f1)):
            print(
                "Class_%d_precision: %0.4f | Recall: %0.4f | F1-score: %0.4f |"
                % (idx, p, r, f))
        return precision, recall, f1, loss_mean, num_correct / num_pred
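
# The per-class precision/recall/F1 computed by hand from the confusion
# matrix above can be cross-checked with scikit-learn; a sketch (the only
# assumed difference is sklearn's explicit zero-division handling):
from sklearn.metrics import precision_recall_fscore_support


def prf_per_class(y_true, y_pre, num_classes):
    precision, recall, f1, _ = precision_recall_fscore_support(
        y_true, y_pre, labels=list(range(num_classes)), zero_division=0)
    return precision, recall, f1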
Esempio n. 52
0
from solver import Solver
from data_loader import get_loader
from configs import get_config

if __name__ == '__main__':
    config = get_config()

    data_loader = get_loader(batch_size=config.batch_size,
                             max_size=config.vocab_size,
                             is_train=True,
                             data_dir=config.data_dir)

    solver = Solver(config, data_loader)
    solver.build(is_train=True)
    solver.train()
Esempio n. 53
0
X_train, X_val, y_train, y_val = train_test_split(X_train,
                                                  y_train,
                                                  test_size=0.1)

print(X_train.shape)

data = {
    'X_train': X_train,
    'y_train': y_train,
    'X_val': X_val,
    'y_val': y_val,
    'X_test': X_test,
    'y_test': y_test
}

model = SoftmaxClassifier()
solver = Solver(model,
                data,
                update_rule='sgd',
                optim_config={
                    'learning_rate': 2e-3,
                },
                lr_decay=1,
                num_epochs=1,
                batch_size=50,
                print_every=2)

solver.train()

acc = solver.check_accuracy(X=X_test, y=y_test)
print(acc)
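
# The Solver above follows the CS231n-style training API: `update_rule`
# names an update function in the optim module, `optim_config` holds that
# rule's hyper-parameters, and `lr_decay` multiplies the learning rate after
# each epoch. An equivalent configuration with momentum SGD (assuming the
# codebase's optim module provides 'sgd_momentum') would be:
solver = Solver(model,
                data,
                update_rule='sgd_momentum',
                optim_config={'learning_rate': 2e-3, 'momentum': 0.9},
                lr_decay=0.95,
                num_epochs=5,
                batch_size=50,
                print_every=2)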
Esempio n. 54
0
class Arm(Robot):
    def __init__(self):
        """Constructs Arm class.
        """
        servo_info = {}
        servo_info['s1'] = {
            'function': 'waist',
            'default_value': 90.0,
            'min_value': 0.0,
            'max_value': 180.0
        }
        servo_info['s2'] = {
            'function': 'shoulder',
            'default_value': 150.0,
            'min_value': 0.0,
            'max_value': 180.0
        }
        servo_info['s3'] = {
            'function': 'elbow',
            'default_value': 35.0,
            'min_value': 0.0,
            'max_value': 180.0
        }
        servo_info['s4'] = {
            'function': 'wrist_roll',
            'default_value': 140.0,
            'min_value': 0.0,
            'max_value': 180.0
        }
        servo_info['s5'] = {
            'function': 'wrist_pitch',
            'default_value': 85.0,
            'min_value': 0.0,
            'max_value': 180.0
        }
        servo_info['s6'] = {
            'function': 'grip',
            'default_value': 80.0,
            'min_value': 0.0,
            'max_value': 180.0
        }
        self._servo_info = servo_info

        segment_info = {}
        segment_info['seg1'] = {
            'base_servo': 's1',
            'segment_length': 1.0,
            'axis_of_rotation': 'Z'
        }
        segment_info['seg2'] = {
            'base_servo': 's2',
            'segment_length': 120.0,
            'axis_of_rotation': 'Y'
        }
        segment_info['seg3'] = {
            'base_servo': 's3',
            'segment_length': 1.0,
            'axis_of_rotation': 'Y'
        }
        segment_info['seg4'] = {
            'base_servo': 's4',
            'segment_length': 1.0,
            'axis_of_rotation': 'X'
        }
        segment_info['seg5'] = {
            'base_servo': 's5',
            'segment_length': 1.0,
            'axis_of_rotation': 'Y'
        }
        self._segment_info = segment_info

        current_angles = {}
        current_angles['s1'] = 0.0
        current_angles['s2'] = 0.0
        current_angles['s3'] = 0.0
        current_angles['s4'] = 0.0
        current_angles['s5'] = 0.0
        current_angles['s6'] = 0.0
        self._current_angles = current_angles

        self._servo_speed = 1.0
        self._solver = Solver(servo_info, segment_info)
        self._kit = ServoKit(channels=16)
        self.configure_board()

    def move_to(self, x_pos, y_pos, z_pos, roll=0, pitch=0):
        """Moves the arm to the specified position.

        Calculates and moves the arm so the claw is centered at the
        position (x_pos, y_pos, z_pos).

        Args:
            x_pos {float} -- Final X position of the claw.
            y_pos {float} -- Final Y position of the claw.
            z_pos {float} -- Final Z position of the claw.
            roll {float} -- Final roll angle of the wrist (defaults to 0).
            pitch {float} -- Final pitch angle of the wrist (defaults to 0).
        """
        angles = self._solver.specific_inverse_solve(x_pos, y_pos, z_pos, roll,
                                                     pitch)
        for angle in angles:
            self.set_part(angle, angles[angle]['final_angle'])

    def get_pos(self):
        """Calculates and returns current position of the arm.

        Calculates, from the current positions of the arm servos, the
        current position of the claw, returned as (x, y, z).

        Return:
            A list containing the (x, y, z) position of the claw.
        """
        return self._solver.forward_solve()

    def set_speed(self, ss):
        """Set's the speed at which the servo's move.

        Set's the arm rate of speed at which the servo's move into position.

        Args:
            ss {float} -- Rate of speed on a 1-10 scale: 1 being slowest, 10 being fastest.
        """
        if ss > 1.0 and ss < 10.0:
            self._servo_speed = ss

    def configure_board(self):
        """Sets mapping for Servo ID to Servo Number.
        """
        servo_no = 0
        for servo in self._servo_info:
            self._servo_info[servo]['servo#'] = servo_no
            servo_no += 1

    def set_default_position(self):
        """Loads the default position for the robot arm.

        Sets each servo to its default position found in the servo_info dictionary
        created during class initialization.
        """
        for servo, info in self._servo_info.items():
            self.set_part(servo, info['default_value'])

    def set_part(self, part, value):
        """Moves the specified part.

        Moves the specified part to the given value.

        Arguments:
            part {str} -- item part to move
            value {float} -- value to apply to part
        """
        self._kit.servo[self._servo_info[part]['servo#']].angle = value
        self._current_angles[part] = value
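
# Minimal usage sketch for the Arm class above; the target coordinates are
# illustrative values, not calibrated positions:
if __name__ == '__main__':
    arm = Arm()
    arm.set_default_position()
    arm.set_speed(5.0)
    arm.move_to(10.0, 5.0, 3.0, roll=0, pitch=0)
    print(arm.get_pos())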
Esempio n. 55
0
from solver import Solver, DiagonalSolver
from scipy.stats import ttest_rel
from numpy import std, mean
import sys

d = DiagonalSolver()
s = Solver()

try:
    limit = int(sys.argv[1])
except IndexError:
    limit = 200

print("The length of the constraints formula of the normal solver is " +
      str(len(s.S.xs)))
print(
    "The length of the constraints formula of the solver with added diagonal rule is "
    + str(len(d.S.xs)))

diagonal, normal = list(), list()
n = 0
with open('diagonal.txt') as puzzles:
    print("Running tests on {0} puzzles...".format(limit))
    for puzzle in puzzles:
        if n < limit:
            diagonal.append(d.solve(puzzle)[1])
            normal.append(s.solve(puzzle)[1])
            n += 1
        else:
            break
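
# The snippet above collects the per-puzzle solve times but is cut off
# before it uses the imported `ttest_rel`, `mean` and `std`; a plausible
# continuation (an assumption, not the original code):
t_stat, p_value = ttest_rel(diagonal, normal)
print("diagonal rule: mean {:.4f}s (sd {:.4f})".format(mean(diagonal),
                                                       std(diagonal)))
print("normal solver: mean {:.4f}s (sd {:.4f})".format(mean(normal),
                                                       std(normal)))
print("paired t-test: t = {:.4f}, p = {:.4f}".format(t_stat, p_value))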
Esempio n. 56
0
class TrainVal():
    def __init__(self, config, fold):
        # Load the network model
        self.model_name = config.model_name
        self.model = ClassifyResNet(self.model_name, 4, training=True)
        if torch.cuda.is_available():
            self.model = torch.nn.DataParallel(self.model)
            self.model = self.model.cuda()

        # Load the hyper-parameters
        self.lr = config.lr
        self.weight_decay = config.weight_decay
        self.epoch = config.epoch
        self.fold = fold

        # Instantiate the Solver class that implements the various helper routines
        self.solver = Solver(self.model)

        # Create the loss function
        self.criterion = ClassifyLoss()

        # Create the directory for saving weights
        self.model_path = os.path.join(config.save_path, config.model_name)
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)

        # Save the JSON config and initialize TensorBoard
        TIMESTAMP = "{0:%Y-%m-%dT%H-%M-%S}-{1}-classify".format(
            datetime.datetime.now(), fold)
        self.writer = SummaryWriter(
            log_dir=os.path.join(self.model_path, TIMESTAMP))
        with codecs.open(self.model_path + '/' + TIMESTAMP + '.json', 'w',
                         "utf-8") as json_file:
            json.dump({k: v
                       for k, v in config._get_kwargs()},
                      json_file,
                      ensure_ascii=False)

        self.max_accuracy_valid = 0
        # Set the random seed; keep it fixed so the cross-validation train/val split stays reproducible
        self.seed = int(time.time())
        # self.seed = 1570421136
        seed_torch(self.seed)
        with open(self.model_path + '/' + TIMESTAMP + '.pkl', 'wb') as f:
            pickle.dump({'seed': self.seed}, f, -1)

    def train(self, train_loader, valid_loader):
        ''' Train the model and save the weights and logs.
        Args:
            train_loader: DataLoader for the training data
            valid_loader: DataLoader for the validation data
        '''
        optimizer = optim.Adam(self.model.module.parameters(),
                               self.lr,
                               weight_decay=self.weight_decay)
        lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(
            optimizer, self.epoch + 10)
        global_step = 0

        for epoch in range(self.epoch):
            epoch += 1
            epoch_loss = 0
            self.model.train(True)

            tbar = tqdm.tqdm(train_loader)
            for i, (images, labels) in enumerate(tbar):
                # Forward and backward passes through the network
                labels_predict = self.solver.forward(images)
                loss = self.solver.cal_loss(labels, labels_predict,
                                            self.criterion)
                epoch_loss += loss.item()
                self.solver.backword(optimizer, loss)

                # Log to TensorBoard, one value per step
                self.writer.add_scalar('train_loss', loss.item(),
                                       global_step + i)
                params_groups_lr = str()
                for group_ind, param_group in enumerate(
                        optimizer.param_groups):
                    params_groups_lr = params_groups_lr + 'params_group_%d' % (
                        group_ind) + ': %.12f, ' % (param_group['lr'])
                descript = "Fold: %d, Train Loss: %.7f, lr: %s" % (
                    self.fold, loss.item(), params_groups_lr)
                tbar.set_description(desc=descript)

            # Decay the learning rate after every epoch
            lr_scheduler.step()
            global_step += len(train_loader)

            # Print the log info
            print('Finish Epoch [%d/%d], Average Loss: %.7f' %
                  (epoch, self.epoch, epoch_loss / len(tbar)))

            # Validate the model
            class_neg_accuracy, class_pos_accuracy, class_accuracy, neg_accuracy, pos_accuracy, accuracy, loss_valid = \
                self.validation(valid_loader)

            if accuracy > self.max_accuracy_valid:
                is_best = True
                self.max_accuracy_valid = accuracy
            else:
                is_best = False

            state = {
                'epoch': epoch,
                'state_dict': self.model.module.state_dict(),
                'max_accuracy_valid': self.max_accuracy_valid,
            }

            self.solver.save_checkpoint(
                os.path.join(
                    self.model_path,
                    '%s_classify_fold%d.pth' % (self.model_name, self.fold)),
                state, is_best)
            self.writer.add_scalar('valid_loss', loss_valid, epoch)
            self.writer.add_scalar('valid_accuracy', accuracy, epoch)
            self.writer.add_scalar('valid_class_0_accuracy', class_accuracy[0],
                                   epoch)
            self.writer.add_scalar('valid_class_1_accuracy', class_accuracy[1],
                                   epoch)
            self.writer.add_scalar('valid_class_2_accuracy', class_accuracy[2],
                                   epoch)
            self.writer.add_scalar('valid_class_3_accuracy', class_accuracy[3],
                                   epoch)

    def validation(self, valid_loader):
        ''' Run the validation pass for the model.

        Args:
            valid_loader: DataLoader for the validation data
        '''
        self.model.eval()
        meter = Meter()
        tbar = tqdm.tqdm(valid_loader)
        loss_sum = 0

        with torch.no_grad():
            for i, (images, labels) in enumerate(tbar):
                # Forward pass through the network
                labels_predict = self.solver.forward(images)
                loss = self.solver.cal_loss(labels, labels_predict,
                                            self.criterion)
                loss_sum += loss.item()

                meter.update(labels, labels_predict.cpu())

                descript = "Val Loss: {:.7f}".format(loss.item())
                tbar.set_description(desc=descript)
        loss_mean = loss_sum / len(tbar)

        class_neg_accuracy, class_pos_accuracy, class_accuracy, neg_accuracy, pos_accuracy, accuracy = \
            meter.get_metrics()
        print(
            "Class_0_accuracy: %0.4f | Class_1_accuracy: %0.4f | Class_2_accuracy: %0.4f | Class_3_accuracy: %0.4f | "
            "Negative accuracy: %0.4f | positive accuracy: %0.4f | accuracy: %0.4f"
            % (class_accuracy[0], class_accuracy[1], class_accuracy[2],
               class_accuracy[3], neg_accuracy, pos_accuracy, accuracy))
        return class_neg_accuracy, class_pos_accuracy, class_accuracy, neg_accuracy, pos_accuracy, accuracy, loss_mean
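
# `Solver.save_checkpoint` is not shown in this snippet; a minimal sketch of
# the usual "save latest, copy to best on improvement" pattern it appears to
# implement (an assumption, not the author's code):
import shutil

import torch


def save_checkpoint(path, state, is_best):
    # Always save the latest state; keep a separate copy of the best one.
    torch.save(state, path)
    if is_best:
        shutil.copyfile(path, path.replace('.pth', '_best.pth'))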
Esempio n. 57
0
def gen_samples(params):
    # For fast training
    #cudnn.benchmark = True
    gpu_id = 0
    use_cuda = params['cuda']
    b_sz = params['batch_size']

    g_conv_dim = 64
    d_conv_dim = 64
    c_dim = 5
    c2_dim = 8
    g_repeat_num = 6
    d_repeat_num = 6
    select_attrs = []

    if params['use_same_g']:
        if len(params['use_same_g']) == 1:
            gCV = torch.load(params['use_same_g'][0])
    solvers = []
    configs = []
    for i, mfile in enumerate(params['model']):
        model = torch.load(mfile)
        configs.append(model['arch'])
        configs[-1]['pretrained_model'] = mfile
        configs[-1]['load_encoder'] = 1
        configs[-1]['load_discriminator'] = 0
        configs[-1]['image_size'] = params['image_size']
        if 'g_downsamp_layers' not in configs[-1]:
            configs[-1]['g_downsamp_layers'] = 2
        if 'g_dil_start' not in configs[-1]:
            configs[-1]['g_dil_start'] = 0
            configs[-1]['e_norm_type'] = 'drop'
            configs[-1]['e_ksize'] = 4
        if len(params['withExtMask']) and params['mask_size'] != 32:
            if params['withExtMask'][i]:
                configs[-1]['lowres_mask'] = 0
                configs[-1]['load_encoder'] = 0

        solvers.append(
            Solver(None,
                   None,
                   ParamObject(configs[-1]),
                   mode='test',
                   pretrainedcv=model))
        solvers[-1].G.eval()
        #solvers[-1].D.eval()
        if configs[-1]['train_boxreconst'] > 0 and solvers[-1].E is not None:
            solvers[-1].E.eval()
        if params['use_same_g']:
            solvers[-1].load_pretrained_generator(gCV)

    if len(params['dilateMask']):
        assert (len(params['model']) == len(params['dilateMask']))
        dilateWeightAll = []
        for di in range(len(params['dilateMask'])):
            if params['dilateMask'][di] > 0:
                dilateWeight = torch.ones(
                    (1, 1, params['dilateMask'][di], params['dilateMask'][di]))
                dilateWeight = Variable(dilateWeight,
                                        requires_grad=False).cuda()
            else:
                dilateWeight = None
            dilateWeightAll.append(dilateWeight)
    else:
        dilateWeightAll = [None for i in range(len(params['model']))]

    dataset = get_dataset('',
                          '',
                          params['image_size'],
                          params['image_size'],
                          params['dataset'],
                          params['split'],
                          select_attrs=configs[0]['selected_attrs'],
                          datafile=params['datafile'],
                          bboxLoader=1,
                          bbox_size=params['box_size'],
                          randomrotate=params['randomrotate'],
                          randomscale=params['randomscale'],
                          max_object_size=params['max_object_size'],
                          use_gt_mask=configs[0]['use_gtmask_inp'],
                          n_boxes=params['n_boxes'],
                          onlyrandBoxes=(params['extmask_type'] == 'randbox'))
    #data_iter = DataLoader(targ_split, batch_size=b_sz, shuffle=True, num_workers=8)
    targ_split = dataset  #train if params['split'] == 'train' else valid if params['split'] == 'val' else test
    data_iter = np.random.permutation(len(targ_split))

    if len(params['withExtMask']) and (params['extmask_type'] == 'mask'):
        gt_mask_data = get_dataset(
            '',
            '',
            params['mask_size'],
            params['mask_size'],
            params['dataset']
            if params['extMask_source'] == 'gt' else params['extMask_source'],
            params['split'],
            select_attrs=configs[0]['selected_attrs'],
            bboxLoader=0,
            loadMasks=True)
    if len(params['sort_by']):
        resFiles = [json.load(open(fil, 'r')) for fil in params['sort_by']]
        for i in range(len(resFiles)):
            #if params['sort_score'] not in resFiles[i]['images'][resFiles[i]['images'].keys()[0]]['overall']:
            for k in resFiles[i]['images']:
                img = resFiles[i]['images'][k]
                if 'overall' in resFiles[i]['images'][k]:
                    resFiles[i]['images'][k]['overall'][
                        params['sort_score']] = np.mean([
                            img['perclass'][cls][params['sort_score']]
                            for cls in img['perclass']
                        ])
                else:
                    resFiles[i]['images'][k]['overall'] = {}
                    resFiles[i]['images'][k]['overall'][
                        params['sort_score']] = np.mean([
                            img['perclass'][cls][params['sort_score']]
                            for cls in img['perclass']
                        ])
        idToScore = {
            int(k): resFiles[0]['images'][k]['overall'][params['sort_score']]
            for k in resFiles[0]['images']
        }
        idToScore = OrderedDict(
            reversed(sorted(idToScore.items(), key=lambda t: t[1])))
        cocoIdToindex = {v: i for i, v in enumerate(dataset.valid_ids)}
        data_iter = [cocoIdToindex[k] for k in idToScore]
        dataIt2id = {cocoIdToindex[k]: str(k) for k in idToScore}

    if len(params['show_ids']) > 0:
        cocoIdToindex = {v: i for i, v in enumerate(dataset.valid_ids)}
        data_iter = [cocoIdToindex[k] for k in params['show_ids']]

    print(len(data_iter))

    print('-----------------------------------------')
    print('%s' % (' | '.join(targ_split.selected_attrs)))
    print('-----------------------------------------')

    flatten = lambda l: [item for sublist in l for item in sublist]

    if params['showreconst'] and len(params['names']) > 0:
        params['names'] = flatten([[nm, nm + '-R'] for nm in params['names']])

    #discriminator.load_state_dict(cv['discriminator_state_dict'])
    c_idx = 0
    np.set_printoptions(precision=2)
    padimg = np.zeros((params['image_size'], 5, 3), dtype=np.uint8)
    padimg[:, :, :] = 128
    if params['showperceptionloss']:
        vggLoss = VGGLoss(network='squeeze')
    cimg_cnt = 0
    mean_hist = [[], [], []]
    max_hist = [[], [], []]
    lengths_hist = [[], [], []]
    if len(params['n_iter']) == 0:
        params['n_iter'] = [0] * len(params['model'])
    while True:
        cimg_cnt += 1
        #import ipdb; ipdb.set_trace()
        idx = data_iter[c_idx]
        x, real_label, boxImg, boxlabel, mask, bbox, curCls = targ_split[
            data_iter[c_idx]]
        fp = [targ_split.getfilename(data_iter[c_idx])]

        #if configs[0]['use_gtmask_inp']:
        #    mask = mask[1:,::]

        x = x[None, ::]
        boxImg = boxImg[None, ::]
        mask = mask[None, ::]
        boxlabel = boxlabel[None, ::]
        real_label = real_label[None, ::]

        x, boxImg, mask, boxlabel = solvers[0].to_var(
            x, volatile=True), solvers[0].to_var(
                boxImg, volatile=True), solvers[0].to_var(
                    mask, volatile=True), solvers[0].to_var(boxlabel,
                                                            volatile=True)
        real_label = solvers[0].to_var(real_label, volatile=True)

        fake_image_list = [x]
        if params['showmask']:
            mask_image_list = [x - x]
        else:
            fake_image_list.append(x * (1 - mask) + mask)

        deformList = [[], []]
        if len(real_label[0, :].nonzero()):
            #rand_idx = random.choice(real_label[0,:].nonzero()).data[0]
            rand_idx = curCls[0]
            print(configs[0]['selected_attrs'][rand_idx])
            if len(params['withExtMask']):
                cocoid = targ_split.getcocoid(idx)
                if params['extmask_type'] == 'mask':
                    mask = solvers[0].to_var(gt_mask_data.getbyIdAndclass(
                        cocoid,
                        configs[0]['selected_attrs'][rand_idx])[None, ::],
                                             volatile=True)
                elif params['extmask_type'] == 'box':
                    mask = solvers[0].to_var(dataset.getGTMaskInp(
                        idx,
                        configs[0]['selected_attrs'][rand_idx],
                        mask_type=2)[None, ::],
                                             volatile=True)
                elif params['extmask_type'] == 'randbox':
                    # Nothing to do here, mask is already set to random boxes
                    pass
        else:
            rand_idx = curCls[0]
        if params['showdiff']:
            diff_image_list = [x - x] if params['showmask'] else [x - x, x - x]
        for i in range(len(params['model'])):
            if configs[i]['use_gtmask_inp']:
                mask = solvers[0].to_var(targ_split.getGTMaskInp(
                    idx,
                    configs[0]['selected_attrs'][rand_idx],
                    mask_type=configs[i]['use_gtmask_inp'])[None, ::],
                                         volatile=True)
            if len(params['withExtMask']) or params['no_maskgen']:
                withGTMask = True if params['no_maskgen'] else params[
                    'withExtMask'][i]
            else:
                withGTMask = False

            if configs[i]['train_boxreconst'] == 3:
                mask_target = torch.zeros_like(real_label)
                if len(real_label[0, :].nonzero()):
                    mask_target[0, rand_idx] = 1
                # This variable informs to the mask generator, which class to generate for
                boxlabelInp = boxlabel

            elif configs[i]['train_boxreconst'] == 2:
                boxlabelfake = torch.zeros_like(boxlabel)
                if configs[i]['use_box_label'] == 2:
                    boxlabelInp = torch.cat([boxlabel, boxlabelfake], dim=1)
                    if params['showreconst']:
                        boxlabelInpRec = torch.cat([boxlabelfake, boxlabel],
                                                   dim=1)
                mask_target = real_label
            else:
                boxlabelInp = boxlabel
                mask_target = real_label
            if params['showdeform']:
                img, maskOut, deform = solvers[i].forward_generator(
                    x,
                    boxImg=boxImg,
                    mask=mask,
                    imagelabel=mask_target,
                    boxlabel=boxlabelInp,
                    get_feat=True,
                    mask_threshold=params['mask_threshold'],
                    withGTMask=withGTMask,
                    dilate=dilateWeightAll[i],
                    n_iter=params['n_iter'][i])
                fake_image_list.append(img)
                deformList.append(deform)
            else:
                img, maskOut = solvers[i].forward_generator(
                    x,
                    boxImg=boxImg,
                    mask=mask,
                    imagelabel=mask_target,
                    boxlabel=boxlabelInp,
                    mask_threshold=params['mask_threshold'],
                    withGTMask=withGTMask,
                    dilate=dilateWeightAll[i],
                    n_iter=params['n_iter'][i])
                fake_image_list.append(img)
            if params['showmask']:
                mask_image_list.append(
                    solvers[i].getImageSizeMask(maskOut)[:, [0, 0, 0], ::])
            if params['showdiff']:
                diff_image_list.append(x - fake_image_list[-1])
            if params['showreconst']:
                if params['showdeform']:
                    img, maskOut, deform = solvers[i].forward_generator(
                        fake_image_list[-1],
                        boxImg=boxImg,
                        mask=mask,
                        imagelabel=mask_target,
                        boxlabel=boxlabelInp,
                        get_feat=True,
                        mask_threshold=params['mask_threshold'],
                        withGTMask=withGTMask,
                        dilate=dilateWeightAll[i],
                        n_iter=params['n_iter'][i])
                    fake_image_list.append(img)
                    deformList.append(deform)
                else:
                    img, maskOut = solvers[i].forward_generator(
                        fake_image_list[-1],
                        boxImg=boxImg,
                        mask=mask,
                        imagelabel=mask_target,
                        boxlabel=boxlabelInp,
                        mask_threshold=params['mask_threshold'],
                        withGTMask=withGTMask,
                        dilate=dilateWeightAll[i],
                        n_iter=params['n_iter'][i])
                    fake_image_list.append(img)
                if params['showdiff']:
                    diff_image_list.append(x - fake_image_list[-1])

        if not params['compute_deform_stats']:
            img = make_image(fake_image_list, padimg)
            if params['showdeform']:
                defImg = make_image_with_deform(
                    fake_image_list, deformList,
                    np.vstack([padimg, padimg, padimg]))
                img = np.vstack([img, defImg])
            if params['showmask']:
                imgmask = make_image(mask_image_list, padimg)
                img = np.vstack([img, imgmask])
            if params['showdiff']:
                imgdiff = make_image(diff_image_list, padimg)
                img = np.vstack([img, imgdiff])
            if len(params['names']) > 0:
                nameList = ['Input'
                            ] + params['names'] if params['showmask'] else [
                                'Input', 'Masked Input'
                            ] + params['names']
                imgNames = np.hstack(
                    flatten([[
                        make_image_with_text((32, x.size(3), 3), nm),
                        padimg[:32, :, :].astype(np.uint8)
                    ] for nm in nameList]))
                img = np.vstack([imgNames, img])
            if len(params['sort_by']):
                clsname = configs[0]['selected_attrs'][rand_idx]
                cocoid = dataIt2id[data_iter[c_idx]]
                curr_class_iou = [
                    resFiles[i]['images'][cocoid]['real_scores'][rand_idx]
                ] + [
                    resFiles[i]['images'][cocoid]['perclass'][clsname][
                        params['sort_score']]
                    for i in range(len(params['model']))
                ]
                if params['showperceptionloss']:
                    textToPrint = [
                        'P:%.2f, S:%.1f' % (vggLoss(
                            fake_image_list[0],
                            fake_image_list[i]).data[0], curr_class_iou[i])
                        for i in range(len(fake_image_list))
                    ]
                else:
                    textToPrint = [
                        'S:%.1f' % (curr_class_iou[i])
                        for i in range(len(fake_image_list))
                    ]
                if len(params['show_also']):
                    # Additional data to print
                    for val in params['show_also']:
                        curval = [0.] + [
                            resFiles[i]['images'][cocoid]['perclass'][clsname]
                            [val][rand_idx]
                            for i in range(len(params['model']))
                        ]
                        textToPrint = [
                            txt + ' %s:%.1f' % (val[0], curval[i])
                            for i, txt in enumerate(textToPrint)
                        ]

                imgScore = np.hstack(
                    flatten([[
                        make_image_with_text((32, x.size(3), 3),
                                             textToPrint[i]),
                        padimg[:32, :, :].astype(np.uint8)
                    ] for i in range(len(fake_image_list))]))
                img = np.vstack([img, imgScore])
            elif params['showperceptionloss']:
                imgScore = np.hstack(
                    flatten([[
                        make_image_with_text(
                            (32, x.size(3), 3),
                            '%.2f' % vggLoss(fake_image_list[0],
                                             fake_image_list[i]).data[0]),
                        padimg[:32, :, :].astype(np.uint8)
                    ] for i in range(len(fake_image_list))]))
                img = np.vstack([img, imgScore])

            #if params['showmask']:
            #    imgmask = make_image(mask_list)
            #    img = np.vstack([img, imgmask])
            #if params['compmodel']:
            #    imgcomp = make_image(fake_image_list_comp)
            #    img = np.vstack([img, imgcomp])
            #    if params['showdiff']:
            #        imgdiffcomp = make_image([fimg - fake_image_list_comp[0] for fimg in fake_image_list_comp])
            #        img = np.vstack([img, imgdiffcomp])
            cv2.imshow(
                'frame', img if params['scaleDisp'] == 0 else cv2.resize(
                    img, None, fx=params['scaleDisp'], fy=params['scaleDisp']))
            keyInp = cv2.waitKey(0)

            if keyInp & 0xFF == ord('q'):
                break
            elif keyInp & 0xFF == ord('b'):
                #print keyInp & 0xFF
                c_idx = c_idx - 1
            elif (keyInp & 0xFF == ord('s')):
                #sample_dir = join(params['sample_dump_dir'], basename(params['model'][0]).split('.')[0])
                sample_dir = join(
                    params['sample_dump_dir'],
                    '_'.join([params['split']] + params['names']))
                if not exists(sample_dir):
                    makedirs(sample_dir)
                fnames = ['%s.png' % splitext(basename(f))[0] for f in fp]
                fpaths = [join(sample_dir, f) for f in fnames]
                imgSaveName = fpaths[0]
                if params['savesepimages']:
                    saveIndividImages(fake_image_list, mask_image_list,
                                      nameList, sample_dir, fp,
                                      configs[0]['selected_attrs'][rand_idx])
                else:
                    print('Saving into file: ' + imgSaveName)
                    cv2.imwrite(imgSaveName, img)
                c_idx += 1
            else:
                c_idx += 1
        else:
            for di in range(len(deformList)):
                if len(deformList[di]) > 0 and len(deformList[di][0]) > 0:
                    for dLidx, d in enumerate(deformList[di]):
                        lengths, mean, maxl = compute_deform_statistics(
                            d[1], d[0])
                        mean_hist[dLidx].append(mean)
                        max_hist[dLidx].append(maxl)
                        lengthsH = np.histogram(lengths,
                                                bins=np.arange(0, 128, 0.5))[0]
                        if lengths_hist[dLidx] == []:
                            lengths_hist[dLidx] = lengthsH
                        else:
                            lengths_hist[dLidx] += lengthsH

        if params['compute_deform_stats'] and (cimg_cnt >=
                                               params['compute_deform_stats']):
            print(np.mean(mean_hist[0]))
            print(np.mean(mean_hist[1]))
            print(np.mean(mean_hist[2]))
            print(np.mean(max_hist[0]))
            print(np.mean(max_hist[1]))
            print(np.mean(max_hist[2]))

            print(lengths_hist[0])
            print(lengths_hist[1])
            print(lengths_hist[2])
            break
Esempio n. 58
0
def main():
    # Print the config.
    state = {k: v for k, v in config._get_kwargs()}
    print(state)

    # Use CUDA if available.
    os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_id
    use_cuda = torch.cuda.is_available()

    # Random seed
    if config.manualSeed is None:
        config.manualSeed = random.randint(1, 10000)
    random.seed(config.manualSeed)
    torch.manual_seed(config.manualSeed)
    if use_cuda:
        torch.cuda.manual_seed_all(config.manualSeed)
        torch.backends.cudnn.benchmark = False  # set to True to speed up training

    # data loader
    from dataloader import get_loader
    if config.stage in ['finetune']:
        sample_size = config.finetune_sample_size
        crop_size = config.finetune_crop_size
    elif config.stage in ['keypoint']:
        sample_size = config.keypoint_sample_size
        crop_size = config.keypoint_crop_size

    # dataloader for pretrain
    train_loader_pt, val_loader_pt = get_loader(
        train_path=config.train_path_for_pretraining,
        val_path=config.val_path_for_pretraining,
        stage=config.stage,
        train_batch_size=config.train_batch_size,
        val_batch_size=config.val_batch_size,
        sample_size=sample_size,
        crop_size=crop_size,
        workers=config.workers)
    # dataloader for finetune
    train_loader_ft, val_loader_ft = get_loader(
        train_path=config.train_path_for_finetuning,
        val_path=config.val_path_for_finetuning,
        stage=config.stage,
        train_batch_size=config.train_batch_size,
        val_batch_size=config.val_batch_size,
        sample_size=sample_size,
        crop_size=crop_size,
        workers=config.workers)

    # load model
    from delf import Delf_V1
    model = Delf_V1(ncls=config.ncls,
                    load_from=config.load_from,
                    arch=config.arch,
                    stage=config.stage,
                    target_layer=config.target_layer,
                    use_random_gamma_rescale=config.use_random_gamma_rescale)

    # solver
    from solver import Solver
    solver = Solver(config=config, model=model)
    if config.stage in ['finetune']:
        epochs = config.finetune_epoch
    elif config.stage in ['keypoint']:
        epochs = config.keypoint_epoch

    # train/test for N epochs (first 50%: pretrain with dataset A, second 50%: finetune with dataset B)
    for epoch in range(epochs):
        if epoch < int(epochs * 0.5):
            print('[{:.1f}] load pretrain dataset: {}'.format(
                float(epoch) / epochs, config.train_path_for_pretraining))
            train_loader = train_loader_pt
            val_loader = val_loader_pt
        else:
            print('[{:.1f}] load finetune dataset: {}'.format(
                float(epoch) / epochs, config.train_path_for_finetuning))
            train_loader = train_loader_ft
            val_loader = val_loader_ft

        solver.train('train', epoch, train_loader, val_loader)
        if epoch % 10 == 0:
            solver.train('val', epoch, train_loader, val_loader)

    print('Congrats! You just finished DeLF training.')
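The loop above switches from the pretraining loaders to the finetuning loaders at the midpoint of training. A tiny standalone sketch of that switching logic (the function and loader names are hypothetical):

def pick_loaders(epoch, epochs, pretrain_loaders, finetune_loaders):
    """First half of training: pretrain loaders; second half: finetune loaders."""
    return pretrain_loaders if epoch < int(epochs * 0.5) else finetune_loaders

# usage: train_loader, val_loader = pick_loaders(epoch, 100, (train_pt, val_pt), (train_ft, val_ft))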
Example no. 59
0
def main(config):

    assert config.model_type in ['U_Net', 'R2U_Net', 'AttU_Net', 'R2AttU_Net']

    if config.use_seed:
        np.random.seed(config.random_seed)
        torch.manual_seed(config.random_seed)
        if config.use_cuda: 
            torch.cuda.manual_seed_all(config.random_seed)


    #=========================================================================================
    # data_path = '../data/dataset'
    data_path = config.data_path
    os.makedirs(data_path, exist_ok=True)
    dataset_path = config.dataset if config.dataset_path == "default" else config.dataset_path

    # outputs_path = '../data/outputs/dataset/model_type'
    outputs_path = os.path.join(data_path, config.outputs_path)
    os.makedirs(outputs_path, exist_ok=True)
    outputs_path = os.path.join(outputs_path, dataset_path)
    os.makedirs(outputs_path, exist_ok=True)
    network_name = config.model_type if config.network_name == "default" else config.network_name
    outputs_path = os.path.join(outputs_path, network_name)
    os.makedirs(outputs_path, exist_ok=True)

    model_name = config.model_name
    if config.model_name == "default":
        arma = 'arma' if config.use_arma else 'no-arma'
        model_stamp = datetime.datetime.now().strftime("%m%d") if config.model_stamp == "default" else config.model_stamp
        model_name = ('%s-%s-seed%d-%.5f-%.4f' %(config.model_type, arma, config.random_seed, config.lr, config.augmentation_prob))
        model_name += '_' + model_stamp


    # outputs_path = '../data/outputs/dataset/model_type/model_name'
    outputs_path = os.path.join(outputs_path, model_name)
    os.makedirs(outputs_path, exist_ok=True)
    # model_path = '../data/outputs/dataset/model_type/model_name/models'
    model_path = os.path.join(outputs_path, config.model_path)
    os.makedirs(model_path, exist_ok=True)
    # stats_path = '../data/outputs/dataset/model_type/model_name/stats'
    stats_path = os.path.join(outputs_path, config.stats_path)
    os.makedirs(stats_path, exist_ok=True)
    # result_path = '../data/outputs/dataset/model_type/model_name/results'
    result_path = os.path.join(outputs_path, config.result_path)
    os.makedirs(result_path, exist_ok=True)
    # tensorboard_path = '../data/outputs/dataset/model_type/model_name/tensorboard'
    tensorboard_path = os.path.join(outputs_path, config.tensorboard_path)
    os.makedirs(tensorboard_path, exist_ok=True)
    tensorboard_writer = SummaryWriter(tensorboard_path)

    config.model_path = model_path
    config.result_path = result_path
    config.tensorboard = tensorboard_writer
    config.model_name = model_name


    print("Mode      : ", config.mode)
    print("Model Type: ", config.model_type)
    print("Model Name: ", config.model_name)
    print("Dataset   : ", config.dataset)
    #=========================================================================================

    # print(config)
    if config.dataset == "ISIC": 
        train_loader = get_loader(image_path=config.train_path,
                                image_size=config.image_size,
                                batch_size=config.batch_size,
                                num_workers=config.num_workers,
                                mode='train',
                                augmentation_prob=config.augmentation_prob)
        valid_loader = get_loader(image_path=config.valid_path,
                                image_size=config.image_size,
                                batch_size=config.batch_size,
                                num_workers=config.num_workers,
                                mode='valid',
                                augmentation_prob=0.,
                                isshuffle=False)
        test_loader = get_loader(image_path=config.test_path,
                                image_size=config.image_size,
                                batch_size=config.batch_size,
                                num_workers=config.num_workers,
                                mode='test',
                                augmentation_prob=0.)

    if config.dataset == "ISBI":

        train_dataset = ISBI.ISBIDataset(
            os.path.join(config.data_path, "ISBI2012/Train-Volume/train-volume-*.tif"), os.path.join(config.data_path, "ISBI2012/Train-Labels/train-labels-*.tif"),
            length=22, is_pad=True, evaluate=False, totensor=True)

        valid_dataset = ISBI.ISBIDataset(
            os.path.join(config.data_path, "ISBI2012/Val-Volume/train-volume-*.tif"), os.path.join(config.data_path, "ISBI2012/Val-Labels/train-labels-*.tif"),
            length=8, is_pad=True, evaluate=True, totensor=True)

        test_dataset = ISBI.ISBIDataset(
            os.path.join(config.data_path, "ISBI2012/Test-Volume/test-volume-*.tif"), os.path.join(config.data_path, "ISBI2012/Test-Volume/test-volume-*.tif"),
            length=30, is_pad=True, evaluate=True, totensor=True)

        num_samples = len(train_dataset)
        split = int(np.floor(.7 * num_samples))

        indices = list(range(num_samples))
        train_sampler = SubsetRandomSampler(indices[:split])
        valid_sampler = SubsetRandomSampler(indices[split:])

        train_loader = torch.utils.data.DataLoader(train_dataset,
                        sampler = train_sampler, batch_size=config.batch_size,
                        num_workers=config.num_workers, pin_memory=True)

        valid_loader = torch.utils.data.DataLoader(valid_dataset, 
                        sampler=valid_sampler, batch_size=1, 
                        num_workers=config.num_workers, pin_memory=True)

        test_loader = torch.utils.data.DataLoader(test_dataset,
                        batch_size=1, num_workers=config.num_workers, pin_memory=True)

    else:
        raise ValueError('unknown dataset: {}'.format(config.dataset))

    solver = Solver(config, train_loader, valid_loader, test_loader)

    
    # train or test, depending on the configured mode
    if config.mode == 'train':
        solver.train()
    elif config.mode == 'test':
        solver.test()
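Note that the ISBI branch splits train/validation indices contiguously (first 70% vs. last 30%); SubsetRandomSampler only shuffles within each subset. A common variant shuffles the indices before splitting. A minimal sketch of that variant (an alternative, not what the code above does):

import numpy as np
from torch.utils.data import SubsetRandomSampler

def shuffled_split_samplers(num_samples, train_frac=0.7, seed=0):
    rng = np.random.default_rng(seed)
    indices = rng.permutation(num_samples)            # shuffle before splitting
    split = int(np.floor(train_frac * num_samples))
    return (SubsetRandomSampler(indices[:split].tolist()),
            SubsetRandomSampler(indices[split:].tolist()))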
Example no. 60
0
    def __init__(
            self,
            visit_sequence='monotone',  # order in which we visit the columns
            n_imputations=100,
            n_burn_in=10,  # this many replicates will be thrown away
            n_pmm_neighbors=5,  # number of nearest neighbors in PMM
            impute_type='col',  # also can be pmm
            model=BayesianRidgeRegression(lambda_reg=0.001, add_ones=True),
            n_nearest_columns=np.inf,
            init_fill_method="mean",
            min_value=None,
            max_value=None,
            verbose=True):
        """
        Parameters
        ----------
        visit_sequence : str
            Possible values: "monotone" (default), "roman", "arabic",
                "revmonotone".

        n_imputations : int
            Defaults to 100

        n_burn_in : int
            Defaults to 10

        impute_type : str
            "pmm" is Predictive Mean Matching.
            "col" (default) means fill in with samples from posterior predictive
                distribution.

        n_pmm_neighbors : int
            Number of nearest neighbors for PMM, defaults to 5.

        model : predictor function
            A model that has fit, predict, and predict_dist methods.
            Defaults to BayesianRidgeRegression(lambda_reg=0.001, add_ones=True).
            Note that the regularization parameter lambda_reg
            is by default scaled by np.linalg.norm(np.dot(X.T,X)).
            Sensible lambda_regs to try: 0.1, 0.01, 0.001, 0.0001.

        n_nearest_columns : int
            Number of other columns to use to estimate current column.
            Useful when number of columns is huge.
            Default is to use all columns.

        init_fill_method : str
            Valid values: {"mean", "median", "random"}
            ("random" means fill with random samples from the observed
            values of a column)

        min_value : float
            Minimum possible imputed value; no lower clipping if None

        max_value : float
            Maximum possible imputed value; no upper clipping if None

        verbose : boolean
            Whether to print progress information
        """
        Solver.__init__(self,
                        n_imputations=n_imputations,
                        min_value=min_value,
                        max_value=max_value,
                        fill_method=init_fill_method)
        self.visit_sequence = visit_sequence
        self.n_burn_in = n_burn_in
        self.n_pmm_neighbors = n_pmm_neighbors
        self.impute_type = impute_type
        self.model = model
        self.n_nearest_columns = n_nearest_columns
        self.verbose = verbose
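A minimal usage sketch for this imputer. The class and module names below are assumptions (neither appears in the snippet), as is the `complete` entry point, which is taken here to be inherited from the `Solver` base class:

import numpy as np
from mice import MICE  # hypothetical import for the class defined above

X = np.random.randn(100, 5)
X[np.random.rand(*X.shape) < 0.2] = np.nan  # knock out ~20% of the entries

imputer = MICE(n_imputations=50, impute_type='pmm', n_pmm_neighbors=5)
X_filled = imputer.complete(X)  # assumed Solver API
assert not np.isnan(X_filled).any()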