Example #1
def test_gauss_vs_cq():
    """
    Check how quad_gauss() and composite_quad() behave given the same number of function evaluations
    """
    x0, x1 = 0, np.pi/2

    p = Harmonic(0, 1)
    y0 = p[x0, x1]

    n_nodes = 2
    Y_gauss = []
    Y_cquad = []

    n_intervals = np.arange(1, 256, 5)
    for n in n_intervals:
        n_evals = n * n_nodes
        Y_gauss.append(quad_gauss(p, x0, x1, n_evals))
        Y_cquad.append(composite_quad(p, x0, x1, n, n_nodes))

    accuracy_gauss = get_accuracy(Y_gauss, y0 * np.ones_like(Y_gauss))
    accuracy_gauss[accuracy_gauss > 17] = 17

    accuracy_cquad = get_accuracy(Y_cquad, y0 * np.ones_like(Y_cquad))
    accuracy_cquad[accuracy_cquad > 17] = 17

    plt.plot(np.log10(n_intervals), accuracy_gauss, '.:', label='gauss')
    plt.plot(np.log10(n_intervals), accuracy_cquad, '.:', label='2-node CQ')

    plt.legend()
    plt.ylabel('accuracy')
    plt.xlabel('log10(n_evals)')
    plt.suptitle('test gauss vs CQ')
    plt.show()
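For reference, a minimal sketch of what get_accuracy() is assumed to compute throughout these examples: the number of correct decimal digits, i.e. -log10 of the absolute error. This is consistent with the clipping at 17 above (roughly the digit limit of float64) and with the eps keyword passed in a later example; the implementation itself is an assumption.

import numpy as np

def get_accuracy(ys, ys_exact, eps=1e-17):
    # assumed implementation: elementwise digits of accuracy;
    # eps floors the error so that exact results do not produce log10(0)
    err = np.abs(np.asarray(ys, dtype=float) - np.asarray(ys_exact, dtype=float))
    return -np.log10(err + eps)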
Example #2
def test_harmonic(fname, func: callable):
    """
    Compare approximation by algebraic polynomials and by harmonics
    """
    n = 50
    dim = 21
    m = 99

    xs0 = np.linspace(-1, 1, n)
    xs1 = np.linspace(-1, 1, m)

    ys0 = func(xs0)
    ys1 = func(xs1)

    colors = 'br'
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.plot(xs1, ys1, 'k-', label='exact')
    ax1.plot(xs0, ys0, 'k.')

    for color, approx_type in zip(colors, [Algebraic, Harmonic]):
        approx = approx_type(xs0, ys0, dim)
        ys1_num = approx(xs1)

        ax1.plot(xs1, ys1_num, f'{color}-', label=approx.name)
        ax2.plot(xs1, get_accuracy(ys1, ys1_num), f'{color}-', label=approx.name)

    ax1.legend(), ax1.set_title('y(x)')
    ax2.legend(), ax2.set_title('accuracy')
    fig.suptitle(f'{fname}')

    plt.show()
Example #3
File: utils.py Project: mhariat/MicroNet
def on_forward(state):
    loss = state['loss'].item()
    accuracy = get_accuracy(state['output'].cpu(),
                            state['sample'][1].cpu())
    state['iterator'].write('batch %d loss %.3f accuracy %.3f ' %
                            (state['t'], loss, accuracy),
                            end='\n')
Example #4
def summarize_train(writer, global_step, last_time, model, opt, inputs,
                    targets, optimizer, loss, pred, ans):
    if opt.summary_grad:
        for name, param in model.named_parameters():
            if not param.requires_grad:
                continue

            norm = torch.norm(param.grad.data.view(-1))
            writer.add_scalar('gradient_norm/' + name, norm, global_step)

    writer.add_scalar('input_stats/batch_size', targets.size(0), global_step)

    if inputs is not None:
        writer.add_scalar('input_stats/input_length', inputs.size(1),
                          global_step)
        i_nonpad = (inputs != opt.src_pad_idx).view(-1).type(torch.float32)
        writer.add_scalar('input_stats/inputs_nonpadding_frac',
                          i_nonpad.mean(), global_step)

    writer.add_scalar('input_stats/target_length', targets.size(1),
                      global_step)
    t_nonpad = (targets != opt.trg_pad_idx).view(-1).type(torch.float32)
    writer.add_scalar('input_stats/target_nonpadding_frac', t_nonpad.mean(),
                      global_step)

    writer.add_scalar('optimizer/learning_rate', optimizer.learning_rate(),
                      global_step)

    writer.add_scalar('loss', loss.item(), global_step)

    acc = utils.get_accuracy(pred, ans, opt.trg_pad_idx)
    writer.add_scalar('training/accuracy', acc, global_step)

    steps_per_sec = 100.0 / (time.time() - last_time)
    writer.add_scalar('global_step/sec', steps_per_sec, global_step)
Example #5
def test_quad_vs_cq():
    """
    Check how interpolatory and composite quadrature formulas behave given the same number of function evaluations
    """
    x0, x1 = 0, np.pi/2

    p = Harmonic(0, 1)
    y0 = p[x0, x1]

    n_nodes = 2
    ys_nt_ct = []
    ys_gauss = []
    ys_cquad = []

    n_intervals = 2 ** np.arange(8)
    for n in n_intervals:
        n_evals = n * n_nodes
        ys_nt_ct.append(quad(p, x0, x1, np.linspace(0, 1, n_evals) * (x1-x0) + x0))
        ys_gauss.append(quad_gauss(p, x0, x1, n_evals))
        ys_cquad.append(composite_quad(p, x0, x1, n, n_nodes))

    for ys, label in zip((ys_nt_ct, ys_gauss, ys_cquad),
                         ('newton-cotes', 'gauss', f'{n_nodes}-node CQ')):
        acc = get_accuracy(ys, y0 * np.ones_like(ys))
        acc[acc > 17] = 17

        plt.plot(np.log10(n_intervals), acc, '.:', label=label)

    plt.legend()
    plt.ylabel('accuracy')
    plt.xlabel('log10(n_evals)')
    plt.suptitle('test quad vs composite quad')
    plt.show()
Example #6
def test_adaptive(ode, y0):
    """
    Check the step-size selection algorithms
    """
    t0, t1 = 0, 4 * np.pi

    atol = 1e-6
    rtol = 1e-3

    tss = []
    yss = []

    methods = (
        (ExplicitEulerMethod(), AdaptType.RUNGE),
        (RungeKuttaMethod(coeffs.rk4_coeffs), AdaptType.RUNGE),
        (EmbeddedRungeKuttaMethod(coeffs.dopri_coeffs), AdaptType.EMBEDDED),
    )

    for method, adapt_type in methods:
        ode.clear_call_counter()
        ts, ys = adaptive_step_integration(method=method,
                                           ode=ode,
                                           y_start=y0,
                                           t_span=(t0, t1),
                                           adapt_type=adapt_type,
                                           atol=atol,
                                           rtol=rtol)
        print(f'{method.name} took {ode.get_call_counter()} function calls')

        tss.append(np.array(ts))
        yss.append(ys)

    ts = np.array(sorted([t for ts in tss for t in ts]))
    exact = ode[ts].T
    y0 = np.array([y[0] for y in exact])

    # plots
    fig1, ax1 = plt.subplots(num='y(t)')
    fig1.suptitle('test_adaptive: y(t)')
    ax1.set_xlabel('t'), ax1.set_ylabel('y')
    ax1.plot(ts, y0, 'ko-', label='exact')

    fig2, ax2 = plt.subplots(num='dt(t)')
    fig2.suptitle('test_adaptive: step sizes')
    ax2.set_xlabel('t'), ax2.set_ylabel('dt')

    fig3, ax3 = plt.subplots(num='dy(t)')
    fig3.suptitle('test_adaptive: accuracies')
    ax3.set_xlabel('t'), ax3.set_ylabel('accuracy')

    for (m, _), ts, ys in zip(methods, tss, yss):
        ax1.plot(ts, [y[0] for y in ys], '.', label=m.name)
        ax2.plot(ts[:-1], ts[1:] - ts[:-1], '.-', label=m.name)
        ax3.plot(ts, get_accuracy(ode[ts].T, ys), '.-', label=m.name)

    ax1.legend()
    ax2.legend()
    ax3.legend()

    plt.show()
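A minimal sketch of the Runge-rule error estimate presumably behind AdaptType.RUNGE: compare one step of size h with two steps of size h/2; the difference, scaled by 1/(2^p - 1) for a method of order p, estimates the local error of the finer result. The one-step method.step() API used here is hypothetical.

import numpy as np

def runge_error(method, ode, t, y, h, p):
    # one coarse step vs. two half steps (hypothetical step() signature)
    y_coarse = method.step(ode, t, y, h)
    y_half = method.step(ode, t, y, h / 2)
    y_fine = method.step(ode, t + h / 2, y_half, h / 2)
    # Runge (Richardson) estimate of the error of y_fine
    return y_fine, np.linalg.norm(y_fine - y_coarse) / (2 ** p - 1)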
Example #7
def test_quad_gauss_degree():
    """
    Check the algebraic degree of exactness of Gauss quadrature
    """
    x0, x1 = 0, 1

    max_degree = 8

    for deg in range(max_degree):
        p = Monome(deg)
        y0 = p[x0, x1]

        node_counts = range(1, 6)
        Y = [quad_gauss(p, x0, x1, node_count) for node_count in node_counts]
        accuracy = get_accuracy(Y, y0 * np.ones_like(Y))

        # check accuracy
        for node_count, acc in zip(node_counts, accuracy):
            if 2 * node_count >= deg + 1:
                assert acc > 6

        plt.plot(node_counts, accuracy, '.:', label=f'x^{deg}')

    plt.legend()
    plt.ylabel('accuracy')
    plt.xlabel('node count')
    plt.suptitle('test quad gauss')
    plt.show()
Example #8
def test_quad_degree():
    """
    Check the algebraic degree of exactness of interpolatory quadrature
    Q: why, in some cases, is x^n integrated almost without error with n quadrature nodes?
    """
    x0, x1 = 0, 1

    max_degree = 7
    max_nodes = 7

    for deg in range(max_degree):
        p = Monome(deg)
        y0 = p[x0, x1]

        node_counts = range(1, max_nodes+1)

        Y = [quad(p, x0, x1, np.linspace(x0, x1, node_count)) for node_count in node_counts]
        # Y = [quad(p, x0, x1, x0 + (x1-x0) * np.random.random(node_count)) for node_count in node_counts]
        accuracy = get_accuracy(Y, y0 * np.ones_like(Y))

        # check accuracy
        for node_count, acc in zip(node_counts, accuracy):
            if node_count >= deg + 1:
                assert acc > 6

        plt.plot(node_counts, accuracy, '.:', label=f'x^{deg}')

    plt.legend()
    plt.ylabel('accuracy')
    plt.xlabel('node_count')
    plt.suptitle('test quad')
    plt.show()
Example #9
def test_composite_quad_degree(v):
    """
    Check convergence of composite quadrature in the presence of unequal weights
    Q: the convergence rate turns out to be fractional, why?
    """
    from .variants import params
    a, b, alpha, beta, f = params(v)
    x0, x1 = a, b
    # a, b = -10, 10

    L = 2
    n_intervals = [L**q for q in range(2, 11)]
    n_nodes = 3

    exact = sp_quad(lambda x: f(x) / (x - a)**alpha / (b - x)**beta, x0, x1)[0]

    Y = [
        composite_quad(f,
                       x0,
                       x1,
                       n_intervals=n,
                       n_nodes=n_nodes,
                       a=a,
                       b=b,
                       alpha=alpha,
                       beta=beta) for n in n_intervals
    ]
    accuracy = get_accuracy(Y, exact * np.ones_like(Y))

    x = np.log10(n_intervals)
    aitken_degree = aitken(*Y[5:8], L)
    a1, a0 = np.polyfit(x, accuracy, 1)
    assert a1 > 1, 'composite quad did not converge!'

    fig, (ax1, ax2) = plt.subplots(1, 2)

    # plot the weight function
    xs = np.linspace(x0, x1, n_intervals[-1] + 1)
    ys = 1 / ((xs - a)**alpha * (b - xs)**beta)

    ax1.plot(xs, ys, label='weights')
    ax = list(ax1.axis())
    ax[2] = 0.
    ax1.axis(ax)
    ax1.set_xlabel('x')
    ax1.set_ylabel('p(x)')
    ax1.legend()

    # accuracy plot
    ax2.plot(x, accuracy, 'kh')
    ax2.plot(x, a1 * x + a0, 'b:', label=f'{a1:.2f}*x+{a0:.2f}')
    ax2.set_xlabel('log10(n_intervals)')
    ax2.set_ylabel('accuracy')
    ax2.legend()

    fig.suptitle(f'variant #{v} (alpha={alpha:4.2f}, beta={beta:4.2f})\n'
                 f'aitken estimation: {aitken_degree:.2f}')
    fig.tight_layout()

    plt.show()
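The aitken() helper is assumed to implement the standard Aitken estimate of the convergence order from three results q1, q2, q3 computed on grids refined by a constant factor L (if Q_h ~ Q + C*h^p, then (q2 - q1)/(q3 - q2) ~ L^p):

import numpy as np

def aitken(q1, q2, q3, L):
    # assumed implementation: convergence-order estimate from three
    # successive results on grids refined by a factor L
    return np.log(np.abs((q2 - q1) / (q3 - q2))) / np.log(L)

Example #10 below passes L**2 because it feeds in every other result.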
Example #10
def test_composite_quad(n_nodes):
    """
    Check 2-, 3-, and 5-node composite quadrature formulas
    Q: explain the convergence rate in each case
    """
    fig, ax = plt.subplots(1, 2)

    x0, x1 = 0, 1
    L = 2
    n_intervals = [L ** q for q in range(0, 8)]

    for i, degree in enumerate((5, 6)):
        p = Monome(degree)
        Y = [composite_quad(p, x0, x1, n_intervals=n, n_nodes=n_nodes) for n in n_intervals]
        accuracy = get_accuracy(Y, p[x0, x1] * np.ones_like(Y))
        x = np.log10(n_intervals)

        # convergence estimate
        ind = np.isfinite(x) & np.isfinite(accuracy)
        k, b = np.polyfit(x[ind], accuracy[ind], 1)
        aitken_degree = aitken(*Y[0:6:2], L ** 2)

        ax[i].plot(x, k*x+b, 'b:', label=f'{k:.2f}*x+{b:.2f}')
        ax[i].plot(x, aitken_degree*x+b, 'm:', label=f'aitken ({aitken_degree:.2f})')
        ax[i].plot(x, accuracy, 'kh', label=f'accuracy for x^{degree}')
        ax[i].set_title(f'{n_nodes}-node CQ for x^{degree}')
        ax[i].set_xlabel('log10(n_intervals)')
        ax[i].set_ylabel('accuracy')
        ax[i].legend()

        if n_nodes < degree:
            assert np.abs(aitken_degree - k) < 0.5, \
                f'Aitken estimation {aitken_degree:.2f} is too far from actual {k:.2f}'

    plt.show()
Example #11
    def test_convergence(self):
        """
        Convergence check
        """
        func = np.abs
        a, b = -5, 5

        interp_params = [
            # color, interp_name, nodes_type
            ['b', LaGrange, NodeType.EQ],
            ['r', LaGrange, NodeType.CHEB],
            ['c', Spline3, NodeType.EQ],
        ]

        node_params = [
            # n_nodes, style
            [5, '-'],
            [17, '--'],
            [65, ':'],
            # [257, '-.'],
        ]

        # points where we compare against the exact values
        m = 1025
        xs_dense = np.linspace(a, b, m)
        ys_dense = func(xs_dense)

        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 6))
        ax1.plot(xs_dense, ys_dense, 'k-', label='exact')

        for (color, interp_name, nodes_type) in interp_params:
            for (n_nodes, style) in node_params:
                xs = self._get_nodes(nodes_type, a, b, n_nodes)
                ys = func(xs)

                interp = interp_name(xs, ys)
                ys_dense_num = interp(xs_dense)

                label = f'{interp.name}-{nodes_type.name}-{n_nodes}'
                ax1.plot(xs_dense,
                         ys_dense_num,
                         f'{color}{style}',
                         label=label)
                ax1.plot(xs, ys, f'{color}.')
                ax2.plot(xs_dense,
                         get_accuracy(ys_dense, ys_dense_num),
                         f'{color}{style}',
                         label=label)

        ax1.set_title('y(x)')
        ax1.axis([a, b, -1, 6])
        ax1.legend()

        ax2.set_title('accuracy')
        ax2.legend()

        plt.show()
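A sketch of the Chebyshev nodes that _get_nodes presumably returns for NodeType.CHEB: first-kind nodes on [-1, 1] mapped to [a, b] (NodeType.EQ would be a plain np.linspace). Their clustering near the endpoints is what suppresses the Runge oscillations visible with equidistant LaGrange interpolation above.

import numpy as np

def cheb_nodes(a, b, n):
    # Chebyshev nodes of the first kind, mapped from [-1, 1] to [a, b]
    k = np.arange(n)
    t = np.cos((2 * k + 1) * np.pi / (2 * n))
    return 0.5 * (a + b) + 0.5 * (b - a) * t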
Example #12
def evaluate(loader, model):
    print("Evaluate")

    # Set model to eval
    model.eval()

    accuracy = AverageMeter()
    positive_accuracy = AverageMeter()
    negative_accuracy = AverageMeter()
    y_true = None
    y_scores = None

    with torch.no_grad():
        for batch_idx, (x, y) in enumerate(loader):
            x = x.to(device=device).to(torch.float32)
            y = y.to(device=device).to(torch.float32)

            scores = model(x)
            scores = torch.squeeze(scores, 2)

            y = torch.unsqueeze(y, 1)
            loss = criterion(scores, y)

            scores = torch.squeeze(scores, 1)
            y = torch.squeeze(y, 1)

            if y_true is None:
                y_true = y
                y_scores = scores
            else:
                y_true = torch.cat((y_true, y))
                y_scores = torch.cat((y_scores, scores))

            acc = get_accuracy(y, scores)
            # neg_acc, pos_acc = get_accuracy_per_class(y.cpu(), scores.cpu())

            accuracy.update(acc)

            # positive_accuracy.update(pos_acc)
            # negative_accuracy.update(neg_acc)

    auc = roc_auc_score(y_true.cpu(), y_scores.cpu())

    wandb.log({
        "valid_acc": accuracy.avg,
        #    "positive_acc": positive_accuracy.avg,
        #    "negative_acc": negative_accuracy.avg,
        "valid_loss": loss.item(),
        "AUC": auc
    })

    accuracy.reset()

    # Set model back to train
    model.train()
Example #13
def test_optimization(eps_y, check_accuracy, student, n_dim, projection):
    N = student

    A = np.array([[4, 1, 1], [1, 6 + .2 * N, -1], [1, -1, 8 + .2 * N]],
                 dtype='float')[:n_dim, :n_dim]
    b = np.array([1, -2, 3], dtype='float').T[:n_dim]
    x0 = np.zeros_like(b)

    x1 = np.linalg.solve(A, -b)
    y1 = (1 / 2 * x1.T @ A @ x1 + b.T @ x1).item()

    eps_x = np.sqrt(eps_y)

    methods = ['mngs', 'mps', 'newton']
    styles = ['go:', 'bo:', 'mo:']

    fig = plt.figure()
    ax1 = fig.add_subplot(121)
    ax2 = fig.add_subplot(122, projection=projection)

    for i, method in enumerate(methods):
        optimization.func.calls = 0
        xs, ys = getattr(optimization, method)(A=A,
                                               b=b,
                                               x0=x0,
                                               eps=eps_y,
                                               max_iter=30)

        assert np.equal(x0, xs[0]).all(), 'xs should start with initial point'
        if check_accuracy:
            assert np.linalg.norm(
                x1 - xs[-1]
            ) < eps_x, 'last xs should be close enough to the optimum'
            assert np.linalg.norm(
                y1 - ys[-1]
            ) < eps_y, 'last ys should be close enough to the optimum'
        assert optimization.func.calls == len(ys), f'function was called {optimization.func.calls} times, ' \
                                                   f'but there are {len(ys)} points in the output'

        ax1.plot(get_accuracy(ys, y1 * np.ones_like(ys)),
                 styles[i],
                 label=method)
        ax2.plot(*list(np.array(xs).T), styles[i], label=method)
    ax2.plot(*[[x] for x in x1], 'kp', label='exact')

    ax = ax1.axis()
    ax1.plot(ax[:2], -np.log10([eps_y, eps_y]), 'k-')
    ax1.set_xlabel('N iter')
    ax1.set_ylabel('accuracy')
    ax1.legend()
    ax2.legend()
    fig.suptitle(f'Results for {n_dim}D')
    plt.show()
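The test assumes the quadratic objective f(x) = 1/2 x^T A x + b^T x with gradient A x + b, so the optimum solves A x = -b (the x1 computed above). A sketch of why the 'newton' method should converge in a single iteration on such a function:

import numpy as np

def newton_step(A, b, x):
    # for a quadratic objective the Hessian is A, so the Newton step
    # x - A^{-1} (A x + b) = -A^{-1} b lands exactly on the optimum
    return x - np.linalg.solve(A, A @ x + b)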
Example #14
    def _test_case(self, fname, func: callable, a, b, n_nodes, interp_params):
        """
        Common test routine
        """
        k_dense = 10
        m = k_dense * n_nodes

        # points where we compare against the exact values
        xs_dense = np.array(
            sorted([
                *np.linspace(a, b, m),
                *self._get_nodes(NodeType.EQ, a, b, n_nodes),
                *self._get_nodes(NodeType.CHEB, a, b, n_nodes)
            ]))
        ys_dense = func(xs_dense)

        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(9, 6))
        ax1.plot(xs_dense, ys_dense, 'k-', label='exact')

        for (color, interp_name, nodes_type) in interp_params:
            xs = self._get_nodes(nodes_type, a, b, n_nodes)
            ys = func(xs)

            interp = interp_name(xs, ys)

            # check that we hit the interpolation nodes
            ys_num = interp(xs)
            assert (np.abs(ys - ys_num) < 1e-6).all()

            # see how we do at the remaining points of the interval
            ys_dense_num = interp(xs_dense)

            label = f'{interp.name}-{nodes_type.name}'
            ax1.plot(xs_dense, ys_dense_num, f'{color}:', label=label)
            ax1.plot(xs, ys, f'{color}.')
            ax2.plot(xs_dense,
                     get_accuracy(ys_dense, ys_dense_num),
                     f'{color}-',
                     label=label)

        ax1.set_title(f'{fname}')
        ax1.legend()

        ax2.set_title('accuracy')
        ax2.legend()

        plt.show()
Example #15
def test_polynomial(fname, func: callable):
    """
    Compare approximation by algebraic polynomials and by Legendre polynomials
    """
    n = 15
    dim = 5
    m = 101

    xs0 = np.linspace(-1, 1, n)
    xs1 = np.linspace(-1, 1, m)

    ys0 = func(xs0)
    ys1 = func(xs1)

    colors = 'bg'
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.set_title(f'{fname}')
    ax2.set_title('accuracy')
    ax1.plot(xs1, ys1, 'k-', label='exact')
    ax1.plot(xs0, ys0, 'k.')

    coeffs = []
    for color, approx_type in zip(colors, [Algebraic, Legendre]):
        approx = approx_type(xs0, ys0, dim)
        ys1_num = approx(xs1)
        coeffs.append(approx.coeffs)

        ax1.plot(xs1, ys1_num, f'{color}-', label=approx.name)
        ax2.plot(xs1,
                 get_accuracy(ys1, ys1_num),
                 f'{color}-',
                 label=approx.name)

        assert (len(approx.coeffs) == dim
                ), f'{approx_type} polynomial length should be {dim}'
        assert (all(abs(ys1 - ys1_num) < 1)
                ), f'{approx_type} polynomial approximation is too bad'

    alg_poly = coeffs[0]
    leg_poly = P.legendre.leg2poly(coeffs[1])
    assert (all(
        abs(alg_poly -
            leg_poly) < 1e-3)), 'algebraic and legendre are not consistent'

    ax1.legend()
    ax2.legend()
    plt.show()
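The final cross-check relies on P.legendre.leg2poly (P is assumed to be numpy.polynomial), which converts Legendre-basis coefficients to monomial-basis coefficients, lowest degree first:

import numpy.polynomial as P

# P2(x) = 1.5*x^2 - 0.5, so Legendre coefficients [0, 0, 1]
# map to monomial coefficients [-0.5, 0, 1.5]
print(P.legendre.leg2poly([0, 0, 1]))  # [-0.5  0.   1.5]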
Example #16
def test_multi_step():
    """
    Check the Adams methods
    Q: compare the right-hand plots for both cases and explain the difference
    """
    y0 = np.array([0., 1.])
    t0 = 0
    t1 = np.pi
    dt = 0.1

    f = Harmonic(y0, 1, 1)
    ts = np.arange(t0, t1 + dt, dt)
    exact = f[ts].T

    for one_step_method in [
            RungeKuttaMethod(collection.rk4_coeffs),
            ExplicitEulerMethod(),
    ]:
        fig, (ax1, ax2) = plt.subplots(1, 2)

        ax1.plot(ts, [e[0] for e in exact], 'k', label='Exact')
        for p, c in adams_coeffs.items():
            f.clear_call_counter()
            t_adams, y_adams = adams(f,
                                     y0,
                                     ts,
                                     c,
                                     one_step_method=one_step_method)
            n_calls = f.get_call_counter()
            print(
                f'{p}-order multi-step with one-step {one_step_method.name}: {n_calls} function calls'
            )

            err = get_accuracy(exact, y_adams)

            label = f"Adams's order {p}"
            ax1.plot(t_adams, [y[0] for y in y_adams], '.--', label=label)
            ax2.plot(t_adams, err, '.--', label=label)

        ax1.legend(), ax1.set_title('y(t)')
        ax2.legend(), ax2.set_title('accuracy')
        fig.suptitle(
            f'test_multi_step\none step method: {one_step_method.name}')
        fig.tight_layout()

    plt.show()
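A minimal sketch of the explicit Adams update that adams() presumably performs once enough history is available (the first points come from one_step_method). The coefficient convention, newest point first, is an assumption:

def adams_step(f, ts, ys, coeffs, h):
    # explicit Adams-Bashforth: y_{n+1} = y_n + h * sum_j c_j * f(t_{n-j}, y_{n-j});
    # ts, ys hold the history in chronological order
    k = len(coeffs)
    fs = [f(t, y) for t, y in zip(ts[-k:], ys[-k:])]  # oldest .. newest
    return ys[-1] + h * sum(c * fi for c, fi in zip(coeffs[::-1], fs))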
Example #17
def test_one_step():
    """
    Check the Euler and Runge-Kutta methods
    """
    y0 = np.array([0., 1.])
    t0 = 0
    t1 = np.pi

    ode = Harmonic(y0, 1, 1)

    for dt in [0.1, 0.01]:
        ts = np.arange(t0, t1 + dt, dt)

        exact = ode[ts].T
        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
        ax1.plot(ts, [e[0] for e in exact], 'k', label='Exact')

        colors = 'rgbcmyk'
        for i, method in enumerate([
                ExplicitEulerMethod(),
                ImplicitEulerMethod(),
                RungeKuttaMethod(collection.rk4_coeffs),
                RungeKuttaMethod(collection.dopri_coeffs),
        ]):
            ode.clear_call_counter()
            _, y = fix_step_integration(method, ode, y0, ts)
            n_calls = ode.get_call_counter()
            print(
                f'One-step {method.name}: {len(y)-1} steps, {n_calls} function calls'
            )

            ax1.plot(ts, [_y[0] for _y in y],
                     f'{colors[i]}.--',
                     label=method.name)
            ax2.plot(ts,
                     get_accuracy(exact, y),
                     f'{colors[i]}.--',
                     label=method.name)

        ax1.legend(), ax1.set_title('y(t)')
        ax2.legend(), ax2.set_title('accuracy')

        fig.suptitle(f'test_one_step, dt={dt}')
        fig.tight_layout()

    plt.show()
Example #18
File: imaml.py Project: xiaohuangdi8/imaml
    def outer_loop(self, batch, is_train):
        
        train_inputs, train_targets, test_inputs, test_targets = self.unpack_batch(batch)

        loss_log = 0
        acc_log = 0
        grad_list = []
        loss_list = []

        for (train_input, train_target, test_input, test_target) in zip(train_inputs, train_targets, test_inputs, test_targets):

            with higher.innerloop_ctx(self.network, self.inner_optimizer, track_higher_grads=False) as (fmodel, diffopt):

                for step in range(self.args.n_inner):
                    self.inner_loop(fmodel, diffopt, train_input, train_target)
                
                train_logit = fmodel(train_input)
                in_loss = F.cross_entropy(train_logit, train_target)

                test_logit = fmodel(test_input)
                outer_loss = F.cross_entropy(test_logit, test_target)
                loss_log += outer_loss.item()/self.batch_size

                with torch.no_grad():
                    acc_log += get_accuracy(test_logit, test_target).item()/self.batch_size
            
                if is_train:
                    params = list(fmodel.parameters(time=-1))
                    in_grad = torch.nn.utils.parameters_to_vector(torch.autograd.grad(in_loss, params, create_graph=True))
                    outer_grad = torch.nn.utils.parameters_to_vector(torch.autograd.grad(outer_loss, params))
                    implicit_grad = self.cg(in_grad, outer_grad, params)
                    grad_list.append(implicit_grad)
                    loss_list.append(outer_loss.item())

        if is_train:
            self.outer_optimizer.zero_grad()
            weight = torch.ones(len(grad_list))
            weight = weight / torch.sum(weight)
            grad = mix_grad(grad_list, weight)
            grad_log = apply_grad(self.network, grad)
            self.outer_optimizer.step()
            
            return loss_log, acc_log, grad_log
        else:
            return loss_log, acc_log
Example #19
def test_multi_step():
    """
    test the Adams methods
    Q: compare the right-hand plots for both cases and explain the difference
    """
    y0 = np.array([0., 1.])
    t0 = 0
    t1 = 1.
    dt = 0.1

    f = Harmonic(y0, 1, 1)
    ts = np.arange(t0, t1 + dt, dt)
    exact = f[ts].T

    for one_step_method in [
            RungeKuttaMethod(collection.rk4_coeffs),
            ExplicitEulerMethod(),
    ]:
        _, (ax1, ax2) = plt.subplots(1, 2)

        ax1.plot(ts, [e[0] for e in exact], 'k', label='Exact')
        for p, c in adams_coeffs.items():
            f.clear_call_counter()
            t_adams, y_adams = adams(f,
                                     y0,
                                     ts,
                                     c,
                                     one_step_method=one_step_method)
            n_calls = f.get_call_counter()
            print(
                f'{p}-order multi-step with one-step {one_step_method.name}: {n_calls} function calls'
            )

            err = get_accuracy(exact, y_adams)

            label = f"Adams's order {p}"
            ax1.plot(t_adams, [y[0] for y in y_adams], '.--', label=label)
            ax2.plot(t_adams, err, '.--', label=label)

        ax1.set_xlabel('t'), ax1.set_ylabel('y'), ax1.legend()
        ax2.set_xlabel('t'), ax2.set_ylabel('accuracy'), ax2.legend()
        plt.suptitle(
            f'test_multi_step\none step method: {one_step_method.name}')
    plt.show()
Example #20
def test_one_step():
    """
    test Euler and RK methods
    """
    y0 = np.array([0., 1.])
    t0 = 0
    t1 = np.pi / 2
    dt = 0.1

    f = Harmonic(y0, 1, 1)
    ts = np.arange(t0, t1 + dt, dt)

    exact = f[ts].T
    _, (ax1, ax2) = plt.subplots(1, 2)
    ax1.plot(ts, [e[0] for e in exact], 'k', label='Exact')

    colors = 'rgbcmyk'
    for i, method in enumerate([
            ExplicitEulerMethod(),
            ImplicitEulerMethod(),
            RungeKuttaMethod(collection.rk4_coeffs),
            RungeKuttaMethod(collection.dopri_coeffs),
    ]):
        f.clear_call_counter()
        _, y = fix_step_integration(method, f, y0, ts)
        n_calls = f.get_call_counter()
        print(
            f'One-step {method.name}: {len(y)-1} steps, {n_calls} function calls'
        )

        ax1.plot(ts, [_y[0] for _y in y], f'{colors[i]}.--', label=method.name)
        ax2.plot(ts,
                 get_accuracy(exact, y),
                 f'{colors[i]}.--',
                 label=method.name)

    ax1.set_xlabel('t'), ax1.set_ylabel('y'), ax1.legend()
    ax2.set_xlabel('t'), ax2.set_ylabel('accuracy'), ax2.legend()
    plt.suptitle('test_one_step')
    plt.show()
Example #21
def test_quad_degree():
    """
    Check the algebraic degree of exactness of interpolatory quadrature
    Q: why, in some cases, is x^n integrated almost without error with n quadrature nodes?

    A: The Lagrange polynomial Ln is an algebraic polynomial of degree n - 1, so by construction
       the formula has algebraic degree of exactness at least n - 1. However, when n is odd,
       i.e. when the midpoint of [a, b] belongs to the grid, the formula is also exact for
       polynomials of degree n.
    """
    x0, x1 = 0, 1

    max_degree = 7
    max_nodes = 7

    for deg in range(max_degree):
        p = Monome(deg)
        y0 = p[x0, x1]

        node_counts = range(1, max_nodes + 1)

        Y = [
            quad(p, x0, x1, np.linspace(x0, x1, node_count))
            for node_count in node_counts
        ]
        # Y = [quad(p, x0, x1, x0 + (x1-x0) * np.random.random(node_count)) for node_count in node_counts]
        accuracy = get_accuracy(Y, y0 * np.ones_like(Y))

        # check accuracy
        for node_count, acc in zip(node_counts, accuracy):
            if node_count >= deg + 1:
                assert acc > 6

        plt.plot(node_counts, accuracy, '.:', label=f'x^{deg}')

    plt.legend()
    plt.ylabel('accuracy')
    plt.xlabel('node_count')
    plt.suptitle('test quad')
    plt.show()
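The parity effect from the answer is easy to verify by hand: Simpson's rule (3 equidistant nodes, built from a degree-2 interpolant) still integrates x^3 over [0, 1] exactly, because the midpoint of the interval is a node and the odd error term cancels:

# Simpson's rule with nodes 0, 1/2, 1 and weights 1/6, 4/6, 1/6
simpson = (0.0 ** 3 + 4 * 0.5 ** 3 + 1.0 ** 3) / 6  # = 0.25
exact = 1 / 4  # integral of x^3 over [0, 1]
assert abs(simpson - exact) < 1e-15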
Example #22
def test_iteratives(tol, check):
    """
    Compare iterative methods
    Q: which method is better? Why?
    Q: why, in the second case, can we not always reach the requested tolerance?
    """
    n = 5
    A = np.array([
        [n + 2, 1, 1],
        [1, n + 4, 1],
        [1, 1, n + 6],
    ],
                 dtype='float64')

    b = np.array([n + 4, n + 6, n + 8], dtype='float64') * np.pi

    methods = [richardson, jacobi, seidel]
    colors = 'mgb'
    names = ['Richardson', 'Jacobi', 'Gauss-Seidel']

    for method, color, name in zip(methods, colors, names):
        xs, ys = method(A, b, tol)
        plt.plot(range(len(ys)),
                 get_accuracy(ys, np.zeros_like(ys), eps=tol / 10),
                 f'{color}.-',
                 label=name)
        if check:
            assert np.linalg.norm(A @ xs[-1] -
                                  b) <= tol, f'{name} method failed'

    axes = plt.axis()
    plt.plot(axes[:2], -np.log10([tol, tol]), 'k:', label='tolerance')

    plt.title(f'test iterative methods for tol {tol}')
    plt.ylabel('accuracy')
    plt.xlabel('N iter')
    plt.legend()

    plt.show()
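A minimal sketch of the jacobi() iteration assumed above (richardson and seidel share the same interface); the return convention, iterates xs plus a per-iterate residual norm ys, is inferred from the calls:

import numpy as np

def jacobi(A, b, tol, max_iter=1000):
    # x_{k+1} = D^{-1} (b - R x_k), where A = D + R and D = diag(A)
    D = np.diag(A)
    R = A - np.diagflat(D)
    x = np.zeros_like(b)
    xs, ys = [x], [np.linalg.norm(A @ x - b)]
    for _ in range(max_iter):
        x = (b - R @ x) / D
        xs.append(x)
        ys.append(np.linalg.norm(A @ x - b))
        if ys[-1] <= tol:
            break
    return xs, ys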
Example #23
def evaluate(loader, model):
    print("Evaluate")

    # Set model to eval
    model.eval()

    accuracy = AverageMeter()
    positive_accuracy = AverageMeter()
    negative_accuracy = AverageMeter()
    with torch.no_grad():
        for batch_idx, (x, y) in enumerate(loader):
            x = x.to(device=device)
            y = y.to(device=device).to(torch.float32)
            y = torch.unsqueeze(y, 1)

            scores = model(x)
            loss = criterion(scores, y)

            scores = torch.squeeze(scores, 1)
            y = torch.squeeze(y, 1)

            acc = get_accuracy(y, scores)
            neg_acc, pos_acc = get_accuracy_per_class(y.cpu(), scores.cpu())

            accuracy.update(acc)
            positive_accuracy.update(pos_acc)
            negative_accuracy.update(neg_acc)

    
    wandb.log({
        "valid_acc": accuracy.avg,
        "positive_acc": positive_accuracy.avg,
        "negative_acc": negative_accuracy.avg,
        "valid_loss": loss.item()
    })

    # Set model back to train
    model.train()
Example #24
def test_fast_inv_sqrt():
    """
    Check fast computation of the inverse square root using a floating-point bit trick
    https://en.wikipedia.org/wiki/Fast_inverse_square_root
    """
    def fast_inv_sqrt(x, n_iter=1):
        c = 0x5f3759df
        s = tc.float2bin(x)
        i = c - tc.bin2dec(f'0{s[:-1]}', signed=False)
        y = tc.bin2float(tc.dec2bin(i, signed=False))
        for _ in range(n_iter):
            y *= 1.5 - 0.5 * x * y * y
        return y

    X = np.linspace(0, 16, 100)[1:]
    Y0 = np.power(X, -0.5)

    Y = [[fast_inv_sqrt(x, n_iter=i) for x in X] for i in range(3)]
    colors = 'rgb'

    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.plot(X, Y0, 'k-', label='exact')
    for i in range(3):
        ax1.plot(X,
                 Y[i],
                 f'{colors[i]}.:',
                 label=f'{i}-iterations approximation')
    ax1.legend()

    for i in range(3):
        ax2.plot(X,
                 get_accuracy(Y0, Y[i]),
                 f'{colors[i]}.:',
                 label=f'{i}-iterations accuracy')
    ax2.legend()

    plt.show()
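The tc bit-twiddling helpers above are course-specific; an equivalent self-contained version of the same trick with numpy views (a sketch assuming 32-bit floats, as in the original Quake III code):

import numpy as np

def fast_inv_sqrt_np(x, n_iter=1):
    # reinterpret the float32 bits as int32, apply the magic constant
    # to halve the exponent, then polish with Newton iterations
    i = np.float32(x).view(np.int32)
    i = np.int32(0x5F3759DF) - (i >> 1)
    y = i.view(np.float32)
    for _ in range(n_iter):
        y = y * (np.float32(1.5) - np.float32(0.5) * np.float32(x) * y * y)
    return float(y)

One Newton iteration already brings the relative error below roughly 0.2%.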
Example #25
               conv_stride=1,
               first_pool_size=None,
               first_pool_stride=None,
               block_sizes=[2, 2, 2, 2],
               block_strides=[1, 2, 2, 2],
               data_format='channels_first')

############################################
# Loss, Accuracy, Train, Summary and Saver #
############################################
weight_decay = 2e-4

logits = resnet(images, training=True)

cross_entropy = utils.get_cross_entropy(logits, labels)
accuracy = utils.get_accuracy(logits, labels)
tf.summary.scalar('cross_entropy', cross_entropy)
tf.summary.scalar('accuracy', accuracy)

reg_loss = utils.get_reg_loss(weight_decay)
tf.summary.scalar('reg_loss', reg_loss)

total_loss = cross_entropy + reg_loss
tf.summary.scalar('total_loss', total_loss)

global_step = tf.train.create_global_step()
learning_rate = utils.configure_learning_rate(global_step, TRAIN_SAMPLES,
                                              FLAGS)
tf.summary.scalar('learning_rate', learning_rate)

optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,
Example #26
    label_batch = [item[1] for item in query_set]

    label_example = label2tensor(label_batch)
    label_dict = label_example[0]
    logging.info('select labels:{}'.format(' '.join(list(label_dict.keys()))))

    # train
    model.train()
    class_vector, prob = model(query_set=query_batch, support_set=train_batch)

    loss = loss_f(prob, label_example[1])
    logging.info('training loss: {}'.format(loss.data))

    optimize.zero_grad()
    loss.backward()
    optimize.step()

    accuracy = get_accuracy(prob, labels=label_example[1])
    logging.info('training accuracy: {}'.format(accuracy))

    # eval
    # model.eval()
    #
    # valid_batch = data_loader.build_example(valid_set, labeled=True)
    # valid_label = [label_dict[item[1]] for item in valid_set]
    #
    # with torch.no_grad():
    #     class_vector_valid, prob_valid = model(query_set=valid_batch, class_vector=class_vector, mode='eval')
    #     valid_accuracy = get_accuracy(prob_valid, valid_label)
    #     logging.info('eval accuracy: {}'.format(valid_accuracy))
Example #27
        optimizer.zero_grad()
        global_step += 1

    validation_loss = 0
    correct_count = 0
    model.eval()
    for batch in val_dataloader:
        if torch.cuda.is_available():
            batch = (item.cuda() for item in batch)
        input_ids, input_mask, segment_ids, label_ids = batch

        with torch.no_grad():
            val_logits = model(input_ids, segment_ids, input_mask)

        val_loss = loss_fct(val_logits.view(-1, 2), label_ids.view(-1))
        correct_count = get_accuracy(val_logits.view(-1, 2),
                                     label_ids.view(-1), correct_count)
        validation_loss += val_loss.item()

    training_loss = training_loss / len(train_dataloader)
    validation_loss = validation_loss / len(val_data)
    accuracy = correct_count / len(val_data)
    logging.info(
        '{}/{}, train loss: {}, validation loss: {}, val_accuracy: {}'.format(
            i, epochs, training_loss, validation_loss, accuracy))

    if validation_loss < last_val_loss:
        model_to_save = model.module if hasattr(model, 'module') else model
        output_model_file = os.path.join(args.output_dir,
                                         'model_' + str(i) + '.bin')
        torch.save(model_to_save.state_dict(), output_model_file)
        output_config_file = os.path.join(args.output_dir,