Example #1
0
def re_scatter(url_re, url_init, threshold=0.05):
    """
    3-D scatter plot of initial conditions, coloured by whether the relative
    error of the neural-network prediction exceeds `threshold`.

    Parameters
    ----------
    url_re : str
        Path to an .npz file containing array 're' (relative errors).
    url_init : str
        Path to an .npz file containing array 'x0' (initial conditions).
    threshold : float, optional
        Relative errors above this are marked as badly fitted (assumed < 1).
    """
    # fix: `num` is a plt.figure() keyword, not a valid Axes property —
    # passing it to add_subplot raises in matplotlib; name the figure instead.
    fig = plt.figure(num=8000)
    ax = fig.add_subplot(111, projection='3d')

    redata = np.load(url_re)
    re = redata['re']

    initdata = np.load(url_init)
    initconditions = initdata['x0']

    # NOTE(review): columns 1..3 are plotted and labelled x2..x4; column 0 is
    # presumably the first state component — confirm against the data layout.
    xs = initconditions[:, 1]
    ys = initconditions[:, 2]
    zs = initconditions[:, 3]

    # Compute both masks before mutating `re`, so classification does not
    # depend on the marker value (the old code re-tested after writing 1s).
    sup_index = re > threshold
    sub_index = re <= threshold
    re[sup_index] = 1  # badly fitted
    re[sub_index] = 0  # well fitted

    # fix: `num` was undefined here (NameError); total sample count is re.size.
    num = re.size
    # rate gives the ratio of well fitted trajectories.
    rate = (num - np.count_nonzero(re)) / num
    print('Rate = {:+1.3e}'.format(rate))

    color = re
    p = ax.scatter(xs, ys, zs, c=color, cmap='viridis')
    fig.colorbar(p)

    ax.set_xlabel('x2')
    ax.set_ylabel('x3')
    ax.set_zlabel('x4')

    plt.show()
Example #2
0
def realign_image(arr, shift):
    """
    Translate an image, using an exact circular shift for (near-)integer
    offsets and a Fourier-domain shift otherwise.

    Parameters
    ----------
    arr : ndarray
        Image array.

    shift : tuple
        Translation (row_shift, col_shift) in pixels; may be fractional.

    Returns
    -------
    ndarray
        Shifted image as float32.
    """
    # if both shifts are integers (within 0.01), do circular shift;
    # otherwise perform Fourier shift.
    if np.count_nonzero(np.abs(np.array(shift) - np.round(shift)) < 0.01) == 2:
        # fix: int() truncates, so e.g. 0.999 (classified as integer above)
        # used to roll by 0; round to the nearest integer instead.
        temp = np.roll(arr, int(round(shift[0])), axis=0)
        temp = np.roll(temp, int(round(shift[1])), axis=1)
        temp = temp.astype('float32')
    else:
        temp = fourier_shift(np.fft.fftn(arr), shift)
        temp = np.fft.ifftn(temp)
        temp = np.abs(temp).astype('float32')
    return temp
Example #3
0
def split_data_crossvalid(data):
    """Stratified cross-validation split into folds of ~70 rows.

    Column 0 of `data` is assumed to hold a binary 0/1 label. For each fold,
    a test batch preserving the (rounded) class ratio is sliced out of each
    class block and the remainder forms the training set.

    Returns
    -------
    (X_trainfolder, y_trainfolder, X_testfolder, y_testfolder) : lists of
    ndarrays, one entry per fold; X arrays hold columns 1:, y arrays column 0.
    """
    X_trainfolder = []
    X_testfolder = []
    y_trainfolder = []
    y_testfolder = []
    number_one = np.count_nonzero(data[:, :1])
    # Select each class by label mask (the previous argsort is unnecessary
    # once masks are used, and row order within a class is preserved).
    data_one = data[data[:, 0] == 1]
    data_zero = data[data[:, 0] == 0]
    one_ratio = round(number_one / len(data), 1)
    one_zero_ratio = 1 - one_ratio
    batch_one = int(70 * one_ratio)
    batch_zero = int(70 * one_zero_ratio)
    batchs = len(data) // 70
    for i in range(batchs):
        # fix: np.delete was previously given the test ROWS as if they were
        # indices; delete by index slice instead (slices are safely clipped).
        sl_one = np.s_[i * batch_one:(i + 1) * batch_one]
        sl_zero = np.s_[i * batch_zero:(i + 1) * batch_zero]
        test_one = data_one[sl_one, :]
        train_one = np.delete(data_one, sl_one, axis=0)
        test_zero = data_zero[sl_zero, :]
        train_zero = np.delete(data_zero, sl_zero, axis=0)
        train_sets = np.concatenate((train_one, train_zero), axis=0)
        test_sets = np.concatenate((test_one, test_zero), axis=0)
        np.random.shuffle(train_sets)
        np.random.shuffle(test_sets)
        X_trainfolder.append(train_sets[:, 1:])
        y_trainfolder.append(train_sets[:, 0])
        X_testfolder.append(test_sets[:, 1:])
        y_testfolder.append(test_sets[:, 0])
    return X_trainfolder, y_trainfolder, X_testfolder, y_testfolder
Example #4
0
def auc_calc_beta(x_test, y_test, nn, N, perc, model, weightlist=None):
    '''
    Accuracy (percent) per weight sample on the fraction `perc` of the test
    set with the lowest mean predictive entropy.

    Options for model: "mc", "bbvi", "ensemble", "deterministic".
    For BBVI or ensemble, pass a list of weights via `weightlist`.

    Returns an array of accuracies (one per weight sample) for the
    non-deterministic models, or the mean accuracy from auc_calc_proba
    for the deterministic one.

    Raises ValueError for an unknown model or a missing weightlist
    (previously these fell through and crashed later with NameError).
    '''
    n_test = len(y_test)
    if model == "deterministic":
        auc = auc_calc_proba(x_test, y_test, nn, N, perc)
        return auc  # this only returns the mean accuracy

    if model == "mc":
        # MC dropout: reuse the network's own weights N times
        p_allw, p_mean, entropymean = myentropy(nn, [nn.weights] * N, x_test.T, returnallp=True)
    elif model in ("bbvi", "ensemble"):
        if weightlist is None:
            # fix: the old code silently skipped this branch, leaving
            # p_allw unbound
            raise ValueError("weightlist is required for model '{}'".format(model))
        p_allw, p_mean, entropymean = myentropy(nn, weightlist, x_test.T, returnallp=True)
    else:
        raise ValueError("unknown model '{}'".format(model))

    # p_allw has dimension: NWeightSamples x NXData
    # sort test points by increasing mean entropy and retain the most
    # certain fraction to evaluate
    idx = np.argsort(entropymean)
    n_keep = int(perc * n_test)
    y_test_retained = y_test[idx][0:n_keep]
    ypred_allw = np.round(p_allw[:, idx][:, 0:n_keep])  # NW x NX
    auc_allw = np.zeros(ypred_allw.shape[0])
    for w in range(ypred_allw.shape[0]):
        auc_allw[w] = np.count_nonzero(ypred_allw[w, :] == y_test_retained) / len(y_test_retained) * 100
    return auc_allw
Example #5
0
def forward(x, y, W_hh, W_oh, b_h, b_o):
    """One forward pass of a 1-hidden-layer net: relu hidden layer, softmax
    output. Returns (accuracy, error) on the batch (x, y).

    Relies on module-level `relu`, `softmax`, `encoder` and `Error`.
    """
    hidden = relu(np.dot(W_hh, x.T) + b_h)
    probs = softmax(np.dot(W_oh, hidden) + b_o)
    # predicted class index per sample, as a column vector
    predicted = np.expand_dims(np.argmax(probs, axis=0), axis=0).T
    # nonzero differences against the decoded labels are misclassifications
    mismatches = np.count_nonzero(encoder.inverse_transform(y) - predicted)
    accuracy = (len(x) - mismatches) / len(x)
    loss = Error(W_hh, W_oh, b_h, b_o, x, y)
    return accuracy, loss
Example #6
0
def prune_function(mdl, j):
    """
    Randomly zero entries of mdl.params_flat (mutated in place) until more
    than `j` entries are zero.

    Parameters
    ----------
    mdl : model with a flat parameter vector attribute `params_flat`.
    j : int, target number of zeros to exceed.

    Returns
    -------
    (b, zer, nonzer) : the pruned flat parameter vector, its zero count and
    its nonzero count.
    """
    b = mdl.params_flat  # NOTE: alias — the model's parameters are pruned too
    # fix: nonzer was unbound when the loop body never ran (e.g. j < 0);
    # compute the counts up front.
    nonzer = np.count_nonzero(b)
    zer = len(b) - nonzer
    # fix: the original looped forever when j >= len(b); stop once every
    # entry is already zero.
    while zer <= j and nonzer > 0:
        i = random.randrange(0, len(b))
        b[i] = 0.
        nonzer = np.count_nonzero(b)
        zer = len(b) - nonzer
    return b, zer, nonzer
Example #7
0
def split_data(data, train_test_ratio):
    """Stratified train/test split on the binary 0/1 label in column 0.

    `train_test_ratio` is the ratio of train size to test size (e.g. 3 gives
    a 75/25 split), applied per class. Both returned sets are shuffled.

    Returns
    -------
    (train_sets, test_sets) : ndarrays of full rows (label + features).
    """
    number_one = np.count_nonzero(data[:, :1])
    number_zero = len(data) - number_one
    # fix: the old code sorted ascending and then took data[:number_one] as
    # the "ones" — but ascending order puts the ZERO labels first, so the
    # classes were swapped and the stratification was wrong whenever the
    # class counts differed. Select by label mask instead.
    data_one = data[data[:, 0] == 1]
    data_zero = data[data[:, 0] == 0]
    batch_one_train = int(number_one * train_test_ratio / (1 + train_test_ratio))
    batch_zero_train = int(number_zero * train_test_ratio / (1 + train_test_ratio))
    train_sets = np.concatenate((data_one[:batch_one_train, :], data_zero[:batch_zero_train, :]), axis=0)
    test_sets = np.concatenate((data_one[batch_one_train:, :], data_zero[batch_zero_train:, :]), axis=0)
    np.random.shuffle(train_sets)
    np.random.shuffle(test_sets)
    return train_sets, test_sets
Example #8
0
    def test_qnode_gradient_multiple_gate_parameters(self):
        """Gates with several free parameters must yield correct gradients."""
        self.logTestName()
        params = [0.5, 0.3, -0.7]

        def circuit(x, y, z):
            qml.RX(0.4, [0])
            qml.Rot(x, y, z, [0])
            qml.RY(-0.2, [0])
            return qml.expval.PauliZ(0)

        node = qml.QNode(circuit, self.dev1)
        node(*params)
        jac_analytic = node.jacobian(params, method='A')
        jac_numeric = node.jacobian(params, method='F')

        # every parameter supports the analytic method
        self.assertTrue(node.grad_method_for_par == {0: 'A', 1: 'A', 2: 'A'})
        # jacobian has shape (1, 3) and no zero entries
        self.assertEqual(jac_analytic.shape, (1, 3))
        self.assertEqual(np.count_nonzero(jac_analytic), 3)
        # analytic and finite-difference methods agree
        self.assertAllAlmostEqual(jac_analytic, jac_numeric, delta=self.tol)
Example #9
0
    def test_qnode_gradient_multiple_gate_parameters(self, tol):
        """Gates with several free parameters must yield correct gradients."""
        params = [0.5, 0.3, -0.7]

        def circuit(x, y, z):
            qml.RX(0.4, wires=[0])
            qml.Rot(x, y, z, wires=[0])
            qml.RY(-0.2, wires=[0])
            return qml.expval(qml.PauliZ(0))

        device = qml.device('default.qubit', wires=1)
        node = qml.QNode(circuit, device)
        node(*params)
        jac_analytic = node.jacobian(params, method='A')
        jac_numeric = node.jacobian(params, method='F')

        # every parameter supports the analytic method
        assert node.grad_method_for_par == {0: 'A', 1: 'A', 2: 'A'}
        # jacobian has shape (1, 3) and no zero entries
        assert jac_analytic.shape == (1, 3)
        assert np.count_nonzero(jac_analytic) == 3
        # analytic and finite-difference methods agree
        assert np.allclose(jac_analytic, jac_numeric, atol=tol, rtol=0)
Example #10
0
        def animate(k):
            """Render animation frame k: the classifier fit for
            num_elements[k] units on ax1 and the cost history up to frame k
            on ax2.

            Relies on closure variables from the enclosing method: ax1/ax2,
            num_elements, weight_history, basis, cost_evals, and the axis
            limits xmin1/xmax1/xmin2/xmax2/minxc/maxxc/minc/maxc.
            """
            # clear the panel
            ax1.cla()
            ax2.cla()

            # print rendering update
            if np.mod(k + 1, 5) == 0:
                print('rendering animation frame ' + str(k + 1) + ' of ' +
                      str(len(num_elements)))
            if k == len(num_elements) - 1:
                print('animation rendering complete!')
                time.sleep(1)
                clear_output()

            # loop over panels, produce plots
            self.D = num_elements[k]

            # fit to data
            # placeholder initialisers; F appears unused in this frame body
            F = 0
            predict = 0
            w = 0
            if basis == 'poly':
                w = weight_history[k]
                self.D = len(w) - 1
                ax1.set_title(str(self.D) + ' poly units', fontsize=14)
                self.predict = self.poly_predict

            elif basis == 'tanh':
                w = weight_history[k]
                self.D = len(w) - 1
                ax1.set_title(str(self.D) + ' tanh units', fontsize=14)
                self.predict = self.tanh_predict

            elif basis == 'tree':
                # NOTE(review): indexes weight_history by self.D rather than
                # by frame k (unlike the other branches) — confirm intended.
                w = weight_history[self.D]
                ax1.set_title(str(np.count_nonzero(w)) + ' tree units',
                              fontsize=14)
                self.predict = self.tree_predict
                self.weight_history = weight_history

            ####### plot all and dress panel ######
            # produce learned predictor
            # scatter the +1-labelled training points
            ind0 = np.argwhere(self.y == +1)
            ind0 = [e[0] for e in ind0]
            ax1.scatter(self.x[ind0, 0],
                        self.x[ind0, 1],
                        s=55,
                        color=self.colors[0],
                        edgecolor='k')

            # scatter the -1-labelled training points
            ind1 = np.argwhere(self.y == -1)
            ind1 = [e[0] for e in ind1]
            ax1.scatter(self.x[ind1, 0],
                        self.x[ind1, 1],
                        s=55,
                        color=self.colors[1],
                        edgecolor='k')

            # plot decision boundary
            # evaluate the predictor over a 100x100 grid of the input plane
            r1 = np.linspace(xmin1, xmax1, 100)
            r2 = np.linspace(xmin2, xmax2, 100)
            s, t = np.meshgrid(r1, r2)
            s = np.reshape(s, (np.size(s), 1))
            t = np.reshape(t, (np.size(t), 1))
            h = np.concatenate((s, t), axis=1)
            z = []
            for j in range(len(h)):
                a = self.predict(h[j, :], w)
                z.append(a)
            z = np.asarray(z)
            # squash raw scores into [-1, 1] for the contour levels below
            z = np.tanh(z)

            # reshape it
            s.shape = (np.size(r1), np.size(r2))
            t.shape = (np.size(r1), np.size(r2))
            z.shape = (np.size(r1), np.size(r2))

            #### plot contour, color regions ####
            # the level-0 contour is the decision boundary
            ax1.contour(s,
                        t,
                        z,
                        colors='k',
                        linewidths=2.5,
                        levels=[0],
                        zorder=2)
            ax1.contourf(s,
                         t,
                         z,
                         colors=[self.colors[1], self.colors[0]],
                         alpha=0.15,
                         levels=range(-1, 2))

            # cleanup panel
            ax1.set_xlim([xmin1, xmax1])
            ax1.set_ylim([xmin2, xmax2])
            ax1.set_xlabel(r'$x_1$', fontsize=14, labelpad=10)
            ax1.set_ylabel(r'$x_2$', rotation=0, fontsize=14, labelpad=10)
            ax1.set_xticks(np.arange(round(xmin1), round(xmax1) + 1, 1.0))
            ax1.set_yticks(np.arange(round(xmin2), round(xmax2) + 1, 1.0))

            # cost function value
            # line plus markers for the cost history up to this frame
            ax2.plot([v - 1 for v in num_elements[:k + 1]],
                     cost_evals[:k + 1],
                     color='b',
                     linewidth=1.5,
                     zorder=1)
            ax2.scatter([v - 1 for v in num_elements[:k + 1]],
                        cost_evals[:k + 1],
                        color='b',
                        s=70,
                        edgecolor='w',
                        linewidth=1.5,
                        zorder=3)

            ax2.set_xlabel('iteration', fontsize=12)
            ax2.set_title('cost function plot', fontsize=12)

            # cleanp panel
            ax2.set_xlim([minxc, maxxc])
            ax2.set_ylim([minc, maxc])
            ax2.xaxis.set_major_locator(MaxNLocator(integer=True))
Example #11
0
            #print(sigma_grads_running_dot_product)

            criterion1 = mean_grads_running_dot_product < 0
            criterion2 = sigma_grads_running_dot_product < 0
            criterion3 = np.abs(elbo_prev - elbo) < np.abs(
                elbo_threshold_swa * elbo_prev)
            criterion7 = s_mean < 0
            criterion8 = s_log_var < 0
            elbo_diff_list.append(elbo - elbo_prev)

            elbo_diff_median = np.median(np.array(elbo_diff_list[-21:-1]))
            elbo_diff_mean = np.mean(np.array(elbo_diff_list[-21:-1]))
            elbo_diff_last_20 = elbo_diff_list[-20:-1]
            #elbo_diff_max = np.max(np.array(elbo_diff_list[-21:-1]))
            elbo_diff_list_abs = [0 for i in elbo_diff_last_20 if i < 0]
            val1 = len(elbo_diff_list_abs) - np.count_nonzero(
                np.asarray(elbo_diff_list_abs))
            criterion4 = val1 > 5

            criterion6 = itt > 4000
            if len(elbo_mean_list) > 6:
                criterion5 = np.abs(elbo_mean_list[-1] - elbo_mean_list[-2]
                                    ) < np.abs(elbo_mean_list[-2] -
                                               elbo_mean_list[-5]) * 0.10

            #if criterion1 and criterion2 and criterion3 and criterion6 and start_swa is False:
            #    start_swa = True
            #    start_swa_iter = itt+1
            #    print(start_swa_iter)
            #    num_samples_swa =4
            #print(elbo_diff_list)
Example #12
0
def main():
    """Run a lottery-ticket-style pruning experiment.

    Trains the model, snapshots its initial weights, repeatedly prunes and
    retrains (resetting surviving weights to their initial values), records
    Hessian-eigenvector projection coefficients, plots them, and saves all
    results under args.results_folder.

    Relies on module-level names: args, data, model, train_model,
    train_pruned_model, prune_function, invar, pickle, tqdm, torch, plt, np.

    Returns
    -------
    str : the results folder path (args.results_folder).
    """
    coeff = []
    ang_sb = []
    ang_np = []
    p_angles = []
    inputs_train, targets_train, inputs_test, targets_test = data.generate_data(
        args)
    results = {
        'inputs_train': inputs_train,
        'targets_train': targets_train,
        'inputs_test': inputs_test,
        'targets_test': targets_test
    }

    # Actual Model that is being observed
    mdl = model.create_model(args, inputs_train, targets_train)
    print(
        "\n===============================================================================================\n"
    )

    start_params = mdl.params_flat

    # NOTE Pickling Initial Weights
    # (these are restored into surviving weights after each pruning round)
    with open('outfile', 'wb') as sp:
        pickle.dump(mdl.params_flat, sp)

    new_params = train_model(args, mdl, results)

    with open('outfile', 'rb') as sp:
        start_params = pickle.load(sp)

    # NOTE Lottery Ticket Pruning Loop
    # per: percentage of remaining weights to prune each round
    per = args.per
    nonzer = (np.count_nonzero(mdl.params_flat))
    zer = len(mdl.params_flat) - nonzer
    x1 = nonzer - zer
    z1 = int(((x1 / 100.) * per))
    zer = z1 + zer
    print(" {} + {} = {}".format(0, nonzer, len(mdl.params_flat)))

    # NOTE(review): train_model is called again here and unpacked as a
    # 3-tuple, unlike the single-value unpack above — confirm its signature.
    new_params, inputs, outputs = train_model(args, mdl, results)
    hess = mdl.hessian(mdl.params_flat)  # Calculating Hessian
    # Converting the Hessian to Tensor
    hess = torch.tensor(hess).float()
    # NOTE(review): torch.symeig is deprecated in newer torch
    # (use torch.linalg.eigh) — left as-is.
    eigenvalues, eigenvec = torch.symeig(hess, eigenvectors=True)

    hess, eigenvalues, eigenvec, coeff, ang_np, ang_sb, p_angles, top_vec = invar(
        mdl, args, inputs_train, targets_train, hess, eigenvalues, eigenvec,
        coeff, ang_np, ang_sb, p_angles)

    # NOTE Pruning Loop

    print(
        "===============================================================================================\n"
    )
    for i in tqdm(range(0, args.prune_iter),
                  desc="Pruning Progress",
                  dynamic_ncols=True):
        # NOTE(review): trailing comma (Python-2 style print leftover) makes
        # this statement a harmless 1-tuple — left as-is.
        print("\n{} +".format(zer)),

        pruned_params_flat, zer, nonzer = prune_function(mdl, zer)
        print("{} = {}".format(nonzer, len(mdl.params_flat)))
        # grow the pruning target by `per` percent of the remaining margin
        x1 = nonzer - zer
        z1 = int((x1 / 100.) * per)
        zer = z1 + zer

        # lottery ticket: reset every surviving (nonzero) weight to its
        # original initial value
        for p in range(0, len(start_params)):
            if (pruned_params_flat[p] != 0.):
                pruned_params_flat[p] = start_params[p]

        mdl.params_flat = pruned_params_flat
        new_params, coeff = train_pruned_model(args, mdl, results, top_vec,
                                               coeff)

    # Plot coefficients, zeroing out entries outside (-0.1, 0.1)
    coeff = torch.tensor(coeff)
    for i in range(coeff.shape[0]):
        a = torch.zeros(coeff[i].shape[0]).long()
        b = torch.arange(0, coeff[i].shape[0])
        # keep indices whose coefficient lies in (-0.1, 0.1); others map to 0
        c = torch.where(((coeff[i] > -0.1) & (coeff[i] < 0.1)), b, a)
        z = torch.zeros(coeff[i].shape[0]).fill_(0)
        z[torch.nonzero(c)] = coeff[i][torch.nonzero(c)]
        z = np.array(z)
        plt.plot(z)
    plt.xlabel('Dimension', fontsize=14)
    plt.ylabel('Coefficient', fontsize=14)
    pnpy = args.results_folder + '/plot1.png'
    plt.savefig(pnpy, format='png', pad_inches=5)

    # Persist all recorded quantities next to the plot
    args.suffix = args.results_folder + '/coeff.npy'
    np.save(args.suffix, coeff)
    args.suffix = args.results_folder + '/ang_sb.npy'
    np.save(args.suffix, ang_sb)
    args.suffix = args.results_folder + '/ang_np.npy'
    np.save(args.suffix, ang_np)
    args.suffix = args.results_folder + '/p_angles.npy'
    np.save(args.suffix, p_angles)

    return args.results_folder