Example #1
from collections import Counter
from typing import Optional

import cupy as cp

# normalize(), em(), log_likelihood(), filecheck() and the LEXICON_SIZE,
# TOPIC_SIZE, COLLECTION_SIZE and UPDATE_THRESHOLD constants are defined
# elsewhere in the original module.


def main(npzout: str, npzin: Optional[str] = None):
    collection = []
    with open("./resources/Collection.txt") as fileinput:
        for row in fileinput:
            collection.append(Counter(row.split()))

    if npzin is None:
        p_wt = normalize(cp.random.rand(LEXICON_SIZE, TOPIC_SIZE), axis=0)
        p_td = normalize(cp.random.rand(COLLECTION_SIZE, TOPIC_SIZE), axis=0)
    else:
        npzfile = cp.load(npzin)
        assert filecheck(npzfile)
        p_wt = npzfile["p_wt"]
        p_td = npzfile["p_td"]

    baseline_likelihood = log_likelihood(collection, p_wt, p_td)
    print("likelihood(baseline): %f" % (baseline_likelihood))
    prev_likelihood = baseline_likelihood
    for index in range(1000):
        p_wt, p_td = em(collection, p_wt, p_td)
        cp.savez(npzout, p_wt=p_wt, p_td=p_td)
        likelihood = log_likelihood(collection, p_wt, p_td)
        print("likelihood(%d): %f" % (index + 1, likelihood))
        if (likelihood -
                prev_likelihood) / abs(prev_likelihood) < UPDATE_THRESHOLD:
            break
        prev_likelihood = likelihood
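
A minimal sketch of how the checkpoint written by the loop above could be reloaded; the filename is illustrative, while the `p_wt`/`p_td` keys match the savez call in the example:

import cupy as cp

# resume from (or just inspect) a checkpoint written by main() above;
# "plsa_checkpoint.npz" is a hypothetical filename
npzfile = cp.load("plsa_checkpoint.npz")
p_wt, p_td = npzfile["p_wt"], npzfile["p_td"]

# equivalently, pass it back in as npzin:
# main(npzout="plsa_checkpoint.npz", npzin="plsa_checkpoint.npz")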
Example #2
import cupy as cp
import numpy as np

# iterate(), time_all() and plot_fig() are defined elsewhere in the
# original module.


def main():

    n, d, epochs, ny, seed_nr = 100, 1000, 200, 3, 14
    m = [200, 400, 800, 1600, 3200]
    save_weights, plt_fig, save_fig, timer, verbose = "no", "no", "no", "no", 0
    cp.random.seed(seed_nr)
    epoch_vec = cp.arange(epochs + 1)

    if timer == "yes":
        time_all()
    loss_k, pattern_change, max_dist, least_eig = iterate(epochs, m, n, d, ny, verbose)

    if save_weights == "yes":
        cp.savez("Model" + str(np.datetime64("now")), loss_k=loss_k, epoch_vec=epoch_vec, m=m,
                 pattern_change=pattern_change, max_dist=max_dist,
                 least_eig=least_eig, ny=ny, n=n, d=d, seed_nr=seed_nr)
    if plt_fig == "yes":
        plot_fig(loss_k, epoch_vec, m, pattern_change, max_dist, least_eig, n, d, save_fig)
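
Note that cp.savez appends ".npz" when the filename has no extension, so the archive above lands as "Model<timestamp>.npz" and its arrays come back under their keyword names; a loading sketch with an illustrative timestamp:

import cupy as cp

# hypothetical filename produced by "Model" + str(np.datetime64("now"))
run = cp.load("Model2021-01-01T00:00:00.npz")
loss_k, epoch_vec = run["loss_k"], run["epoch_vec"]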
Example #3
 def save(self, path: str):
     data = {f"p{i}": p.data for i, p in enumerate(self.params)}
     xp.savez(path, **data)
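
A matching load method is not shown in the source; a sketch under the assumption that `xp` is numpy or cupy and that self.params keeps the same order it had at save time:

 def load(self, path: str):
     # counterpart to save(); note savez appends ".npz" if path lacks an extension
     data = xp.load(path)
     for i, p in enumerate(self.params):
         p.data = data[f"p{i}"]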
Example #4
    P_NN = NN_softmax_form(dist_mtx, args.epsilon, axis=1)
    p_nn = compute_accuracy(P_NN, y_gallery, y_query, args.majority_vote)
    print("NN test accuracy (top-[1, 5, 10]): {}%".format(p_nn), flush=True)

    # ISF
    P_ISF, hp_isf = isoftmax(dist_mtx, args.epsilon, axis=1)
    p_isf = compute_accuracy(P_ISF, y_gallery, y_query, args.majority_vote)
    print('ISF test accuracy (top-[1, 5, 10]): {}%'.format(p_isf), flush=True)

    # HNN primal
    P_HNN0 = HNN_primal(dist_mtx, args.epsilon, args.iters)
    p_hnn0 = compute_accuracy(P_HNN0, y_gallery, y_query, args.majority_vote)
    print('HNN primal test accuracy (top-[1, 5, 10]): {}%'.format(p_hnn0),
          flush=True)

    # HNN dual
    print("Running HNN dual ...", flush=True)
    HNN_dual = HNN(X_query, X_gallery, dist_function, args.epsilon)
    HNN_dual.gallery_weight(args.iters, batch=128, lr=100)
    HNN_dual.get_full_plan()
    hp_hnn = -HNN_dual.beta / args.epsilon
    P_HNN1 = HNN_dual.P
    p_hnn1 = compute_accuracy(P_HNN1, y_gallery, y_query, args.majority_vote)
    print('HNN dual test accuracy (top-[1, 5, 10]): {}%'.format(p_hnn1),
          flush=True)

    if args.save_matrices:
        xp.savez(args.save_matrices, P_NN=P_NN, P_ISF=P_ISF,
                 P_HNN_primal=P_HNN0, P_HNN_dual=P_HNN1,
                 hp_isf=hp_isf, hp_hnn=hp_hnn)
Example #5
import random

import cupy as cp
import numpy as np
from tqdm import tqdm

# to_categorical (keras) and batch_normalize come from elsewhere in the
# original module.


def gen_data_npz(fimg, img, mask, config, ntiles=1000, save_dir='train'):
    """
    Extract random patches from cupy arrays and save them as an npz dataset.
    Args:
        fimg (str): data filename
        img (cupy.array): cupy array with data
        mask (cupy.array): cupy array with mask
        config: configuration object (TILE_SIZE, N_CLASSES, NODATA_VAL, ...)
        ntiles (int): number of tiles to extract
        save_dir (str): directory to save output
    Return:
        None. Saves the dataset to save_dir in npz format.
    ----------
    Example
    ----------
        gen_data_npz('image.tif', arr, mask, config, 8000, 'output')
    """
    # set dimensions of the input image array, and get desired tile size
    z_dim, x_dim, y_dim = img.shape
    tsz = config.TILE_SIZE

    # placeholders for final datasets
    img_cp = cp.empty((ntiles, tsz, tsz, z_dim), dtype=cp.float32)
    mask_np = np.empty((ntiles, tsz, tsz, config.N_CLASSES), dtype=np.float16)

    # generate n number of tiles
    for i in tqdm(range(ntiles)):

        # Generate random integers from image
        xc = random.randint(0, x_dim - tsz)
        yc = random.randint(0, y_dim - tsz)

        # verify data is not on nodata region
        while cp.any(img[:, xc:(xc + tsz),
                         yc:(yc + tsz)] == config.NODATA_VAL):
            xc = random.randint(0, x_dim - tsz)
            yc = random.randint(0, y_dim - tsz)

        # change order to (h, w, c)
        tile_img = cp.moveaxis(img[:, xc:(xc + tsz), yc:(yc + tsz)], 0, -1)

        # TODO: replace with cuml One-hot encoder on future date when they fix
        # a bug on the output types. Using to_categorical in the meantime
        # Converts labels into one-hot encoding labels
        tile_mask = to_categorical(cp.asnumpy(mask[xc:(xc + tsz),
                                                   yc:(yc + tsz)]),
                                   num_classes=config.N_CLASSES,
                                   dtype='float16')

        # maybe standardize here? depends on performance of single img vs batch
        img_cp[i, :, :, :] = tile_img
        mask_np[i, :, :, :] = tile_mask

    # normalize
    if config.NORMALIZE:
        img_cp = img_cp / config.normalization_factor

    # standardize
    if config.STANDARDIZE:
        img_cp = batch_normalize(img_cp, axis=(0, 1), c=1e-8)

    # save dataset into local disk, npz format with x and y labels
    cp.savez(f'{save_dir}/{fimg[:-4]}.npz', x=img_cp, y=cp.asarray(mask_np))
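
The saved archive can then be consumed for training; a minimal sketch, assuming the save_dir/fimg pattern above produced 'train/image.npz':

import cupy as cp

data = cp.load('train/image.npz')
x_train, y_train = data['x'], data['y']
# shapes: (ntiles, tsz, tsz, z_dim) and (ntiles, tsz, tsz, N_CLASSES)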
Example #6
        # if times % 1000000 * Annealing[0] * Annealing[1] == 0:
        #     L = L / Annealing[1]
        #     Annealing[0] += 1
        #     if L < 0.000001:
        #         plt.show()
        print()
print("第%d次訓練(學習率:%f):" % (times, L))
print("output:\n", output[2])
print("總誤差:", error)
print(expect_data.shape)
print("輸出位置:", output[1])
print("輸出位置對應答案的值", result)
print("答對筆數/總筆數: %d/%d" % (k, train_datas))
# plt.plot(x, y)
# plt.show()
cp.savez("weight_%f.npz" % (L), weight, weight2,
         weight3, weight4, baise, baise2, baise3, baise4)
# with open("weight_%f.txt" % (L), 'w', newline='') as csvfile:
#     spamwriter = csv.writer(csvfile)
#     for i in weight:
#         spamwriter.writerow(map(str, i))
# with open("weight2_%f.txt" % (L), 'w', newline='') as csvfile:
#     spamwriter = csv.writer(csvfile)
#     for i in weight2:
#         spamwriter.writerow(map(str, i))
# with open("baise_%f.txt" % (L), 'w', newline='') as csvfile:
#     spamwriter = csv.writer(csvfile)
#     for i in baise:
#         spamwriter.writerow(map(str, i))
# with open("baise2_%f.txt" % (L), 'w', newline='') as csvfile:
#     spamwriter = csv.writer(csvfile)
#     for i in baise2:
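
Because the eight arrays above are passed to cp.savez positionally rather than by keyword, they are stored under the default names arr_0 through arr_7; a loading sketch with an illustrative filename:

import cupy as cp

npzfile = cp.load("weight_0.000100.npz")  # hypothetical value of L
weight, weight2, weight3, weight4 = (npzfile[f"arr_{i}"] for i in range(4))
baise, baise2, baise3, baise4 = (npzfile[f"arr_{i}"] for i in range(4, 8))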
Example #7
    plt.rcParams['grid.linewidth'] = 0.3
    plt.rcParams["legend.markerscale"] = 2
    plt.rcParams["legend.fancybox"] = False
    plt.rcParams["legend.edgecolor"] = 'black'
    for i in range(10):
        plt.plot([0, 0.5, 1, 1.5, 2, 2.5, 3],
                 rt.v_to_dbm(abs(test[i]), 50),
                 label=f'ant1.y={i*0.1}')
    plt.title('ant1=(0, 0.0-0.1), ant2=(1.2, 1.5) [m]')
    plt.ylabel('rx power [dBm]')
    plt.xlabel('measurement point x [m], (y=0 [m])')
    plt.grid()
    plt.legend()
    plt.show()


# Order of the serialized data:
# shape = (30, 30, 30, 30, 7)
# (x1_position, y1_position, x2_position, y2_position, rx_power)
if __name__ == '__main__':
    deg_start = sys.argv[1]
    deg_stop = sys.argv[2]
    print(f'processing {deg_start} to {deg_stop}')
    # temp = cp.empty([int(deg_stop)+1, const.x_num, const.y_num, const.x_num,
    #                  const.y_num, const.rx_antenna_num])
    os.makedirs("traning_data", exist_ok=True)
    # rangeはendpointが含まれないので,stop+1している
    for deg in tqdm(range(int(deg_start), int(deg_stop) + 1, 1)):
        cp.savez("/tmp/training_data/training_data_" + str(deg),
                 calc_antennas_vector(deg))
Example #8
 def time_vb_savez_squares(self):
     np.savez('tmp.npz', self.squares)
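
The time_ prefix and the self.squares attribute read like an airspeed velocity (asv) benchmark method; a self-contained sketch of what the surrounding suite might look like, with a hypothetical setup() payload:

import numpy as np

class SavezSuite:
    def setup(self):
        # hypothetical payload; the real self.squares is defined elsewhere
        self.squares = np.arange(1_000_000, dtype=np.float64) ** 2

    def time_vb_savez_squares(self):
        # asv times methods whose names start with time_
        np.savez('tmp.npz', self.squares)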
Example #9
    def update(self):
        """
        Where the magic happens. Finds a threshold that will limit the number of params in the network
        to the tracked_size, and resets those params to the initial value to emulate how DropBack would
        work in real hardware.

        Chainer will calculate all grads, and this updater inserts itself before the next
        forward pass can occur to set the parameters back to what they should be. Only the params with the largest
        current-initial value will not be reset to initial. This emulates the accumulated gradient updates of the actual
        algorithm.
        :return:
        """
        if self.first_iter:
            self.first_iter = False
            self.params = [i for i in self.opt.target.params()]
            for i, p in enumerate(self.params):
                self.init_params.append(xp.copy(p.data))
            if not os.path.exists(self.output_dir):
                os.makedirs(self.output_dir)
            xp.savez(
                os.path.join(self.output_dir,
                             'init_params_{0}'.format(self.time_stamp)),
                self.init_params)
            if self.tracked_size:
                self.frozen_masks = [None] * len(self.params)
        super(DropBack, self).update()
        if self.decay_init and not self.first_iter:
            for i, _ in enumerate(self.init_params):
                self.init_params[i] = self.init_params[i] * .90
        if self.tracked_size:
            if not self.freeze:
                abs_values = []
                for i, param in enumerate(self.params):
                    if param.name == 'b':
                        values = (xp.abs(param.data).flatten()).copy()
                    else:
                        values = (
                            xp.abs(param.data -
                                   self.init_params[i]).flatten()).copy()
                    abs_values.append(values)
                abs_vals = xp.concatenate(abs_values)
                # kth = size - tracked_size puts the tracked_size-th largest
                # value at index -tracked_size
                thresh = xp.partition(
                    abs_vals, abs_vals.size - self.tracked_size
                )[-self.tracked_size]
            for i, param in enumerate(self.params):
                if param.name == 'b':
                    if self.freeze:
                        mask = self.frozen_masks[i]
                    else:
                        mask = xp.abs(param.data) > thresh
                    param.data = mask * param.data
                else:
                    if self.freeze:
                        mask = self.frozen_masks[i]
                    else:
                        mask = xp.abs(param.data -
                                      self.init_params[i]) > thresh
                    param.data = mask * param.data + self.init_params[i] * ~mask
                self.frozen_masks[i] = mask
            if self.iteration == 3465:
                print("Checking inv...")
                total_sum = sum([
                    xp.count_nonzero(p.data != self.init_params[i])
                    for i, p in enumerate(self.params)
                ])
                print(
                    "********\n\n Total non zero is: {}\n\n*********".format(
                        total_sum))
                assert total_sum <= self.tracked_size * 1.1
        if self.track:
            if (self.iteration - 1) % 100 == 0:
                flat_now = xp.concatenate(
                    [i.array.ravel() for i in self.params])
                flat_0 = xp.concatenate([i.ravel() for i in self.init_params])
                xp.savez(
                    os.path.join(self.output_dir, f'l2_{self.iteration-1}'),
                    xp.linalg.norm(flat_now - flat_0))
                xp.savez(
                    os.path.join(self.output_dir,
                                 f'param_hist_{self.iteration-1}'),
                    xp.concatenate([
                        i.array.ravel() for i in self.params
                        if i.name == 'b' or i.name == 'W'
                    ]))
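
A minimal numpy sketch of the keep-top-k thresholding the docstring describes, independent of Chainer and with illustrative names:

import numpy as np

def dropback_reset(current, initial, tracked_size):
    """Keep the tracked_size entries that moved farthest from init; reset the rest."""
    deltas = np.abs(current - initial).ravel()
    # value of the tracked_size-th largest delta
    thresh = np.partition(deltas, deltas.size - tracked_size)[deltas.size - tracked_size]
    mask = np.abs(current - initial) >= thresh  # ties may keep a few extra entries
    return mask * current + initial * ~mask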