Example No. 1
def main(args):
    np.random.seed(args.seed)
    gens = data.instantiate_generators()

    X, t_phn, t_spk = data.generate(gens, 100)
    X_val, t_phn_val, t_spk_val = data.generate(gens, 100)

    plotter = plotting.Plotter(args.no_plot)
    plotter.plot(X, t_phn, t_spk, name="Raw data")
    raw_bl, raw_ur = plotter.plot(X_val,
                                  t_phn_val,
                                  t_spk_val,
                                  name="Raw validation data")

    torch.manual_seed(args.seed)
    bn_extractor_init, phn_decoder_init, spk_decoder_init = model.create_models(
        args.bne_width)

    bn_extractor = copy.deepcopy(bn_extractor_init)
    spk_decoder = copy.deepcopy(spk_decoder_init)
    phn_decoder = copy.deepcopy(phn_decoder_init)

    print("\nTraining in disconcert, from same init:")
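    # adversary_train is defined elsewhere; judging by its arguments, it presumably
    # trains the bottleneck extractor on phone targets while adversarially
    # suppressing speaker information.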
    adversary_train(bn_extractor, phn_decoder, spk_decoder,
                    (X, [t_phn, t_spk]), (X_val, [t_phn_val, t_spk_val]),
                    args.nb_epochs)

    bl, ur = plotter.plot(X,
                          t_phn,
                          t_spk,
                          name="BN features, PHN-SPK optimized",
                          transform=bn_extractor)
    plotting.plot_preds(plotter,
                        "PHN decoding in disconcertly trained BN space", bl,
                        ur, phn_decoder)
Example No. 2
def main(args):
    np.random.seed(args.seed)
    gens = data.instantiate_generators()

    X, t_phn, t_spk = data.generate(gens, 100)
    X_val, t_phn_val, t_spk_val = data.generate(gens, 100)

    plotter = plotting.Plotter(args.no_plot)
    plotter.plot(X, t_phn, t_spk, name="Raw data")
    raw_bl, raw_ur = plotter.plot(X_val,
                                  t_phn_val,
                                  t_spk_val,
                                  name="Raw validation data")

    torch.manual_seed(args.seed)
    bne, phn_dec, spk_dec = model.create_models(args.bne_width)

    print("\nTraining PHN network")
    training.train(bne, [phn_dec],
                   itertools.chain(bne.parameters(), phn_dec.parameters()),
                   (X, [t_phn]), (X_val, [t_phn_val]), args.nb_epochs)

    bl, ur = plotter.plot(X,
                          t_phn,
                          t_spk,
                          name="BN features, PHN optimized",
                          transform=bne)
    plotting.plot_preds(plotter, "PHN decoding in raw space", raw_bl, raw_ur,
                        lambda x: phn_dec(bne(x)))
    plotting.plot_preds(plotter, "PHN decoding in BN space", bl, ur, phn_dec)

    print("\nTraining SPK decoder")
    training.train(bne, [spk_dec], spk_dec.parameters(), (X, [t_spk]),
                   (X_val, [t_spk_val]), args.nb_epochs)
Example No. 3
def generate_all(rm: ResourceManager):
    # do the simple lang keys first, because their ordering is intentional
    rm.lang(DEFAULT_LANG)

    # generic assets / data
    assets.generate(rm)
    data.generate(rm)
    world_gen.generate(rm)
    recipes.generate(rm)

    rm.flush()
Example No. 4
    def __init__(self, n=20, n_features=3, rand=False):
        self.r = rand
        if rand:
            self.n = n
            self.d, self.t = data.generate(n, num_features=n_features)
            assert len(self.d) == len(self.t)
        else:
            # d = data.loadIris()
            # self.d, self.t, self.testd, self.testt = data.split(d)
            # self.n = len(self.d)

            # self.d, self.t = data.loadCredit()
            self.d, self.t, self.testd, self.testt = data.loadCredit()
            self.n = len(self.d)

        print("Data shape: {}".format(self.d.shape))

        self.stumps = self.makeFewerStumps()

        print("Tree space: {}".format(len(self.stumps)))

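        # Boosting bookkeeping: uniform sample weights plus empty lists for the
        # per-round alphas, the ensemble H and the accuracy history.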
        self.weights = np.ones((self.n, )) * float(1 / self.n)
        self.alphas = []
        self.H = []
        self.acc = []
Example No. 5
def main():
    batch_size = 32
    image_channels = 1
    k = 1

    num_features = 32

    pose_regressor = PoseRegressor(image_channels, k, num_features)
    generator = Generator(image_channels, k, num_features)

    model = Imm(pose_regressor, generator)

    optimizer = torch.optim.Adam(model.parameters())

    num_iterations = 1e5
    for it in range(int(num_iterations)):
        xt, xtp1 = data.generate(batch_size)
        xt = torch.as_tensor(xt).unsqueeze(1)
        xtp1 = torch.as_tensor(xtp1).unsqueeze(1)

        optimizer.zero_grad()
        generated = model(xt, xtp1)
        loss = torch.nn.functional.binary_cross_entropy(generated, xtp1)

        loss.backward()
        optimizer.step()
        if it % 100 == 0:
            loss_mse = torch.nn.functional.mse_loss(generated, xtp1).detach()
            print(it, loss.item(), loss_mse.item())
Example No. 6
def bootstrap(db, opts):
    batch = db.batch()
    for _ in range(0, opts.num):
        d = data.generate(os.path.join(opts.schema, 'user-schema.json'))
        user_id = helper.id_from_name(d.get('first'), d.get('last'))
        user_ref = db.document('users/{}'.format(user_id))
        batch.set(user_ref, d)
        logging.info('Adding doc "users/{}".'.format(user_ref.id))
    batch.commit()
Example No. 7
    def __init__(self, mat_in=None, mat_out=None, data_params=None, seed=None):
        """
        Constructor of the NetworkBuilder class -- the NetworkBuilder collects
        information about a network (storage matrix, noise parameters and input
        data).

        :param mat_in: Nxm matrix containing the input data
        :param mat_out: Nxn matrix containing the output data
        """

        # Make sure that either data parameters are given or an input
        # and output matrix
        assert (mat_in is None) == (mat_out is None)
        assert (mat_in is None) != (data_params is None)

        if mat_in is None:
            # Use the supplied data parameters -- generate the data matrices
            self.data_params = DataParameters(data_params)

            # Select the data generation function
            if self.data_params["algorithm"] == "balanced":
                gen_fun = data.generate
            elif self.data_params["algorithm"] == "random":
                gen_fun = data.generate_random
            elif self.data_params["algorithm"] == "unique":
                gen_fun = lambda n_bits, n_ones, n_samples, seed: data.generate(
                    n_bits, n_ones, n_samples, seed, balance=False
                )

            # Generate the data with the given seed
            self.mat_in = gen_fun(
                n_bits=self.data_params["n_bits_in"],
                n_ones=self.data_params["n_ones_in"],
                n_samples=self.data_params["n_samples"],
                seed=(None if seed is None else (seed + 5)),
            )
            self.mat_out = gen_fun(
                n_bits=self.data_params["n_bits_out"],
                n_ones=self.data_params["n_ones_out"],
                n_samples=self.data_params["n_samples"],
                seed=(None if seed is None else (seed + 6) * 2),
            )

        else:
            # If matrices are given, derive the data parameters from them
            self.mat_in = mat_in
            self.mat_out = mat_out

            assert mat_in.shape[0] == mat_out.shape[0]
            self.data_params = DataParameters(
                n_bits_in=mat_in.shape[1],
                n_bits_out=mat_out.shape[1],
                n_ones_in=self._n_ones(mat_in),
                n_ones_out=self._n_ones(mat_out),
                n_samples=mat_in.shape[0],
            )
Example No. 8
def deploy(parsers, args):
    reservation = _cli_util.read_reservation_cli()
    return _data.generate(reservation,
                          args.key_path,
                          args.admin_id,
                          args.cmd,
                          args.paths,
                          args.stripe,
                          args.multiplier,
                          mountpoint_path=args.mountpoint,
                          silent=args.silent) if reservation else False
Example No. 9
    def __init__(self, mat_in=None, mat_out=None, data_params=None, seed=None):
        """
        Constructor of the NetworkBuilder class -- the NetworkBuilder collects
        information about a network (storage matrix, noise parameters and input
        data).

        :param mat_in: Nxm matrix containing the input data
        :param mat_out: Nxn matrix containing the output data
        """

        # Make sure that either data parameters are given or an input
        # and output matrix
        assert ((mat_in is None) == (mat_out is None))
        assert ((mat_in is None) != (data_params is None))

        if mat_in is None:
            # Use the supplied data parameters -- generate the data matrices
            self.data_params = DataParameters(data_params)

            # Select the data generation function
            if self.data_params["algorithm"] == "balanced":
                gen_fun = data.generate
            elif self.data_params["algorithm"] == "random":
                gen_fun = data.generate_random
            elif self.data_params["algorithm"] == "unique":
                gen_fun = (lambda n_bits, n_ones, n_samples, seed:
                           data.generate(n_bits, n_ones, n_samples, seed,
                                         balance=False))

            # Generate the data with the given seed
            self.mat_in = gen_fun(
                n_bits=self.data_params["n_bits_in"],
                n_ones=self.data_params["n_ones_in"],
                n_samples=self.data_params["n_samples"],
                seed=(None if seed is None else (seed + 5)))
            self.mat_out = gen_fun(
                n_bits=self.data_params["n_bits_out"],
                n_ones=self.data_params["n_ones_out"],
                n_samples=self.data_params["n_samples"],
                seed=(None if seed is None else (seed + 6) * 2))

        else:
            # If matrices are given, derive the data parameters from them
            self.mat_in = mat_in
            self.mat_out = mat_out

            assert (mat_in.shape[0] == mat_out.shape[0])
            self.data_params = DataParameters(
                n_bits_in=mat_in.shape[1],
                n_bits_out=mat_out.shape[1],
                n_ones_in=self._n_ones(mat_in),
                n_ones_out=self._n_ones(mat_out),
                n_samples=mat_in.shape[0])
Example No. 10
def inspect_distances(model, x, y, batch_size):
    distances = []
    targets = []

    for (x_batch, y_batch) in tqdm(generate(x, y, batch_size=batch_size),
                                   total=len(y) // batch_size):
        dist = np.squeeze(model.predict_on_batch(x_batch))
        target = np.squeeze(y_batch)
        distances.append(dist)
        targets.append(target)

    distances = np.concatenate(distances)
    targets = np.concatenate(targets)

    return distances, targets
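A hypothetical usage sketch (model, x_val and y_val are illustrative names, not from the original repository; it assumes a trained Keras-style model that outputs one distance per pair):

# hypothetical usage -- illustrative names only
distances, targets = inspect_distances(model, x_val, y_val, batch_size=64)
print("mean distance, positive pairs:", distances[targets == 1].mean())
print("mean distance, negative pairs:", distances[targets == 0].mean())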
Example No. 11
def run(graph, params: Params) -> List[EpochStats]:
    stats = []

    # Generate the problem graph, then initialise the population
    graph = data.generate()
    pop = __init_pop(graph, params.pop_size)
    scores = __evaluate_pop(pop)
    stats.append(EpochStats(scores))

    for i in tqdm(range(params.epochs)):
        new_pop = __crossover(pop, params.p_crossover)
        new_pop = __mutation(new_pop, params.p_mutation_pop,
                             params.p_mutation_specimen)
        new_pop, new_scores = __selection(pop, scores, new_pop)

        stats.append(EpochStats(scores))

        pop = new_pop
        scores = new_scores

    return stats
Example No. 12
def main():
    graph = data.generate()

    batches = [
        Batch(graph, ga.Params(200, 40, 0.4, 0.5, 0.1)),
        Batch(graph, ga.Params(500, 100, 0.4, 0.5, 0.1)),
        Batch(graph, ga.Params(1000, 200, 0.4, 0.5, 0.1)),
    ]

    for batch in tqdm(batches):
        graph = batch.graph
        params = batch.params

        params_hash = __batch_as_hash(params)

        logs_dir = LOGS.joinpath(params_hash)

        with Logger(logs_dir) as logger:
            ga_stats = ga.run(graph, params)

            worst = []
            mean = []
            best = []

            for epoch_stats in ga_stats:
                worst.append(np.min(epoch_stats.scores))
                mean.append(np.mean(epoch_stats.scores))
                best.append(np.max(epoch_stats.scores))

            summary = {}
            summary['worst'] = worst
            summary['mean'] = mean
            summary['best'] = best

            data_frame = pd.DataFrame.from_dict(summary)
            logger.save_csv(data_frame, 'summary')
            logger.add_entries(params.as_dict())
Example No. 13
def main():
    # Set up data
    batch_size = 1600  # set batch size
    test_split = 10000  # number of testing samples to use

    # generate data
    # makes a torch.tensor() with arrays of (n_samples X parameters) and (n_samples X data)
    # labels are the colours and pos are the x,y coords
    # however, labels are 1-hot encoded
    pos, labels = data.generate(labels='all', tot_dataset_size=2**20)

    # select the colour labels for the test split
    #c = np.where(labels[:test_split])[1]
    c = labels[:test_split, :]
    plt.figure(figsize=(6, 6))
    plt.scatter(pos[:test_split, 0],
                pos[:test_split, 1],
                c=c,
                cmap='Set1',
                s=0.25)
    plt.xticks([])
    plt.yticks([])
    plt.savefig('/data/public_html/chrism/FrEIA/test_distribution.png')
    plt.close()

    # setting up the model
    ndim_tot = 16  # ?
    ndim_x = 2  # number of parameter dimensions (x,y)
    ndim_y = 3  # number of label dimensions (colours for 1-hot encoding)
    ndim_z = 2  # number of latent space dimensions?

    # define different parts of the network
    # define input node
    inp = InputNode(ndim_tot, name='input')

    # define hidden layer nodes
    t1 = Node([inp.out0], rev_multiplicative_layer, {
        'F_class': F_fully_connected,
        'clamp': 2.0,
        'F_args': {
            'dropout': 0.0
        }
    })

    t2 = Node([t1.out0], rev_multiplicative_layer, {
        'F_class': F_fully_connected,
        'clamp': 2.0,
        'F_args': {
            'dropout': 0.0
        }
    })

    t3 = Node([t2.out0], rev_multiplicative_layer, {
        'F_class': F_fully_connected,
        'clamp': 2.0,
        'F_args': {
            'dropout': 0.0
        }
    })

    # define output layer node
    outp = OutputNode([t3.out0], name='output')

    nodes = [inp, t1, t2, t3, outp]
    model = ReversibleGraphNet(nodes)

    # Train model
    # Training parameters
    n_epochs = 3000
    meta_epoch = 12  # step size for the StepLR learning-rate scheduler
    n_its_per_epoch = 4
    batch_size = 1600

    lr = 1e-2
    gamma = 0.01**(1. / 120)
    l2_reg = 2e-5

    y_noise_scale = 3e-2
    zeros_noise_scale = 3e-2

    # relative weighting of losses:
    lambd_predict = 300.  # forward pass
    lambd_latent = 300.  # latent space
    lambd_rev = 400.  # backwards pass

    # padding both the data and the latent space
    # such that they have equal dimension to the parameter space
    pad_x = torch.zeros(batch_size, ndim_tot - ndim_x)
    pad_yz = torch.zeros(batch_size, ndim_tot - ndim_y - ndim_z)

    print(pad_x.shape, pad_yz.shape)

    # define optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=lr,
                                 betas=(0.8, 0.8),
                                 eps=1e-04,
                                 weight_decay=l2_reg)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=meta_epoch,
                                                gamma=gamma)

    # define the three loss functions
    loss_backward = MMD_multiscale
    loss_latent = MMD_multiscale
    loss_fit = fit

    # set up test set data loader
    test_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(
        pos[:test_split], labels[:test_split]),
                                              batch_size=batch_size,
                                              shuffle=True,
                                              drop_last=True)

    # set up training set data loader
    train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(
        pos[test_split:], labels[test_split:]),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               drop_last=True)

    # initialisation of network weights
    for mod_list in model.children():
        for block in mod_list.children():
            for coeff in block.children():
                coeff.fc3.weight.data = 0.01 * torch.randn(
                    coeff.fc3.weight.shape)

    model.to(device)

    # initialize gif for showing training procedure
    fig, axes = plt.subplots(1, 2, figsize=(8, 4))
    axes[0].set_xticks([])
    axes[0].set_yticks([])
    axes[0].set_title('Predicted labels (Forwards Process)')
    axes[1].set_xticks([])
    axes[1].set_yticks([])
    axes[1].set_title('Generated Samples (Backwards Process)')
    #fig.show()
    #fig.canvas.draw()

    # number of test samples to use after training
    N_samp = 4096

    # choose test samples to use after training
    x_samps = torch.cat([x for x, y in test_loader], dim=0)[:N_samp]
    y_samps = torch.cat([y for x, y in test_loader], dim=0)[:N_samp]
    #c = np.where(y_samps)[1]
    #c = y_samps[:,0]
    c = np.array(y_samps).reshape(N_samp, ndim_y)
    y_samps += y_noise_scale * torch.randn(N_samp, ndim_y)
    y_samps = torch.cat([
        torch.randn(N_samp, ndim_z), zeros_noise_scale *
        torch.zeros(N_samp, ndim_tot - ndim_y - ndim_z), y_samps
    ],
                        dim=1)
    y_samps = y_samps.to(device)

    # start training loop
    try:
        #     print('#Epoch \tIt/s \tl_total')
        t_start = time()
        # loop over number of epochs
        for i_epoch in tqdm(range(n_epochs), ascii=True, ncols=80):

            scheduler.step()

            # Initially, the l2 reg. on x and z can give huge gradients, set
            # the lr lower for this
            if i_epoch < 0:
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr * 1e-2

    #         print(i_epoch, end='\t ')
            train(model, train_loader, n_its_per_epoch, zeros_noise_scale,
                  batch_size, ndim_tot, ndim_x, ndim_y, ndim_z, y_noise_scale,
                  optimizer, lambd_predict, loss_fit, lambd_latent,
                  loss_latent, lambd_rev, loss_backward, i_epoch)

            # predict the locations of test labels
            rev_x = model(y_samps, rev=True)
            rev_x = rev_x.cpu().data.numpy()

            # predict the label given a location
            #pred_c = model(torch.cat((x_samps, torch.zeros(N_samp, ndim_tot - ndim_x)),
            #                         dim=1).to(device)).data[:, -8:].argmax(dim=1)
            pred_c = model(
                torch.cat((x_samps, torch.zeros(N_samp, ndim_tot - ndim_x)),
                          dim=1).to(device)).data[:, -1:].argmax(dim=1)

            axes[0].clear()
            #axes[0].scatter(tmp_x_samps[:,0], tmp_x_samps[:,1], c=pred_c, cmap='Set1', s=1., vmin=0, vmax=9)
            axes[0].axis('equal')
            axes[0].axis([-3, 3, -3, 3])
            axes[0].set_xticks([])
            axes[0].set_yticks([])

            axes[1].clear()
            axes[1].scatter(rev_x[:, 0],
                            rev_x[:, 1],
                            c=c,
                            cmap='Set1',
                            s=1.,
                            vmin=0,
                            vmax=9)
            axes[1].axis('equal')
            axes[1].axis([-3, 3, -3, 3])
            axes[1].set_xticks([])
            axes[1].set_yticks([])

            fig.canvas.draw()
            plt.savefig('/data/public_html/chrism/FrEIA/training_pred.png')

    except KeyboardInterrupt:
        pass
    finally:
        print(f"\n\nTraining took {(time()-t_start)/60:.2f} minutes\n")
Example No. 14
def cm2inch(value):
    return value / 2.54


# Parameters
n_bits = 96
n_ones = 8
n_samples = entropy.optimal_sample_count(n_bits_in=n_bits,
                                         n_bits_out=n_bits,
                                         n_ones_in=n_ones,
                                         n_ones_out=n_ones)

# Generate the input and output data
print("Create input data...")
X = data.generate(n_bits=n_bits, n_ones=n_ones, n_samples=n_samples)
print("Create output data...")
Y = data.generate(n_bits=n_bits, n_ones=n_ones, n_samples=n_samples)

# Train the BiNAM
print("Training BiNAM...")
M = binam.BiNAM(n_bits, n_bits)
M.train_matrix(X, Y)

print("Running experiments...")
xs = np.linspace(0.0, 1.0, 100)
nxs = len(xs)

info_p0_fix = np.zeros(nxs)
info_p1_fix = np.zeros(nxs)
info_p0_adap = np.zeros(nxs)
Example No. 15
 def __init__(self):
     PlanetaryBody.__init__(self)
     self.moons = data.generate(Moon, 0, 10)
Example No. 16
#!/usr/bin/python3
# Filename: Insertion_Sort.py
'''Insertion sort'''

import data
# import tracemalloc

# tracemalloc.start()

A = data.generate()

# Core algorithm
for j in range(1, len(A)):
    key = A[j]
    # Insert A[j] into the already-sorted A[1...j-1]
    i = j - 1
    while i >= 0 and A[i] > key:
        A[i + 1] = A[i]
        i = i - 1
    A[i + 1] = key

data.output(A)

# Estimate memory usage
# snapshot = tracemalloc.take_snapshot()
# top_stats = snapshot.statistics('filename')

# print("[ Top 10 ]")
# for stat in top_stats[:10]:
# print(stat)
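The script imports a local data module that the page does not show. A minimal stand-in, assuming generate() returns a list of integers and output() simply prints the sorted result, is enough to make the snippet runnable:

# data.py -- hypothetical stand-in, not the original module
import random

def generate(n=100, low=0, high=999):
    """Return a list of n random integers to sort."""
    return [random.randint(low, high) for _ in range(n)]

def output(a):
    """Print the sorted sequence."""
    print(a)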
Example No. 17
 def __init__(self):
     AstronomicalObject.__init__(self)
     self.planets = data.generate(Planet, 1, 7)

     for i in range(0, len(self.planets)):
         self.planets[i].suffix = data.romanNumeral(i+1)
Example No. 18
 def __init__(self):
     PlanetaryBody.__init__(self)
     self.moons = data.generate(Moon, 0, 10)
Example No. 19
elif model_name == "facenet":
    # facenet only supports shapes starting from 160
    if input_shape < 160:
        sys.exit(1)
    input_shape = (input_shape, input_shape, 3)
    model = create_facenet_network(input_shape=input_shape)

if model_name in ["vggface", "facenet"]:  # keras vs tf iterator ndim error
    (train_dataset, val_dataset), _ = load(
        n_classes=n_classes,
        samples_per_class=samples_per_class,
        input_shape=input_shape,
    )
    train_steps = len(train_dataset[1]) // batch_size
    val_steps = len(val_dataset[1]) // batch_size
    train_dataset = generate(*train_dataset, batch_size)
    val_dataset = generate(*val_dataset, batch_size)
    fit_params = dict(
        x=train_dataset,
        epochs=epochs,
        validation_data=val_dataset,
        steps_per_epoch=train_steps,
        validation_steps=val_steps,
    )
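    # fit_params is presumably consumed later via model.fit(**fit_params);
    # that call is not part of this snippet.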

# Compile model
optimizer = optimizers.Adam()
model.compile(
    optimizer=optimizer, loss=contrastive_loss, metrics=[accuracy],
)
Example No. 20
import binam
import data
import entropy
import numpy as np
import utils

def cm2inch(value):
    return value / 2.54

# Parameters
n_bits = 96
n_ones = 8
n_samples = entropy.optimal_sample_count(n_bits_in=n_bits,
                                         n_bits_out=n_bits,
                                         n_ones_in=n_ones,
                                         n_ones_out=n_ones)

# Generate the input and output data
print("Create input data...")
X = data.generate(n_bits=n_bits, n_ones=n_ones, n_samples=n_samples)
print("Create output data...")
Y = data.generate(n_bits=n_bits, n_ones=n_ones, n_samples=n_samples)

# Train the BiNAM
print("Training BiNAM...")
M = binam.BiNAM(n_bits, n_bits)
M.train_matrix(X, Y)

print("Running experiments...")
xs = np.linspace(0.0, 1.0, 100)
nxs = len(xs)

info_p0_fix = np.zeros(nxs)
info_p1_fix = np.zeros(nxs)
info_p0_adap = np.zeros(nxs)
Example No. 21
#!/usr/bin/python3
# Filename: Insertion_Sort.py

'''Insertion sort'''

import data
# import tracemalloc

# tracemalloc.start()

A = data.generate()

# Core algorithm
for j in range(1, len(A)):
    key = A[j]
    # Insert A[j] into the already-sorted A[1...j-1]
    i = j - 1
    while i >= 0 and A[i] > key:
        A[i + 1] = A[i]
        i = i - 1
    A[i + 1] = key

data.output(A)

# Estimate memory usage
# snapshot = tracemalloc.take_snapshot()
# top_stats = snapshot.statistics('filename')

# print("[ Top 10 ]")
# for stat in top_stats[:10]:
    # print(stat)
Example No. 22
    train_op = tf.contrib.layers.optimize_loss(loss, global_step, learning_rate=FLAGS.lr, optimizer='Adam')

#Metrics
acc  = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(output, 1) ,tf.argmax(y, 1)), tf.float32))
init_op = tf.initialize_all_variables()

sess = tf.Session(config=tf.ConfigProto(log_device_placement=True, allow_soft_placement = True))
sess.run(init_op)
sess.run(embedding_init, feed_dict = {pretrained_we:pretrained_embedding_matrix})

saver = tf.train.Saver()
if FLAGS.restore_checkpoint:
    saver.restore(sess, '%s' %FLAGS.restore_checkpoint )
    print('Restoring Model... ')
start, best_val = time.time(),-1
for batch_x, batch_y, batch_mask in generate('%s/train.h5' %FLAGS.data_dir, FLAGS.epoch, FLAGS.batchsize):
    l,a,g,_ = sess.run([loss, acc, global_step, train_op], feed_dict = {x: batch_x, y: batch_y, mask: batch_mask})
    print('Train Iteration %d: Loss %.3f acc: %.3f' % (g, l, a))
    if g % 150000 == 0:  # 0.5 epoch
        print('Time taken for %d iterations %.3f' % (g, time.time() - start))
        avg_loss, avg_acc, examples = 0.0, 0.0, 0.0
        for val_x, val_y, val_mask in generate('%s/dev.h5' %FLAGS.data_dir,1, 32):
            l, a = sess.run([loss, acc], feed_dict = {x:val_x, y:val_y, mask: val_mask})
            avg_loss +=l*val_y.shape[0]
            avg_acc +=a*val_y.shape[0]
            examples += val_y.shape[0]
            print(examples, avg_loss * 1. / examples, avg_acc * 1. / examples)
        print('Val loss %.3f accuracy %.3f' %(avg_loss*1./examples, avg_acc*1./examples))
        val = avg_acc*1./examples
        if best_val < val:          
            best_val = val
Example No. 23
import data
import numpy as np
import sklearn
import sklearn.linear_model
import matplotlib.pyplot as plt

# Coding environment parameters
print_loss = True
plot_results = False
gradient_checking = False
annealing = False  # Set initial learning rate to 0.02
# Data Gathering and pre-processing
X_train, y_train = data.generate(0, 200)
num_examples = len(X_train) # training set size

# Neural network parameters
num_epochs = 1401  # number of epochs to train the set
learning_rate = [0.01, 900] # learning rate for gradient descent
regularization = 0.01 # regularization strength (lambda)
nn_hidden_units = 3 # number of hidden units per layer
nn_input_dim = 2 # input layer dimensionality
nn_output_dim = 2 # output layer dimensionality
activation_type = "relu" # or "sigm" or "rect"

def feed_forward(model, inputs, activation_function=0):
  W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
  if activation_function == 0:
    activation_function = set_nonlinearity("tanh", 1)
  z1 = inputs.dot(W1) + b1
  a1 = activation_function(z1)
  z2 = a1.dot(W2) + b2
Example No. 24
batch_size = 32
image_channels = 1
k = 1

num_features = 32

feature_encoder = transporter.FeatureEncoder(image_channels, 3)
pose_regressor = transporter.PoseRegressor(image_channels, k, num_features)
refine_net = transporter.RefineNet(3, 1)

transporter = transporter.Transporter(feature_encoder, pose_regressor,
                                      refine_net)

optimizer = torch.optim.Adam(transporter.parameters())

num_iterations = 1e5
for it in range(int(num_iterations)):
    xt, xtp1 = data.generate(batch_size)
    xt = torch.as_tensor(xt).unsqueeze(1)
    xtp1 = torch.as_tensor(xtp1).unsqueeze(1)

    optimizer.zero_grad()
    reconstruction = transporter(xt, xtp1)
    # loss = torch.nn.functional.mse_loss(reconstruction, xtp1)
    loss = torch.nn.functional.binary_cross_entropy(reconstruction, xtp1)
    loss.backward()

    optimizer.step()
    if it % 100 == 0:
        loss_mse = torch.nn.functional.mse_loss(reconstruction, xtp1).detach()
        print(it, loss.item(), loss_mse.item())
Example No. 25
#!/usr/bin/env python
import data


if __name__ == "__main__":
    N = 100000
    while True:
        points = [(r, x) for r, x in data.generate(N)]
        for i in range(100):
            data.send(points, N=1000 * len(points))
            for idx in range(len(points)):
                r, x = points[idx]
                for j in range(1000):
                    x = r * x * (1 - x)
                points[idx] = r, x
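Here data.generate(N) is expected to yield N (r, x) seed pairs for the logistic map, and data.send to ship the updated points elsewhere. A minimal stand-in for the generator, assuming r is drawn from the chaotic regime, could be:

# hypothetical stand-in for data.generate -- illustrative only
import random

def generate(n):
    """Yield n (r, x) logistic-map seeds with r in the chaotic regime."""
    for _ in range(n):
        yield random.uniform(3.57, 4.0), random.random()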
Example No. 26
def main():

    # Set up simulation parameters
    batch_size = 128  # set batch size
    r = 3  # the grid dimension for the output tests
    test_split = r * r  # number of testing samples to use
    sig_model = 'sg'  # the signal model to use
    sigma = 0.2  # the noise std
    ndata = 128  #32 number of data samples in time series
    bound = [0.0, 1.0, 0.0, 1.0]  # effective bound for likelihood
    seed = 1  # seed for generating data
    out_dir = "/home/hunter.gabbard/public_html/CBC/cINNamon/gausian_results/"
    n_neurons = 0
    do_contours = True  # if True, plot contours of predictions by INN
    plot_cadence = 50
    do_latent_struc = False  # if True, plot latent space 2D structure
    conv_nn = False  # if True, use convolutional nn structure

    # setup output directory - if it does not exist
    os.system('mkdir -p %s' % out_dir)

    # generate data
    pos, labels, x, sig = data.generate(
        model=sig_model,
        tot_dataset_size=int(1e6),  # 1e6
        ndata=ndata,
        sigma=sigma,
        prior_bound=bound,
        seed=seed)

    if do_latent_struc:
        # calculate mode of x-space for both pars
        mode_1 = stats.mode(np.array(pos[:, 0]))
        mode_2 = stats.mode(np.array(pos[:, 1]))

    # separate the test data for plotting
    pos_test = pos[-test_split:]
    labels_test = labels[-test_split:]
    sig_test = sig[-test_split:]

    # plot the test data examples
    plt.figure(figsize=(6, 6))
    fig_post, axes = plt.subplots(r, r, figsize=(6, 6))
    cnt = 0
    for i in range(r):
        for j in range(r):
            axes[i, j].plot(x, np.array(labels_test[cnt, :]), '.')
            axes[i, j].plot(x, np.array(sig_test[cnt, :]), '-')
            cnt += 1
            axes[i, j].axis([0, 1, -1.5, 1.5])
    plt.savefig("%stest_distribution.png" % out_dir, dpi=360)
    plt.close()

    # setting up the model
    ndim_x = 2  # number of posterior parameter dimensions (x,y)
    ndim_y = ndata  # number of label dimensions (noisy data samples)
    ndim_z = 200  # number of latent space dimensions?
    ndim_tot = max(
        ndim_x,
        ndim_y + ndim_z) + n_neurons  # must be > ndim_x and > ndim_y + ndim_z

    # define different parts of the network
    # define input node
    inp = InputNode(ndim_tot, name='input')

    # define hidden layer nodes
    filtsize = 3
    dropout = 0.0
    clamp = 1.0
    if conv_nn == True:
        t1 = Node(
            [inp.out0], rev_multiplicative_layer, {
                'F_class': F_conv,
                'clamp': clamp,
                'F_args': {
                    'kernel_size': filtsize,
                    'leaky_slope': 0.1,
                    'batch_norm': False
                }
            })

        t2 = Node(
            [t1.out0], rev_multiplicative_layer, {
                'F_class': F_conv,
                'clamp': clamp,
                'F_args': {
                    'kernel_size': filtsize,
                    'leaky_slope': 0.1,
                    'batch_norm': False
                }
            })

        t3 = Node(
            [t2.out0], rev_multiplicative_layer, {
                'F_class': F_conv,
                'clamp': clamp,
                'F_args': {
                    'kernel_size': filtsize,
                    'leaky_slope': 0.1,
                    'batch_norm': False
                }
            })
        #t4 = Node([t1.out0], rev_multiplicative_layer,
        #          {'F_class': F_conv, 'clamp': 2.0,
        #           'F_args':{'kernel_size': filtsize,'leaky_slope':0.1,
        #           'batch_norm':False}})

        #t5 = Node([t2.out0], rev_multiplicative_layer,
        #          {'F_class': F_conv, 'clamp': 2.0,
        #           'F_args':{'kernel_size': filtsize,'leaky_slope':0.1,
        #           'batch_norm':False}})

    else:
        t1 = Node(
            [inp.out0], rev_multiplicative_layer, {
                'F_class': F_fully_connected,
                'clamp': clamp,
                'F_args': {
                    'dropout': dropout
                }
            })

        t2 = Node(
            [t1.out0], rev_multiplicative_layer, {
                'F_class': F_fully_connected,
                'clamp': clamp,
                'F_args': {
                    'dropout': dropout
                }
            })

        t3 = Node(
            [t2.out0], rev_multiplicative_layer, {
                'F_class': F_fully_connected,
                'clamp': clamp,
                'F_args': {
                    'dropout': dropout
                }
            })

    # define output layer node
    outp = OutputNode([t3.out0], name='output')

    nodes = [inp, t1, t2, t3, outp]
    model = ReversibleGraphNet(nodes)

    # Train model
    # Training parameters
    n_epochs = 12000
    meta_epoch = 12  # step size for the StepLR learning-rate scheduler
    n_its_per_epoch = 12

    lr = 1e-2
    gamma = 0.01**(1. / 120)
    l2_reg = 2e-5

    y_noise_scale = 3e-2
    zeros_noise_scale = 3e-2

    # relative weighting of losses:
    lambd_predict = 4000.  # 300 forward pass
    lambd_latent = 900.  # 300 latent space
    lambd_rev = 1000.  # 400 backwards pass

    # padding both the data and the latent space
    # such that they have equal dimension to the parameter space
    #pad_x = torch.zeros(batch_size, ndim_tot - ndim_x)
    #pad_yz = torch.zeros(batch_size, ndim_tot - ndim_y - ndim_z)

    # define optimizer
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=lr,
                                 betas=(0.8, 0.8),
                                 eps=1e-04,
                                 weight_decay=l2_reg)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=meta_epoch,
                                                gamma=gamma)

    # define the three loss functions
    loss_backward = MMD_multiscale
    loss_latent = MMD_multiscale
    loss_fit = fit

    # set up training set data loader
    train_loader = torch.utils.data.DataLoader(torch.utils.data.TensorDataset(
        pos[test_split:], labels[test_split:]),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               drop_last=True)

    # initialisation of network weights
    for mod_list in model.children():
        for block in mod_list.children():
            for coeff in block.children():
                if conv_nn == True:
                    coeff.conv3.weight.data = 0.01 * torch.randn(
                        coeff.conv3.weight.shape)
    model.to(device)

    # number of test samples to use after training
    N_samp = 2500

    # precompute true likelihood on the test data
    Ngrid = 64
    cnt = 0
    lik = np.zeros((r, r, Ngrid * Ngrid))
    true_post = np.zeros((r, r, N_samp, 2))
    lossf_hist = []
    lossrev_hist = []
    losstot_hist = []
    losslatent_hist = []
    beta_score_hist = []

    for i in range(r):
        for j in range(r):
            mvec, cvec, temp, post_points = data.get_lik(np.array(
                labels_test[cnt, :]).flatten(),
                                                         n_grid=Ngrid,
                                                         sig_model=sig_model,
                                                         sigma=sigma,
                                                         xvec=x,
                                                         bound=bound)
            lik[i, j, :] = temp.flatten()
            true_post[i, j, :] = post_points[:N_samp]
            cnt += 1

    # start training loop
    try:
        t_start = time()
        # loop over number of epochs
        for i_epoch in tqdm(range(n_epochs), ascii=True, ncols=80):

            scheduler.step()

            # Initially, the l2 reg. on x and z can give huge gradients, set
            # the lr lower for this
            if i_epoch < 0:
                print('inside this iepoch<0 thing')
                for param_group in optimizer.param_groups:
                    param_group['lr'] = lr * 1e-2

            # train the model
            losstot, losslatent, lossrev, lossf, lambd_latent = train(
                model, train_loader, n_its_per_epoch, zeros_noise_scale,
                batch_size, ndim_tot, ndim_x, ndim_y, ndim_z, y_noise_scale,
                optimizer, lambd_predict, loss_fit, lambd_latent, loss_latent,
                lambd_rev, loss_backward, conv_nn, i_epoch)

            # append current loss value to loss histories
            lossf_hist.append(lossf.data.item())
            lossrev_hist.append(lossrev.data.item())
            losstot_hist.append(losstot)
            losslatent_hist.append(losslatent.data.item())
            pe_losses = [
                losstot_hist, losslatent_hist, lossrev_hist, lossf_hist
            ]

            # loop over a few cases and plot results in a grid
            cnt = 0
            beta_max = 0
            if ((i_epoch % plot_cadence == 0) & (i_epoch > 0)):
                # use the network to predict parameters

                if do_latent_struc:
                    # do latent space structure plotting
                    y_samps_latent = np.tile(np.array(labels_test[0, :]),
                                             1).reshape(1, ndim_y)
                    y_samps_latent = torch.tensor(y_samps_latent,
                                                  dtype=torch.float)
                    x1_i_dist = []
                    x2_i_dist = []
                    x1_i_par = np.array([])
                    x2_i_par = np.array([])

                    # define latent space mesh grid
                    z_mesh = np.mgrid[-0.99:-0.01:100j, -0.99:-0.01:100j]
                    z_mesh = np.vstack([z_mesh, np.zeros((2, 100, 100))])

                    #for z_i in range(10000):
                    for i in range(z_mesh.shape[1]):
                        for j in range(z_mesh.shape[2]):
                            a = torch.randn(1, ndim_z)
                            a[0, 0] = z_mesh[0, i, j]
                            a[0, 1] = z_mesh[1, i, j]
                            x_i = model(torch.cat([
                                a,
                                torch.zeros(1, ndim_tot - ndim_y - ndim_z),
                                y_samps_latent
                            ],
                                                  dim=1).to(device),
                                        rev=True)
                            x_i = x_i.cpu().data.numpy()

                            # calculate hue and intensity
                            if np.abs(mode_1[0][0] -
                                      x_i[0][0]) < np.abs(mode_2[0][0] -
                                                          x_i[0][1]):
                                z_mesh[2, i,
                                       j] = np.abs(mode_1[0][0] - x_i[0][0])
                                z_mesh[3, i, j] = 0

                            else:
                                z_mesh[2, i,
                                       j] = np.abs(mode_2[0][0] - x_i[0][1])
                                z_mesh[3, i, j] = 1

                    z_mesh[2, :, :][z_mesh[3, :, :] == 0] = z_mesh[2, :, :][
                        z_mesh[3, :, :] == 0] / np.max(
                            z_mesh[2, :, :][z_mesh[3, :, :] == 0])
                    z_mesh[2, :, :][z_mesh[3, :, :] == 1] = z_mesh[2, :, :][
                        z_mesh[3, :, :] == 1] / np.max(
                            z_mesh[2, :, :][z_mesh[3, :, :] == 1])

                    bg_color = 'black'
                    fg_color = 'red'

                    fig = plt.figure(facecolor=bg_color, edgecolor=fg_color)
                    axes = fig.add_subplot(111)
                    axes.patch.set_facecolor(bg_color)
                    axes.xaxis.set_tick_params(color=fg_color,
                                               labelcolor=fg_color)
                    axes.yaxis.set_tick_params(color=fg_color,
                                               labelcolor=fg_color)
                    for spine in axes.spines.values():
                        spine.set_color(fg_color)
                    plt.scatter(z_mesh[0, :, :][z_mesh[3, :, :] == 0],
                                z_mesh[1, :, :][z_mesh[3, :, :] == 0],
                                s=1,
                                c=z_mesh[2, :, :][z_mesh[3, :, :] == 0],
                                cmap='Greens',
                                axes=axes)
                    plt.scatter(z_mesh[0, :, :][z_mesh[3, :, :] == 1],
                                z_mesh[1, :, :][z_mesh[3, :, :] == 1],
                                s=1,
                                c=z_mesh[2, :, :][z_mesh[3, :, :] == 1],
                                cmap='Purples',
                                axes=axes)
                    plt.xlabel('z-space', color=fg_color)
                    plt.ylabel('z-space', color=fg_color)
                    plt.savefig('%sstruct_z.png' % out_dir, dpi=360)
                    plt.close()

                # end of latent space structure plotting

                # initialize plot for showing testing results
                fig, axes = plt.subplots(r, r, figsize=(6, 6))
                for i in range(r):
                    for j in range(r):

                        # convert data into correct format
                        y_samps = np.tile(np.array(labels_test[cnt, :]),
                                          N_samp).reshape(N_samp, ndim_y)
                        y_samps = torch.tensor(y_samps, dtype=torch.float)
                        #y_samps += y_noise_scale * torch.randn(N_samp, ndim_y)
                        y_samps = torch.cat(
                            [
                                torch.randn(N_samp,
                                            ndim_z),  #zeros_noise_scale * 
                                torch.zeros(N_samp,
                                            ndim_tot - ndim_y - ndim_z),
                                y_samps
                            ],
                            dim=1)
                        y_samps = y_samps.to(device)

                        if conv_nn == True:
                            y_samps = y_samps.reshape(y_samps.shape[0],
                                                      y_samps.shape[1], 1, 1)
                        rev_x = model(y_samps, rev=True)
                        rev_x = rev_x.cpu().data.numpy()

                        if conv_nn == True:
                            rev_x = rev_x.reshape(rev_x.shape[0],
                                                  rev_x.shape[1])

                        # plot the samples and the true contours
                        axes[i, j].clear()
                        axes[i, j].contour(mvec,
                                           cvec,
                                           lik[i, j, :].reshape(Ngrid, Ngrid),
                                           levels=[0.68, 0.9, 0.99])
                        axes[i, j].scatter(rev_x[:, 0],
                                           rev_x[:, 1],
                                           s=0.5,
                                           alpha=0.5,
                                           color='red')
                        axes[i, j].scatter(true_post[i, j, :, 1],
                                           true_post[i, j, :, 0],
                                           s=0.5,
                                           alpha=0.5,
                                           color='blue')
                        axes[i, j].plot(pos_test[cnt, 0],
                                        pos_test[cnt, 1],
                                        '+r',
                                        markersize=8)
                        axes[i, j].axis(bound)

                        # add contours to results
                        try:
                            if do_contours:
                                contour_y = np.reshape(rev_x[:, 1],
                                                       (rev_x[:, 1].shape[0]))
                                contour_x = np.reshape(rev_x[:, 0],
                                                       (rev_x[:, 0].shape[0]))
                                contour_dataset = np.array(
                                    [contour_x, contour_y])
                                kernel_cnn = make_contour_plot(
                                    axes[i, j],
                                    contour_x,
                                    contour_y,
                                    contour_dataset,
                                    'red',
                                    flip=False,
                                    kernel_cnn=False)

                                # run overlap tests on results
                                contour_x = np.reshape(
                                    true_post[i, j][:, 1],
                                    (true_post[i, j][:, 1].shape[0]))
                                contour_y = np.reshape(
                                    true_post[i, j][:, 0],
                                    (true_post[i, j][:, 0].shape[0]))
                                contour_dataset = np.array(
                                    [contour_x, contour_y])
                                ks_score, ad_score, beta_score = overlap_tests(
                                    rev_x, true_post[i, j], pos_test[cnt],
                                    kernel_cnn, gaussian_kde(contour_dataset))
                                axes[i, j].legend([
                                    'Overlap: %s' %
                                    str(np.round(beta_score, 3))
                                ])

                                beta_score_hist.append([beta_score])
                        except ValueError as e:
                            pass

                        cnt += 1

                # save the results to file
                fig_post.canvas.draw()
                plt.savefig('%sposteriors_%s.png' % (out_dir, i_epoch),
                            dpi=360)
                plt.savefig('%slatest.png' % out_dir, dpi=360)

                plot_losses(pe_losses,
                            '%spe_losses.png' % out_dir,
                            legend=['PE-GEN'])
                plot_losses(pe_losses,
                            '%spe_losses_logscale.png' % out_dir,
                            logscale=True,
                            legend=['PE-GEN'])

    except KeyboardInterrupt:
        pass
    finally:
        print(f"\n\nTraining took {(time()-t_start)/60:.2f} minutes\n")
Example No. 27
        if draw_line:
            gene_line(draw, width, height, linecolor,
                      random.choice(line_number))
        extra_data = (0.85, 0.3 * (random.random() - 0.5),
                      0.14 * (random.random() - 0.5),
                      0.14 * (random.random() - 0.5),
                      0.8 + 0.3 * (random.random() - 0.5), 0.2)
        image = image.transform(
            (width + 3 + int(3 * (random.random() - 0.5)),
             height + int(5 + 10 * (random.random() - 0.5))), Image.AFFINE,
            extra_data, Image.BILINEAR)

        # image = image.filter(ImageFilter.EDGE_ENHANCE_MORE)

        image = image.convert('L')
        image = image.rotate(30 * (random.random() - 0.5))
        image = image.resize([
            int(image.size[0] * (1 + 0.4 * (random.random() - 0.5))),
            int(image.size[1] * (1 + 0.4 * (random.random() - 0.5)))
        ], Image.LANCZOS)
        image = image.filter(ImageFilter.GaussianBlur(radius=2))
        image = image.resize([160, 60], Image.LANCZOS)
        f.write(adress + str(i) + '.jpg ' + label + "\n")
        image.save(adress + str(i) + '.jpg', 'jpeg')
    f.close()


if __name__ == "__main__":
    gene_code(5000)
    data.generate(adress + 'generate_text.txt', 'train_g.tfrecords')
    print('Dataset saved as "train_g.tfrecords"')
Example No. 28
    def __init__(self):
        AstronomicalObject.__init__(self)
        self.planets = data.generate(Planet, 1, 7)

        for i in range(0, len(self.planets)):
            self.planets[i].suffix = data.romanNumeral(i + 1)