Example 1
def run_training():
    # Make sure the checkpoint directory (and its parent) exists.
    if not os.path.exists(os.path.dirname(FLAGS.checkpoints_dir)):
        os.mkdir(os.path.dirname(FLAGS.checkpoints_dir))
    if not os.path.exists(FLAGS.checkpoints_dir):
        os.mkdir(FLAGS.checkpoints_dir)
    poems_vector, word_to_int, vocabularies, out_vector = process_poems(FLAGS.file_path)
    x_batches, y_batches, length, outlength = generate_batch(
        FLAGS.batch_size, poems_vector, word_to_int, out_vector)

    # One placeholder per time step for the inputs, labels and output targets.
    input_data = [tf.placeholder(tf.int32, (None,)) for t in range(length)]
    labels = [tf.placeholder(tf.int32, (None,)) for t in range(length)]
    output_targets = [tf.placeholder(tf.int32, (None,)) for t in range(outlength)]
    rnn_model("gru", input_data, output_targets, labels, len(vocabularies))
    # Note: sess, train_op, loss, summary_op and summary_writer are assumed to be
    # created elsewhere (e.g. inside rnn_model or at module level).
    for i in range(len(x_batches)):
        print("training epoch: " + str(i))
        xb = x_batches[i]
        yb = y_batches[i]
        for j in range(len(xb)):
            X = xb[j]
            Y = yb[j]
            # Feed every time step of this sample into its own placeholder.
            feed_dict = {input_data[t]: X[t] for t in range(length)}
            feed_dict.update({labels[t]: Y[t] for t in range(length)})
            _, loss_t, summary = sess.run([train_op, loss, summary_op], feed_dict)
            summary_writer.add_summary(summary, i)
    summary_writer.flush()
Example 2
def create_dataloader(dataset,
                      trans_fn=None,
                      mode='train',
                      batch_size=1,
                      pad_token_id=0):
    """
    Creats dataloader.
    Args:
        dataset(obj:`paddle.io.Dataset`): Dataset instance.
        mode(obj:`str`, optional, defaults to obj:`train`): If mode is 'train', it will shuffle the dataset randomly.
        batch_size(obj:`int`, optional, defaults to 1): The sample number of a mini-batch.
        pad_token_id(obj:`int`, optional, defaults to 0): The pad token index.
    Returns:
        dataloader(obj:`paddle.io.DataLoader`): The dataloader which generates batches.
    """
    if trans_fn:
        dataset = dataset.map(trans_fn, lazy=True)

    shuffle = (mode == 'train')
    sampler = paddle.io.BatchSampler(
        dataset=dataset, batch_size=batch_size, shuffle=shuffle)
    dataloader = paddle.io.DataLoader(
        dataset,
        batch_sampler=sampler,
        return_list=True,
        collate_fn=lambda batch: data.generate_batch(batch, pad_token_id=pad_token_id))
    return dataloader
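
A minimal usage sketch for the helper above, shown as an illustration only: `train_ds`, `convert_example` and the surrounding `data` module are placeholders assumed to be provided by the caller, not part of the snippet.

# Hypothetical usage of create_dataloader; train_ds is any dataset object that
# exposes .map(fn, lazy=True), and convert_example maps a raw example to features.
train_loader = create_dataloader(train_ds,
                                 trans_fn=convert_example,
                                 mode='train',
                                 batch_size=32,
                                 pad_token_id=0)
for batch in train_loader:
    # Each batch has been collated by data.generate_batch(batch, pad_token_id=0).
    pass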
Example 3
def test_cache(composers):
    all_pieces = dict()
    for composer in composers:
        start = time.time()
        pieces = data.getpices(path="../midis", composer=composer)
        print("Loading {} {} pieces took {} seconds".format(
            composer, len(pieces),
            time.time() - start))
        all_pieces.update(pieces)

    midi_cache = data.initialize_cache(all_pieces)
    gen = data.generate_batch(midi_cache, batch_size=10)

    for i in range(5):
        pre_start = time.time()
        batch_in, batch_out = next(gen)

        print("One batch took {}".format(time.time() - pre_start))
        print("Batch iput is size: ({}, {}, {})".format(
            len(batch_in), len(batch_in[0]), len(batch_in[0][0])))
        print("Batch output is size: ({}, {}, {})".format(
            len(batch_out), len(batch_out[0]), len(batch_out[0][0])))
Example 4
    def run_episode(data_type, sumvar):
        '''Run over the batches of one episode and return the different losses.
        data_type: 'train', 'val' or 'test'
        '''
        activation_images = []
        depth_predictions = []
        endpoint_activations = []
        start_time = time.time()
        data_loading_time = 0
        calculation_time = 0
        start_data_time = time.time()
        tot_loss = []
        ctr_loss = []
        dep_loss = []
        odo_loss = []
        q_loss = []
        for index, ok, batch in data.generate_batch(data_type):
            data_loading_time += (time.time() - start_data_time)
            start_calc_time = time.time()
            if ok:
                inputs = np.array([_['img'] for _ in batch])
                state = []
                targets = np.array([[_['ctr']] for _ in batch])
                # target_depth = np.array([_['depth'] for _ in batch]).reshape((-1,55,74,FLAGS.n_frames if FLAGS.n_fc else 1)) if FLAGS.auxiliary_depth or FLAGS.rl else []
                target_depth = np.array([_['depth'] for _ in batch]).reshape(
                    (-1, 55, 74)) if FLAGS.auxiliary_depth or FLAGS.rl else []
                target_odom = np.array([_['odom'] for _ in batch]).reshape(
                    (-1, 6)) if FLAGS.auxiliary_odom else []
                # target_odom = np.array([_['odom'] for _ in batch]).reshape((-1,4)) if FLAGS.auxiliary_odom else []
                prev_action = np.array([_['prev_act'] for _ in batch]).reshape(
                    (-1, 1)) if FLAGS.auxiliary_odom else []
                if data_type == 'train':
                    losses = model.backward(inputs,
                                            state,
                                            targets,
                                            depth_targets=target_depth,
                                            odom_targets=target_odom,
                                            prev_action=prev_action)
                elif data_type == 'val' or data_type == 'test':
                    state, losses, aux_results = model.forward(
                        inputs,
                        state,
                        auxdepth=False,
                        auxodom=False,
                        prev_action=prev_action,
                        targets=targets,
                        target_depth=target_depth,
                        target_odom=target_odom)
                tot_loss.append(losses['t'])
                if not FLAGS.no_control and (not FLAGS.rl
                                             or FLAGS.auxiliary_ctr):
                    ctr_loss.append(losses['c'])
                if FLAGS.auxiliary_depth: dep_loss.append(losses['d'])
                if FLAGS.auxiliary_odom: odo_loss.append(losses['o'])
                if FLAGS.rl: q_loss.append(losses['q'])

                if index == 1 and data_type == 'val':
                    if FLAGS.plot_activations:
                        activation_images = model.plot_activations(
                            inputs, targets)
                    if FLAGS.plot_depth:
                        depth_predictions = model.plot_depth(
                            inputs, target_depth)
                    if FLAGS.plot_histograms:
                        # stime = time.time()
                        endpoint_activations = model.get_endpoint_activations(
                            inputs)
                        # print('plot activations: {}'.format((stime-time.time())))
            calculation_time += (time.time() - start_calc_time)
            start_data_time = time.time()
        sumvar['loss_' + data_type + '_total'] = np.mean(tot_loss)
        if not FLAGS.no_control and (not FLAGS.rl or FLAGS.auxiliary_ctr):
            sumvar['loss_' + data_type + '_control'] = np.mean(ctr_loss)
        if FLAGS.auxiliary_depth:
            sumvar['loss_' + data_type + '_depth'] = np.mean(dep_loss)
        if FLAGS.auxiliary_odom:
            sumvar['loss_' + data_type + '_odom'] = np.mean(odo_loss)
        if FLAGS.rl: sumvar['loss_' + data_type + '_q'] = np.mean(q_loss)
        if len(activation_images) != 0:
            sumvar['conv_activations'] = activation_images
        if len(depth_predictions) != 0:
            sumvar['depth_predictions'] = depth_predictions
        if FLAGS.plot_histograms:
            for i, ep in enumerate(model.endpoints):
                sumvar['activations_{}'.format(ep)] = endpoint_activations[i]
        print(
            '>>{0} [{1[2]}/{1[1]}_{1[3]:02d}:{1[4]:02d}]: data {2}; calc {3}'.
            format(data_type.upper(), tuple(time.localtime()[0:5]),
                   print_dur(data_loading_time), print_dur(calculation_time)))
        # print('losses: tot {0:.3g}; ctrl {1:.3g}; depth {2:.3g}; odom {2:.3g}; q {3:.3g}'.format(np.mean(tot_loss), np.mean(ctr_loss), np.mean(dep_loss), np.mean(odo_loss), np.mean(q_loss)))
        if data_type == 'val' or data_type == 'test':
            print('{}'.format(str(sumvar)))
        sys.stdout.flush()
        return sumvar
Example 5
def main():
    print("ROOT = {}".format(ROOT))
    vae = models.VAE().to(device=DEVICE)
    optimizer = torch.optim.Adamax(vae.parameters(), lr=2e-5, betas=(0.5, 0.9))
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
                                                  lambda e: max(0.5**e, 0.05))
    if CONTINUE is not None:
        vae.load_state_dict(
            torch.load(ROOT + 'dump-{:06d}.pt'.format(CONTINUE)))
        # pre-advance the learning rate
        for _ in range(CONTINUE // 20000):
            scheduler.step()
        start_iter = CONTINUE + 1
    else:
        start_iter = 0

    # run loop
    with open(ROOT + 'out.csv', 'w') as logfile:
        print("batch;wi losses;z loss;x loss;ELBO", file=logfile)
        for b in range(start_iter, ITERNUM):
            if b % 20000 == 0:
                scheduler.step()

            vae.zero_grad()
            bmin = 1
            bmax = max(2, min(6, 1 + int(b / 2500)))
            loss_w = torch.zeros((), device=DEVICE)
            loss_z = torch.zeros((), device=DEVICE)
            loss_x = torch.zeros((), device=DEVICE)
            images = []
            recs = []
            gens = []
            for _ in range(BATCH_SPLIT):
                (img, col_labels,
                 loc_labels) = data.generate_batch(BATCHLEN // BATCH_SPLIT,
                                                   (bmin, bmax),
                                                   device=DEVICE)
                (rec_img, l_w, l_z, l_x,
                 K) = vae.rec_with_losses(img, (col_labels, loc_labels))
                loss_w += l_w / BATCH_SPLIT
                loss_z += l_z / BATCH_SPLIT
                loss_x += l_x / BATCH_SPLIT
                images.append(img)
                recs.append(rec_img.detach())
                with torch.no_grad():
                    gens.append(vae.generate((col_labels, loc_labels)))
            elbo = loss_w + loss_z + loss_x
            print("{};{:.2e};{:.2e};{:.2e};{:.2e}".format(
                b, float(loss_w), float(loss_z), float(loss_x), float(elbo)),
                  file=logfile)
            logfile.flush()
            # some flukes can make the loss get incredibly big on rare occasions; ignore them
            if math.isfinite(float(elbo)) and float(elbo) < 1e8:
                elbo.backward()
                optimizer.step()

            if b % 100 == 0:
                save_image(torch.cat(images, dim=0),
                           ROOT + "{0:06d}-orig.png".format(b),
                           nrow=16)
                save_image(torch.cat(recs, dim=0),
                           ROOT + "{0:06d}-rec.png".format(b),
                           nrow=16)
                save_image(torch.cat(gens, dim=0),
                           ROOT + "{0:06d}-gen.png".format(b),
                           nrow=16)

            if b % MODEL_DUMP_PERIOD == 0:
                torch.save(vae.state_dict(), ROOT + 'dump-{:06d}.pt'.format(b))
Example 6
    def train(self,
              cache,
              batch_size=32,
              predict_freq=100,
              show_freq=10,
              save_freq=500,
              max_epoch=10000,
              saveto='NewSong',
              step=319,
              conservativity=1,
              pre_trained_model=None):
        '''
        This is the train function for the biaxial_model. It also predicts while training.
        Args:
            cache: dict, containing all the note state matrices used as the training set;
            batch_size: int, how many samples are in one batch;
            predict_freq: int, every predict_freq steps we generate a new song;
            save_freq: int, the frequency at which the model is saved;
            show_freq: int, the frequency at which the loss is shown;
            max_epoch: int, the maximum number of training steps;
            saveto: str, the directory where the new songs are saved;
            step: int, the length of a generated song; one step means one step of the time model;
            conservativity: how conservative the number of notes in the generated song is;
            pre_trained_model: used to restore a previous training run.
        '''

        cur_model_name = 'biaxial_rnn_{}'.format(int(time.time()))
        batch_generator = data.generate_batch(cache, batch_size)
        val_batch_generator = data.generate_val_batch(cache, batch_size)

        loss_log = []
        min_loss = np.inf
        with tf.Session() as sess:
            merge = tf.summary.merge_all()
            writer = tf.summary.FileWriter("log/{}".format(cur_model_name),
                                           sess.graph)

            saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer())

            if pre_trained_model is not None:
                try:
                    print("Load the model from: {}".format(pre_trained_model))
                    saver.restore(sess, 'model/{}'.format(pre_trained_model))
                    #writer = tf.summary.FileWriterCache.get('log/{}'.format(pre_trained_model))
                except Exception:
                    print("Load model Failed!")
                    pass
            if self.new_trainer is not None:
                optimizer = self.optimizer2
            else:
                optimizer = self.optimizer

            for i in range(max_epoch):
                X_train, y_train = next(batch_generator)
                _, loss, merge_result = sess.run((optimizer, self.loss, merge),
                                                 feed_dict={
                                                     self.input_mat: X_train,
                                                     self.output_mat: y_train
                                                 })

                loss_log.append(loss)
                pickle.dump(
                    loss_log,
                    open('model/' + cur_model_name + '_loss_log.pkl', 'wb'))

                if i % show_freq == 0:
                    print('Step {0}: loss is {1}'.format(i, loss))
                if (i + 1) % 10 == 0:
                    writer.add_summary(merge_result, i)

                # generate a new song
                if (i + 1) % predict_freq == 0:
                    xIpt, xOpt = map(np.array, data.getPieceSegment(cache))
                    new_state_matrix = sess.run(self.new_song,
                                                feed_dict={
                                                    self.predict_seed:
                                                    xIpt[0],
                                                    self.step_to_sumulate:
                                                    [step],
                                                    self.conservativity:
                                                    [conservativity]
                                                })
                    newsong = np.concatenate(
                        (np.expand_dims(xOpt[0], 0), new_state_matrix))

                    songname = str(time.time()) + '.mid'
                    if not os.path.exists(os.path.join(saveto,
                                                       cur_model_name)):
                        os.makedirs(os.path.join(saveto, cur_model_name))
                    noteStateMatrixToMidi(newsong,
                                          name=os.path.join(
                                              saveto, cur_model_name,
                                              songname))
                    print('New song {} saved to \'{}\''.format(
                        songname, os.path.join(saveto, cur_model_name)))

                # save the models for restoring training
                if (i + 1) % save_freq == 0:
                    if not os.path.exists('model/'):
                        os.makedirs('model/')
                    saver.save(sess, 'model/{}'.format(cur_model_name))
                    print('{} Saved'.format(cur_model_name))

                    # Get validation data and validate
                    xIpt_val, xOpt_val = next(val_batch_generator)
                    val_loss = sess.run((self.loss),
                                        feed_dict={
                                            self.input_mat: xIpt_val,
                                            self.output_mat: xOpt_val
                                        })

                    print("Validation loss of {} achieved at step {}".format(
                        val_loss, i))
                    if val_loss < min_loss:
                        min_loss = val_loss

                        # Save the best model
                        saver.save(
                            sess, 'model/{}_{}'.format('best', cur_model_name))
                        print('{}_{} Saved'.format('best', cur_model_name))
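
A minimal driver sketch for the train method above, assuming a biaxial model class and the MIDI helpers seen in the other examples (a pieces dict and data.initialize_cache); every name except the keyword arguments is a placeholder.

# Hypothetical driver; biaxial_model and pieces are assumed to be defined elsewhere.
model = biaxial_model()
cache = data.initialize_cache(pieces)
model.train(cache,
            batch_size=32,
            predict_freq=100,
            save_freq=500,
            max_epoch=10000,
            saveto='NewSong')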
Example 7
                               transpose_b=True)

        # Initialize the TensorFlow variables
        init = tf.global_variables_initializer()

# Training
num_steps = 1001

with tf.Session(graph=graph) as session:
    # Initialize all variables
    init.run()
    print("Initialized")

    average_loss = 0
    for step in range(num_steps):
        batch_inputs, batch_labels = data.generate_batch(
            batch_size, num_skips, skip_window)
        feed_dict = {train_inputs: batch_inputs, train_labels: batch_labels}

        # Update the parameters
        _, loss_val = session.run([optimizer, loss], feed_dict=feed_dict)
        average_loss += loss_val

        if step % 200 == 0:
            if step > 0:
                # Average over the 200 steps accumulated since the last report.
                average_loss /= 200
            print("Average loss at step", step, ":", average_loss)
            average_loss = 0

        # Compute the similarity of the validation samples
        if step % 1000 == 0:
            sim = similarity.eval()
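
The snippet above starts midway through its graph definition: the dangling `transpose_b=True)` is the tail of a similarity matmul, and the training loop expects `train_inputs`, `train_labels`, `optimizer`, `loss` and `similarity` to already exist. Below is a minimal sketch of such a graph, assuming the standard skip-gram/NCE setup; the sizes and `valid_examples` are placeholder values, not taken from the original.

import math
import numpy as np
import tensorflow as tf

# Placeholder hyperparameters; not from the original snippet.
vocabulary_size = 50000
embedding_size = 128
num_sampled = 64
batch_size = 128
valid_examples = np.random.choice(100, 16, replace=False)

graph = tf.Graph()
with graph.as_default():
    train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
    train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
    valid_dataset = tf.constant(valid_examples, dtype=tf.int32)

    # Embedding matrix and lookup for the current batch of center words.
    embeddings = tf.Variable(
        tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
    embed = tf.nn.embedding_lookup(embeddings, train_inputs)

    # Noise-contrastive estimation loss over sampled negatives.
    nce_weights = tf.Variable(
        tf.truncated_normal([vocabulary_size, embedding_size],
                            stddev=1.0 / math.sqrt(embedding_size)))
    nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
    loss = tf.reduce_mean(
        tf.nn.nce_loss(weights=nce_weights,
                       biases=nce_biases,
                       labels=train_labels,
                       inputs=embed,
                       num_sampled=num_sampled,
                       num_classes=vocabulary_size))
    optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)

    # Cosine similarity between the validation words and all embeddings;
    # this is the matmul whose transpose_b=True appears at the top of the snippet.
    norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keepdims=True))
    normalized_embeddings = embeddings / norm
    valid_embeddings = tf.nn.embedding_lookup(normalized_embeddings,
                                              valid_dataset)
    similarity = tf.matmul(valid_embeddings, normalized_embeddings,
                           transpose_b=True)

    init = tf.global_variables_initializer()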
Example 8
def main():
    if X_SIGMA is not None:
        vae = models.VAE(x_logvar=2 * math.log(X_SIGMA)).to(device=DEVICE)
    else:
        vae = models.VAE().to(device=DEVICE)
    optimizer = torch.optim.Adamax(vae.parameters(), lr=1e-4, betas=(0.5, 0.9))
    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer,
                                                  lambda e: max(0.5**e, 0.01))

    # run loop
    with open(ROOT + 'out.csv', 'w') as logfile:
        print('batch;wi losses;z loss;x loss;ELBO', file=logfile)
        for b in range(ITERNUM):
            if b % 20000 == 0:
                scheduler.step()

            vae.zero_grad()
            min_curves = 1
            max_curves = max(2, min(16, 1 + int(b / 2500)))
            # average the loss over different batch sizes
            loss_w = torch.zeros((), device=DEVICE)
            loss_z = torch.zeros((), device=DEVICE)
            loss_x = torch.zeros((), device=DEVICE)
            for _ in range(BATCH_SPLIT):
                (fs, curves) = data.generate_batch(BATCHLEN // BATCH_SPLIT,
                                                   freqrg=(1, 10),
                                                   nfreqrg=(min_curves,
                                                            max_curves),
                                                   device=DEVICE)
                (l_w, l_z, l_x, K) = vae.rec_losses(curves, fs)
                loss_w += l_w / BATCH_SPLIT
                loss_z += l_z / BATCH_SPLIT
                loss_x += l_x / BATCH_SPLIT
            elbo = loss_w + loss_z + loss_x
            print("{};{:.2e};{:.2e};{:.2e};{:.2e}".format(
                b, float(loss_w), float(loss_z), float(loss_x), float(elbo)),
                  file=logfile)
            logfile.flush()
            elbo.backward()
            # update gradients
            optimizer.step()
            if b % DUMP_PERIOD == 0:
                (mus, sigmas) = vae.rec_output(curves, fs)
                with open(ROOT + 'curve-{:06d}.csv'.format(b), 'w') as outfile:
                    print('i;x;mu;sigma', file=outfile)
                    for i in range(curves.size(1)):
                        print("{};{:.3f};{:.3f};{:.3f}".format(
                            i, curves[0, i], mus[0, i], sigmas[0, i]),
                              file=outfile)
                xs = vae.generate_same_latent(fs[0:1, :], 4)
                with open(ROOT + 'gen-{:06d}.csv'.format(b), 'w') as outfile:
                    for i in range(xs.size(1)):
                        print("{}".format(i), file=outfile, end='')
                        for j in range(xs.size(0)):
                            print(";{:.3f}".format(xs[j, i]),
                                  file=outfile,
                                  end='')
                        print("", file=outfile)

            if b % MODEL_DUMP_PERIOD == 0:
                torch.save(vae.state_dict(), ROOT + 'dump-{:06d}.pt'.format(b))
Example 9
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import numpy as np
from data import produce_signals
from data import generate_batch

signals = produce_signals()
# for a in signals:
#     print(a)

x_batches, y_batches = generate_batch(10, signals)
for i in range(10):
    print("batch %d", i)
    print(x_batches[i])
    print(y_batches[i])

# tmp = [1, 2, 3, 4, 5, 6]
# xdata = np.zeros((1, 5))
# ydata = np.zeros((1, 5))
# print(tmp)
# xdata = tmp[: -1]
# ydata = tmp[: 1]
# print(xdata)
# print(ydata)
Example 10
def run_episode(mode, sumvar, model, update_importance_weights=False):
    '''Run over the batches of one episode and return the different losses.
    mode: 'train', 'val' or 'test'
    '''
    depth_predictions = []
    start_time = time.time()
    data_loading_time = 0
    calculation_time = 0
    start_data_time = time.time()
    # results = {}
    # results['control'] = []
    results = {'total': []}
    results['ce'] = []
    for k in model.lll_losses.keys():
        results['lll_' + k] = []
    # results['accuracy'] = []
    # if FLAGS.auxiliary_depth: results['depth'] = []

    all_inputs = []

    for index, ok, batch in data.generate_batch(mode):
        data_loading_time += (time.time() - start_data_time)
        start_calc_time = time.time()
        if ok:
            inputs = np.array([_['img'] for _ in batch])
            targets = np.array([[_['ctr']] for _ in batch])
            if update_importance_weights and len(all_inputs) < 2000:
                try:
                    all_inputs = np.concatenate([all_inputs, inputs], axis=0)
                except ValueError:
                    # First batch: all_inputs is still an empty Python list.
                    all_inputs = inputs[:]
            # print("targets: {}".format(targets))
            # try:
            target_depth = np.array([_['depth'] for _ in batch]).reshape(
                (-1, 55, 74)) if FLAGS.auxiliary_depth else []
            if len(target_depth) == 0 and FLAGS.auxiliary_depth:
                raise ValueError('No depth in batch.')
            if mode == 'train':
                # model.backward(inputs, targets=targets, depth_targets=target_depth, sumvar=sumvar)
                losses = model.backward(inputs,
                                        targets=targets,
                                        depth_targets=target_depth,
                                        sumvar=sumvar)
                for k in losses.keys():
                    results[k].append(losses[k])
            elif mode == 'val' or mode == 'test':
                _, aux_results = model.forward(inputs,
                                               auxdepth=False,
                                               targets=targets,
                                               depth_targets=target_depth)
            if index == 1 and mode == 'val' and FLAGS.plot_depth:
                depth_predictions = tools.plot_depth(inputs, target_depth,
                                                     model)
        else:
            print('Failed to run {}.'.format(mode))
        calculation_time += (time.time() - start_calc_time)
        start_data_time = time.time()

    if update_importance_weights:
        model.update_importance_weights(all_inputs)

    for k in results.keys():
        if len(results[k]) != 0:
            sumvar['Loss_' + mode + '_' + k] = np.mean(results[k])
    if len(depth_predictions) != 0:
        sumvar['depth_predictions'] = depth_predictions
    print('>>{0} [{1[2]}/{1[1]}_{1[3]:02d}:{1[4]:02d}]: data {2}; calc {3}'.
          format(mode.upper(), tuple(time.localtime()[0:5]),
                 tools.print_dur(data_loading_time),
                 tools.print_dur(calculation_time)))
    return sumvar