Example #1

# Imports inferred from the usage below; Dataset and remove_all_files come from
# this project's own modules (exact import paths are not shown in the snippet),
# while TrainConfig, TFDatasetInput, ModelSaver, RunUpdateOps and logger match
# tensorpack's API. The signature is reconstructed from the docstring.
from tensorpack import TrainConfig, TFDatasetInput, ModelSaver, RunUpdateOps
from tensorpack.utils import logger

from hparam import hparam as hp
from models import IAFVocoder


def train(case='default', ckpt=None, gpu=None, r=False):
    '''
    :param case: experiment case name
    :param ckpt: checkpoint to load model
    :param gpu: comma separated list of GPU(s) to use
    :param r: start from the beginning (remove existing logs)
    '''

    hp.set_hparam_yaml(case)
    if r:
        remove_all_files(hp.logdir)

    # model
    model = IAFVocoder(batch_size=hp.train.batch_size, length=hp.signal.length)

    # dataset
    dataset = Dataset(hp.data_path,
                      hp.train.batch_size,
                      length=hp.signal.length)
    print('dataset size is {}'.format(len(dataset.wav_files)))

    # set logger for event and model saver
    logger.set_logger_dir(hp.logdir)

    train_conf = TrainConfig(
        model=model,
        data=TFDatasetInput(dataset()),
        callbacks=[
            ModelSaver(checkpoint_dir=hp.logdir),
            RunUpdateOps()  # for batch norm, exponential moving average
            # TODO GenerateCallback()
        ],
        max_epoch=hp.train.num_epochs,
    )
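The listing stops right after the TrainConfig is built. With tensorpack, a config like this is normally handed to launch_train_with_config; the sketch below shows that step under stated assumptions (the SimpleTrainer choice and the SaverRestore resume logic are not from the original).

# Sketch only: launching a tensorpack TrainConfig like the one above.
from tensorpack import SimpleTrainer, launch_train_with_config
from tensorpack.tfutils.sessinit import SaverRestore

def launch(train_conf, ckpt=None):
    if ckpt:
        # resume weights from an existing checkpoint (assumed behaviour)
        train_conf.session_init = SaverRestore(ckpt)
    launch_train_with_config(train_conf, SimpleTrainer())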
Example #2

# Imports inferred from the usage below; Dataset comes from this project's own
# data module (exact import path is not shown in the snippet).
import tensorflow as tf

from hparam import hparam as hp
from models import IAFVocoder


def generate(case='default', ckpt=None, debug=False):
    '''
    :param case: experiment case name
    :param ckpt: checkpoint to load model
    :param debug: use debug mode session.
    '''

    hp.set_hparam_yaml(case)

    with tf.Graph().as_default():
        # dataset
        dataset = Dataset(hp.data_path, hp.generate.batch_size, length=hp.generate.length, is_training=False)
        print('dataset size is {}'.format(len(dataset.wav_files)))

        # model
        model = IAFVocoder(batch_size=hp.generate.batch_size, length=hp.generate.length)

        # sample
        iterator = dataset().make_one_shot_iterator()
        gt_wav_op, melspec = iterator.get_next()

        # feed forward
        pred_wav_op = model(gt_wav_op, melspec, is_training=False)

        # summaries
        tf.summary.audio('audio/pred', pred_wav_op, hp.signal.sr)
        tf.summary.audio('audio/gt', gt_wav_op, hp.signal.sr)
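The listing is cut off after the summaries are defined. Below is a rough sketch of how the rest of such a generate() is commonly driven; apart from ckpt (a parameter of generate), everything here, including the hp.logdir summary directory, is an assumption rather than the original code.

        # -- sketch of the omitted remainder, not taken from the original --
        summ_op = tf.summary.merge_all()
        saver = tf.train.Saver()

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            if ckpt is not None:
                saver.restore(sess, ckpt)  # load trained weights
            writer = tf.summary.FileWriter(hp.logdir, sess.graph)
            writer.add_summary(sess.run(summ_op))
            writer.close()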
Example #3
# Imports inferred from the usage below; Config, Dataset, TextRemoval and
# Progbar come from this project's own modules or helpers whose import paths
# are not shown in the snippet.
import os
import random

import numpy as np
import tensorflow as tf


def main():
    config_path = os.path.join('config.yml')
    config = Config(config_path)
    config.print()
    # Init cuda environment
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(e) for e in config.GPU)

    # Fix random seeds to make runs more reproducible
    tf.set_random_seed(config.SEED)
    np.random.seed(config.SEED)
    random.seed(config.SEED)

    # Init training data
    dataset = Dataset(config)
    batch_concat = dataset.batch_concat
    val_concat = dataset.val_concat

    # Init the model
    model = TextRemoval(config)

    gen_loss, dis_loss, t_psnr, t_ssim = model.build_whole_model(batch_concat)
    gen_optim, dis_optim = model.build_optim(gen_loss, dis_loss)

    val_psnr, val_ssim = model.build_validation_model(val_concat)

    # Create the graph
    config_graph = tf.ConfigProto()
    config_graph.gpu_options.allow_growth = True

    with tf.Session(config=config_graph) as sess:
        # Merge all the summaries
        merged = tf.summary.merge_all()

        train_writer = tf.summary.FileWriter(config.CHECKPOINTS + 'train',
                                             sess.graph)
        eval_writer = tf.summary.FileWriter(config.CHECKPOINTS + 'eval')
        saver = tf.train.Saver()

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        # Restore training state from a checkpoint, if one exists
        checkpoint = tf.train.get_checkpoint_state(config.LOAD_MODEL)
        if checkpoint and checkpoint.model_checkpoint_path:
            print(checkpoint.model_checkpoint_path)
            meta_graph_path = checkpoint.model_checkpoint_path + ".meta"
            restore = tf.train.import_meta_graph(meta_graph_path)
            restore.restore(sess,
                            tf.train.latest_checkpoint(config.LOAD_MODEL))
            epoch = int(meta_graph_path.split("-")[-1].split(".")[0])
            step = int(epoch * dataset.len_train / dataset.batch_size)
            # flag=1
        else:
            step = 0
            epoch = 0

        # Progress bar over one epoch of training batches
        progbar = Progbar(dataset.len_train // dataset.batch_size,
                          width=20,
                          stateful_metrics=[
                              'epoch', 'iter', 'gen_loss', 'dis_loss', 'psnr',
                              'ssim'
                          ])
        tmp_epoch = epoch
        while epoch < config.EPOCH:
            step += 1
            epoch = int(step * dataset.batch_size / dataset.len_train)
            if tmp_epoch < epoch:
                tmp_epoch = epoch
                # print("\n")
                progbar = Progbar(dataset.len_train // dataset.batch_size,
                                  width=20,
                                  stateful_metrics=[
                                      'epoch', 'iter', 'gen_loss', 'dis_loss',
                                      'psnr', 'ssim'
                                  ])

            g_loss, _ = sess.run([gen_loss, gen_optim])
            d_loss, _ = sess.run([dis_loss, dis_optim])
            tr_psnr, tr_ssim = sess.run([t_psnr, t_ssim])
            logs = [("epoch", epoch), ("iter", step), ("g_loss", g_loss),
                    ("d_loss", d_loss), ("psnr", tr_psnr), ("ssim", tr_ssim)]
            progbar.add(1, values=logs)

            if step % config.SUMMARY_INTERVAL == 0:
                # Run validation
                v_psnr = []
                v_ssim = []
                for i in range(dataset.len_val // dataset.val_batch_size):
                    val_psnr_tmp, val_ssim_tmp = sess.run([val_psnr, val_ssim])
                    v_psnr.append(val_psnr_tmp)
                    v_ssim.append(val_ssim_tmp)
                eval_writer.add_summary(
                    tf.Summary(value=[
                        tf.Summary.Value(tag='val_psnr',
                                         simple_value=np.mean(v_psnr))
                    ]), epoch)
                eval_writer.add_summary(
                    tf.Summary(value=[
                        tf.Summary.Value(tag='val_ssim',
                                         simple_value=np.mean(v_ssim))
                    ]), epoch)

                # Train summary
                summary = sess.run(merged)
                train_writer.add_summary(summary, epoch)
            if step % config.SAVE_INTERVAL == 0:
                if checkpoint and checkpoint.model_checkpoint_path:
                    saver.save(sess,
                               config.CHECKPOINTS + 'textremoval',
                               global_step=epoch,
                               write_meta_graph=False)
                else:
                    saver.save(sess,
                               config.CHECKPOINTS + 'textremoval',
                               global_step=epoch,
                               write_meta_graph=True)
    sess.close()
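The validation logging above builds tf.Summary protos by hand once per metric, and the same pattern recurs in Example #5. A small helper (hypothetical, not part of either example) keeps that to one line per metric.

# Hypothetical helper, not from the original examples: wrap a scalar in a
# tf.Summary proto so it can be written with FileWriter.add_summary().
def scalar_summary(tag, value):
    return tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=float(value))])

# usage inside the validation block above:
#   eval_writer.add_summary(scalar_summary('val_psnr', np.mean(v_psnr)), epoch)
#   eval_writer.add_summary(scalar_summary('val_ssim', np.mean(v_ssim)), epoch)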
Example #4
# Imports inferred from the usage below; cPickle, shuffle, Dataset,
# LSTMclassifier, train_model and eval_model come from third-party or
# project-local modules whose exact import paths are not shown in the snippet.
import time

import numpy as np
import torch


def main(params):
    
    data = Dataset()
    data.load_data(params["data"])
    data.train, data.train_lab = shuffle(data.train, data.train_lab)
    data.train = [torch.tensor(x) for x in data.train]
    data.test = [torch.tensor(x) for x in data.test]
    data.val = [torch.tensor(x) for x in data.val]
    data.train_lab = torch.tensor([np.argmax(x) for x in data.train_lab], dtype=torch.int64)
    data.test_lab = torch.tensor([np.argmax(x) for x in data.test_lab], dtype=torch.int64)
    data.val_lab = torch.tensor([np.argmax(x) for x in data.val_lab], dtype=torch.int64)
    
    batch_size = 256
    n_gram = 1
    num_epoch = 20
    if "num_epoch" in params:
        num_epoch = int(params["num_epoch"])
    if "n_gram" in params:
        n_gram = int(params["n_gram"])
    if "batch" in params:
        batch_size = int(params["batch"])
    
    print("struture used:", params["cell"])
    print("batch_size:", batch_size)
    print("no. epochs:", num_epoch)
    print("n-gram:", n_gram)
    
    n_hidden = 300
    input_size = 300*n_gram 

    with open(data.embpath, 'rb') as f:
        W_embd = np.array(cPickle.load(f, encoding="latin1"))
    W_embd = torch.from_numpy(W_embd)
    classifier = LSTMclassifier(input_size, n_hidden, data.num_class, W_embd, params["cell"], n_gram)
   
    
    for epoch in range(num_epoch):

        start_time = time.time()        
        train_loss, train_acc = train_model(classifier, data.train, data.train_lab, batch_size, epoch)
        end_time = time.time()
        elapsed_time = end_time - start_time
        hours, rest = divmod(elapsed_time, 3600)
        minutes, sec = divmod(rest, 60)
        """
        Change the path to save the model weights and results
        """
        torch.save(classifier, "./checkpoints/"+data.data+"_" + params['cell'] +"_epoch"+str(epoch+1)+".pth")

        val_loss, val_acc = eval_model(classifier, data.val, data.val_lab, batch_size)
        test_loss, test_acc = eval_model(classifier, data.test, data.test_lab, batch_size)
        
        print(f'Epoch: {epoch+1:02}, Time(hr,min): {hours, minutes}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc:.2f}%, Val. Loss: {val_loss:.3f}, Val. Acc: {val_acc:.2f}%, Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}%')
        
        with open("./result/" + params['cell'] + "_" + data.data + ".txt", "a+") as text_file:
            text_file.write(f'Epoch: {epoch+1:02}, Time(hr,min): {hours, minutes}, Train Loss: {train_loss:.3f}, Train Acc: {train_acc:.2f}%, Val. Loss: {val_loss:.3f}, Val. Acc: {val_acc:.2f}%, Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}%\n')
    
    print(f'Test Loss: {test_loss:.3f}, Test Acc: {test_acc:.2f}%')

    print('done') 
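main() takes a plain dict of settings; below is a usage sketch with placeholder values for the keys it reads (the data path and cell name are illustrative only).

# Example invocation; all values below are placeholders, only the keys are
# taken from the function above.
if __name__ == "__main__":
    main({
        "data": "path/to/dataset",  # forwarded to Dataset.load_data()
        "cell": "LSTM",             # cell/structure name passed to LSTMclassifier
        "num_epoch": "20",
        "n_gram": "1",
        "batch": "256",
    })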
Example #5
def main():
    config_path = os.path.join('config.yml')
    config = Config(config_path)
    config.print()

    # Init cuda environment
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(str(e) for e in config.GPU)

    # Fix random seeds to make runs more reproducible
    tf.set_random_seed(config.SEED)
    np.random.seed(config.SEED)
    random.seed(config.SEED)

    # Init training data
    with tf.variable_scope('input_data'):
        dataset = Dataset(config)
        batch_img = dataset.batch_image
        batch_mask = dataset.batch_mask
        val_img = dataset.val_image
        val_mask = dataset.val_mask

    # Init the model
    model = GDNInpainting(config)

    # Build train model
    gen_loss, dis_loss, psnr = model.build_whole_model(batch_img, batch_mask)
    gen_optim, dis_optim = model.build_optim(gen_loss, dis_loss)

    # Build validate model
    val_weighted_loss, val_l1_loss, val_dis_loss, val_psnr = model.build_validation_model(
        val_img, val_mask)

    # Create the graph
    config_graph = tf.ConfigProto()
    config_graph.gpu_options.allow_growth = True
    with tf.Session(config=config_graph) as sess:
        # Merge all the summaries
        merged = tf.summary.merge_all()

        train_writer = tf.summary.FileWriter(config.CHECKPOINTS + 'train',
                                             sess.graph)
        eval_writer = tf.summary.FileWriter(config.CHECKPOINTS + 'eval')

        saver = tf.train.Saver()

        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())

        if config.LOAD_MODEL is not None:
            checkpoint = tf.train.get_checkpoint_state(config.LOAD_MODEL)
            meta_graph_path = checkpoint.model_checkpoint_path + ".meta"
            restore = tf.train.import_meta_graph(meta_graph_path)
            restore.restore(sess,
                            tf.train.latest_checkpoint(config.LOAD_MODEL))
            step = int(meta_graph_path.split("-")[2].split(".")[0]) * (
                dataset.len_train / dataset.batch_size)
        else:
            step = 0

        # Start input enqueue threads
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        progbar = Progbar(
            dataset.len_train,
            width=20,
            stateful_metrics=['epoch', 'iter', 'gen_loss', 'dis_loss', 'psnr'])
        try:
            while not coord.should_stop():
                step += 1
                epoch = int(step / dataset.len_train * dataset.batch_size)
                g_loss, d_loss, t_psnr, _, _ = sess.run(
                    [gen_loss, dis_loss, psnr, gen_optim, dis_optim])

                logs = [
                    ("epoch", epoch),
                    ("iter", step),
                    ("gen_loss", g_loss),
                    ("dis_loss", d_loss),
                    ("psnr", t_psnr),
                ]
                progbar.add(dataset.batch_size, values=logs)

                if step % (dataset.len_train / dataset.batch_size) == 0:
                    progbar = Progbar(dataset.len_train,
                                      width=20,
                                      stateful_metrics=[
                                          'epoch',
                                          'iter',
                                          'gen_loss',
                                          'dis_loss',
                                      ])

                if (step + 1) % config.SUMMARY_INTERVAL == 0:
                    # Run validation
                    v_psnr = []
                    w_loss = []
                    l1_loss = []
                    dd_loss = []

                    for i in range(dataset.len_val // dataset.val_batch_size):
                        ts_psnr, ts_weighted_loss, ts_l1_loss, ts_dd_loss = sess.run(
                            [
                                val_psnr, val_weighted_loss, val_l1_loss,
                                val_dis_loss
                            ])
                        v_psnr.append(ts_psnr)
                        w_loss.append(ts_weighted_loss)
                        l1_loss.append(ts_l1_loss)
                        dd_loss.append(ts_dd_loss)

                    eval_writer.add_summary(
                        tf.Summary(value=[
                            tf.Summary.Value(tag='psnr',
                                             simple_value=np.mean(v_psnr))
                        ]), epoch)
                    eval_writer.add_summary(
                        tf.Summary(value=[
                            tf.Summary.Value(tag='loss/gen_weighted_loss',
                                             simple_value=np.mean(w_loss))
                        ]), epoch)
                    eval_writer.add_summary(
                        tf.Summary(value=[
                            tf.Summary.Value(tag='loss/gen_l1_loss',
                                             simple_value=np.mean(l1_loss))
                        ]), epoch)
                    eval_writer.add_summary(
                        tf.Summary(value=[
                            tf.Summary.Value(tag='loss/dis_loss',
                                             simple_value=np.mean(dd_loss))
                        ]), epoch)

                    # Train summary
                    summary = sess.run(merged)
                    train_writer.add_summary(summary, epoch)

                if (step + 1) % config.SAVE_INTERVAL == 0:
                    saver.save(sess,
                               config.CHECKPOINTS + 'log',
                               global_step=epoch,
                               write_meta_graph=False)

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            coord.request_stop()

        # Wait for threads to finish
        coord.join(threads)
    sess.close()
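Examples #3 and #5 both recover their resume point by parsing the checkpoint filename, each in a slightly different way. The helper below is a hedged sketch (hypothetical, not in either example) that covers both, assuming the checkpoints were written with Saver.save(..., global_step=epoch) so the number after the last '-' in the path is the epoch.

# Hypothetical helper: recover (epoch, step) from the latest checkpoint in a
# directory, e.g. ".../textremoval-12" or ".../log-12".
def resume_point(checkpoint_dir, len_train, batch_size):
    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    if not (ckpt and ckpt.model_checkpoint_path):
        return 0, 0  # nothing to resume from
    epoch = int(ckpt.model_checkpoint_path.split("-")[-1])
    step = epoch * len_train // batch_size
    return epoch, step

# e.g. epoch, step = resume_point(config.LOAD_MODEL, dataset.len_train, dataset.batch_size)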