Example #1
def train():
    optim = tf.train.AdamOptimizer(args.meta_lr)
    train_op = get_train_op(optim, net['cent'], clip=[-10., 10.])

    saver = tf.train.Saver(tf.trainable_variables())
    logfile = open(os.path.join(savedir, 'train.log'), 'w')

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())

    # train
    train_logger = Accumulator('cent', 'acc')
    train_to_run = [train_op, net['cent'], net['acc']]

    for i in range(args.n_train_iters + 1):
        # feed_dict
        epi = model.episodes
        placeholders = [epi['xs'], epi['ys'], epi['xq'], epi['yq']]
        episode = data.generate_episode(args,
                                        training=True,
                                        n_episodes=args.metabatch)
        fdtr = dict(zip(placeholders, episode))

        train_logger.accum(sess.run(train_to_run, feed_dict=fdtr))

        if i % 5 == 0:
            line = 'Iter %d start, learning rate %f' % (i, args.meta_lr)
            print('\n' + line)
            logfile.write('\n' + line + '\n')
            train_logger.print_(header='train',
                                episode=i * args.metabatch,
                                logfile=logfile)
            train_logger.clear()

        # validation (with test classes... be cautious!)
        if i % 20 == 0:
            test_logger = Accumulator('cent', 'acc')
            test_to_run = [net['cent'], net['acc']]
            for j in range(10):
                # feed_dict
                epi = model.episodes
                placeholders = [epi['xs'], epi['ys'], epi['xq'], epi['yq']]
                episode = data.generate_episode(args,
                                                training=False,
                                                n_episodes=args.metabatch)
                fdte = dict(zip(placeholders, episode))
                test_logger.accum(sess.run(test_to_run, feed_dict=fdte))

            test_logger.print_(header='test ',
                               episode=i * args.metabatch,
                               logfile=logfile)
            test_logger.clear()

        if i % args.save_freq == 0:
            saver.save(sess, os.path.join(savedir, 'model'))

    logfile.close()
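
The train() above delegates its update step to a get_train_op helper that is not shown on this page. Judging from the call sites here and in later examples (an optimizer, a loss tensor, a clip range, optional global_step and var_list), it most plausibly clips each gradient elementwise before applying it. A sketch under that assumption — the projects' actual helper may differ:

import tensorflow as tf

def get_train_op(optim, loss, clip=None, global_step=None, var_list=None):
    # compute gradients, optionally clip each one elementwise, then apply
    grads_and_vars = optim.compute_gradients(loss, var_list=var_list)
    if clip is not None:
        lo, hi = clip
        grads_and_vars = [(tf.clip_by_value(g, lo, hi), v)
                          for g, v in grads_and_vars if g is not None]
    return optim.apply_gradients(grads_and_vars, global_step=global_step)
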
Example #2
def test():
    sess = tf.Session()
    saver = tf.train.Saver()
    saver.restore(sess, os.path.join(savedir, 'model'))
    logger = Accumulator('cent', 'acc')
    for j in range(n_test_batches):
        logger.accum(sess.run([tcent, tacc]))
    logger.print_(header='test')
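
Every example on this page logs through the same Accumulator helper, which is also not shown here. A minimal sketch of what it might look like, inferred only from how it is called (accum, print_, clear) — the real implementation in these projects may differ:

import numpy as np

class Accumulator:
    """Running-average logger; a sketch reconstructed from usage, not the original."""
    def __init__(self, *names):
        self.names = names
        self.clear()

    def clear(self):
        self.buffer = {name: [] for name in self.names}

    def accum(self, values):
        if not isinstance(values, (list, tuple)):
            values = [values]
        # drop non-numeric results such as the None returned for train_op
        values = [v for v in values if v is not None]
        for name, value in zip(self.names, values):
            self.buffer[name].append(value)

    def print_(self, header='', logfile=None, **kwargs):
        parts = [header]
        parts += ['%s %f' % (n, np.mean(v)) for n, v in self.buffer.items()]
        parts += ['%s %s' % (k, v) for k, v in kwargs.items()]
        line = ' '.join(parts)
        print(line)
        if logfile is not None:
            logfile.write(line + '\n')
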
Example #3
def train():
    if args.model == 'softmax':
        loss = net['cent'] + net['wd']
    else:
        loss = net['cent'] + net['wd'] + net['kl'] + net['aux'] + net['neg_ent']

    global_step = tf.train.get_or_create_global_step()
    lr_step = n_train_batches * args.n_epochs // 3  # schedule boundaries must be integers
    lr = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
                                     [lr_step, lr_step * 2],
                                     [1e-3, 1e-4, 1e-5])
    train_op = tf.train.AdamOptimizer(lr).minimize(loss,
                                                   global_step=global_step)

    saver = tf.train.Saver(net['weights'])
    logfile = open(os.path.join(savedir, 'train.log'), 'w', 1)  # line-buffered

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    train_logger = Accumulator('cent', 'acc')
    train_to_run = [train_op, net['cent'], net['acc']]
    val_logger = Accumulator('cent', 'acc')
    val_to_run = [tnet['cent'], tnet['acc']]

    for i in range(args.n_epochs):
        # shuffle the training data every epoch
        xytr = np.concatenate((xtr, ytr), axis=1)
        np.random.shuffle(xytr)
        xtr_, ytr_ = xytr[:, :784], xytr[:, 784:]  # 784 input dims, then labels

        line = 'Epoch %d start, learning rate %f' % (i + 1, sess.run(lr))
        print(line)
        logfile.write(line + '\n')
        train_logger.clear()
        start = time.time()
        for j in range(n_train_batches):
            bx, by = xtr_[j * bs:(j + 1) * bs, :], ytr_[j * bs:(j + 1) * bs, :]
            train_logger.accum(sess.run(train_to_run, {x: bx, y: by}))
        train_logger.print_(header='train',
                            epoch=i + 1,
                            time=time.time() - start,
                            logfile=logfile)

        val_logger.clear()
        for j in range(n_val_batches):
            bx, by = xva[j * bs:(j + 1) * bs, :], yva[j * bs:(j + 1) * bs, :]
            val_logger.accum(sess.run(val_to_run, {x: bx, y: by}))
        val_logger.print_(header='val',
                          epoch=i + 1,
                          time=time.time() - start,
                          logfile=logfile)
        print()
        logfile.write('\n')

    logfile.close()
    saver.save(sess, os.path.join(savedir, 'model'))
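
The schedule above cuts the learning rate at one third and two thirds of training, hence the integer lr_step. A quick standalone check of tf.train.piecewise_constant (TF 1.x) with made-up boundaries, to show where the cuts land:

import tensorflow as tf

step = tf.placeholder(tf.int32, [])
lr = tf.train.piecewise_constant(step, [100, 200], [1e-3, 1e-4, 1e-5])

with tf.Session() as sess:
    for s in [0, 100, 101, 200, 201]:
        print(s, sess.run(lr, {step: s}))
# boundaries are inclusive on the left segment:
# step <= 100 -> 1e-3, 100 < step <= 200 -> 1e-4, step > 200 -> 1e-5
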
Example #4
def test():
    sess = tf.Session()
    saver = tf.train.Saver(tnet['weights'])
    saver.restore(sess, os.path.join(savedir, 'model'))

    logfile = open(os.path.join(savedir, 'test.log'), 'w', 1)  # line-buffered
    logger = Accumulator('cent', 'acc')
    logger.accum(sess.run([tnet['cent'], tnet['acc']], {x: xte, y: yte}))
    logger.print_(header='test', logfile=logfile)
    logfile.close()
Example #5
def test():
    sess = tf.Session()
    saver = tf.train.Saver(tnet['weights'])
    saver.restore(sess, os.path.join(savedir, 'model'))

    logger = Accumulator('elbo')
    for j in range(n_test_batches):
        bx = xte[j * args.batch_size:(j + 1) * args.batch_size, :]
        logger.accum(sess.run(tnet['elbo'], {x: bx}))
    print()
    logger.print_(header='test')
    print()
Example #6
def train():
    loss = -net['elbo']  # negative ELBO

    global_step = tf.train.get_or_create_global_step()
    lr = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
                                     [n_train_batches * args.n_epochs // 2],
                                     [1e-3, 1e-4])
    train_op = tf.train.AdamOptimizer(lr).minimize(loss,
                                                   global_step=global_step)

    saver = tf.train.Saver(net['weights'])
    logfile = open(os.path.join(savedir, 'train.log'), 'w', 1)  # line-buffered

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # to run
    train_logger = Accumulator('elbo')
    train_to_run = [train_op, net['elbo']]

    for i in range(args.n_epochs):
        # shuffle the training data
        idx = np.random.permutation(1000)
        xtr_ = xtr[idx]

        # run the epoch
        line = 'Epoch %d start, learning rate %f' % (i + 1, sess.run(lr))
        print('\n' + line)
        logfile.write('\n' + line + '\n')
        train_logger.clear()
        start = time.time()
        for j in range(n_train_batches):
            bx = xtr_[j * args.batch_size:(j + 1) * args.batch_size, :]
            train_logger.accum(sess.run(train_to_run, {x: bx}))
        train_logger.print_(header='train',
                            epoch=i + 1,
                            time=time.time() - start,
                            logfile=logfile)

    # save the model
    logfile.close()
    saver.save(sess, os.path.join(savedir, 'model'))
Example #7
def train():
    saver = tf.train.Saver()
    logfile = open(os.path.join(savedir, 'train.log'), 'w', 1)  # line-buffered

    train_logger = Accumulator('cent', 'acc')
    train_to_run = [train_op, cent, acc]
    test_logger = Accumulator('cent', 'acc')
    test_to_run = [tcent, tacc]

    argdict = vars(args)
    print(argdict)
    for k, v in argdict.items():
        logfile.write(k + ': ' + str(v) + '\n')
    logfile.write('\n')

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    for i in range(args.n_epochs):
        line = 'Epoch %d start, learning rate %f' % (i+1, sess.run(lr))
        print(line)
        logfile.write(line + '\n')
        start = time.time()
        train_logger.clear()
        for j in range(n_train_batches):
            train_logger.accum(sess.run(train_to_run))
            if (j+1) % args.print_freq == 0:
                train_logger.print_(header='train', epoch=i+1, it=j+1,
                        time=time.time()-start, logfile=logfile)

        if (i+1) % args.eval_freq == 0:
            test_logger.clear()
            for j in range(n_test_batches):
                test_logger.accum(sess.run(test_to_run))
            test_logger.print_(header='test', epoch=i+1,
                    time=time.time()-start, logfile=logfile)

        print()
        logfile.write('\n')

        if (i+1) % args.save_freq == 0:
            saver.save(sess, os.path.join(savedir, 'model'))
Example #8
def train():
    global_step = tf.train.get_or_create_global_step()
    optim = tf.train.AdamOptimizer(args.meta_lr)
    train_op = get_train_op(optim,
                            net['centq'],
                            clip=[-3., 3.],
                            global_step=global_step)

    saver = tf.train.Saver(tf.trainable_variables())
    logfile = open(os.path.join(savedir, 'train.log'), 'w')

    argdict = vars(args)
    print(argdict)
    for k, v in argdict.items():
        logfile.write(k + ': ' + str(v) + '\n')
    logfile.write('\n')

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())

    train_logger = Accumulator('cents', 'centq', 'accs', 'accq')
    train_to_run = [
        train_op, net['cents'], net['centq'], net['accs'],
        tf.reduce_mean(net['accq'])
    ]

    test_logger = Accumulator('cents', 'centq', 'accs', 'accq')
    test_to_run = [
        tnet['cents'], tnet['centq'], tnet['accs'],
        tf.reduce_mean(tnet['accq'])
    ]

    start = time.time()
    for i in range(args.n_train_iters + 1):
        epi = model.episodes
        placeholders = [epi['xs'], epi['ys'], epi['xq'], epi['yq']]
        episode = data.generate_episode(args,
                                        training=True,
                                        n_episodes=args.metabatch)
        fdtr = dict(zip(placeholders, episode))
        train_logger.accum(sess.run(train_to_run, feed_dict=fdtr))

        if i % 5 == 0:
            line = 'Iter %d start, learning rate %f' % (i, args.meta_lr)
            print('\n' + line)
            logfile.write('\n' + line + '\n')
            train_logger.print_(header='train',
                                episode=i * args.metabatch,
                                time=time.time() - start,
                                logfile=logfile)
            train_logger.clear()

        if i % 100 == 0:
            for j in range(10):
                epi = model.episodes
                placeholders = [epi['xs'], epi['ys'], epi['xq'], epi['yq']]
                episode = data.generate_episode(args,
                                                training=False,
                                                n_episodes=args.metabatch)
                fdte = dict(zip(placeholders, episode))
                test_logger.accum(sess.run(test_to_run, feed_dict=fdte))

            test_logger.print_(header='test ',
                               episode=i * args.metabatch,
                               time=time.time() - start,
                               logfile=logfile)
            test_logger.clear()

        if i % args.save_freq == 0:
            saver.save(sess, os.path.join(savedir, 'model'))

    logfile.close()
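
The episode-based examples (this one, #1 above, and #10/#11 below) all share one feeding pattern: the data generator returns one array per placeholder, in the same order, and dict(zip(...)) pairs them up. A self-contained toy version of that contract with made-up shapes (2 episodes per meta-batch, 5-way 1-shot, 15 query points per class, 64-dim inputs) — the real shapes and generator live in each project's own code:

import numpy as np
import tensorflow as tf

xs = tf.placeholder(tf.float32, [2, 5 * 1, 64])   # support inputs
ys = tf.placeholder(tf.float32, [2, 5 * 1, 5])    # support labels (one-hot)
xq = tf.placeholder(tf.float32, [2, 5 * 15, 64])  # query inputs
yq = tf.placeholder(tf.float32, [2, 5 * 15, 5])   # query labels (one-hot)

placeholders = [xs, ys, xq, yq]
# stand-in for data.generate_episode(...): one numpy array per placeholder, same order
episode = [np.zeros(p.shape.as_list(), np.float32) for p in placeholders]
feed_dict = dict(zip(placeholders, episode))
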
Example #9
File: run.py Project: v3551G/amtfl
def run():
    loss = (net['cent_loss'] + net['recon_loss'] + net['l1_decay'] +
            net['l2_decay'])
    global_step = tf.train.get_or_create_global_step()
    lr = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
                                     [n_train_batches * args.n_epochs // 2],
                                     [1e-4, 1e-5])
    train_op = tf.train.AdamOptimizer(lr).minimize(loss,
                                                   global_step=global_step)

    saver = tf.train.Saver(net['weights'])
    logfile = open(os.path.join(savedir, 'train.log'), 'w', 1)  # line-buffered

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    train_logger = Accumulator('cent_loss', 'recon_loss', 'l1_decay',
                               'l2_decay', 'acc')
    train_to_run = [
        train_op, net['cent_loss'], net['recon_loss'], net['l1_decay'],
        net['l2_decay'], net['acc']
    ]
    val_logger = Accumulator('cent_loss', 'acc')
    val_to_run = [tnet['cent_loss'], tnet['acc']]

    for i in range(args.n_epochs):
        xytr = np.concatenate((xtr, ytr), axis=1)
        np.random.shuffle(xytr)
        xtr_, ytr_ = xytr[:, :784], xytr[:, 784:]

        if (i + 1) % 100 == 0:
            line = 'Epoch %d start, learning rate %f' % (i + 1, sess.run(lr))
            print(line)
            logfile.write(line + '\n')

        train_logger.clear()
        start = time.time()

        # Train
        for j in range(n_train_batches):
            bx = xtr_[j * batch_size:(j + 1) * batch_size, :]
            by = ytr_[j * batch_size:(j + 1) * batch_size, :]
            train_logger.accum(sess.run(train_to_run, {x: bx, y: by}))

        if (i + 1) % 100 == 0:
            train_logger.print_(header='train',
                                epoch=i + 1,
                                time=time.time() - start,
                                logfile=logfile)

            # Validation
            val_logger.clear()
            for j in range(n_val_batches):
                bx = xva[j * batch_size:(j + 1) * batch_size, :]
                by = yva[j * batch_size:(j + 1) * batch_size, :]
                val_logger.accum(sess.run(val_to_run, {x: bx, y: by}))
            val_logger.print_(header='val',
                              epoch=i + 1,
                              time=time.time() - start,
                              logfile=logfile)

            print()
            logfile.write('\n')

    # Test
    logger = Accumulator('acc')
    for j in range(n_test_batches):
        bx = xte[j * batch_size:(j + 1) * batch_size, :]
        by = yte[j * batch_size:(j + 1) * batch_size, :]
        logger.accum(sess.run(tnet['acc'], {x: bx, y: by}))
    logger.print_(header='test')

    logfile.close()
    saver.save(sess, os.path.join(savedir, 'model'))
Example #10
def meta_train():
    global_step = tf.train.get_or_create_global_step()
    optim = tf.train.AdamOptimizer(args.meta_lr)
    outer_loss = net_cent + net_kl
    meta_train_op = get_train_op(optim,
                                 outer_loss,
                                 clip=[-3., 3.],
                                 global_step=global_step)

    saver = tf.train.Saver(tf.trainable_variables(), max_to_keep=10)
    logfile = open(os.path.join(args.savedir, 'train.log'), 'w')

    # log the run arguments
    argdict = vars(args)
    print(argdict)
    for k, v in argdict.items():
        logfile.write(k + ': ' + str(v) + '\n')
    logfile.write('\n')

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())

    meta_train_logger = Accumulator('cent', 'kl', 'id_acc')
    meta_train_to_run = [meta_train_op, net_cent, net_kl, net_acc_mean]

    # multi-dataset meta-validation loggers
    if len(args.id_dataset) > 1:
        meta_val_logger = OrderedDict([
            (data_name, Accumulator('cent', 'kl', 'id_acc'))
            for data_name in args.id_dataset + ['val']
        ])
    # single dataset meta-validation logger
    else:
        meta_val_logger = {
            'val': Accumulator('cent', 'kl', 'id_acc', 'ood_acc')
        }
    id_run = [tnet_cent, tnet_kl, tnet_acc_mean]
    ood_run = tnet_acc_mean

    for i in range(1, args.n_train_iters + 1):
        # feed_dict
        data_name = args.id_dataset[i % len(args.id_dataset)]
        episode = id_data[data_name].generate_episode(
            args, split='mtr', n_episodes=args.metabatch)
        fd_mtr = dict(zip(placeholders, episode))
        meta_train_logger.accum(sess.run(meta_train_to_run, feed_dict=fd_mtr))

        if i % args.save_freq == 0:
            saver.save(sess, os.path.join(args.savedir, 'model-{}'.format(i)))

        if i % 100 == 0:
            line = 'Iter %d start, learning rate %f' % (i, args.meta_lr)
            print('\n' + line)
            logfile.write('\n' + line + '\n')
            meta_train_logger.print_(header='train', logfile=logfile)
            meta_train_logger.clear()

        # meta-validation
        if i % 10000 == 0:
            for j in range(3000 // args.metabatch):
                # validate on the in-distribution (ID) dataset(s)
                for data_name in args.id_dataset:
                    id_episode = id_data[data_name].generate_episode(
                        args, split='mval', n_episodes=args.metabatch)
                    id_cent, id_kl, id_acc = sess.run(
                        id_run, feed_dict=dict(zip(placeholders, id_episode)))

                    if len(args.id_dataset) > 1:
                        meta_val_logger[data_name].accum(
                            [id_cent, id_kl, id_acc])
                        meta_val_logger['val'].accum([id_cent, id_kl, id_acc])

                # validate on the out-of-distribution (OOD) dataset
                if args.ood_dataset[0] in ['svhn', 'cub']:
                    ood_episode = ood_data[
                        args.ood_dataset[0]].generate_episode(
                            args, split='mval', n_episodes=args.metabatch)
                    ood_acc = sess.run(ood_run,
                                       feed_dict=dict(
                                           zip(placeholders, ood_episode)))
                    meta_val_logger['val'].accum(
                        [id_cent, id_kl, id_acc, ood_acc])

            for data_name, logger in meta_val_logger.items():
                logger.print_(header='%s  ' % data_name, logfile=logfile)
                logger.clear()

        # print balancing variables (omega, gamma) with 10 random tasks
        if args.gamma_on or args.omega_on:
            ntask = 10
            if i % 1000 == 0:
                testlist = [('ID ', id_data[args.id_dataset[0]])]
                if len(args.id_dataset) == 1:
                    testlist.append(('OOD', ood_data[args.ood_dataset[0]]))
                for flag, data in testlist:
                    # episode
                    episode = data.generate_episode(args,
                                                    split='mval',
                                                    n_episodes=ntask)
                    omega, gamma = sess.run(
                        model.get_balancing_variables(ntask=ntask),
                        feed_dict=dict(zip(placeholders, episode)))

                    print_balancing_variables(args, flag, episode, ntask,
                                              omega, gamma, logfile)

    logfile.close()
Example #11
def meta_train():
    global_step = tf.train.get_or_create_global_step()

    lr = tf.convert_to_tensor(args.meta_lr)

    optim = tf.train.AdamOptimizer(lr)

    if args.maml:
        var_list = [v for v in net_weights if 'phi' not in v.name]
    else:
        var_list = net_weights

    meta_train_op = get_train_op(optim,
                                 net_cent,
                                 clip=[-3., 3.],
                                 global_step=global_step,
                                 var_list=var_list)

    saver = tf.train.Saver(tf.trainable_variables())
    logfile = open(os.path.join(args.savedir, 'meta_train.log'), 'w')

    argdict = vars(args)
    print(argdict)
    for k, v in argdict.items():
        logfile.write(k + ': ' + str(v) + '\n')
    logfile.write('\n')

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    sess.run(tf.global_variables_initializer())

    meta_train_logger = Accumulator('cent', 'acc')
    meta_train_to_run = [meta_train_op, net_cent, net_acc_mean]

    meta_test_logger = Accumulator('cent', 'acc')
    meta_test_to_run = [tnet_cent, tnet_acc_mean]

    start = time.time()
    for i in range(args.n_train_iters + 1):
        episode = data.generate_episode(args,
                                        meta_training=True,
                                        n_episodes=args.metabatch)
        fd_mtr = dict(zip(placeholders, episode))
        meta_train_logger.accum(sess.run(meta_train_to_run, feed_dict=fd_mtr))

        if i % 50 == 0:
            line = 'Iter %d start, learning rate %f' % (i, sess.run(lr))
            print('\n' + line)
            logfile.write('\n' + line + '\n')
            meta_train_logger.print_(header='meta_train',
                                     episode=i * args.metabatch,
                                     time=time.time() - start,
                                     logfile=logfile)
            meta_train_logger.clear()

        if i % 1000 == 0:
            for j in range(50):
                episode = data.generate_episode(args,
                                                meta_training=False,
                                                n_episodes=args.metabatch)
                fd_mte = dict(zip(placeholders, episode))
                meta_test_logger.accum(
                    sess.run(meta_test_to_run, feed_dict=fd_mte))

            meta_test_logger.print_(header='meta_test ',
                                    episode=i * args.metabatch,
                                    time=time.time() - start,
                                    logfile=logfile)
            meta_test_logger.clear()

        if i % args.save_freq == 0:
            saver.save(sess, os.path.join(args.savedir, 'model'))

    logfile.close()