Code example #1
def record():
    # Restore the trained model, evaluate on the MNIST test set,
    # and append the metrics to a CSV record file.
    sess = tf.Session()
    saver = tf.train.Saver(tnet['weights']+tnet['qpi_vars'])
    saver.restore(sess, os.path.join(savedir, 'model'))
    logger = Accumulator('cent', 'acc')
    to_run = [tnet['cent'], tnet['acc']]
    for j in range(n_test_batches):
        bx, by = mnist.test.next_batch(batch_size)
        logger.accum(sess.run(to_run, {x:bx, y:by}))
    np_n_active = sess.run(tnet['n_active'])

    if not os.path.isdir('../../records'):
        os.makedirs('../../records')
    csvfn = os.path.join('../../records',
            'sbpdropout_lenet_dense.csv' if args.csvfn is None else args.csvfn)

    # csvfn is always set above, so no None check is needed;
    # append if the file exists, otherwise create it with a header row
    flag = 'a' if os.path.exists(csvfn) else 'w'
    with open(csvfn, flag) as f:
        writer = csv.writer(f)
        if flag == 'w':
            writer.writerow(['savedir', 'cent', 'acc', 'n_active'])
        line = [savedir]
        line.append('%.4f' % logger.get('cent'))
        line.append('%.4f' % logger.get('acc'))
        line.append('-'.join(str(x) for x in np_n_active))
        writer.writerow(line)
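
All of these snippets rely on an Accumulator logging helper whose definition is not shown. A minimal sketch of the interface they assume (accum / get / clear / print_, averaging named metrics over batches) could look like the following; this is an assumption for illustration, not the original implementation:

class Accumulator(object):
    # Averages named metrics over the batches passed to accum().
    def __init__(self, *names):
        self.names = names
        self.clear()

    def clear(self):
        self.sums = {name: 0.0 for name in self.names}
        self.count = 0

    def accum(self, values):
        # values align with the trailing names; extra leading entries
        # (e.g. the None returned for a train_op) are ignored
        for name, v in zip(self.names, values[-len(self.names):]):
            self.sums[name] += float(v)
        self.count += 1

    def get(self, name):
        return self.sums[name] / max(self.count, 1)

    def print_(self, header='', epoch=None, time=None, logfile=None):
        parts = [header] if header else []
        if epoch is not None:
            parts.append('epoch %d' % epoch)
        parts += ['%s %.4f' % (n, self.get(n)) for n in self.names]
        if time is not None:
            parts.append('(%.2fs)' % time)
        line = ', '.join(parts)
        print(line)
        if logfile is not None:
            logfile.write(line + '\n')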
Code example #2
def train():
    import random

    # total loss: negative ELBO plus the prototype loss weighted by args.lamb
    loss = -net['elbo'] + net['proto_loss'] * args.lamb

    global_step = tf.train.get_or_create_global_step()
    lr_step = args.n_trn_epsd // 3  # integer boundary; '/' would give a float in Python 3
    lr = tf.train.piecewise_constant(tf.cast(global_step,
                                             tf.int32), [lr_step, lr_step * 2],
                                     [1e-3, 1e-3 * 0.5, 1e-3 * 0.5 * 0.5])
    train_op = tf.train.AdamOptimizer(lr).minimize(loss,
                                                   global_step=global_step)

    saver = tf.train.Saver(net['weights'])
    logfile = open(os.path.join(savedir, 'train.log'), 'w', buffering=1)  # line-buffered; buffering=0 is invalid for text mode in Python 3

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # logger and ops to fetch at each training step
    train_logger = Accumulator('elbo', 'proto_loss')
    train_to_run = [train_op, net['elbo'], net['proto_loss']]
    for i in range(args.n_trn_epsd):
        # build the feed_dict for a randomly sampled training episode
        cidx = random.sample(range(len(nxtr)), args.way)
        fdtr = data_queue(args, xtr, nxtr, cidx)

        # train
        train_logger.clear()
        start = time.time()
        train_logger.accum(sess.run(train_to_run, feed_dict=fdtr))

        if i % 100 == 0:
            train_logger.print_(header='train',
                                epoch=i + 1,
                                time=time.time() - start,
                                logfile=logfile)

            line = 'Epoch %d start, learning rate %f' % (i + 1, sess.run(lr))
            print('\n' + line)
            logfile.write('\n' + line + '\n')

            accu = sess.run(net['acc'], feed_dict=fdtr)
            print("train accu ", np.mean(accu))  # accuracy on the current training episode

    logfile.close()
    saver.save(sess, os.path.join(savedir, 'model'))
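
The schedule above keeps the base rate 1e-3 for the first third of the episodes, then halves it at each of the remaining two thirds. As a standalone illustration of how tf.train.piecewise_constant maps a step to a rate (the numbers here are hypothetical, assuming n_trn_epsd = 3000):

# Sketch of the piecewise schedule logic, TF1-style.
import tensorflow as tf

step = tf.placeholder(tf.int32, [])
lr = tf.train.piecewise_constant(step, [1000, 2000],
                                 [1e-3, 1e-3 * 0.5, 1e-3 * 0.5 * 0.5])
with tf.Session() as sess:
    for s in [0, 1500, 2500]:
        print(s, sess.run(lr, {step: s}))   # 0.001, 0.0005, 0.00025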
Code example #3
def test():
    # evaluate the restored weights on the MNIST test set
    sess = tf.Session()
    saver = tf.train.Saver(tnet['weights'])
    saver.restore(sess, os.path.join(savedir, 'model'))
    logger = Accumulator('cent', 'acc')
    to_run = [tnet['cent'], tnet['acc']]
    for j in range(n_test_batches):
        bx, by = mnist.test.next_batch(batch_size)
        logger.accum(sess.run(to_run, {x: bx, y: by}))
    logger.print_(header='test')
Code example #4
def test():
    # as example #3, but also restore the q(pi) variables and report KL / active units
    sess = tf.Session()
    saver = tf.train.Saver(tnet['weights']+tnet['qpi_vars'])
    saver.restore(sess, os.path.join(savedir, 'model'))
    logger = Accumulator('cent', 'acc')
    to_run = [tnet['cent'], tnet['acc']]
    for j in range(n_test_batches):
        bx, by = mnist.test.next_batch(batch_size)
        logger.accum(sess.run(to_run, {x:bx, y:by}))
    logger.print_(header='test')
    line = 'kl: ' + str(sess.run(tnet['kl'])) + '\n'
    line += 'n_active: ' + str(sess.run(tnet['n_active'])) + '\n'
    print(line)
Code example #5
def test():
    import random

    sess = tf.Session()
    saver = tf.train.Saver(tnet['weights'])
    saver.restore(sess, os.path.join(savedir, 'model'))

    test_logger = Accumulator('elbo', 'proto_loss', 'acc')
    test_to_run = [tnet['elbo'], tnet['proto_loss'], tnet['acc']]

    test_logger.clear()
    for i in range(args.n_tst_epsd):
        # build the feed_dict for a randomly sampled test episode
        cidx = random.sample(range(len(nxte)), args.way)
        fdte = data_queue(args, xte, nxte, cidx)

        # test
        test_logger.accum(sess.run(test_to_run, feed_dict=fdte))
        if (i + 1) % 100 == 0:
            test_logger.print_(header='test', epoch=i + 1)
Code example #6
def test():
    sess = tf.Session()
    saver = tf.train.Saver(tnet['weights']+tnet['qpi_vars']+tnet['pzx_vars'])
    saver.restore(sess, os.path.join(savedir, 'model'))
    logger = Accumulator('cent', 'acc')
    to_run = [tnet['cent'], tnet['acc']] + tnet['n_active']
    np_n_active = [0] * n_drop  # running sums of active units, one per dropout layer
    for j in range(n_test_batches):
        bx, by = mnist.test.next_batch(batch_size)
        res = sess.run(to_run, {x:bx, y:by})
        logger.accum(res[:-n_drop])
        np_n_active = [a + b for a, b in zip(np_n_active, res[-n_drop:])]
    np_n_active = [int(a/n_test_batches) for a in np_n_active]
    logger.print_(header='test')
    line = 'kl: ' + str(sess.run(tnet['kl'])) + '\n'
    line += 'n_active: ' + str(np_n_active) + '\n'
    print(line)
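
The res[:-n_drop] / res[-n_drop:] split above separates one sess.run fetch list back into logger metrics and per-layer active-unit counts; schematically, with made-up values:

# Schematic of the fetch-list split, assuming n_drop = 2.
res = [0.35, 0.91, 120, 340]    # [cent, acc, n_active_layer1, n_active_layer2]
metrics, counts = res[:-2], res[-2:]
assert metrics == [0.35, 0.91] and counts == [120, 340]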
Code example #7
args, _ = parser.parse_known_args()

os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
torch.backends.cudnn.benchmark = True
if args.seed is not None:
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

# load models from config file
config = os.path.splitext(os.path.basename(args.config))[0]
net, train_loader, test_loader, optimizer, scheduler = \
        imp.load_source(config, args.config).load(args)
net.cuda()
cent_fn = nn.CrossEntropyLoss().cuda()
save_dir = os.path.join('../results', config)
accm = Accumulator('cent', 'acc')


def train():
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)

    with open(os.path.join(save_dir, 'args.txt'), 'w') as f:
        for v in vars(args):
            f.write('{}: {}\n'.format(v, getattr(args, v)))

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(config)
    logger.addHandler(
        logging.FileHandler(os.path.join(save_dir, 'train.log'), mode='w'))
    logger.info(str(args) + '\n')
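
The imp.load_source call above uses the long-deprecated imp module, which was removed in Python 3.12. A drop-in sketch using importlib, assuming the config file exposes the same load(args) entry point:

# Sketch: load a config module from a file path with importlib instead of imp.
import importlib.util

def load_source(module_name, path):
    spec = importlib.util.spec_from_file_location(module_name, path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module

# usage mirrors the original:
# net, train_loader, test_loader, optimizer, scheduler = \
#         load_source(config, args.config).load(args)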
Code example #8
def train():
    # loss: cross-entropy + KL divergence scaled by dataset size N + weight decay
    loss = net['cent'] + tf.add_n(net['kl'])/float(N) + net['wd']
    global_step = tf.train.get_or_create_global_step()
    bdr = [int(n_train_batches*(args.n_epochs-1)*r) for r in [0.5, 0.75]]
    vals = [1e-2, 1e-3, 1e-4]
    lr = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), bdr, vals)
    # two optimizers: full lr for the dropout posterior q(pi), 0.1*lr for the weights
    train_op1 = tf.train.AdamOptimizer(lr).minimize(loss,
            var_list=net['qpi_vars'], global_step=global_step)
    train_op2 = tf.train.AdamOptimizer(0.1*lr).minimize(loss,
            var_list=net['weights'])
    train_op = tf.group(train_op1, train_op2)

    pretrain_saver = tf.train.Saver(net['weights'])
    saver = tf.train.Saver(net['weights']+net['qpi_vars'])
    logfile = open(os.path.join(savedir, 'train.log'), 'w', buffering=1)  # line-buffered

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    pretrain_saver.restore(sess, os.path.join(pretraindir, 'model'))

    train_logger = Accumulator('cent', 'acc')
    train_to_run = [train_op, net['cent'], net['acc']]
    test_logger = Accumulator('cent', 'acc')
    test_to_run = [tnet['cent'], tnet['acc']]
    for i in range(args.n_epochs):
        line = 'Epoch %d start, learning rate %f' % (i+1, sess.run(lr))
        print(line)
        logfile.write(line + '\n')
        train_logger.clear()
        start = time.time()
        for j in range(n_train_batches):
            bx, by = mnist.train.next_batch(batch_size)
            train_logger.accum(sess.run(train_to_run, {x:bx, y:by}))
        train_logger.print_(header='train', epoch=i+1,
                time=time.time()-start, logfile=logfile)

        test_logger.clear()
        for j in range(n_test_batches):
            bx, by = mnist.test.next_batch(batch_size)
            test_logger.accum(sess.run(test_to_run, {x:bx, y:by}))
        test_logger.print_(header='test', epoch=i+1,
                time=time.time()-start, logfile=logfile)
        line = 'kl: ' + str(sess.run(tnet['kl'])) + '\n'
        line += 'n_active: ' + str(sess.run(tnet['n_active'])) + '\n'
        print(line)
        logfile.write(line+'\n')
        if (i+1)%args.save_freq == 0:
            saver.save(sess, os.path.join(savedir, 'model'))

    logfile.close()
    saver.save(sess, os.path.join(savedir, 'model'))
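
The core pattern in this example is training two variable groups at different learning rates by grouping two optimizer ops into one. Stripped of the surrounding model (all variables here are hypothetical stand-ins), the pattern is:

# Sketch of the two-rate optimizer pattern (TF1-style).
import tensorflow as tf

w_fast = tf.Variable(1.0)   # stands in for net['qpi_vars']
w_slow = tf.Variable(1.0)   # stands in for net['weights']
loss = tf.square(w_fast) + tf.square(w_slow)

step = tf.train.get_or_create_global_step()
op_fast = tf.train.AdamOptimizer(1e-2).minimize(
    loss, var_list=[w_fast], global_step=step)   # only this op advances the step
op_slow = tf.train.AdamOptimizer(1e-3).minimize(loss, var_list=[w_slow])
train_op = tf.group(op_fast, op_slow)            # both applied per sess.run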
Code example #9
def train():
    loss = net['cent'] + tf.add_n(net['kl']) / float(N) + net['wd']
    global_step = tf.train.get_or_create_global_step()
    bdr = [int(n_train_batches * (args.n_epochs - 1) * r) for r in [0.5, 0.75]]
    vals = [1e-2, 1e-3, 1e-4]
    lr = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), bdr, vals)
    # run pending update ops (e.g. batch-norm moving averages) with each training step
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op1 = tf.train.AdamOptimizer(lr).minimize(
            loss, var_list=net['pzx_vars'], global_step=global_step)
        train_op2 = tf.train.AdamOptimizer(0.1 * lr).minimize(
            loss, var_list=net['weights'])
    train_op = tf.group(train_op1, train_op2)

    pretrain_saver = tf.train.Saver(net['weights'] + net['qpi_vars'])
    saver = tf.train.Saver(net['weights'] + net['qpi_vars'] + net['pzx_vars'])
    logfile = open(os.path.join(savedir, 'train.log'), 'w', buffering=1)  # line-buffered

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    pretrain_saver.restore(sess, os.path.join(pretraindir, 'model'))

    train_logger = Accumulator('cent', 'acc')
    train_to_run = [train_op, net['cent'], net['acc']]
    test_logger = Accumulator('cent', 'acc')
    test_to_run = [tnet['cent'], tnet['acc']] + tnet['n_active']
    for i in range(args.n_epochs):
        line = 'Epoch %d start, learning rate %f' % (i + 1, sess.run(lr))
        print(line)
        logfile.write(line + '\n')
        train_logger.clear()
        start = time.time()
        for j in range(n_train_batches):
            bx, by = mnist.train.next_batch(batch_size)
            train_logger.accum(sess.run(train_to_run, {x: bx, y: by}))
        train_logger.print_(header='train',
                            epoch=i + 1,
                            time=time.time() - start,
                            logfile=logfile)

        test_logger.clear()
        np_n_active = [0] * n_drop
        for j in range(n_test_batches):
            bx, by = mnist.test.next_batch(batch_size)
            res = sess.run(test_to_run, {x: bx, y: by})
            test_logger.accum(res[:-n_drop])
            np_n_active = [a + b for a, b in zip(np_n_active, res[-n_drop:])]
        test_logger.print_(header='test',
                           epoch=i + 1,
                           time=time.time() - start,
                           logfile=logfile)
        np_n_active = [int(a / n_test_batches) for a in np_n_active]
        line = 'kl: ' + str(sess.run(tnet['kl'])) + '\n'
        line += 'n_active: ' + str(np_n_active) + '\n'
        print(line)
        logfile.write(line + '\n')

        if (i + 1) % args.save_freq == 0:
            saver.save(sess, os.path.join(savedir, 'model'))

        if (i + 1) % args.vis_freq == 0:
            fig = _visualize(sess)
            fig.savefig(os.path.join(figdir, 'epoch%d.png' % (i + 1)), dpi=200)

    saver.save(sess, os.path.join(savedir, 'model'))
    logfile.close()
Code example #10
def train():
    loss = net['cent'] + net['wd']
    global_step = tf.train.get_or_create_global_step()
    lr = tf.train.piecewise_constant(tf.cast(global_step, tf.int32),
                                     [n_train_batches * args.n_epochs // 2],
                                     [1e-4, 1e-5])
    train_op = tf.train.AdamOptimizer(lr).minimize(loss,
                                                   global_step=global_step)

    saver = tf.train.Saver(net['weights'])
    logfile = open(os.path.join(savedir, 'train.log'), 'w', buffering=1)  # line-buffered

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    train_logger = Accumulator('cent', 'acc')
    train_to_run = [train_op, net['cent'], net['acc']]
    test_logger = Accumulator('cent', 'acc')
    test_to_run = [tnet['cent'], tnet['acc']]
    for i in range(args.n_epochs):
        line = 'Epoch %d start, learning rate %f' % (i + 1, sess.run(lr))
        print(line)
        logfile.write(line + '\n')
        train_logger.clear()
        start = time.time()
        for j in range(n_train_batches):
            bx, by = mnist.train.next_batch(batch_size)
            train_logger.accum(sess.run(train_to_run, {x: bx, y: by}))
        train_logger.print_(header='train',
                            epoch=i + 1,
                            time=time.time() - start,
                            logfile=logfile)

        test_logger.clear()
        for j in range(n_test_batches):
            bx, by = mnist.test.next_batch(batch_size)
            test_logger.accum(sess.run(test_to_run, {x: bx, y: by}))
        test_logger.print_(header='test',
                           epoch=i + 1,
                           time=time.time() - start,
                           logfile=logfile)
        print()
        logfile.write('\n')
        if (i + 1) % args.save_freq == 0:
            saver.save(sess, os.path.join(savedir, 'model'))

    logfile.close()
    saver.save(sess, os.path.join(savedir, 'model'))