예제 #1
0
    # NOTE(review): fragment — the enclosing function and the definitions of
    # `lr_ph`, `loss`, `protonet`, `nway`, `kshot`, `qsize` and `args` are
    # outside this view.
    opt = tf.train.AdamOptimizer(lr_ph)
    # Make collected UPDATE_OPS (commonly batch-norm moving-average updates)
    # run before every optimizer step.
    update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_op):
        train_op = opt.minimize(loss)
    saver = tf.train.Saver()

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    # Optionally warm-start from a per-dataset checkpoint.
    if args.pretrained:
        loc = os.path.join(args.model_dir, args.model_name,
                           args.dataset_name + '.ckpt')
        saver.restore(sess, loc)

    config = load_config(args.config)
    train_gen = EpisodeGenerator(args.dataset_dir, 'train', config)
    test_gen = EpisodeGenerator(args.dataset_dir, 'test', config)
    if args.train:
        # Convert epochs to an iteration count: one episode consumes
        # nway * qsize query samples of the dataset.
        max_iter = train_gen.dataset_size[args.dataset_name] * args.max_epoch \
                // (nway * qsize)
        # Logging / checkpoint cadence expressed in iterations.
        show_step = args.show_epoch * max_iter // args.max_epoch
        save_step = args.save_epoch * max_iter // args.max_epoch
        avger = np.zeros([4])  # running metric accumulator — presumably [loss, acc, data-time, step-time]; TODO confirm
        for i in range(1, max_iter + 1):
            stt = time.time()
            cur_epoch = i * (
                nway * qsize) // train_gen.dataset_size[args.dataset_name]
            # Step decay: drop the learning rate x0.1 after 70% of training.
            lr = args.lr if i < 0.7 * max_iter else args.lr * .1
            sx, sy, qx, qy = train_gen.get_episode(nway, kshot, qsize)
            # NOTE(review): feed dict is cut off here — the remainder of the
            # training-loop body lies outside this view.
            fd = {\
                protonet.inputs['sx']: sx,
예제 #2
0
            # NOTE(review): fragment — starts mid-block; `Lrn2evl`, `embeds`,
            # `preds`, the placeholders and `TRAIN_DATASET` are defined
            # outside this view.
            # Per-model ensemble weights from the learned evaluator
            # (non-transductive mode), reshaped so they broadcast against the
            # stacked per-model predictions below.
            pemb = Lrn2evl(args.ens_name, embeds, nway, transduction=False) 
            pemb = tf.reshape(pemb, [len(TRAIN_DATASET), nway*qsize, 1])
        
        # Weighted sum over the model axis -> ensemble prediction.
        ens_pred = tf.reduce_sum(pemb * preds, axis=0) 

    # Episode accuracy of the ensemble prediction vs. the query labels.
    acc = tf_acc(ens_pred, qy_ph)
    saver = tf.train.Saver()
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Restore the trained ensemble checkpoint.
    load_loc = os.path.join(args.model_dir, 
            args.project_name, 
            args.ens_name + '.ckpt')
    saver.restore(sess, load_loc)
    
    # Evaluate on every test dataset. Result tables have one row per source
    # model plus two extra rows — presumably ensemble variants; TODO confirm
    # what the +2 rows hold.
    ep_gen = EpisodeGenerator(args.dataset_dir, 'test', config)
    target_dataset = ep_gen.dataset_list
    #target_dataset = ['miniImagenet']
    means = np.zeros([len(TRAIN_DATASET)+2, len(target_dataset)])
    stds  = np.zeros([len(TRAIN_DATASET)+2, len(target_dataset)])
    for tdi, dset in enumerate(target_dataset):
        print ('==========TARGET : {}=========='.format(dset)) 
        temp_results = [[] for _ in range(len(TRAIN_DATASET)+2)]
        for i in range(args.max_iter):
            # Sample one evaluation episode from the current target dataset.
            sx, sy, qx, qy = ep_gen.get_episode(nway, kshot, qsize,
                    dataset_name=dset)
            fd = {sx_ph: sx, qx_ph: qx, qy_ph: qy}
            # Per-model predictions, ensemble accuracy, and ensemble weights.
            ps, p_acc, p_W = sess.run([preds, acc, pemb], fd)
            prediction = np.argmax(ps, axis=2) # (5, 150)

            # NOTE(review): fragment cut off here — the per-model scoring
            # loop continues outside this view.
            for pn, p in enumerate(ps): 
예제 #3
0
                       # NOTE(review): fragment — this is the tail of a
                       # model-construction call whose head and assignment
                       # target are outside this view.
                       qsize,
                       mbsize=1,
                       reuse=True if args.train else False,
                       inner_loop_iter=10,
                       isTr=False)

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    saver = tf.train.Saver()

    # Resume from an explicit checkpoint path when one is given.
    if args.resume is not None:
        print('restore from at : {}'.format(args.resume))
        saver.restore(sess, args.resume)

    ep_train = EpisodeGenerator(args.dataset_dir, 'train', config)
    ep_test = EpisodeGenerator(args.dataset_dir, 'test', config)

    # Debug dump of every trainable variable in the graph.
    vlist = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
    for i, v in enumerate(vlist):
        print(i, v)

    if args.train:
        avger = np.zeros([4])  # running metric accumulator — TODO confirm slots
        for i in range(1, args.max_iter + 1):
            stt = time.time()
            # Step decay: drop the meta learning rate x0.1 after 70% of training.
            lr = args.meta_lr if i < 0.7 * args.max_iter else args.meta_lr * .1

            # NOTE(review): episode-batch assembly is cut off here — the rest
            # of the loop body lies outside this view.
            sx = []
            sy = []
            qx = []
예제 #4
0
    # NOTE(review): fragment — starts mid-suite; `pred`, `loss`, the
    # placeholders, `model`, `ep_queue` and `ep_generator` are defined
    # outside this view.
    acc = tf_acc(pred, qy_ph)

opt = tf.train.AdamOptimizer(lr_ph)
# Make collected UPDATE_OPS (commonly batch-norm moving-average updates)
# run before every optimizer step.
update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_op):
    train_op = opt.minimize(loss)
saver = tf.train.Saver()

# Cap this process's GPU memory so the device can be shared.
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = args.gpufraction
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
if args.pretrained:
    model.restore(saver, sess)

# Background daemon thread keeps ep_queue filled with training episodes so
# the main loop never blocks on data loading.
ep_gen = EpisodeGenerator(args.dataset_dir, 'train')
worker = Thread(target=ep_generator, args=(ep_queue, nway, kshot, qsize))
worker.setDaemon(True)
worker.start()

ep_gen_test = EpisodeGenerator(args.dataset_dir, 'test')

# Combined size of all training datasets; epochs -> iteration count
# (one iteration consumes nway * qsize samples).
datasets_size = reduce((lambda x, y: x + y),
                       [val for key, val in TRAIN_DATASETS_SIZE.items()])
max_iter = datasets_size * args.max_epoch // (nway * qsize)
print(max_iter)
# Logging / checkpoint cadence expressed in iterations.
show_step = args.show_epoch * max_iter // args.max_epoch
save_step = args.save_epoch * max_iter // args.max_epoch
avger = np.zeros([4])

# NOTE(review): the training-loop body lies outside this view.
for i in range(1, max_iter + 1):
예제 #5
0
# NOTE(review): fragment — `config`, `inputa/inputb/labela/labelb`, `meta_lr`,
# `ep_queue`, `ep_generator` and the size constants are defined outside this
# view.
sess = tf.Session()
with tf.variable_scope(args.model_name):
    model = MAML(config)
    loss, acc, output = model.train(inputa, inputb, labela, labelb)

opt = tf.train.AdamOptimizer(meta_lr)
#update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
#with tf.control_dependencies(update_op):
# Clip every gradient elementwise to [-10, 10] before applying, instead of
# using a plain minimize() call.
gvs = opt.compute_gradients(loss)
gvs = [(tf.clip_by_value(grad, -10, 10), var) for grad, var in gvs]
train_op = opt.apply_gradients(gvs)
saver = tf.train.Saver()

sess.run(tf.global_variables_initializer())

# Background daemon thread keeps ep_queue filled with meta-training episodes.
ep_gen = EpisodeGenerator(args.dataset_dir, 'train')
worker = Thread(target=ep_generator,
                args=(ep_queue, meta_batch_size, nway, kshot, qsize))
worker.setDaemon(True)
worker.start()

# Combined size of all training datasets; epochs -> iteration count
# (one iteration consumes nway * qsize samples).
datasets_size = reduce((lambda x, y: x + y),
                       [TRAIN_DATASETS_SIZE[key] for key in TRAIN_DATASETS])
max_iter = datasets_size * args.max_epoch // (nway * qsize)

# Logging / checkpoint cadence expressed in iterations.
show_step = args.show_epoch * max_iter // args.max_epoch
save_step = args.save_epoch * max_iter // args.max_epoch
avger = np.zeros([4])

# NOTE(review): the meta-training loop body lies outside this view.
for i in range(1, max_iter + 1):
    stt = time.time()
예제 #6
0
    # NOTE(review): fragment — `loaders`, the `loss`/`acc`/`fe`/`train_op`
    # tensors and the feed placeholders are defined outside this view.
    saver = tf.train.Saver()
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    if not args.from_ckpt:
        # Fresh start: restore each source model from its own per-dataset
        # checkpoint via its dedicated loader.
        for tn, tset in enumerate(TRAIN_DATASET):
            saved_loc = os.path.join(args.model_dir, args.project_name,
                                     tset + '.ckpt')
            loaders[tn].restore(sess, saved_loc)
            print('model_{} restored from {}'.format(tn, saved_loc))
    else:
        # Resume: restore the whole graph from the ensemble checkpoint.
        out_loc = os.path.join(args.model_dir, args.project_name,
                               args.ens_name + '.ckpt')
        saver.restore(sess, out_loc)

    ep_gen = EpisodeGenerator(args.dataset_dir, 'val', config)
    ep_test = EpisodeGenerator(args.dataset_dir, 'test', config)
    avger = np.zeros([4])
    np.set_printoptions(precision=3, suppress=False)
    for i in range(1, args.max_iter + 1):
        stt = time.time()
        # Step decay: drop the learning rate x0.1 after 70% of training.
        lr = args.lr if i < 0.7 * args.max_iter else args.lr * .1
        sx, sy, qx, qy = ep_gen.get_episode(nway,
                                            kshot,
                                            qsize,
                                            printname=False)
        bedt = time.time() - stt  # time spent sampling the episode
        fd = {sx_ph: sx, qx_ph: qx, qy_ph: qy, lr_ph: lr}

        loss_val, acc_val, W_val, _ = sess.run([loss, acc, fe, train_op], fd)
        # Accumulate [loss, acc, data-time, total-time] for averaged logging.
        avger += [loss_val, acc_val, bedt, time.time() - stt]