Example #1
    config = load_config(args.config)
    lr_ph = tf.placeholder(tf.float32)
    y_ph = tf.placeholder(tf.float32, [None, nway])
    # 64: train_n_classes
    trainnet = TransferNet(args.model_name, 64, isTr=True)
    testnet = TransferNet(args.model_name, 64, isTr=False, reuse=True)

    # Training head: shared embedding -> task-specific dense layer -> softmax.
    feat_emb = trainnet.outputs['embedding']
    ff = trainnet.dense(feat_emb, nway, name='specific_dense')
    pred = tf.nn.softmax(ff)
    loss = cross_entropy(pred, y_ph)

    # Test head reuses the same dense weights (reuse=True) in inference mode.
    ff = testnet.outputs['embedding']
    ff = testnet.dense(ff, nway, name='specific_dense', reuse=True)
    pred = tf.nn.softmax(ff)
    acc = tf_acc(pred, y_ph)

    # Collect only the backbone variables (the scope argument is a regex prefix).
    rvlist = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                               args.model_name + '/*')

    opt = tf.train.AdamOptimizer(lr_ph)
    # Couple batch-norm moving-average updates to every optimizer step.
    update_op = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_op):
        train_op = opt.minimize(loss)
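
    # A hypothetical single training step (illustrative only; the image input
    # and its placeholder live inside TransferNet and are not shown in this
    # excerpt, hence the "..." left in the feed_dict):
    #   sess.run(train_op, feed_dict={lr_ph: 1e-3, y_ph: labels, ...})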

    # Placeholders and assign ops so every variable can be (re)initialized
    # from numpy arrays via feed_dict, without adding new ops to the graph later.
    var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    var_holder = [tf.placeholder(tf.float32, v.shape) for v in var_list]
    init_ops = [var.assign(val) for var, val in zip(var_list, var_holder)]

    # Checkpoint only the shared backbone variables collected above.
    saver = tf.train.Saver(rvlist)
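
    # A minimal sketch (not part of the original snippet) showing how the
    # var_holder/init_ops pairs above are typically driven: snapshot all
    # variables as numpy arrays, then push them back through feed_dict.
    def snapshot_weights(sess):
        # Returns a list of numpy arrays, one per variable in var_list.
        return sess.run(var_list)

    def restore_weights(sess, weights):
        # `weights` must match var_list order, e.g. a prior snapshot.
        sess.run(init_ops, feed_dict=dict(zip(var_holder, weights)))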
Example #2
        embeds = [models[tn].outputs['embedding']
                  for tn in range(len(TRAIN_DATASET))]
        embeds = tf.convert_to_tensor(embeds)
        preds = [models[tn].outputs['pred']
                 for tn in range(len(TRAIN_DATASET))]
        
        if args.transductive:
            # One scalar weight per source model: (M,1) -> (M,1,1)
            pemb = Lrn2evl(args.ens_name, embeds, nway)
            pemb = tf.reshape(pemb, [-1, 1, 1])
        else:
            # One weight per model and per query: (M*N*Q,1) -> (M,N*Q,1)
            pemb = Lrn2evl(args.ens_name, embeds, nway, transduction=False)
            pemb = tf.reshape(pemb, [len(TRAIN_DATASET), nway * qsize, 1])

        # Weighted sum over the model axis; `preds` (a Python list) is
        # implicitly converted to a tensor and broadcast against the weights.
        ens_pred = tf.reduce_sum(pemb * preds, axis=0)
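
        # Shape sanity check (pure numpy, illustrative only): with M=3 models,
        # N*Q=10 queries and 5 classes, (M,1,1) weights broadcast against
        # (M,10,5) predictions and reduce to a (10,5) ensemble prediction:
        #   w = np.random.rand(3, 1, 1)
        #   p = np.random.rand(3, 10, 5)
        #   assert (w * p).sum(axis=0).shape == (10, 5)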

    acc = tf_acc(ens_pred, qy_ph)
    saver = tf.train.Saver()
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    load_loc = os.path.join(args.model_dir,
                            args.project_name,
                            args.ens_name + '.ckpt')
    saver.restore(sess, load_loc)
    
    ep_gen = EpisodeGenerator(args.dataset_dir, 'test', config)
    target_dataset = ep_gen.dataset_list
    #target_dataset = ['miniImagenet']
    # Accuracy mean/std for each of the len(TRAIN_DATASET)+2 evaluated models
    # (rows) on each target dataset (columns).
    means = np.zeros([len(TRAIN_DATASET)+2, len(target_dataset)])
    stds  = np.zeros([len(TRAIN_DATASET)+2, len(target_dataset)])
    for tdi, dset in enumerate(target_dataset):
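        # A hypothetical loop body for orientation (the original continues
        # past this excerpt; `kshot`, `n_episodes`, and `get_episode` are
        # assumed names, not the confirmed API):
        #   accs = []
        #   for _ in range(n_episodes):
        #       sx, sy, qx, qy = ep_gen.get_episode(nway, kshot, qsize, dset)
        #       accs.append(sess.run(acc, feed_dict={...}))
        #   means[-1, tdi] = np.mean(accs)
        #   stds[-1, tdi]  = np.std(accs)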
Example #3
        if args.transductive:
            fe = Lrn2evl(args.ens_name, embeds, nway)  # (M,1)
            fe = tf.reshape(fe, [-1, 1, 1])  # (M,1,1)
        else:
            fe = Lrn2evl(args.ens_name, embeds, nway,
                         transduction=False)  # (MNQ,1)
            fe = tf.reshape(fe,
                            [len(TRAIN_DATASET), nway * qsize, 1])  # (M,NQ,1)

        pe = tf.reduce_sum(fe * preds, axis=0)
        pe = tf.nn.softmax(pe)
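
        # In symbols (illustrative): pe = softmax( sum_m fe[m] * preds[m] ),
        # i.e. the learned Lrn2evl weights gate each source model's prediction
        # before the final softmax over classes.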

        # Only the ensemble (Lrn2evl) variables are trainable here; the
        # per-dataset base models stay frozen.
        train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                       scope=args.project_name + '/' +
                                       args.ens_name + '/*')

        acc = tf_acc(pe, qy_ph)
        loss = cross_entropy(pe, qy_ph)
        opt = tf.train.AdamOptimizer(lr_ph)
        # Restrict optimization to the ensemble variables collected above.
        train_op = opt.minimize(loss, var_list=train_vars)

    saver = tf.train.Saver()
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    if not args.from_ckpt:
        # Restore each per-dataset base model from its own checkpoint.
        for tn, tset in enumerate(TRAIN_DATASET):
            saved_loc = os.path.join(args.model_dir, args.project_name,
                                     tset + '.ckpt')
            loaders[tn].restore(sess, saved_loc)
            print('model_{} restored from {}'.format(tn, saved_loc))
    else:
        out_loc = os.path.join(args.model_dir, args.project_name,