Example #1
def test_oracle(sess, testset, tokenizer, oracle, cpu_pool, batch_size, logger):
    """Evaluate a trained oracle network on the test set and log its loss and error."""

    oracle_dataset = OracleDataset(testset)
    oracle_sources = oracle.get_sources(sess)
    oracle_evaluator = Evaluator(oracle_sources, oracle.scope_name, network=oracle, tokenizer=tokenizer)
    oracle_batchifier = OracleBatchifier(tokenizer, oracle_sources, status=('success',))
    oracle_iterator = Iterator(oracle_dataset, pool=cpu_pool,
                               batch_size=batch_size,
                               batchifier=oracle_batchifier)
    oracle_loss, oracle_error = oracle_evaluator.process(sess, oracle_iterator, [oracle.loss, oracle.error])

    logger.info("Oracle test loss: {}".format(oracle_loss))
    logger.info("Oracle test error: {}".format(oracle_error))
Example #2

# CPU pool for batch preparation, plus a cap on per-process GPU memory
cpu_pool = Pool(args.no_thread, maxtasksperchild=1000)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_ratio)

with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:
    saver = tf.train.Saver()
    saver.restore(sess, args.ckpt)  # load the trained weights

    features = dict()
    for one_set in args.set_type:

        print("Load dataset -> set: {}".format(one_set))
        dataset = OracleDataset.load(args.data_dir, one_set, image_loader=image_loader, crop_loader=crop_loader)
        batchifier = OracleBatchifier(tokenizer=None, sources=[source])
        iterator = Iterator(dataset,
                            batch_size=args.batch_size,
                            pool=cpu_pool,
                            batchifier=batchifier)

        for batch in tqdm(iterator):
            feat = sess.run(end_points[feature_name], feed_dict={images: numpy.array(batch[source])})
            for f, game in zip(feat, batch["raw"]):
                f = f.squeeze()

                if args.mode == "crop":
                    id =  game.object_id
                else:
                    id = game.picture.id
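
The excerpt is truncated before the extracted features are stored; a purely illustrative continuation (assuming import os and pickle, with a hypothetical output filename, none of it from the original) would key the features dict by the id just computed and dump it after all sets are processed:

                features[id] = f  # hypothetical: index each feature by its object/picture id

    # hypothetical: persist everything once all sets are processed
    with open(os.path.join(args.data_dir, "features.pkl"), "wb") as fh:
        pickle.dump(features, fh)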
Example #3
        sources = network.get_sources(sess)
        logger.info("Sources: " + ', '.join(sources))

        sess.run(tf.global_variables_initializer())
        if use_resnet:
            resnet_saver.restore(sess, os.path.join(args.data_dir, 'resnet_v1_{}.ckpt'.format(resnet_version)))

        start_epoch = load_checkpoint(sess, saver, args, save_path)

        best_val_err = 0  # note: despite the name, this variant seems to track validation accuracy (cf. Example #4, which tracks error from 1e5)
        best_train_err = None

        # create training tools
        evaluator = Evaluator(sources, network.scope_name)
        batchifier = OracleBatchifier(tokenizer, sources, status=config['status'])

        for t in range(start_epoch, no_epoch):
            logger.info('Epoch {}..'.format(t + 1))

            train_iterator = Iterator(trainset,
                                      batch_size=batch_size, pool=cpu_pool,
                                      batchifier=batchifier,
                                      shuffle=True)
            train_loss, train_accuracy = evaluator.process(sess, train_iterator, outputs=outputs + [optimizer])

            valid_iterator = Iterator(validset, pool=cpu_pool,
                                      batch_size=batch_size*2,
                                      batchifier=batchifier,
                                      shuffle=False)
            valid_loss, valid_accuracy = evaluator.process(sess, valid_iterator, outputs=outputs)
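
The excerpt stops after the validation pass; a typical epoch tail (illustrative only, not part of the original) would log both results and checkpoint whenever validation improves:

            logger.info("Training loss/accuracy: {} / {}".format(train_loss, train_accuracy))
            logger.info("Validation loss/accuracy: {} / {}".format(valid_loss, valid_accuracy))

            # hypothetical bookkeeping: keep the checkpoint with the best validation score
            if valid_accuracy > best_val_err:
                best_train_err = train_accuracy
                best_val_err = valid_accuracy
                saver.save(sess, save_path.format('params.ckpt'))  # the save-path convention is assumed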
Example #4
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                          allow_soft_placement=True)) as sess:

        sources = network.get_sources(sess)
        logger.info("Sources: " + ', '.join(sources))

        sess.run(tf.global_variables_initializer())
        start_epoch = load_checkpoint(sess, saver, args, save_path)

        best_val_err = 1e5
        best_train_err = None

        # create training tools
        evaluator = Evaluator(sources, network.scope_name)
        batchifier = OracleBatchifier(tokenizer,
                                      sources,
                                      status=config['status'],
                                      **config['model']['crop'])

        for t in range(start_epoch, no_epoch):
            logger.info('Epoch {}..'.format(t + 1))

            train_iterator = Iterator(trainset,
                                      batch_size=batch_size,
                                      pool=cpu_pool,
                                      batchifier=batchifier,
                                      shuffle=True)
            train_loss, train_error = evaluator.process(sess,
                                                        train_iterator,
                                                        outputs=outputs + [optimizer])
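
Example #4 breaks off after the training pass; by analogy with Example #3 above, the validation half of the epoch would follow the same pattern, using this variant's error-style naming:

            valid_iterator = Iterator(validset, pool=cpu_pool,
                                      batch_size=batch_size * 2,
                                      batchifier=batchifier,
                                      shuffle=False)
            valid_loss, valid_error = evaluator.process(sess, valid_iterator,
                                                        outputs=outputs)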