Example #1
def test_qgen(sess, testset, tokenizer, qgen, cpu_pool, batch_size, logger):
    qgen_sources = qgen.get_sources(sess)
    qgen_evaluator = Evaluator(qgen_sources, qgen.scope_name, network=qgen, tokenizer=tokenizer)
    qgen_batchifier = QuestionerBatchifier(tokenizer, qgen_sources, status=('success',))
    qgen_iterator = Iterator(testset, pool=cpu_pool,
                             batch_size=batch_size,
                             batchifier=qgen_batchifier)
    [qgen_loss] = qgen_evaluator.process(sess, qgen_iterator, outputs=[qgen.ml_loss])
    logger.info("QGen test loss: {}".format(qgen_loss))
Example #2
def test_guesser(sess, testset, tokenizer, guesser, cpu_pool, batch_size, logger):
    guesser_sources = guesser.get_sources(sess)
    guesser_evaluator = Evaluator(guesser_sources, guesser.scope_name, network=guesser, tokenizer=tokenizer)
    guesser_batchifier = QuestionerBatchifier(tokenizer, guesser_sources, status=('success',))
    guesser_iterator = Iterator(testset, pool=cpu_pool,
                             batch_size=batch_size,
                             batchifier=guesser_batchifier)
    [guesser_loss, guesser_error] = guesser_evaluator.process(sess, guesser_iterator, [guesser.loss, guesser.error])
    logger.info("Guesser test loss: {}".format(guesser_loss))
    logger.info("Guesser test error: {}".format(guesser_error))
Example #3
def test_oracle(sess, testset, tokenizer, oracle, cpu_pool, batch_size, logger):

    oracle_dataset = OracleDataset(testset)
    oracle_sources = oracle.get_sources(sess)
    oracle_evaluator = Evaluator(oracle_sources, oracle.scope_name, network=oracle, tokenizer=tokenizer)
    oracle_batchifier = OracleBatchifier(tokenizer, oracle_sources, status=('success',))
    oracle_iterator = Iterator(oracle_dataset, pool=cpu_pool,
                             batch_size=batch_size,
                             batchifier=oracle_batchifier)
    [oracle_loss, oracle_error] = oracle_evaluator.process(sess, oracle_iterator, [oracle.loss, oracle.error])

    logger.info("Oracle test loss: {}".format(oracle_loss))
    logger.info("Oracle test error: {}".format(oracle_error))
Example #4
class OracleWrapper(object):
    def __init__(self, oracle, tokenizer):

        self.oracle = oracle
        self.evaluator = None
        self.tokenizer = tokenizer

    def initialize(self, sess):
        self.evaluator = Evaluator(self.oracle.get_sources(sess),
                                   self.oracle.scope_name)

    def answer_question(self, sess, question, seq_length, game_data):

        game_data["question"] = question
        game_data["seq_length"] = seq_length

        # rename the dictionary keys to match the oracle's expected input names
        game_data["category"] = game_data.get("targets_category", None)
        game_data["spatial"] = game_data.get("targets_spatial", None)

        # sample
        answers_indices = self.evaluator.execute(sess,
                                                 output=self.oracle.best_pred,
                                                 batch=game_data)

        # Decode the answer tokens ['<yes>', '<no>', '<n/a>']. WARNING: magic order... TODO: move this ordering into the tokenizer
        answer_dico = [
            self.tokenizer.yes_token, self.tokenizer.no_token,
            self.tokenizer.non_applicable_token
        ]
        answers = [answer_dico[a]
                   for a in answers_indices]  # turn indices into tokenizer_id

        return answers
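A short usage sketch (assumed, not part of the original listing) of the wrapper lifecycle: construct once, bind it to a session via initialize(), then query one answer per question turn.

# Assumed usage; oracle, tokenizer, sess and the batch contents come from the surrounding loop code.
oracle_wrapper = OracleWrapper(oracle, tokenizer)
oracle_wrapper.initialize(sess)
answers = oracle_wrapper.answer_question(sess,
                                         question=padded_questions,
                                         seq_length=seq_length,
                                         game_data=game_data)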
class GuesserWrapper(object):
    def __init__(self, guesser, batchifier, tokenizer, listener):
        self.guesser = guesser
        self.batchifier = batchifier
        self.tokenizer = tokenizer
        self.listener = listener
        self.evaluator = None

    def initialize(self, sess):
        self.evaluator = Evaluator(self.guesser.get_sources(sess),
                                   self.guesser.scope_name)

    def find_object(self, sess, games):

        # the guesser may need to split the input
        iterator = BasicIterator(games,
                                 batch_size=len(games),
                                 batchifier=self.batchifier)

        # sample
        self.evaluator.process(sess,
                               iterator,
                               outputs=[],
                               listener=self.listener,
                               show_progress=False)
        results = self.listener.results()

        # Update games
        new_games = []
        for game in games:

            res = results[game.dialogue_id]

            game.id_guess_object = res["id_guess_object"]
            # setdefault ensures the list exists before appending (a plain .get would discard the append)
            game.user_data.setdefault("softmax", []).append(res["softmax"])
            game.status = "success" if res["success"] else "failure"
            game.is_full_dialogue = True

            new_games.append(game)

        return new_games
class QGenSamplingWrapper(object):
    def __init__(self, qgen, tokenizer, max_length):

        self.qgen = qgen

        self.tokenizer = tokenizer
        self.max_length = max_length

        self.evaluator = None

        # Track the hidden state of LSTM
        self.state_c = None
        self.state_h = None
        self.state_size = int(qgen.decoder_zero_state_c.get_shape()[1])

    def initialize(self, sess):
        self.evaluator = Evaluator(self.qgen.get_sources(sess),
                                   self.qgen.scope_name)

    def reset(self, batch_size):
        # reset state
        self.state_c = np.zeros((batch_size, self.state_size))
        self.state_h = np.zeros((batch_size, self.state_size))

    def sample_next_question(self, sess, prev_answers, game_data, greedy):

        game_data["dialogues"] = prev_answers
        game_data["seq_length"] = [1] * len(prev_answers)
        game_data["state_c"] = self.state_c
        game_data["state_h"] = self.state_h
        game_data["greedy"] = greedy

        # sample
        res = self.evaluator.execute(sess, self.qgen.samples, game_data)

        self.state_c = res[0]
        self.state_h = res[1]
        transpose_questions = res[2]
        seq_length = res[3]

        # Get questions
        padded_questions = transpose_questions.transpose([1, 0])
        padded_questions = padded_questions[:, 1:]  # ignore first token

        for i, l in enumerate(seq_length):
            padded_questions[i, l:] = self.tokenizer.padding_token

        questions = [q[:l] for q, l in zip(padded_questions, seq_length)]

        return padded_questions, questions, seq_length
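Because the wrapper carries the decoder LSTM state across turns, a dialogue is produced by calling reset() once per batch and then alternating question sampling with oracle answers. A hedged sketch of that loop; the oracle wrapper, the batch contents and the max_turns bound are assumptions.

batch_size = 32                              # assumed number of parallel games
qgen_wrapper = QGenSamplingWrapper(qgen, tokenizer, max_length=12)
qgen_wrapper.initialize(sess)
qgen_wrapper.reset(batch_size=batch_size)

prev_answers = [[tokenizer.start_token]] * batch_size  # seed with the start token
for _ in range(max_turns):                   # max_turns: assumed dialogue length limit
    padded_q, questions, seq_length = qgen_wrapper.sample_next_question(
        sess, prev_answers, game_data, greedy=True)
    answers = oracle_wrapper.answer_question(sess, padded_q, seq_length, game_data)
    prev_answers = [[a] for a in answers]    # feed each previous answer back in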
class OracleWrapper(object):
    def __init__(self, oracle, batchifier, tokenizer):

        self.oracle = oracle
        self.evaluator = None

        self.tokenizer = tokenizer
        self.batchifier = batchifier

    def initialize(self, sess):
        self.evaluator = Evaluator(self.oracle.get_sources(sess),
                                   self.oracle.scope_name)

    def answer_question(self, sess, games):

        # create the training batch #TODO: hack -> to remove
        oracle_games = []
        if self.batchifier.split_mode == 1:
            for game in games:
                g = copy.copy(game)
                g.questions = [game.questions[-1]]
                g.question_ids = [game.question_ids[-1]]
                oracle_games.append(g)
        else:
            oracle_games = games

        batch = self.batchifier.apply(oracle_games, skip_targets=True)
        batch["is_training"] = False

        # Sample
        answers_index = self.evaluator.execute(sess,
                                               output=self.oracle.prediction,
                                               batch=batch)

        # Update games
        new_games = []
        for game, answer in zip(games, answers_index):
            # stop adding answers once the dialogue is over
            if not game.user_data["has_stop_token"]:
                game.answers.append(self.tokenizer.decode_oracle_answer(answer, sparse=True))
            new_games.append(game)

        return new_games
Example #8
class GuesserWrapper(object):

    def __init__(self, guesser):
        self.guesser = guesser
        self.evaluator = None

    def initialize(self, sess):
        self.evaluator = Evaluator(self.guesser.get_sources(sess), self.guesser.scope_name)

    def find_object(self, sess, dialogues, seq_length, game_data):
        game_data["dialogues"] = dialogues
        game_data["seq_length"] = seq_length

        # sample
        selected_object, softmax = self.evaluator.execute(sess, output=[self.guesser.selected_object, self.guesser.softmax], batch=game_data)

        found = (selected_object == game_data["targets_index"])

        return found, softmax, selected_object
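A minimal sketch of closing a dialogue with this guesser wrapper: once the dialogue tokens have been assembled, find_object() reports whether the target object was retrieved, so accuracy is just the mean over the batch. All inputs here are assumed to come from the surrounding loop code.

import numpy as np

guesser_wrapper = GuesserWrapper(guesser)
guesser_wrapper.initialize(sess)
found, softmax, selected_object = guesser_wrapper.find_object(
    sess, dialogues=padded_dialogues, seq_length=dialogue_lengths, game_data=game_data)
accuracy = float(np.mean(found))  # fraction of games where the target object was found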
    # Keeping these as one-element lists and passing them to a function has the same effect as passing by reference (see the short sketch below)
    global_train_step = [0]
    global_valid_step = [0]
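    # A minimal sketch of the trick noted above: because the list object is shared,
    # a callee can advance the counter in place (hypothetical helper, not in the repo):
    #   def increment_step(step):
    #       step[0] += 1
    #   increment_step(global_train_step)   # global_train_step is now [1]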

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:

        # Returns all the input of the network (The placeholder variables)
        sources = network.get_sources(sess)
        logger.info("Sources: " + ', '.join(sources))

        sess.run(tf.global_variables_initializer())
        start_epoch = load_checkpoint(sess, saver, args, save_path)

        # create training tools
        evaluator = Evaluator(sources,
                              network.scope_name,
                              network=network,
                              tokenizer=tokenizer)
        batchifier = QuestionerBatchifier(tokenizer,
                                          sources,
                                          status=('success', ))

        best_val_loss = 1e5
        for t in range(0, config['optimizer']['no_epoch']):

            logger.info('Epoch {}..'.format(t + 1))

            train_iterator = Iterator(trainset,
                                      batch_size=batch_size,
                                      pool=cpu_pool,
                                      batchifier=batchifier,
                                      shuffle=True)
class QGenWrapper(object):
    def __init__(self, qgen, batchifier, tokenizer, max_length, k_best):

        self.qgen = qgen

        self.batchifier = batchifier
        self.tokenizer = tokenizer

        self.ops = dict()
        self.ops["sampling"] = qgen.create_sampling_graph(
            start_token=tokenizer.start_token,
            stop_token=tokenizer.stop_token,
            max_tokens=max_length)

        self.ops["greedy"] = qgen.create_greedy_graph(
            start_token=tokenizer.start_token,
            stop_token=tokenizer.stop_token,
            max_tokens=max_length)

        beam_predicted_ids, seq_length, att = qgen.create_beam_graph(
            start_token=tokenizer.start_token,
            stop_token=tokenizer.stop_token,
            max_tokens=max_length,
            k_best=k_best)
        # Only keep the best beam (index 0); the zeroed tensor stands in for the
        # per-token state values returned by the sampling and greedy graphs.
        self.ops["beam"] = (beam_predicted_ids[:, 0, :],
                            seq_length[:, 0],
                            beam_predicted_ids[:, 0, :] * 0,
                            att)

        self.evaluator = None

    def initialize(self, sess):
        self.evaluator = Evaluator(self.qgen.get_sources(sess),
                                   self.qgen.scope_name,
                                   network=self.qgen,
                                   tokenizer=self.tokenizer)

    def policy_update(self, sess, games, optimizer):

        # ugly hack: reconfigure the batchifier for RL policy updates (no generation, no supervised targets)
        batchifier = copy.copy(self.batchifier)
        batchifier.generate = False
        batchifier.supervised = False

        iterator = BasicIterator(games,
                                 batch_size=len(games),
                                 batchifier=batchifier)

        # Check whether the gradient is accumulated
        if isinstance(optimizer, AccOptimizer):
            sess.run(optimizer.zero)  # reset gradient
            local_optimizer = optimizer.accumulate
        else:
            local_optimizer = optimizer

        # Compute the gradient
        self.evaluator.process(sess,
                               iterator,
                               outputs=[local_optimizer],
                               show_progress=False)

        if isinstance(optimizer, AccOptimizer):
            sess.run(optimizer.update)  # Apply accumulated gradient

    def sample_next_question(self, sess, games, att_dict, beta_dict, mode):

        # ugly hack: reconfigure the batchifier for generation (no supervised targets)
        batchifier = copy.copy(self.batchifier)
        batchifier.generate = True
        batchifier.supervised = False

        # create the training batch
        batch = batchifier.apply(games, skip_targets=True)
        batch["is_training"] = False
        batch["is_dynamic"] = True

        # Sample
        tokens, seq_length, state_values, atts = self.evaluator.execute(
            sess, output=self.ops[mode], batch=batch)
        # tokens, seq_length, state_values, atts, betas = self.evaluator.execute(sess, output=self.ops[mode], batch=batch)

        # Update game
        new_games = []
        for game, question_tokens, l, state_value, att in zip(
                games, tokens, seq_length, state_values, atts):
            # for game, question_tokens, l, state_value, att, beta in zip(games, tokens, seq_length, state_values, atts, betas):

            # stop adding questions once the dialogue is over
            if not game.user_data["has_stop_token"]:

                # truncate the question after the first stop_dialogue token
                if self.tokenizer.stop_dialogue in question_tokens:
                    game.user_data["has_stop_token"] = True
                    # keep everything up to and including the first stop_dialogue occurrence
                    l = np.nonzero(question_tokens == self.tokenizer.stop_dialogue)[0][0] + 1
                # Append the newly generated question
                game.questions.append(
                    self.tokenizer.decode(question_tokens[:l]))
                game.question_ids.append(len(game.question_ids))

                game.user_data["state_values"] = game.user_data.get(
                    "state_values", [])
                game.user_data["state_values"].append(state_value[:l].tolist())
            # store attention weights per dialogue (indices sorted by weight, and the sorted weights)
            att = att.tolist()
            att_i = np.argsort(att).tolist()
            att_sorted = np.sort(att).tolist()
            att_dict.setdefault(game.dialogue_id, []).append((att_i, att_sorted))

            # beta = beta.tolist()
            # if game.dialogue_id not in beta_dict:
            #     beta_dict[game.dialogue_id] = []
            #     beta_dict[game.dialogue_id].append(beta)
            # else:
            #     beta_dict[game.dialogue_id].append(beta)

            new_games.append(game)

        return new_games, att_dict  #, beta_dict
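A hedged sketch of how the decoding graphs registered in self.ops might be selected at evaluation time; the games list and the attention dictionary are assumed to be maintained by the looper code.

# Assumed usage: one decoding mode is chosen per run ("sampling", "greedy" or "beam").
qgen_wrapper = QGenWrapper(qgen, batchifier, tokenizer, max_length=12, k_best=5)
qgen_wrapper.initialize(sess)

att_dict, beta_dict = {}, {}
games, att_dict = qgen_wrapper.sample_next_question(
    sess, games, att_dict, beta_dict, mode="greedy")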
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:


        sources = network.get_sources(sess)
        out_net = network.get_parameters()[-1]
        # logger.info("Sources: " + ', '.join(sources))
        sess.run(tf.global_variables_initializer())
        if use_resnet:
            resnet_saver.restore(sess, os.path.join(args.data_dir, 'resnet_v1_{}.ckpt'.format(resnet_version)))
        
        start_epoch = load_checkpoint(sess, saver, args, save_path)
        best_val_err = 0
        # best_train_err = None
        # # create training tools
        evaluator = Evaluator(sources, network.scope_name, network=network, tokenizer=tokenizer)

        # train_evaluator = MultiGPUEvaluator(sources, scope_names, networks=networks, tokenizer=tokenizer)
        # train_evaluator = Evaluator(sources, scope_names[0], network=networks[0], tokenizer=tokenizer)
        # eval_evaluator = Evaluator(sources, scope_names[0], network=networks[0], tokenizer=tokenizer)
        
        batchifier = OracleBatchifier(tokenizer, sources, status=config['status'],
                                      glove=glove, tokenizer_description=tokenizer_description,
                                      args=args, config=config)

        stop_learning = False
        progress_compteur = 0
        t = 0


        if not inference:

            while start_epoch < no_epoch and not stop_learning:
            oracle_saver.restore(sess, os.path.join(args.networks_dir, 'oracle', args.oracle_identifier, 'params.ckpt'))
            guesser_saver.restore(sess, os.path.join(args.networks_dir, 'guesser', args.guesser_identifier, 'params.ckpt'))

        # check that models are correctly loaded
        test_model(sess, testset, cpu_pool=cpu_pool, tokenizer=tokenizer,
                   oracle=oracle_network,
                   guesser=guesser_network,
                   qgen=qgen_network,
                   batch_size=100,
                   logger=logger)

        # create training tools
        loop_sources = qgen_network.get_sources(sess)
        logger.info("Sources: " + ', '.join(loop_sources))

        evaluator = Evaluator(loop_sources, qgen_network.scope_name, network=qgen_network, tokenizer=tokenizer)

        train_batchifier = LooperBatchifier(tokenizer, loop_sources, train=True)
        eval_batchifier = LooperBatchifier(tokenizer, loop_sources, train=False)

        # Initialize the looper to eval/train the game-simulation
        qgen_network.build_sampling_graph(qgen_config["model"], tokenizer=tokenizer, max_length=loop_config['loop']['max_depth'])
        looper_evaluator = BasicLooper(loop_config,
                                       oracle=oracle_network,
                                       guesser=guesser_network,
                                       qgen=qgen_network,
                                       tokenizer=tokenizer)

        test_iterator = Iterator(testset, pool=cpu_pool,
                                 batch_size=batch_size,
                                 batchifier=eval_batchifier,
    no_epoch = config["optimizer"]["no_epoch"]

    # CPU/GPU option
    cpu_pool = Pool(args.no_thread, maxtasksperchild=1000)
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_ratio)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:

        sources = network.get_sources(sess)
        logger.info("Sources: " + ', '.join(sources))
        saver.restore(sess, save_path.format('params.ckpt'))

        if not os.path.exists(args.features):
            os.makedirs(args.features)
        # create training tools
        evaluator = Evaluator(sources, network.scope_name, network=network, tokenizer=tokenizer)
        batchifier = QuestionerBatchifier(tokenizer, sources, status=('success',))

        train_iterator = Iterator(trainset,
                                  batch_size=batch_size * 2, pool=cpu_pool,
                                  batchifier=batchifier,
                                  shuffle=False)
        _, train_states = evaluator.process(sess, train_iterator, outputs=outputs, output_dialogue_states=True)

        save_dialogue_states(args.features, "train", *train_states)

        valid_iterator = Iterator(validset, pool=cpu_pool,
                                  batch_size=batch_size * 2,
                                  batchifier=batchifier,
                                  shuffle=False)
        _, valid_states = evaluator.process(sess, valid_iterator, outputs=outputs, output_dialogue_states=True)
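        # Presumably the validation states are persisted the same way as the training
        # ones above; the snippet is truncated here, so this mirroring call is an assumption.
        save_dialogue_states(args.features, "valid", *valid_states)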
    def initialize(self, sess):
        self.evaluator = Evaluator(self.guesser.get_sources(sess),
                                   self.guesser.scope_name)
    def initialize(self, sess):
        self.evaluator = Evaluator(self.qgen.get_sources(sess),
                                   self.qgen.scope_name,
                                   network=self.qgen,
                                   tokenizer=self.tokenizer)
Example #16
cpu_pool = Pool(args.no_thread, maxtasksperchild=1000)
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_ratio)


with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)) as sess:

    # retrieve incoming sources
    sources = networks[0].get_sources(sess)
    scope_names = ['tower_{}/{}'.format(i, network.scope_name) for i, network in enumerate(networks)]
    logger.info("Sources: " + ', '.join(sources))


    # Create evaluation tools
    train_evaluator = MultiGPUEvaluator(sources, scope_names, networks=networks, tokenizer=tokenizer)
    #train_evaluator = Evaluator(sources, scope_names[0], network=networks[0], tokenizer=tokenizer)
    eval_evaluator = Evaluator(sources, scope_names[0], network=networks[0], tokenizer=tokenizer)


    # Load checkpoints or pre-trained networks
    sess.run(tf.global_variables_initializer())
    start_epoch = load_checkpoint(sess, saver, args, save_path)
    if use_resnet:
        resnet_saver.restore(sess, os.path.join(args.data_dir,'resnet_v1_{}.ckpt'.format(resnet_version)))




    train_batchifier = VQABatchifier(tokenizer, sources, glove, remove_unknown=True)
    eval_batchifier = VQABatchifier(tokenizer, sources, glove, remove_unknown=False)

Example #17
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:

        # retrieve incoming sources
        sources = network.get_sources(sess)
        logger.info("Sources: " + ', '.join(sources))

        # Load checkpoints or pre-trained networks
        sess.run(tf.global_variables_initializer())
        if args.continue_exp or args.load_checkpoint is not None:
            start_epoch, _ = xp_manager.load_checkpoint(sess, saver)
        else:
            start_epoch = 0

        # Create evaluation tools
        evaluator = Evaluator(sources,
                              scope=network.scope_name,
                              network=network,
                              tokenizer=tokenizer)
        batchifier = ReferitBatchifier(tokenizer,
                                       sources,
                                       glove=glove,
                                       split_by_objects=True)

        # define listener
        listener = ReferitAccuracyListener(require=network.softmax)

        for t in range(start_epoch, no_epoch):

            # CPU
            cpu_pool = create_cpu_pool(
                args.no_thread,
                use_process=image_builder.require_multiprocess())
Example #18
    def initialize(self, sess):
        self.evaluator = Evaluator(self.oracle.get_sources(sess),
                                   self.oracle.scope_name)
Example #19
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_ratio)

    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                          allow_soft_placement=True)) as sess:

        sources = network.get_sources(sess)
        logger.info("Sources: " + ', '.join(sources))

        sess.run(tf.global_variables_initializer())
        start_epoch = load_checkpoint(sess, saver, args, save_path)

        best_val_err = 1e5
        best_train_err = None

        # create training tools
        evaluator = Evaluator(sources, network.scope_name)
        batchifier = OracleBatchifier(tokenizer,
                                      sources,
                                      status=config['status'],
                                      **config['model']['crop'])

        for t in range(start_epoch, no_epoch):
            logger.info('Epoch {}..'.format(t + 1))

            train_iterator = Iterator(trainset,
                                      batch_size=batch_size,
                                      pool=cpu_pool,
                                      batchifier=batchifier,
                                      shuffle=True)
            train_loss, train_error = evaluator.process(sess,
                                                        train_iterator,
Example #20
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                          allow_soft_placement=True)) as sess:

        sources = network.get_sources(sess)
        logger.info("Sources: " + ', '.join(sources))

        sess.run(tf.global_variables_initializer())
        start_epoch = load_checkpoint(sess, saver, args, save_path)

        best_val_err = 0
        best_train_err = None

        # create training tools
        evaluator = Evaluator(sources,
                              network.scope_name,
                              network=network,
                              tokenizer=tokenizer)
        batchifier = QuestionerBatchifier(tokenizer,
                                          sources,
                                          status=('success', ))

        for t in range(start_epoch, no_epoch):
            logger.info('Epoch {}..'.format(t + 1))

            train_iterator = Iterator(trainset,
                                      batch_size=batch_size,
                                      pool=cpu_pool,
                                      batchifier=batchifier,
                                      shuffle=True)
            train_loss, train_accuracy = evaluator.process(sess,
                                                           train_iterator,
    # gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_ratio)
    # config_gpu = tf.ConfigProto(gpu_options=gpu_options)

    with tf.Session(config=config_gpu) as sess:
        sources = network.get_sources(sess)
        logger.info("Sources: " + ', '.join(sources))

        sess.run(tf.global_variables_initializer())
        if args.continue_exp or args.load_checkpoint is not None:
            start_epoch = xp_manager.load_checkpoint(sess, saver)
        else:
            start_epoch = 0

        # create training tools
        evaluator = Evaluator(sources,
                              network.scope_name,
                              network=network,
                              tokenizer=tokenizer)
        batchifier = batchifier_cstor(tokenizer,
                                      sources,
                                      glove=glove,
                                      status=('success', ))
        xp_manager.configure_score_tracking("valid_accuracy", max_is_best=True)

        for t in range(start_epoch, no_epoch):
            if args.skip_training:
                logger.info("Skip training...")
                break
            logger.info('Epoch {}..'.format(t + 1))

            # Create the CPU pool at each iteration, otherwise threads may become zombies (Python bug)
            cpu_pool = create_cpu_pool(args.no_thread, use_process=use_process)