# Code example #1
# (score: 0)
class LanguageModelAgent(Agent):
    """Agent which trains an RNN on a language modeling task.

    It is adapted from the language model featured in Pytorch's examples repo
    here: <https://github.com/pytorch/examples/tree/master/word_language_model>.
    """
    @staticmethod
    def dictionary_class():
        """Return the dictionary class used by this agent."""
        return DictionaryAgent

    @staticmethod
    def add_cmdline_args(argparser):
        """Add command-line arguments specifically for this agent."""
        argparser.set_defaults(batch_sort=False)
        agent = argparser.add_argument_group('Language Model Arguments')
        agent.add_argument(
            '--init-model',
            type=str,
            default=None,
            help='load dict/features/weights/opts from this file')
        agent.add_argument('-hs',
                           '--hiddensize',
                           type=int,
                           default=200,
                           help='size of the hidden layers')
        agent.add_argument('-esz',
                           '--embeddingsize',
                           type=int,
                           default=200,
                           help='size of the token embeddings')
        agent.add_argument('-nl',
                           '--numlayers',
                           type=int,
                           default=2,
                           help='number of hidden layers')
        agent.add_argument('-dr',
                           '--dropout',
                           type=float,
                           default=0.2,
                           help='dropout rate')
        agent.add_argument('-clip',
                           '--gradient-clip',
                           type=float,
                           default=0.25,
                           help='gradient clipping')
        agent.add_argument('--no-cuda',
                           action='store_true',
                           default=False,
                           help='disable GPUs even if available')
        agent.add_argument(
            '-rnn',
            '--rnn-class',
            default='LSTM',
            help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
        agent.add_argument('-sl',
                           '--seq-len',
                           type=int,
                           default=35,
                           help='sequence length')
        agent.add_argument('-tied',
                           '--emb-tied',
                           action='store_true',
                           help='tie the word embedding and softmax weights')
        agent.add_argument('-seed',
                           '--random-seed',
                           type=int,
                           default=1111,
                           help='random seed')
        agent.add_argument('--gpu',
                           type=int,
                           default=-1,
                           help='which GPU device to use')
        agent.add_argument('-tr',
                           '--truncate-pred',
                           type=int,
                           default=50,
                           help='truncate predictions')
        agent.add_argument('-rf',
                           '--report-freq',
                           type=float,
                           default=0.1,
                           help='report frequency of prediction during eval')
        agent.add_argument('-pt',
                           '--person-tokens',
                           type='bool',
                           default=True,
                           help='append person1 and person2 tokens to text')
        # learning rate parameters
        agent.add_argument('-lr',
                           '--learningrate',
                           type=float,
                           default=20,
                           help='initial learning rate')
        agent.add_argument(
            '-lrf',
            '--lr-factor',
            type=float,
            default=1.0,
            help='multiply learning rate by this factor when the \
                           validation loss does not decrease')
        agent.add_argument('-lrp',
                           '--lr-patience',
                           type=int,
                           default=10,
                           help='wait before decreasing learning rate')
        agent.add_argument('-lrm',
                           '--lr-minimum',
                           type=float,
                           default=0.1,
                           help='minimum learning rate')
        agent.add_argument(
            '-sm',
            '--sampling-mode',
            type='bool',
            default=False,
            help='sample when generating tokens instead of taking \
                           the max and do not produce UNK token (when bs=1)')
        LanguageModelAgent.dictionary_class().add_cmdline_args(argparser)
        return agent

    def __init__(self, opt, shared=None):
        """Set up model if shared params not set, otherwise no work to do."""
        super().__init__(opt, shared)
        opt = self.opt  # there is a deepcopy in the init
        # running training/eval metrics, reset via reset_metrics()
        self.metrics = {
            'loss': 0,
            'num_tokens': 0,
            'lmloss': 0,
            'lm_num_tokens': 0
        }
        self.states = {}
        # check for cuda
        self.use_cuda = not opt.get('no_cuda') and torch.cuda.is_available()
        self.batchsize = opt.get('batchsize', 1)
        self.use_person_tokens = opt.get('person_tokens', True)
        self.sampling_mode = opt.get('sampling_mode', False)

        if shared:
            # set up shared properties
            self.opt = shared['opt']
            opt = self.opt
            self.dict = shared['dict']

            self.model = shared['model']
            self.metrics = shared['metrics']

            # get NULL token and END token
            self.NULL_IDX = self.dict[self.dict.null_token]
            self.END_IDX = self.dict[self.dict.end_token]

            if 'states' in shared:
                self.states = shared['states']

            if self.use_person_tokens:
                # add person1 and person2 tokens
                self.dict.add_to_dict(self.dict.tokenize("PERSON1"))
                self.dict.add_to_dict(self.dict.tokenize("PERSON2"))

        else:
            # this is not a shared instance of this class, so do full init
            if self.use_cuda:
                print('[ Using CUDA ]')
                torch.cuda.set_device(opt['gpu'])

            init_model = None
            # check first for 'init_model' for loading model from file
            if opt.get('init_model') and os.path.isfile(opt['init_model']):
                init_model = opt['init_model']
            # next check for 'model_file', this would override init_model
            if opt.get('model_file') and os.path.isfile(opt['model_file']):
                init_model = opt['model_file']

            # for backwards compatibility: will only be called for older models
            # for which .opt file does not exist
            if (init_model is not None
                    and not os.path.isfile(init_model + '.opt')):
                new_opt = self.load_opt(init_model)
                # load model parameters if available
                print('[ Setting opt from {} ]'.format(init_model))
                # since .opt file does not exist, save one for future use
                print("Saving opt file at:", init_model + ".opt")
                with open(init_model + ".opt", 'wb') as handle:
                    pickle.dump(new_opt,
                                handle,
                                protocol=pickle.HIGHEST_PROTOCOL)
                opt = self.override_opt(new_opt)

            # point the dict file at the model's dict when one exists on disk
            # or no dict file was specified; guard against init_model being
            # None (the original unguarded `or` raised TypeError when both
            # init_model and opt['dict_file'] were None)
            if init_model is not None and (
                    os.path.isfile(init_model + '.dict')
                    or opt['dict_file'] is None):
                opt['dict_file'] = init_model + '.dict'

            # load dictionary and basic tokens & vectors
            self.dict = DictionaryAgent(opt)
            self.id = 'LanguageModel'

            # get NULL token and END token
            self.NULL_IDX = self.dict[self.dict.null_token]
            self.END_IDX = self.dict[self.dict.end_token]

            if self.use_person_tokens:
                # add person1 and person2 tokens
                self.dict.add_to_dict(self.dict.tokenize("PERSON1"))
                self.dict.add_to_dict(self.dict.tokenize("PERSON2"))

            # set model
            self.model = RNNModel(opt, len(self.dict))

            if init_model is not None:
                self.load(init_model)

            if self.use_cuda:
                self.model.cuda()

        # buffers of token indices accumulated across observe()/vectorize()
        self.next_observe = []
        self.next_batch = []

        self.is_training = True

        self.clip = opt.get('gradient_clip', 0.25)
        # set up criteria; sum (not mean) over tokens so metrics can be
        # normalized by the true token count (reduction='sum' replaces the
        # deprecated size_average=False)
        self.criterion = nn.CrossEntropyLoss(ignore_index=self.NULL_IDX,
                                             reduction='sum')
        if self.use_cuda:
            # push to cuda
            self.criterion.cuda()
        # init hidden state
        self.hidden = self.model.init_hidden(self.batchsize)
        # init tensor of end tokens
        self.ends = torch.LongTensor(
            [self.END_IDX for _ in range(self.batchsize)])
        if self.use_cuda:
            self.ends = self.ends.cuda()
        # set up model and learning rate scheduler parameters
        self.lr = opt['learningrate']
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=self.lr)
        self.best_val_loss = self.states.get('best_val_loss', None)
        self.lr_factor = opt['lr_factor']
        if self.lr_factor < 1.0:
            self.lr_patience = opt['lr_patience']
            self.lr_min = opt['lr_minimum']
            self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
                self.optimizer,
                factor=self.lr_factor,
                verbose=True,
                patience=self.lr_patience,
                min_lr=self.lr_min)
            # initial step for scheduler if self.best_val_loss is initialized
            if self.best_val_loss is not None:
                self.scheduler.step(self.best_val_loss)
        else:
            self.scheduler = None

        self.reset()

    def override_opt(self, new_opt):
        """Set overridable opts from loaded opt file.

        Print out each added key and each overridden key.
        Only override args specific to the model.
        """
        model_args = {
            'hiddensize', 'embeddingsize', 'numlayers', 'dropout', 'seq_len',
            'emb_tied', 'truncate_pred', 'report_freq', 'person_tokens',
            'learningrate'
        }
        for k, v in new_opt.items():
            if k not in model_args:
                # skip non-model args
                continue
            if k not in self.opt:
                print('Adding new option [ {k}: {v} ]'.format(k=k, v=v))
            elif self.opt[k] != v:
                print('Overriding option [ {k}: {old} => {v}]'.format(
                    k=k, old=self.opt[k], v=v))
            self.opt[k] = v
        return self.opt

    def parse(self, text):
        """Convert string to token indices."""
        return self.dict.txt2vec(text)

    def zero_grad(self):
        """Zero out optimizer."""
        self.optimizer.zero_grad()

    def update_params(self):
        """Do one optimization step."""
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.clip)
        self.optimizer.step()

    def reset(self):
        """Reset observation and episode_done."""
        self.observation = None
        self.reset_metrics()

    def reset_metrics(self):
        """Zero all accumulated loss/token-count metrics."""
        self.metrics.clear()
        self.metrics['loss'] = 0
        self.metrics['lmloss'] = 0
        self.metrics['num_tokens'] = 0
        self.metrics['lm_num_tokens'] = 0

    def report(self):
        """Return per-token loss and perplexity for train and eval passes."""
        m = {}
        if self.metrics['num_tokens'] > 0:
            m['loss'] = self.metrics['loss'] / self.metrics['num_tokens']
            m['ppl'] = math.exp(m['loss'])
        if self.metrics['lm_num_tokens'] > 0:
            m['lmloss'] = self.metrics['lmloss'] / self.metrics['lm_num_tokens']
            m['lmppl'] = math.exp(m['lmloss'])
        for k, v in m.items():
            # clean up: rounds to sigfigs and converts tensors to floats
            m[k] = round_sigfigs(v, 4)
        return m

    def share(self):
        """Share internal states between parent and child instances."""
        shared = super().share()
        shared['opt'] = self.opt
        shared['dict'] = self.dict
        shared['NULL_IDX'] = self.NULL_IDX
        shared['END_IDX'] = self.END_IDX
        shared['model'] = self.model
        if self.opt.get('numthreads', 1) > 1:
            if type(self.metrics) == dict:
                # move metrics and model to shared memory
                self.metrics = SharedTable(self.metrics)
                self.model.share_memory()
            shared['states'] = {  # only need to pass optimizer states
                'optimizer': self.optimizer.state_dict(),
            }
        shared['metrics'] = self.metrics
        return shared

    def observe(self, observation):
        """Save observation for act.

        If multiple observations are from the same episode, concatenate them.
        """
        # shallow copy observation (deep copy can be expensive)
        obs = observation.copy()
        seq_len = self.opt['seq_len']
        is_training = True
        if 'labels' not in obs:
            is_training = False

        if is_training:
            if 'text' in obs:
                if self.use_person_tokens:
                    obs['text'] = 'PERSON1 ' + obs['text']
                vec = self.parse(obs['text'])
                vec.append(self.END_IDX)
                self.next_observe += vec
            if 'labels' in obs:
                if self.use_person_tokens:
                    labels = [
                        'PERSON2 ' + label for label in obs['labels']
                        if label != ''
                    ]
                    obs['labels'] = tuple(labels)
                vec = self.parse(obs['labels'][0])
                vec.append(self.END_IDX)
                self.next_observe += vec
            if len(self.next_observe) < (seq_len + 1):
                # not enough to return to make a batch
                # we handle this case in vectorize
                # labels indicates that we are training
                self.observation = {'labels': ''}
                return self.observation
            else:
                # chunk the accumulated token stream into (seq_len + 1)-long
                # pieces: seq_len inputs plus one shifted target token
                vecs_to_return = []
                total = len(self.next_observe) // (seq_len + 1)
                for _ in range(total):
                    observe = self.next_observe[:(seq_len + 1)]
                    self.next_observe = self.next_observe[(seq_len + 1):]
                    vecs_to_return.append(observe)
                dict_to_return = {
                    'text': '',
                    'labels': '',
                    'text2vec': vecs_to_return
                }
                self.observation = dict_to_return
                return dict_to_return
        else:
            if 'text' in obs:
                if self.use_person_tokens:
                    obs['text'] = 'PERSON1 ' + obs['text']
            if 'eval_labels' in obs:
                if self.use_person_tokens:
                    eval_labels = [
                        'PERSON2 ' + label for label in obs['eval_labels']
                        if label != ''
                    ]
                    obs['eval_labels'] = tuple(eval_labels)
            self.observation = obs
            return obs

    def repackage_hidden(self, h):
        """Wraps hidden states in new Variables, to detach them from their history."""
        if isinstance(h, Variable):
            return Variable(h.data)
        else:
            return tuple(self.repackage_hidden(v) for v in h)

    def get_target_loss(self, data, hidden, targets):
        """Calculates the loss with respect to the targets, token by token,
           where each output token is conditioned on either the input or the
           previous target token.
        """
        loss = 0.0
        bsz = data.size(0)

        # during interactive mode, when no targets exist, we return 0
        if targets is None:
            return loss

        # feed in inputs without end token
        output, hidden = self.model(data.transpose(0, 1), hidden)
        self.hidden = self.repackage_hidden(hidden)
        # feed in end tokens
        output, hidden = self.model(Variable(self.ends[:bsz].view(1, bsz)),
                                    self.hidden)
        self.hidden = self.repackage_hidden(hidden)
        output_flat = output.view(-1, len(self.dict))
        loss += self.criterion(output_flat, targets.select(1, 0).view(-1)).data

        # teacher forcing: condition each step on the previous gold target
        for i in range(1, targets.size(1)):
            output, hidden = self.model(targets.select(1, i - 1).view(1, bsz),
                                        self.hidden,
                                        no_pack=True)
            self.hidden = self.repackage_hidden(hidden)
            output_flat = output.view(-1, len(self.dict))
            loss += self.criterion(output_flat,
                                   targets.select(1, i).view(-1)).data

        return loss

    def get_predictions(self, data):
        """Generates predictions word by word until we either reach the end token
           or some max length (opt['truncate_pred']).
        """
        token_list = []
        bsz = data.size(0)
        done = [False for _ in range(bsz)]
        total_done = 0
        hidden = self.model.init_hidden(bsz)

        i = 0
        word_idx = None
        while total_done < bsz and i <= self.opt['truncate_pred']:
            if i == 0:
                # feed in input without end tokens
                output, hidden = self.model(data.transpose(0, 1), hidden)
                hidden = self.repackage_hidden(hidden)
                # feed in end tokens
                output, hidden = self.model(
                    Variable(self.ends[:bsz].view(1, bsz)), hidden)
            else:
                output, hidden = self.model(Variable(word_idx.view(1, bsz)),
                                            hidden,
                                            no_pack=True)
            hidden = self.repackage_hidden(hidden)
            word_weights = output.squeeze().data.exp()
            if bsz > 1:
                # batched eval: greedy argmax over the vocab dimension
                _, word_idx = torch.max(word_weights, 1)
            else:
                if self.sampling_mode:
                    unk_idx = self.dict[self.dict.unk_token]
                    # make word_weights have smaller norm so that calculated
                    # norm does not blow up
                    word_weights = word_weights.div(1e10)
                    # make word_weights have L2 norm 1
                    ww_norm = torch.norm(word_weights, p=2)
                    word_weights = word_weights.div(ww_norm)
                    # square distribution
                    word_weights = torch.mul(word_weights, word_weights)
                    # sample distribution
                    word_idx = torch.multinomial(word_weights, 1)
                    # do not produce UNK token
                    while word_idx == unk_idx:
                        word_idx = torch.multinomial(word_weights, 1)
                else:
                    _, word_idx = torch.max(word_weights, 0)
            # mark end indices for items in batch
            word_idx = word_idx.view(-1)
            for k in range(word_idx.size(0)):
                if not done[k]:
                    if int(word_idx[k]) == self.END_IDX:
                        done[k] = True
                        total_done += 1
            token_list.append(word_idx.view(bsz, 1))
            i += 1

        return torch.cat(token_list, 1)

    def predict(self,
                data,
                hidden,
                targets=None,
                is_training=True,
                y_lens=None):
        """Produce a prediction from our model.

        In training mode, runs one forward/backward/update step and records
        lm loss metrics; in eval mode, generates predictions and records the
        target loss when targets are available.
        """
        output = None
        predictions = None
        if is_training:
            self.model.train()
            self.zero_grad()
            output, hidden = self.model(data, hidden)
            loss = self.criterion(output.view(-1, len(self.dict)),
                                  targets.view(-1))
            # save loss to metrics
            target_tokens = targets.ne(self.NULL_IDX).float().sum().item()
            self.metrics['lmloss'] += loss.double().item()
            self.metrics['lm_num_tokens'] += target_tokens
            # average loss per token
            loss /= target_tokens
            loss.backward(retain_graph=True)
            self.update_params()
        else:
            self.model.eval()
            predictions = self.get_predictions(data)
            bsz = data.size(0)
            if bsz != self.batchsize:
                # eval batch size differs from training; rebuild hidden state
                self.hidden = self.model.init_hidden(bsz)
            if targets is not None:
                loss = self.get_target_loss(data, self.hidden, targets)
                self.metrics['loss'] += loss
                self.metrics['num_tokens'] += sum(y_lens)

        return output, hidden, predictions

    def vectorize(self, observations, seq_len, is_training):
        """Convert a list of observations into input & target tensors."""
        labels = None
        valid_inds = None
        y_lens = None
        if is_training:
            for obs in observations:
                if obs:
                    if 'text2vec' in obs:
                        self.next_batch += obs['text2vec']
            if len(self.next_batch) <= self.batchsize:
                # NOTE(review): with exactly batchsize chunks buffered this
                # still waits for more data — confirm the `<=` is intentional
                return None, None, None, None, None
            else:
                data_list = []
                targets_list = []
                # total is the number of batches
                total = len(self.next_batch) // self.batchsize
                for _ in range(total):
                    batch = self.next_batch[:self.batchsize]
                    self.next_batch = self.next_batch[self.batchsize:]

                    # targets are the inputs shifted by one token
                    source = torch.LongTensor(batch).t().contiguous()
                    data = Variable(source[:seq_len])
                    targets = Variable(source[1:])

                    if self.use_cuda:
                        data = data.cuda()
                        targets = targets.cuda()

                    data_list.append(data)
                    targets_list.append(targets)
        else:
            # here we get valid examples and pad them with zeros
            xs, ys, labels, valid_inds, _, y_lens = PaddingUtils.pad_text(
                observations,
                self.dict,
                end_idx=self.END_IDX,
                null_idx=self.NULL_IDX)

            if self.use_cuda:
                if xs is not None:
                    xs = Variable(torch.LongTensor(xs)).cuda()
                if ys is not None:
                    ys = Variable(torch.LongTensor(ys)).cuda()
            else:
                if xs is not None:
                    xs = Variable(torch.LongTensor(xs))
                if ys is not None:
                    ys = Variable(torch.LongTensor(ys))
            data_list = [xs]
            targets_list = [ys]

        return data_list, targets_list, labels, valid_inds, y_lens

    def batch_act(self, observations):
        """Update or evaluate the model on a batch of observations."""
        batch_reply = [{'id': self.getID()} for _ in range(len(observations))]
        if any('labels' in obs for obs in observations):
            # if we are starting a new training epoch, reinitialize hidden
            if not self.is_training:
                self.hidden = self.model.init_hidden(self.batchsize)
            self.is_training = True
            data_list, targets_list, _c, _v, y_lens = self.vectorize(
                observations, self.opt['seq_len'], self.is_training)
        else:
            # if we just finished training, reinitialize hidden
            if self.is_training:
                self.hidden = self.model.init_hidden(self.batchsize)
                self.is_training = False
            data_list, targets_list, labels, valid_inds, y_lens = self.vectorize(
                observations, self.opt['seq_len'], self.is_training)

        if data_list is None:
            # not enough data to batch act yet, return empty responses
            return batch_reply

        batch_reply = []
        # during evaluation, len(data_list) is always 1
        # during training, len(data_list) >= 0: vectorize returns a list
        #     containing all batches available at the time it is called
        for i in range(len(data_list)):
            temp_dicts = [{
                'id': self.getID()
            } for _ in range(len(observations))]
            # ignore case when we do not return any valid indices
            if data_list[i] is not None:
                output, hidden, predictions = self.predict(
                    data_list[i], self.hidden, targets_list[i],
                    self.is_training, y_lens)
                self.hidden = self.repackage_hidden(hidden)

                if predictions is not None:
                    # map predictions back to the right order
                    PaddingUtils.map_predictions(
                        predictions.cpu(),
                        valid_inds,
                        temp_dicts,
                        observations,
                        self.dict,
                        self.END_IDX,
                        report_freq=self.opt['report_freq'])

            batch_reply += temp_dicts

        # for prediction metrics computations, we get rid of PERSON1 and PERSON2 tokens
        if not self.is_training:
            for reply in batch_reply:
                if 'text' in reply:
                    reply['text'] = reply['text'].replace('PERSON1 ', '')
                    reply['text'] = reply['text'].replace('PERSON2 ', '')

        return batch_reply

    def act(self):
        """Produce a reply for the single stored observation."""
        # call batch_act with this batch of one
        return self.batch_act([self.observation])[0]

    def save(self, path=None):
        """Save model parameters if model_file is set."""
        path = self.opt.get('model_file', None) if path is None else path

        if path and hasattr(self, 'model'):
            model = {}
            model['model'] = self.model.state_dict()
            model['opt'] = self.opt
            model['best_val_loss'] = self.best_val_loss

            with open(path, 'wb') as write:
                torch.save(model, write)
            # save opt file
            with open(path + ".opt", 'wb') as handle:
                pickle.dump(self.opt, handle, protocol=pickle.HIGHEST_PROTOCOL)

    def shutdown(self):
        """Save the state of the model when shutdown."""
        path = self.opt.get('model_file', None)
        if path is not None:
            self.save(path + '.shutdown_state')
        super().shutdown()

    def receive_metrics(self, metrics_dict):
        """Step the LR scheduler on the reported validation loss, if enabled."""
        if 'loss' in metrics_dict and self.scheduler is not None:
            self.scheduler.step(metrics_dict['loss'])

    def load_opt(self, path):
        """Return opt, states."""
        # map_location forces CPU so models saved on GPU load anywhere
        states = torch.load(path, map_location=lambda cpu, _: cpu)
        return states['opt']

    def load(self, path):
        """Load model states."""
        if os.path.isfile(path):
            # load model parameters if available
            print('[ Loading existing model params from {} ]'.format(path))
            self.states = torch.load(path, map_location=lambda cpu, _: cpu)
            self.model.load_state_dict(self.states['model'])
# Code example #2
# (score: 0)
class LanguageModelAgent(Agent):
    """ Agent which trains an RNN on a language modeling task.

    It is adapted from the language model featured in Pytorch's examples repo
    here: <https://github.com/pytorch/examples/tree/master/word_language_model>.
    """
    @staticmethod
    def dictionary_class():
        """Return the dictionary class this agent uses to tokenize text."""
        return DictionaryAgent

    @staticmethod
    def add_cmdline_args(argparser):
        """Add command-line arguments specifically for this agent."""

        def _str2bool(value):
            # argparse's ``type=bool`` is a trap: bool('False') is True, so
            # any non-empty string enables the flag and it can never be
            # turned off from the command line.  Parse common spellings
            # explicitly instead.
            return str(value).lower() in ('yes', 'true', 't', '1')

        argparser.set_defaults(batch_sort=False)
        LanguageModelAgent.dictionary_class().add_cmdline_args(argparser)
        agent = argparser.add_argument_group('Language Model Arguments')
        agent.add_argument('-hs', '--hiddensize', type=int, default=200,
                           help='size of the hidden layers')
        agent.add_argument('-esz', '--embeddingsize', type=int, default=200,
                           help='size of the token embeddings')
        agent.add_argument('-nl', '--numlayers', type=int, default=2,
                           help='number of hidden layers')
        agent.add_argument('-lr', '--learningrate', type=float, default=20,
                           help='initial learning rate')
        agent.add_argument('-dr', '--dropout', type=float, default=0.2,
                           help='dropout rate')
        agent.add_argument('-clip', '--gradient-clip', type=float,
                           default=0.25,
                           help='gradient clipping')
        agent.add_argument('--no-cuda', action='store_true', default=False,
                           help='disable GPUs even if available')
        agent.add_argument('-rnn', '--rnn-class', default='LSTM',
                           help='type of recurrent net '
                                '(RNN_TANH, RNN_RELU, LSTM, GRU)')
        agent.add_argument('-sl', '--seq-len', type=int, default=35,
                           help='sequence length')
        agent.add_argument('-tied', '--emb-tied', action='store_true',
                           help='tie the word embedding and softmax weights')
        agent.add_argument('-seed', '--random-seed', type=int, default=1111,
                           help='random seed')
        agent.add_argument('--gpu', type=int, default=-1,
                           help='which GPU device to use')
        agent.add_argument('-tr', '--truncate-pred', type=int, default=50,
                           help='truncate predictions')
        agent.add_argument('-rf', '--report-freq', type=float, default=0.1,
                           help='report frequency of prediction during eval')
        # was type=bool, which cannot parse 'False' from the command line
        agent.add_argument('-pt', '--person-tokens', type=_str2bool,
                           default=True,
                           help='append person1 and person2 tokens to text')
        # help text: fixed 'mutliply' typo and the run of literal whitespace
        # that a backslash-continuation inside the string used to embed
        agent.add_argument('-lrf', '--lr-factor', type=float, default=0.5,
                           help='multiply learning rate by this factor when '
                                'the validation loss does not decrease')

    def __init__(self, opt, shared=None):
        """Set up model if shared params not set, otherwise no work to do.

        When ``shared`` is given (batch/hogwild workers) only the dictionary
        and, for hogwild, the model itself are taken from the parent.
        Otherwise the full model, criteria, optimizer state and buffers are
        built here.
        """
        super().__init__(opt, shared)
        opt = self.opt  # there is a deepcopy in the init
        self.states = {}
        # check for cuda
        self.use_cuda = not opt.get('no_cuda') and torch.cuda.is_available()
        self.batchsize = opt.get('batchsize', 1)
        self.use_person_tokens = opt.get('person_tokens', True)

        if shared:
            # set up shared properties
            self.dict = shared['dict']

            if 'model' in shared:
                # model is shared during hogwild
                self.model = shared['model']
                self.states = shared['states']

            # get NULL token and END token
            self.NULL_IDX = self.dict[self.dict.null_token]
            self.END_IDX = self.dict[self.dict.end_token]

            if self.use_person_tokens:
                # add person1 and person2 tokens
                self.dict.add_to_dict(self.dict.tokenize("PERSON1"))
                self.dict.add_to_dict(self.dict.tokenize("PERSON2"))

        else:
            # this is not a shared instance of this class, so do full init
            if self.use_cuda:
                print('[ Using CUDA ]')
                torch.cuda.set_device(opt['gpu'])

            if opt.get('model_file') and os.path.isfile(opt['model_file']):
                # load model parameters if available
                print('Loading existing model params from ' +
                      opt['model_file'])
                new_opt, self.states = self.load(opt['model_file'])
                # override model-specific options with stored ones
                opt = self.override_opt(new_opt)

            if opt['dict_file'] is None and opt.get('model_file'):
                # set default dict-file if not set
                opt['dict_file'] = opt['model_file'] + '.dict'

            # load dictionary and basic tokens & vectors
            self.dict = DictionaryAgent(opt)
            self.id = 'LanguageModel'

            # get NULL token and END token
            self.NULL_IDX = self.dict[self.dict.null_token]
            self.END_IDX = self.dict[self.dict.end_token]

            if self.use_person_tokens:
                # add person1 and person2 tokens
                self.dict.add_to_dict(self.dict.tokenize("PERSON1"))
                self.dict.add_to_dict(self.dict.tokenize("PERSON2"))

            # set model
            self.model = RNNModel(opt, len(self.dict))

            if self.states:
                # set loaded states if applicable
                self.model.load_state_dict(self.states['model'])

            if self.use_cuda:
                self.model.cuda()

        # rolling buffers used by observe()/vectorize() to chunk the token
        # stream into fixed-length training sequences
        self.next_observe = []
        self.next_batch = []

        self.is_training = True

        if hasattr(self, 'model'):
            # if model was built, do more setup
            self.clip = opt.get('gradient_clip', 0.25)
            # set up criteria
            self.criterion = nn.CrossEntropyLoss(ignore_index=self.NULL_IDX)
            if self.use_cuda:
                # push to cuda
                self.criterion.cuda()
            # set up criterion for eval: we do not want to average over size
            self.eval_criterion = nn.CrossEntropyLoss(
                ignore_index=self.NULL_IDX, size_average=False)
            if self.use_cuda:
                # push to cuda
                self.eval_criterion.cuda()
            # init hidden state
            self.hidden = self.model.init_hidden(self.batchsize)
            # init tensor of end tokens
            self.ends = torch.LongTensor(
                [self.END_IDX for _ in range(self.batchsize)])
            if self.use_cuda:
                self.ends = self.ends.cuda()
            # set up optimizer
            self.lr = opt['learningrate']
            self.lr_factor = opt['lr_factor']
            self.best_val_loss = None

        self.reset()

    def override_opt(self, new_opt):
        """Set overridable opts from a loaded opt file.

        Prints each added key and each overridden key; only args specific
        to the model architecture are copied over.
        """
        model_args = {
            'hiddensize', 'embeddingsize', 'numlayers', 'dropout', 'seq_len',
            'emb_tied'
        }
        # only model-architecture options may be overridden
        relevant = ((key, value) for key, value in new_opt.items()
                    if key in model_args)
        for key, value in relevant:
            if key not in self.opt:
                print('Adding new option [ {k}: {v} ]'.format(k=key, v=value))
            elif self.opt[key] != value:
                print('Overriding option [ {k}: {old} => {v}]'.format(
                    k=key, old=self.opt[key], v=value))
            self.opt[key] = value
        return self.opt

    def parse(self, text):
        """Convert a string into a list of token indices via the dictionary."""
        token_ids = self.dict.txt2vec(text)
        return token_ids

    def zero_grad(self):
        """Clear all accumulated gradients before the next backward pass."""
        self.model.zero_grad()

    def update_params(self):
        """Do one manual SGD step with gradient clipping.

        Parameters whose gradient is unset (``p.grad is None``, e.g. weights
        not touched by the current batch) are skipped; previously they would
        raise an AttributeError on ``p.grad.data``.
        """
        torch.nn.utils.clip_grad_norm(self.model.parameters(), self.clip)
        for p in self.model.parameters():
            if p.grad is not None:
                p.data.add_(-self.lr, p.grad.data)

    def reset(self):
        """Clear the stored observation.

        NOTE(review): the old docstring also mentioned episode_done, but this
        agent keeps no such flag here.
        """
        self.observation = None

    def share(self):
        """Share internal states between parent and child instances."""
        shared = super().share()
        shared['dict'] = self.dict
        shared['NULL_IDX'] = self.NULL_IDX
        shared['END_IDX'] = self.END_IDX
        hogwild = self.opt.get('numthreads', 1) > 1
        if hogwild:
            # hogwild workers need the model itself placed in shared memory
            self.model.share_memory()
            shared['model'] = self.model
            shared['states'] = self.states
        return shared

    def observe(self, observation):
        """Save observation for act.
        If multiple observations are from the same episode, concatenate them.

        Training path: text and label tokens (with optional PERSON1/PERSON2
        prefixes and END markers) are appended to ``self.next_observe`` and
        returned as fixed-length chunks of ``seq_len + 1`` tokens under the
        ``text2vec`` key.  Eval path: the observation is returned (almost)
        as-is with the person-token prefixes applied.
        """
        #shallow copy observation (deep copy can be expensive)
        obs = observation.copy()
        seq_len = self.opt['seq_len']
        is_training = True
        if 'labels' not in obs:
            is_training = False

        if is_training:
            if 'text' in obs:
                if self.use_person_tokens:
                    obs['text'] = 'PERSON1 ' + obs['text']
                vec = self.parse(obs['text'])
                vec.append(self.END_IDX)
                self.next_observe += vec
            if 'labels' in obs:
                if self.use_person_tokens:
                    labels = [
                        'PERSON2 ' + label for label in obs['labels']
                        if label != ''
                    ]
                    obs['labels'] = tuple(labels)
                # only the first label contributes to the training stream
                vec = self.parse(obs['labels'][0])
                vec.append(self.END_IDX)
                self.next_observe += vec
            if len(self.next_observe) < (seq_len + 1):
                # not enough to return to make a batch
                # we handle this case in vectorize
                # labels indicates that we are training
                self.observation = {'labels': ''}
                return self.observation
            else:
                # slice the accumulated token stream into (seq_len + 1)-sized
                # chunks; any remainder stays buffered for the next call
                vecs_to_return = []
                total = len(self.next_observe) // (seq_len + 1)
                for _ in range(total):
                    observe = self.next_observe[:(seq_len + 1)]
                    self.next_observe = self.next_observe[(seq_len + 1):]
                    vecs_to_return.append(observe)
                dict_to_return = {
                    'text': '',
                    'labels': '',
                    'text2vec': vecs_to_return
                }
                self.observation = dict_to_return
                return dict_to_return
        else:
            if 'text' in obs:
                if self.use_person_tokens:
                    obs['text'] = 'PERSON1 ' + obs['text']
            if 'eval_labels' in obs:
                if self.use_person_tokens:
                    eval_labels = [
                        'PERSON2 ' + label for label in obs['eval_labels']
                        if label != ''
                    ]
                    obs['eval_labels'] = tuple(eval_labels)
            self.observation = obs
            return obs

    def repackage_hidden(self, h):
        """Wraps hidden states in new Variables, to detach them from their history.

        Accepts a single Variable (RNN/GRU hidden) or an arbitrarily nested
        tuple of Variables (LSTM ``(h, c)`` pair) and recurses over tuples.
        """
        if isinstance(h, Variable):
            # isinstance replaces the exact ``type(h) == Variable`` check:
            # equivalent for plain Variables, and still correct on newer
            # torch where hidden states are Tensor (Variable alias) subtypes
            # that an exact type comparison would miss.
            return Variable(h.data)
        else:
            return tuple(self.repackage_hidden(v) for v in h)

    def get_target_loss(self, data, hidden, targets, y_lens):
        """Calculates the loss with respect to the targets, token by token,
           where each output token is conditioned on either the input or the
           previous target token.

        data: (batch, seq) input tokens; transposed to (seq, batch) for the model.
        hidden: recurrent state used for the first forward pass.
        targets: (batch, target_len) gold tokens, or None in interactive mode.
        y_lens: per-example target lengths; the summed loss is normalized by
            their total (eval_criterion sums rather than averages).
        """
        loss = 0.0
        bsz = data.size(0)

        # during interactive mode, when no targets exist, we return 0
        if targets is None:
            return loss

        # feed in inputs without end token
        output, hidden = self.model(data.transpose(0, 1), hidden)
        self.hidden = self.repackage_hidden(hidden)
        # feed in end tokens
        output, hidden = self.model(Variable(self.ends[:bsz].view(1, bsz)),
                                    self.hidden)
        self.hidden = self.repackage_hidden(hidden)
        output_flat = output.view(-1, len(self.dict))
        # loss for the first target token, conditioned on input + end token
        loss += self.eval_criterion(output_flat,
                                    targets.select(1, 0).view(-1)).data

        # teacher forcing: each step is conditioned on the previous *gold* token
        for i in range(1, targets.size(1)):
            output, hidden = self.model(targets.select(1, i - 1).view(1, bsz),
                                        self.hidden,
                                        no_pack=True)
            self.hidden = self.repackage_hidden(hidden)
            output_flat = output.view(-1, len(self.dict))
            loss += self.eval_criterion(output_flat,
                                        targets.select(1, i).view(-1)).data

        return loss / float(sum(y_lens))

    def get_predictions(self, data):
        """Generates predictions word by word until we either reach the end token
           or some max length (opt['truncate_pred']).

        data: (batch, seq) input tokens.  Returns a (batch, num_generated)
        LongTensor of greedily decoded tokens, or None if nothing was
        generated.
        """
        token_list = []
        bsz = data.size(0)
        done = [False for _ in range(bsz)]
        total_done = 0
        hidden = self.model.init_hidden(bsz)

        i = 0
        while total_done < bsz and i <= self.opt['truncate_pred']:
            if i == 0:
                # feed in input without end tokens
                output, hidden = self.model(data.transpose(0, 1), hidden)
                hidden = self.repackage_hidden(hidden)
                # feed in end tokens
                output, hidden = self.model(
                    Variable(self.ends[:bsz].view(1, bsz)), hidden)
            else:
                # condition on the token generated at the previous step
                # (word_idx was assigned at the end of the prior iteration)
                output, hidden = self.model(Variable(word_idx.view(1, bsz)),
                                            hidden,
                                            no_pack=True)
            hidden = self.repackage_hidden(hidden)
            # greedy decoding: take the argmax over the vocabulary
            word_weights = output.squeeze().data.exp()
            if bsz > 1:
                value, word_idx = torch.max(word_weights, 1)
            else:
                value, word_idx = torch.max(word_weights, 0)
            # mark end indices for items in batch
            for k in range(word_idx.size(0)):
                if not done[k]:
                    if int(word_idx[k]) == self.END_IDX:
                        done[k] = True
                        total_done += 1
            token_list.append(word_idx.view(bsz, 1))
            i += 1

        if token_list:
            return torch.cat(token_list, 1)
        else:
            return None

    def predict(self,
                data,
                hidden,
                targets=None,
                is_training=True,
                y_lens=None):
        """Produce a prediction from our model.

        Training: one forward/backward/update pass; reports LM loss and
        perplexity.  Evaluation: greedy generation plus teacher-forced
        target loss and perplexity.  Returns a tuple
        ``(output, hidden, loss_dict, predictions)``.
        """
        loss_dict = None
        output = None
        predictions = None
        if is_training:
            self.model.train()
            self.zero_grad()
            output, hidden = self.model(data, hidden)
            loss = self.criterion(output.view(-1, len(self.dict)),
                                  targets.view(-1))
            loss.backward(retain_graph=True)
            self.update_params()
            loss_dict = {'lmloss': loss.data}
            loss_dict['lmppl'] = math.exp(loss.data)
        else:
            self.model.eval()
            predictions = self.get_predictions(data)
            loss_dict = {}
            bsz = data.size(0)
            if bsz != self.batchsize:
                # the last eval batch can be smaller than batchsize;
                # re-init the hidden state to match its width
                self.hidden = self.model.init_hidden(bsz)
            loss = self.get_target_loss(data, self.hidden, targets, y_lens)
            loss_dict['loss'] = loss
            loss_dict['ppl'] = math.exp(loss)

        return output, hidden, loss_dict, predictions

    def vectorize(self, observations, seq_len, is_training):
        """Convert a list of observations into input & target tensors.

        Training: drains ``self.next_batch`` (filled by observe's text2vec
        chunks) into as many (batchsize, seq_len) input/target pairs as
        available; returns all-None when fewer than batchsize chunks exist.
        Eval: pads the valid observations and returns a single batch.
        Returns (data_list, targets_list, labels, valid_inds, y_lens).
        """
        labels = None
        valid_inds = None
        y_lens = None
        if is_training:
            for obs in observations:
                if obs:
                    if 'text2vec' in obs:
                        self.next_batch += obs['text2vec']
            if len(self.next_batch) <= self.batchsize:
                return None, None, None, None, None
            else:
                data_list = []
                targets_list = []
                # total is the number of batches
                total = len(self.next_batch) // self.batchsize
                for i in range(total):
                    batch = self.next_batch[:self.batchsize]
                    self.next_batch = self.next_batch[self.batchsize:]

                    # transpose to (seq_len + 1, batch); targets are the
                    # inputs shifted by one token
                    source = torch.LongTensor(batch).t().contiguous()
                    data = Variable(source[:seq_len])
                    targets = Variable(source[1:])

                    if self.use_cuda:
                        data = data.cuda()
                        targets = targets.cuda()

                    data_list.append(data)
                    targets_list.append(targets)
        else:
            # here we get valid examples and pad them with zeros
            xs, ys, labels, valid_inds, _, y_lens = PaddingUtils.pad_text(
                observations, self.dict, self.END_IDX, self.NULL_IDX)
            if self.use_cuda:
                xs = Variable(xs).cuda()
                if ys is not None:
                    ys = Variable(ys).cuda()
            else:
                xs = Variable(xs)
                if ys is not None:
                    ys = Variable(ys)
            data_list = [xs]
            targets_list = [ys]

        return data_list, targets_list, labels, valid_inds, y_lens

    def batch_act(self, observations):
        """Train on or predict replies for a batch of observations.

        Returns one reply dict per observation; metrics for each processed
        batch are attached to the first reply's ``metrics`` entry.  Replaced
        the ``== True`` / ``== False`` boolean comparisons with plain truth
        tests (PEP 8).
        """
        batch_reply = [{'id': self.getID()} for _ in range(len(observations))]
        if any(['labels' in obs for obs in observations]):
            # if we are starting a new training epoch, reinitialize hidden
            if not self.is_training:
                self.hidden = self.model.init_hidden(self.batchsize)
            self.is_training = True
            data_list, targets_list, _, _, y_lens = self.vectorize(
                observations, self.opt['seq_len'], self.is_training)
        else:
            # if we just finished training, reinitialize hidden
            if self.is_training:
                self.hidden = self.model.init_hidden(self.batchsize)
                self.is_training = False
            data_list, targets_list, labels, valid_inds, y_lens = self.vectorize(
                observations, self.opt['seq_len'], self.is_training)

        if data_list is None:
            # not enough data to batch act yet, return empty responses
            return batch_reply

        batch_reply = []
        # during evaluation, len(data_list) is always 1
        # during training, len(data_list) >= 0: vectorize returns a list
        # containing all batches available at the time it is called
        for i in range(len(data_list)):
            temp_dicts = [{
                'id': self.getID()
            } for _ in range(len(observations))]
            output, hidden, loss_dict, predictions = self.predict(
                data_list[i], self.hidden, targets_list[i], self.is_training,
                y_lens)
            self.hidden = self.repackage_hidden(hidden)

            if predictions is not None:
                # map predictions back to the right order
                PaddingUtils.map_predictions(
                    predictions,
                    valid_inds,
                    temp_dicts,
                    observations,
                    self.dict,
                    self.END_IDX,
                    report_freq=self.opt['report_freq'])

            if loss_dict is not None:
                if 'metrics' in temp_dicts[0]:
                    for k, v in loss_dict.items():
                        temp_dicts[0]['metrics'][k] = v
                else:
                    temp_dicts[0]['metrics'] = loss_dict

            batch_reply += temp_dicts

        return batch_reply

    def act(self):
        """Run batch_act on the stored observation as a batch of one."""
        replies = self.batch_act([self.observation])
        return replies[0]

    def save(self, path=None):
        """Save model parameters if model_file is set."""
        target = self.opt.get('model_file', None) if path is None else path

        if not target or not hasattr(self, 'model'):
            return

        checkpoint = {
            'model': self.model.state_dict(),
            'opt': self.opt,
        }
        with open(target, 'wb') as write:
            torch.save(checkpoint, write)

    def shutdown(self):
        """Persist a shutdown snapshot, then run the base class shutdown."""
        model_file = self.opt.get('model_file', None)
        if model_file is not None:
            snapshot_path = model_file + '.shutdown_state'
            self.save(snapshot_path)
        super().shutdown()

    def receive_metrics(self, metrics_dict):
        """Decay the learning rate when validation loss stops improving.

        Keeps ``self.best_val_loss`` up to date: the first reported loss
        seeds it, a better loss replaces it, and a worse loss multiplies the
        learning rate by ``self.lr_factor``.  The previous version never
        updated ``best_val_loss`` after the first report, so the LR decayed
        against the very first loss forever instead of the best one.
        """
        if 'loss' in metrics_dict:
            loss = metrics_dict['loss']
            if self.best_val_loss is None or loss < self.best_val_loss:
                # remember the best validation loss seen so far
                self.best_val_loss = loss
            elif loss > self.best_val_loss:
                self.lr *= self.lr_factor
                print("Updating learning rate: lr =", self.lr)

    def load(self, path):
        """Read a checkpoint file and return ``(opt, states)``."""
        with open(path, 'rb') as checkpoint:
            states = torch.load(checkpoint)
        stored_opt = states['opt']
        return stored_opt, states
# ==== Code example #3 ====
class MemnnFeedbackAgent(Agent):
    """
    Memory Network agent for question answering that supports reward-based learning
    (RBI), forward prediction (FP), and imitation learning (IM).

    For more details on settings see: https://arxiv.org/abs/1604.06045.

    Models settings 'FP', 'RBI', 'RBI+FP', and 'IM_feedback' assume that
    feedback and reward for the current example immediately follow the query
    (add ':feedback' argument when specifying task name).

    python examples/train_model.py --setting 'FP'
    -m "projects.memnn_feedback.agent.memnn_feedback:MemnnFeedbackAgent"
    -t "projects.memnn_feedback.tasks.dbll_babi.agents:taskTeacher:3_p0.5:feedback"
    """
    @staticmethod
    def add_cmdline_args(argparser):
        """Add MemNN-specific command-line arguments to the parser."""
        DictionaryAgent.add_cmdline_args(argparser)
        arg_group = argparser.add_argument_group('MemNN Arguments')
        arg_group.add_argument('-lr',
                               '--learning-rate',
                               type=float,
                               default=0.01,
                               help='learning rate')
        arg_group.add_argument('--embedding-size',
                               type=int,
                               default=128,
                               help='size of token embeddings')
        arg_group.add_argument('--hops',
                               type=int,
                               default=3,
                               help='number of memory hops')
        arg_group.add_argument('--mem-size',
                               type=int,
                               default=100,
                               help='size of memory')
        # NOTE(review): type='bool' (a string) assumes the parser registers
        # a custom 'bool' type, as ParlAI's ParlaiParser does; a plain
        # argparse.ArgumentParser would reject it -- confirm.
        arg_group.add_argument(
            '--time-features',
            type='bool',
            default=True,
            help='use time features for memory embeddings',
        )
        arg_group.add_argument(
            '--position-encoding',
            type='bool',
            default=False,
            help='use position encoding instead of bag of words embedding',
        )
        arg_group.add_argument(
            '-clip',
            '--gradient-clip',
            type=float,
            default=0.2,
            help='gradient clipping using l2 norm',
        )
        arg_group.add_argument('--output',
                               type=str,
                               default='rank',
                               help='type of output (rank|generate)')
        arg_group.add_argument(
            '--rnn-layers',
            type=int,
            default=2,
            help='number of hidden layers in RNN decoder for generative output',
        )
        arg_group.add_argument(
            '--dropout',
            type=float,
            default=0.1,
            help='dropout probability for RNN decoder training',
        )
        arg_group.add_argument('--optimizer',
                               default='sgd',
                               help='optimizer type (sgd|adam)')
        arg_group.add_argument(
            '--no-cuda',
            action='store_true',
            default=False,
            help='disable GPUs even if available',
        )
        arg_group.add_argument('--gpu',
                               type=int,
                               default=-1,
                               help='which GPU device to use')
        arg_group.add_argument(
            '--setting',
            type=str,
            default='IM',
            help='choose among IM, IM_feedback, RBI, FP, RBI+FP',
        )
        arg_group.add_argument(
            '--num-feedback-cands',
            type=int,
            default=6,
            help='number of feedback candidates',
        )
        # NOTE(review): underscore flag name (others use hyphens) and a help
        # string describing "number of ..." for a boolean -- looks stale;
        # confirm intended semantics before changing.
        arg_group.add_argument(
            '--single_embedder',
            type='bool',
            default=False,
            help='number of embedding matrices in the model',
        )

    def __init__(self, opt, shared=None):
        """Build the MemNN model, optimizers and (optionally) decoder.

        When ``shared`` is given (hogwild), model, dict, decoder and
        optimizers are reused from the parent instead of being rebuilt.
        """
        super().__init__(opt, shared)

        opt['cuda'] = not opt['no_cuda'] and torch.cuda.is_available()
        if opt['cuda']:
            print('[ Using CUDA ]')
            # NOTE(review): torch.cuda.device(...) constructs a context
            # manager without entering it, so this line has no lasting
            # effect; set_device (as used elsewhere in this file) may have
            # been intended -- confirm.
            torch.cuda.device(opt['gpu'])

        if not shared:
            self.id = 'MemNN'
            self.dict = DictionaryAgent(opt)
            self.decoder = None
            # generative output needs an RNN decoder; ranking does not
            if opt['output'] == 'generate' or opt['output'] == 'g':
                self.decoder = Decoder(
                    opt['embedding_size'],
                    opt['embedding_size'],
                    opt['rnn_layers'],
                    opt,
                    self.dict,
                )
            elif opt['output'] != 'rank' and opt['output'] != 'r':
                raise NotImplementedError('Output type not supported.')

            if 'FP' in opt['setting']:
                # add extra beta-word to indicate learner's answer
                self.beta_word = 'betaword'
                self.dict.add_to_dict([self.beta_word])

            self.model = MemNN(opt, self.dict)

            # only optimize parameters that require gradients
            optim_params = [
                p for p in self.model.parameters() if p.requires_grad
            ]
            lr = opt['learning_rate']
            if opt['optimizer'] == 'sgd':
                self.optimizers = {'memnn': optim.SGD(optim_params, lr=lr)}
                if self.decoder is not None:
                    self.optimizers['decoder'] = optim.SGD(
                        self.decoder.parameters(), lr=lr)
            elif opt['optimizer'] == 'adam':
                self.optimizers = {'memnn': optim.Adam(optim_params, lr=lr)}
                if self.decoder is not None:
                    self.optimizers['decoder'] = optim.Adam(
                        self.decoder.parameters(), lr=lr)
            else:
                raise NotImplementedError('Optimizer not supported.')

            if opt['cuda']:
                # NOTE(review): under CUDA the model is only share_memory()'d
                # and never moved with .cuda(), while the decoder is -- this
                # asymmetry looks suspicious; confirm against MemNN internals.
                self.model.share_memory()
                if self.decoder is not None:
                    self.decoder.cuda()

            if opt.get('model_file') and os.path.isfile(opt['model_file']):
                print('Loading existing model parameters from ' +
                      opt['model_file'])
                self.load(opt['model_file'])
        else:
            if 'model' in shared:
                # model is shared during hogwild
                self.model = shared['model']
                self.dict = shared['dict']
                self.decoder = shared['decoder']
                self.optimizers = shared['optimizer']
                if 'FP' in opt['setting']:
                    self.beta_word = shared['betaword']

        if hasattr(self, 'model'):
            self.opt = opt
            self.mem_size = opt['mem_size']
            self.loss_fn = CrossEntropyLoss()
            self.gradient_clip = opt.get('gradient_clip', 0.2)

            self.model_setting = opt['setting']
            if 'FP' in opt['setting']:
                # pool of feedback candidates collected for forward prediction
                self.feedback_cands = set([])
                self.num_feedback_cands = opt['num_feedback_cands']

            self.longest_label = 1
            self.END = self.dict.end_token
            self.END_TENSOR = torch.LongTensor(self.dict.parse(self.END))
            self.START = self.dict.start_token
            self.START_TENSOR = torch.LongTensor(self.dict.parse(self.START))

        self.reset()
        self.last_cands, self.last_cands_list = None, None

    def share(self):
        """Share internal states between parent and child instances."""
        shared = super().share()
        hogwild = self.opt.get('numthreads', 1) > 1
        if hogwild:
            # hogwild workers need the model in shared memory plus the
            # optimizers, dictionary and decoder of the parent
            self.model.share_memory()
            shared['model'] = self.model
            shared['optimizer'] = self.optimizers
            shared['dict'] = self.dict
            shared['decoder'] = self.decoder
            if 'FP' in self.model_setting:
                shared['betaword'] = self.beta_word
        return shared

    def observe(self, observation):
        """Store the observation, splitting off feedback and concatenating
        ongoing-episode context.

        In non-IM settings the last line of ``text`` is treated as feedback.
        If the previous example did not end the episode, its text, chosen
        label and feedback are prepended to the current text.
        """
        observation = copy.copy(observation)

        # extract feedback for forward prediction
        # IM setting - no feedback provided in the dataset
        if self.opt['setting'] != 'IM':
            if 'text' in observation:
                split = observation['text'].split('\n')
                feedback = split[-1]
                observation['feedback'] = feedback
                observation['text'] = '\n'.join(split[:-1])

        if not self.episode_done:
            # if the last example wasn't the end of an episode, then we need to
            # recall what was said in that example
            # NOTE(review): if self.observation were None here, the membership
            # tests below would raise; presumably episode_done is True on the
            # first observe -- confirm.
            prev_dialogue = (self.observation['text']
                             if self.observation is not None else '')

            # append answer and feedback (if available) given in the previous example to the previous dialog
            if 'eval_labels' in self.observation:
                prev_dialogue += '\n' + random.choice(
                    self.observation['eval_labels'])
            elif 'labels' in self.observation:
                prev_dialogue += '\n' + random.choice(
                    self.observation['labels'])
            if 'feedback' in self.observation:
                prev_dialogue += '\n' + self.observation['feedback']

            observation['text'] = prev_dialogue + '\n' + observation['text']

        self.observation = observation
        self.episode_done = observation['episode_done']
        return observation

    def reset(self):
        """Start fresh: drop the stored observation and mark the episode done."""
        self.episode_done = True
        self.observation = None

    def backward(self, loss, retain_graph=False):
        """Take one optimization step for every registered optimizer.

        Zeroes gradients, backpropagates `loss`, clips the global gradient
        norm for stability, then steps each optimizer.
        """
        optimizers = self.optimizers.values()
        for optimizer in optimizers:
            optimizer.zero_grad()
        loss.backward(retain_graph=retain_graph)

        # Clip to keep training stable (pre-0.4 PyTorch clip_grad_norm API,
        # consistent with the rest of this file).
        torch.nn.utils.clip_grad_norm(self.model.parameters(),
                                      self.gradient_clip)
        for optimizer in optimizers:
            optimizer.step()

    def parse_cands(self, cand_answers):
        """Vectorize a batch of answer candidates.

        Returns:
            cand_answers_tensor: Variable over the concatenated token indices
                of all candidates.
            cand_answers_lengths: Variable over a (num_cands x max_len)
                LongTensor whose rows hold each candidate's length vector,
                right-aligned and zero-padded on the left.
        """
        parsed = [to_tensors(cand, self.dict) for cand in cand_answers]
        tokens = torch.cat([pair[1] for pair in parsed])
        widest = max(len(cand) for cand in cand_answers)
        lengths = torch.LongTensor(len(cand_answers), widest).zero_()
        for row, (cand_lengths, _) in enumerate(parsed):
            if len(cand_lengths) > 0:
                # Right-align so that any padding sits on the left.
                lengths[row, -len(cand_lengths):] = cand_lengths
        return Variable(tokens), Variable(lengths)

    def get_cand_embeddings_with_added_beta(self, cands, selected_answer_inds):
        """Embed answer candidates after tagging each learner-selected
        candidate with the special beta word (marks "this was my answer")."""
        tagged_cands = copy.deepcopy(cands)
        for row, selected in enumerate(selected_answer_inds):
            tagged_cands[row][selected] = (
                tagged_cands[row][selected] + ' ' + self.beta_word)

        # Re-vectorize and embed now that the beta word has been appended.
        cand_tensor, cand_lengths = self.parse_cands(tagged_cands)
        embeddings = self.model.answer_embedder(cand_lengths, cand_tensor)
        if self.opt['cuda']:
            embeddings = embeddings.cuda()
        return embeddings

    def predict(self, xs, answer_cands, ys=None, feedback_cands=None):
        """Rank answer candidates for a batch and, when labels are given,
        take a training step (IM / RBI / FP variants).

        Args:
            xs: [memories, queries, memory_lengths, query_lengths] tensors
                as produced by batchify().
            answer_cands: per-example lists of candidate answer strings.
            ys: [labels, label_lengths] during training, else None.
            feedback_cands: per-example feedback candidates (FP mode only).

        Returns:
            Per-example candidate lists ranked best-first (or decoded text
            when a decoder is configured).
        """
        is_training = ys is not None
        if is_training and 'FP' not in self.model_setting:
            # Subsample candidates (always keeping the gold labels) to
            # reduce training time.
            answer_cands = [
                list(set(random.sample(c, min(32, len(c))) + self.labels))
                for c in answer_cands
            ]
        else:
            # rank all cands to increase accuracy
            answer_cands = [list(set(c)) for c in answer_cands]

        self.model.train(mode=is_training)

        # BUGFIX: `volatile` disables autograd for the whole downstream
        # graph, so it must be set only when we are NOT training. The
        # previous `volatile=is_training` was inverted: it produced a
        # non-backpropagatable graph exactly when gradients were needed.
        inputs = [Variable(x, volatile=not is_training) for x in xs]

        if self.decoder:
            output_embeddings = self.model(*inputs)
            self.decoder.train(mode=is_training)
            output_lines, loss = self.decode(output_embeddings, ys)
            predictions = self.generated_predictions(output_lines)
            self.backward(loss)
            return predictions

        scores = None
        if is_training:
            # Position of the gold label inside each candidate list.
            label_inds = [
                cand_list.index(self.labels[i])
                for i, cand_list in enumerate(answer_cands)
            ]

            if 'FP' in self.model_setting:
                if len(feedback_cands) == 0:
                    print(
                        'FP is not training... waiting for negative feedback examples'
                    )
                else:
                    # Forward prediction: predict the teacher's feedback from
                    # the dialog plus the learner's (beta-tagged) answer.
                    cand_answers_embs_with_beta = self.get_cand_embeddings_with_added_beta(
                        answer_cands, label_inds)
                    scores, forward_prediction_output = self.model(
                        *inputs, answer_cands, cand_answers_embs_with_beta)
                    fp_scores = self.model.get_score(feedback_cands,
                                                     forward_prediction_output,
                                                     forward_predict=True)
                    feedback_label_inds = [
                        cand_list.index(self.feedback_labels[i])
                        for i, cand_list in enumerate(feedback_cands)
                    ]
                    if self.opt['cuda']:
                        feedback_label_inds = Variable(
                            torch.cuda.LongTensor(feedback_label_inds))
                    else:
                        feedback_label_inds = Variable(
                            torch.LongTensor(feedback_label_inds))
                    loss_fp = self.loss_fn(fp_scores, feedback_label_inds)
                    # Guard against runaway loss before stepping.
                    if loss_fp.data[0] > 100000:
                        raise Exception("Loss might be diverging. Loss:",
                                        loss_fp.data[0])
                    # Retain the graph: the ranking loss below may reuse it.
                    self.backward(loss_fp, retain_graph=True)

            if self.opt['cuda']:
                label_inds = Variable(torch.cuda.LongTensor(label_inds))
            else:
                label_inds = Variable(torch.LongTensor(label_inds))

        if scores is None:
            # Not computed by the FP branch above; run the ranking model.
            output_embeddings = self.model(*inputs)
            scores = self.model.get_score(answer_cands, output_embeddings)

        predictions = self.ranked_predictions(answer_cands, scores)

        if is_training:
            update_params = True
            # don't perform regular training if in FP mode
            if self.model_setting == 'FP':
                update_params = False
            elif 'RBI' in self.model_setting:
                if len(self.rewarded_examples_inds) == 0:
                    # No rewarded examples in this batch: skip the update.
                    update_params = False
                else:
                    self.rewarded_examples_inds = torch.LongTensor(
                        self.rewarded_examples_inds)
                    if self.opt['cuda']:
                        self.rewarded_examples_inds = self.rewarded_examples_inds.cuda(
                        )
                    # use only rewarded examples for training
                    loss = self.loss_fn(
                        scores[self.rewarded_examples_inds, :],
                        label_inds[self.rewarded_examples_inds],
                    )
            else:
                # regular IM training
                loss = self.loss_fn(scores, label_inds)

            if update_params:
                self.backward(loss)
        return predictions

    def ranked_predictions(self, cands, scores):
        """Order each example's candidates from highest to lowest score.

        Score-column indices beyond a (short) candidate list are skipped.
        """
        _, ranked_inds = scores.data.sort(descending=True, dim=1)
        predictions = []
        for row, order in enumerate(ranked_inds):
            row_cands = cands[row]
            predictions.append(
                [row_cands[j] for j in order if j < len(row_cands)])
        return predictions

    def decode(self, output_embeddings, ys=None):
        """Greedily decode token sequences from the model's output embeddings.

        The output embedding of each example seeds the decoder RNN's hidden
        state; tokens are produced one step at a time until every example has
        emitted END or `self.longest_label` steps have elapsed.

        Args:
            output_embeddings: (batch x dim) embeddings from the model.
            ys: [labels, label_lengths] during training; when given, a
                cross-entropy loss is accumulated and teacher forcing is used
                (the gold token feeds the next step instead of the prediction).

        Returns:
            (output_lines, loss): per-example lists of decoded tokens (END
            excluded) and the accumulated loss (0 when ys is None).
        """
        batchsize = output_embeddings.size(0)
        # Replicate the output embedding across all decoder RNN layers.
        hn = output_embeddings.unsqueeze(0).expand(self.opt['rnn_layers'],
                                                   batchsize,
                                                   output_embeddings.size(1))
        # Embed the START token and broadcast it as the first input for the
        # whole batch.
        x = self.model.answer_embedder(Variable(torch.LongTensor([1])),
                                       Variable(self.START_TENSOR))
        xes = x.unsqueeze(1).expand(x.size(0), batchsize, x.size(1))

        loss = 0
        output_lines = [[] for _ in range(batchsize)]
        done = [False for _ in range(batchsize)]
        total_done = 0
        idx = 0
        while (total_done < batchsize) and idx < self.longest_label:
            # keep producing tokens until we hit END or max length for each ex
            if self.opt['cuda']:
                xes = xes.cuda()
                hn = hn.contiguous()
            preds, scores = self.decoder(xes, hn)
            if ys is not None:
                # Score this step's predictions against the gold tokens.
                y = Variable(ys[0][:, idx])
                temp_y = y.cuda() if self.opt['cuda'] else y
                loss += self.loss_fn(scores, temp_y)
            else:
                # No labels: feed the model's own prediction forward.
                y = preds
            # use the true token as the next input for better training
            xes = self.model.answer_embedder(
                Variable(torch.LongTensor(preds.numel()).fill_(1)),
                y).unsqueeze(0)

            # Collect this step's token for every example not yet finished.
            for b in range(batchsize):
                if not done[b]:
                    token = self.dict.vec2txt(preds.data[b])
                    if token == self.END:
                        done[b] = True
                        total_done += 1
                    else:
                        output_lines[b].append(token)
            idx += 1
        return output_lines, loss

    def generated_predictions(self, output_lines):
        """Join each decoded token sequence into a one-string candidate list,
        dropping END and null padding tokens."""
        skip = (self.END, self.dict.null_token)
        return [
            [' '.join(token for token in line if token not in skip)]
            for line in output_lines
        ]

    def parse(self, text):
        """Split raw text into query and memory tensors.

        Returns:
            query: LongTensor of token indices for the final line (the query).
            memory: LongTensor of all memory token indices, concatenated.
            query_length: 1-element LongTensor holding the query length.
            memory_lengths: LongTensor with each memory sentence's length.
        """
        lines = text.split('\n')
        # The last line is the current query; everything before it is memory.
        query = torch.LongTensor(self.dict.txt2vec(lines[-1]))
        query_length = torch.LongTensor([len(query)])

        sentences = []
        for line in lines[:-1]:
            sentences.extend(line.split('\t'))
        if not sentences:
            # Guarantee at least one (null) memory entry.
            sentences.append(self.dict.null_token)

        # Keep only the most recent mem_size sentences.
        num_mems = min(self.mem_size, len(sentences))
        vecs = [
            torch.LongTensor(self.dict.txt2vec(s))
            for s in sentences[-num_mems:]
        ]
        memory_lengths = torch.LongTensor([len(v) for v in vecs])
        memory = torch.cat(vecs)
        return (query, memory, query_length, memory_lengths)

    def batchify(self, obs):
        """Convert a batch of observations into model-ready tensors.

        Returns:
            xs = [memories, queries, memory_lengths, query_lengths]
            ys = [labels, label_lengths] (if every example has labels, else None)
            cands = list of candidates for each example in batch
            valid_inds = list of indices for examples with valid observations
            feedback_cands = feedback candidate lists for FP training
                (empty until enough distinct feedback has been collected)
        """
        exs = [ex for ex in obs if 'text' in ex]
        valid_inds = [i for i, ex in enumerate(obs) if 'text' in ex]
        if not exs:
            return [None] * 5

        if 'RBI' in self.model_setting:
            # BUGFIX: these indices are later used in predict() to select
            # rows of the score matrix, whose rows follow `exs` (the valid
            # examples only). Enumerating `obs` here mis-aligned the indices
            # whenever some observation lacked a 'text' field.
            self.rewarded_examples_inds = [
                i for i, ex in enumerate(exs) if ex.get('reward', 0) > 0
            ]

        parsed = [self.parse(ex['text']) for ex in exs]
        queries = torch.cat([x[0] for x in parsed])
        memories = torch.cat([x[1] for x in parsed])
        query_lengths = torch.cat([x[2] for x in parsed])
        memory_lengths = torch.LongTensor(len(exs), self.mem_size).zero_()
        for i in range(len(exs)):
            if len(parsed[i][3]) > 0:
                # Right-align each example's memory lengths (padding on left).
                memory_lengths[i, -len(parsed[i][3]):] = parsed[i][3]
        xs = [memories, queries, memory_lengths, query_lengths]

        ys = None
        # One randomly chosen label per example; also consumed by predict().
        self.labels = [
            random.choice(ex['labels']) for ex in exs if 'labels' in ex
        ]

        if len(self.labels) == len(exs):
            # Only build training targets when every example is labeled.
            parsed = [self.dict.txt2vec(l) for l in self.labels]
            parsed = [torch.LongTensor(p) for p in parsed]
            label_lengths = torch.LongTensor([len(p)
                                              for p in parsed]).unsqueeze(1)
            self.longest_label = max(self.longest_label, label_lengths.max())
            # Pad every label with END tokens up to the longest seen so far.
            padded = [
                torch.cat((
                    p,
                    torch.LongTensor(self.longest_label - len(p)).fill_(
                        self.END_TENSOR[0]),
                )) for p in parsed
            ]
            labels = torch.stack(padded)
            ys = [labels, label_lengths]

        feedback_cands = []
        if 'FP' in self.model_setting:
            self.feedback_labels = [
                ex['feedback'] for ex in exs
                if 'feedback' in ex and ex['feedback'] is not None
            ]
            # Grow the running pool of distinct feedback strings seen so far.
            self.feedback_cands = self.feedback_cands | set(
                self.feedback_labels)

            if (len(self.feedback_labels) == len(exs)
                    and len(self.feedback_cands) > self.num_feedback_cands):
                # Sample negatives and always include the true feedback.
                feedback_cands = [
                    list(
                        set(
                            random.sample(self.feedback_cands,
                                          self.num_feedback_cands) +
                            [feedback])) for feedback in self.feedback_labels
                ]

        cands = [
            ex['label_candidates'] for ex in exs if 'label_candidates' in ex
        ]
        # Use words in dict as candidates if no candidates are provided
        if len(cands) < len(exs):
            cands = build_cands(exs, self.dict)
        # Avoid rebuilding candidate list every batch if its the same
        if self.last_cands != cands:
            self.last_cands = cands
            self.last_cands_list = [list(c) for c in cands]
        cands = self.last_cands_list
        return xs, ys, cands, valid_inds, feedback_cands

    def batch_act(self, observations):
        """Produce one reply dict per observation; valid examples receive
        ranked predictions, invalid ones just the agent id."""
        batch_reply = [{'id': self.getID()} for _ in observations]

        xs, ys, cands, valid_inds, feedback_cands = self.batchify(observations)

        # Nothing valid to predict on (no queries in the batch).
        if xs is None or len(xs[1]) == 0:
            return batch_reply

        # Either train or predict
        predictions = self.predict(xs, cands, ys, feedback_cands)

        for pred_idx, reply_idx in enumerate(valid_inds):
            reply = batch_reply[reply_idx]
            reply['text'] = predictions[pred_idx][0]
            reply['text_candidates'] = predictions[pred_idx]
        return batch_reply

    def act(self):
        """Reply to the single stored observation via the batched path."""
        replies = self.batch_act([self.observation])
        return replies[0]

    def save(self, path=None):
        """Serialize model and optimizer state (plus decoder state, if any)
        to `path`, defaulting to opt['model_file']."""
        if path is None:
            path = self.opt.get('model_file', None)
        if not path:
            return

        checkpoint = {
            'memnn': self.model.state_dict(),
            'memnn_optim': self.optimizers['memnn'].state_dict(),
        }
        if self.decoder is not None:
            checkpoint['decoder'] = self.decoder.state_dict()
            checkpoint['decoder_optim'] = self.optimizers[
                'decoder'].state_dict()
            # Needed at load time to size the decoder's unroll length.
            checkpoint['longest_label'] = self.longest_label
        with open(path, 'wb') as handle:
            torch.save(checkpoint, handle)

    def load(self, path):
        """Restore model/optimizer (and decoder) state written by save()."""
        with open(path, 'rb') as handle:
            checkpoint = torch.load(handle)
        self.model.load_state_dict(checkpoint['memnn'])
        self.optimizers['memnn'].load_state_dict(checkpoint['memnn_optim'])
        if self.decoder is None:
            return
        self.decoder.load_state_dict(checkpoint['decoder'])
        self.optimizers['decoder'].load_state_dict(
            checkpoint['decoder_optim'])
        self.longest_label = checkpoint['longest_label']
# Code example #4
import pickle
import os
from parlai.core.dict import DictionaryAgent

path = 'dat/MovieTriples_Dataset.tar'

# Load the raw MovieTriples dictionary: entries indexed as
# (word, id, frequency, ...).
with open(os.path.join(path, 'Training.dict.pkl'), 'rb') as data_file:
    dictionary = pickle.load(data_file)

parlai_dict = DictionaryAgent({'vocab_size': 10004})

# Insert words in ascending original-id order so ParlAI's indices mirror
# the MovieTriples ids.
dictionary = sorted(dictionary, key=lambda entry: entry[1])
print(dictionary[:10])

for entry in dictionary:
    token = entry[0]
    parlai_dict.add_to_dict([token])
    # Carry the original corpus frequency over into the ParlAI dictionary.
    parlai_dict.freq[token] = entry[2]

parlai_dict.save('test_hred.dict', sort=True)