Example #1
    def setup_vocab(self, vocab_config):
        # Load pretrained embeddings.
        vocab = dotDict()
        for vocab_type in vocab_config:
            with tf.variable_scope(vocab_type):
                vocab[vocab_type] = dotDict()
                for token_type, conf in vocab_config[vocab_type].items():
                    with tf.variable_scope(token_type):
                        vocab_class = getattr(vocab_libs, conf.vocab_class)
                        vocab[vocab_type][token_type] = vocab_class(conf)

        return vocab
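All of these examples depend on a dotDict helper that the snippets never define. A minimal sketch, assuming it is simply a dict with attribute-style access:

    class dotDict(dict):
        # A dict whose keys can also be read and written as attributes,
        # e.g. d.foo is equivalent to d['foo'].
        __getattr__ = dict.get
        __setattr__ = dict.__setitem__
        __delattr__ = dict.__delitem__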
Example #2
    def setup_tasks(self, sess, config):
        num_gpus = max(1, len(get_available_gpus()))
        sys.stdout.write('Available GPUs: %s\n' %
                         str(['/gpu:%d' % i for i in range(num_gpus)]))
        tasks = dotDict()
        for task_idx, (task_name,
                       task_config) in enumerate(config.tasks.items()):
            models = []
            for gpu_idx in range(num_gpus):
                device = '/gpu:%d' % gpu_idx
                sys.stdout.write('Building %s model to %s...\n' %
                                 (task_name, device))
                with tf.variable_scope(task_name,
                                       reuse=tf.AUTO_REUSE) as scope:
                    with tf.device(device):
                        task_class = getattr(occult.models,
                                             task_config.model_type)
                        args = [sess, task_config, self, self.vocab]
                        if issubclass(task_class, AdversarialBase):
                            other_models = [
                                t.models[gpu_idx]
                                for t in tasks.values() if not isinstance(
                                    t.models[gpu_idx], AdversarialBase)
                            ]
                            args.append(other_models)
                        model = task_class(*args)
                    models.append(model)
            tasks[task_name] = MultiModelWrapper(models)

        return tasks
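get_available_gpus is not shown here. A common TF1-era implementation (assumed, not taken from this codebase) queries device_lib:

    from tensorflow.python.client import device_lib

    def get_available_gpus():
        # Return the names of all GPU devices visible to TensorFlow.
        return [d.name for d in device_lib.list_local_devices()
                if d.device_type == 'GPU']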
Example #3
    def get_updates_by_task(self, tasks):
        updates = dotDict()

        for task_name, task_model in tasks.items():
            with tf.variable_scope(task_name):
                updates[task_name] = super(MTLTrainerBase, self).get_updates(
                    task_model.loss, task_model.global_step)
        return updates
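The parent class's get_updates is not included in these examples. As a hedged sketch, it presumably turns a loss and global step into an optimizer update op; the optimizer choice and the learning_rate/max_grad_norm attribute names below are assumptions:

    def get_updates(self, loss, global_step):
        # Hypothetical: clip gradients by norm and apply them.
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        grads_and_vars = optimizer.compute_gradients(loss)
        clipped = [(tf.clip_by_norm(g, self.max_grad_norm), v)
                   for g, v in grads_and_vars if g is not None]
        return optimizer.apply_gradients(clipped, global_step=global_step)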
Example #4
  def padding(self, _batch, minlen=None, maxlen=None):
    batch = dotDict()
    batch.inp_context = dotDict()
    batch.inp_response = dotDict()

    (inp_context, inp_context_word_ids,
     inp_response, inp_response_word_ids) = _batch

    batch.inp_context.raw = inp_context
    batch.inp_response.raw = inp_response

    # [batch_size, n_max_sent, n_max_word]
    batch.inp_context.word = _padding(inp_context_word_ids,
                                      minlen=[1, minlen.word],
                                      maxlen=[maxlen.sent, maxlen.word])
    # [batch_size, n_max_word]
    if inp_response_word_ids:
      batch.inp_response.word = _padding(inp_response_word_ids,
                                         minlen=[minlen.word],
                                         maxlen=[maxlen.word])
    return batch
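_padding is an external helper not shown in these examples. A minimal sketch of the flat (one sequence axis) case, assuming minlen/maxlen are per-axis lists and 0 is the PAD id:

    def _padding(seqs, minlen=None, maxlen=None, pad_id=0):
        # Pad (or truncate) each sequence to a common length that is
        # clamped into [minlen[-1], maxlen[-1]] when those bounds are set.
        target = max(len(s) for s in seqs)
        if minlen and minlen[-1]:
            target = max(target, minlen[-1])
        if maxlen and maxlen[-1]:
            target = min(target, maxlen[-1])
        return [list(s)[:target] + [pad_id] * max(0, target - len(s))
                for s in seqs]

The real helper must also recurse over the leading axes (sentences, exemplars) to produce the [batch_size, n_max_sent, n_max_word] shapes noted in the comments.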
Example #5
  def padding(self, _batch, minlen=None, maxlen=None):
    batch = super(_MultiTurnDialogDatasetBaseWithExemplar, self).padding(
        _batch[:4], minlen=minlen, maxlen=maxlen)
    ex_context, ex_c_word_ids, ex_response, ex_r_word_ids = _batch[4:]

    batch.ex_context = dotDict()
    batch.ex_response = dotDict()

    batch.ex_context.raw = ex_context
    batch.ex_response.raw = ex_response

    # [batch_size, n_max_exemplar, n_max_word]
    batch.ex_context.word = _padding(
      ex_c_word_ids,
      minlen=[None, minlen.word],
      maxlen=[None, maxlen.word])
    batch.ex_response.word = _padding(
      ex_r_word_ids,
      minlen=[None, minlen.word],
      maxlen=[None, maxlen.word])
    return batch
Example #6
    def __init__(self, sess, config, vocab, activation=tf.nn.relu):
        super(MTLTrainerBase, self).__init__(sess, config.trainer, vocab)
        # Define each task.
        self.tasks = self.setup_tasks(sess, config)
        self.trainable_tasks = dotDict({
            k: v
            for k, v in self.tasks.items()
            if hasattr(v, 'loss') and v.loss is not None
        })

        # Collect each task's loss and build the corresponding update ops.
        self.losses = [t.loss for t in self.trainable_tasks.values()]
        self.updates = self.get_updates(self.trainable_tasks)
Example #7
    def __init__(self,
                 config,
                 keep_prob,
                 dec_vocab,
                 activation=tf.nn.relu,
                 scope=None):
        self.keep_prob = keep_prob
        self.scope = scope
        self.activation = activation
        self.config = config
        self.start_token = dec_vocab.word.token2id(dec_vocab.word.BOS)
        self.end_token = dec_vocab.word.token2id(dec_vocab.word.PAD)
        self.beam_width = config.beam_width

        # Use the decoder vocabulary's pretrained word embeddings.
        self.embeddings = dec_vocab.word.embeddings
Example #8
    def __init__(self,
                 config,
                 keep_prob,
                 enc_vocab,
                 embeddings=None,
                 scope=None):
        self.cbase = config.cbase
        self.keep_prob = keep_prob
        self.vocab = enc_vocab
        self.scope = scope  # to reuse variables

        sys.stderr.write(
            "Initializing word embeddings from pretrained vectors.\n")

        self.embeddings = dotDict()
        self.embeddings.word = enc_vocab.word.embeddings
        if config.cbase:
            self.embeddings.char = enc_vocab.char.embeddings
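Examples 7 and 8 both lean on a vocab object exposing special tokens, token2id, and a pretrained embedding matrix. A hypothetical sketch of that interface (the class and field names here are assumptions):

    import numpy as np

    class WordVocab(object):
        # Sketch of the vocab contract these encoders/decoders assume.
        PAD, BOS, UNK = '<pad>', '<bos>', '<unk>'

        def __init__(self, tokens, emb_dim=100):
            self.rev_vocab = [self.PAD, self.BOS, self.UNK] + list(tokens)
            self.vocab = {t: i for i, t in enumerate(self.rev_vocab)}
            # Stand-in for pretrained embeddings.
            self.embeddings = np.random.uniform(
                -0.1, 0.1, (len(self.rev_vocab), emb_dim)).astype(np.float32)

        def token2id(self, token):
            return self.vocab.get(token, self.vocab[self.UNK])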
Example #9
    def setup_tasks(self, sess, config):
        # Assign GPUs (for multi-gpu computation).
        devices = [assign_device(i) for i in range(len(config.tasks))]

        tasks = dotDict()
        for i, (task_name, task_config) in enumerate(config.tasks.items()):
            device = devices[i]
            sys.stdout.write('Building %s model to %s...\n' %
                             (task_name, device))
            with tf.variable_scope(task_name, reuse=tf.AUTO_REUSE) as scope:
                with tf.device(device):
                    task_class = getattr(occult.models, task_config.model_type)
                    args = [sess, task_config, self, self.vocab]
                    if issubclass(task_class, AdversarialBase):
                        other_models = [
                            t for t in tasks.values()
                            if not isinstance(t, AdversarialBase)
                        ]
                        args.append(other_models)
                    tasks[task_name] = task_class(*args)
        return tasks
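assign_device is not defined in the snippet. A plausible round-robin assignment over visible GPUs, falling back to CPU (this reuses the get_available_gpus sketch above and is an assumption, not the project's code):

    def assign_device(i):
        # Spread tasks across GPUs round-robin; use the CPU when none exist.
        gpus = get_available_gpus()
        return '/gpu:%d' % (i % len(gpus)) if gpus else '/cpu:0'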
Example #10
    def setup_tasks(self, sess, config):
        if len(config.tasks) != 1:
            raise ValueError("%s can execute only one type of task." %
                             (self.__class__.__name__))
        task_name = list(config.tasks.keys())[0]
        task_config = list(config.tasks.values())[0]
        model_type = getattr(occult.models, task_config.model_type)
        num_gpus = len(tf_utils.get_available_gpus())
        if not num_gpus:
            with tf.variable_scope(task_name, reuse=tf.AUTO_REUSE) as scope:
                models = [model_type(sess, task_config, self, self.vocab)]
        else:
            models = []
            for i in range(num_gpus):
                device = '/gpu:%d' % i
                with tf.variable_scope(task_name,
                                       reuse=tf.AUTO_REUSE) as scope:
                    with tf.device(device):
                        model = model_type(sess, task_config, self, self.vocab)
                        models.append(model)
        tasks = dotDict({task_name: MultiModelWrapper(models)})
        return tasks
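MultiModelWrapper is referenced throughout but never defined. As an assumption, it likely bundles the per-GPU replicas of one model and exposes their combined attributes; a minimal sketch:

    class MultiModelWrapper(object):
        # Hypothetical: hold per-GPU replicas, average their losses, and
        # fall through to the first replica for any other attribute.
        def __init__(self, models):
            self.models = models
            losses = [m.loss for m in models
                      if getattr(m, 'loss', None) is not None]
            self.loss = tf.reduce_mean(tf.stack(losses)) if losses else None

        def __getattr__(self, name):
            return getattr(self.models[0], name)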