Example #1
    def _build_clip_fn(self):
        clip_class = self._params[self.CLIP_GRADS_CLASS_PK]
        clip_params = self._params[self.CLIP_GRADS_PARAMS_PK]
        if clip_class:
            return configurable.factory(
                clip_class, self.mode, clip_params, sys.modules[__name__])
        return None
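Every call site on this page passes configurable.factory a (possibly fully qualified) class name, a mode, a params dict and, optionally, a module to resolve the name against. The following is a minimal sketch of what such a factory might look like, assuming the configurable classes expose a create(mode, params) classmethod as the DummyConf tests further down suggest; it illustrates the call contract only and is not the actual dket implementation.

import sys


def factory(class_name, mode, params, module=None):
    """Hypothetical sketch: resolve `class_name` in `module` and build an instance."""
    # Accept both plain and fully qualified names, e.g. 'some.module.DummyConf'.
    simple_name = class_name.rsplit('.', 1)[-1]
    module = module or sys.modules[__name__]
    clz = getattr(module, simple_name)
    # Assumed hook: configurable classes are built via create(mode, params).
    return clz.create(mode, params)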
Example #2
File: runtime.py Project: yingning/dket
    def __init__(self, config):
        """Initializes a new experiment instance."""

        self._logdir = config[self.LOGDIR_KEY]
        self._train_files = config[self.TRAIN_FILES_KEY]
        self._train_steps = config[self.TRAIN_STEPS_KEY]
        self._train_dev = config[self.TRAIN_DEVICE_KEY]
        self._train_ckpt_every = config[self.TRAIN_CKPT_EVERY_KEY]
        self._eval_files = config[self.EVAL_FILES_KEY]
        self._eval_dev = config[self.EVAL_DEVICE_KEY]
        self._eval_dump = config[self.EVAL_DUMP_KEY]
        self._params = config[self.MODEL_PARAMS_KEY]

        # build the training model.
        t_params = copy.deepcopy(self._params)
        t_params[Model.INPUT_PARAMS_PK][
            ModelInputs.FILES_PK] = self._train_files
        clz = config[self.MODEL_CLASS_KEY]
        with tf.device(self._train_dev):
            t_model = configurable.factory(clz, self._TRAIN, t_params,
                                           dket.model)
            t_logdir = os.path.join(self._logdir, self._TRAIN)

        # build the eval model.
        e_params = copy.deepcopy(self._params)
        e_params[Model.INPUT_PARAMS_PK][
            ModelInputs.FILES_PK] = self._eval_files
        e_params[Model.INPUT_PARAMS_PK][ModelInputs.EPOCHS_PK] = 1
        with tf.device(self._eval_dev):
            e_model = configurable.factory(clz, self._EVAL, e_params,
                                           dket.model)
            e_logdir = os.path.join(self._logdir, self._EVAL)
            e_dumpdir = os.path.join(e_logdir,
                                     'dump') if self._eval_dump else ''

        self._eval = Evaluation(model=e_model,
                                logdir=e_logdir,
                                steps=0,
                                metrics=get_metrics(),
                                dumpdir=e_dumpdir)
        self._training = Training(model=t_model,
                                  logdir=t_logdir,
                                  steps=self._train_steps,
                                  checkpoint_every=self._train_ckpt_every,
                                  metrics=get_metrics(),
                                  evaluation=self._eval)
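The initializer above only reads values out of a plain config mapping keyed by the class-level *_KEY constants. A hedged usage sketch of how such a config might be assembled follows; the class name Experiment, the literal values and the model class string are illustrative assumptions rather than values taken from the dket sources.

# Hypothetical usage sketch, assuming the class above is named Experiment.
config = {
    Experiment.LOGDIR_KEY: '/tmp/dket-logdir',
    Experiment.TRAIN_FILES_KEY: ['train.tfrecords'],
    Experiment.TRAIN_STEPS_KEY: 10000,
    Experiment.TRAIN_DEVICE_KEY: '/GPU:0',
    Experiment.TRAIN_CKPT_EVERY_KEY: 500,
    Experiment.EVAL_FILES_KEY: ['eval.tfrecords'],
    Experiment.EVAL_DEVICE_KEY: '/CPU:0',
    Experiment.EVAL_DUMP_KEY: True,
    Experiment.MODEL_CLASS_KEY: 'SomeModel',  # any class name resolvable in dket.model
    Experiment.MODEL_PARAMS_KEY: {},          # the full nested model params dict
}
experiment = Experiment(config)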
Example #3
    def _build_lr(self, global_step):
        lr = self._params[self.LR_PK]  # pylint: disable=I0011,C0103
        if self._params[self.LR_DECAY_CLASS_PK]:
            lr_decay_class = self._params[self.LR_DECAY_CLASS_PK]
            lr_decay_params = self._params[self.LR_DECAY_PARAMS_PK]
            lr_decay_fn = configurable.factory(
                lr_decay_class, self.mode, lr_decay_params, sys.modules[__name__])
            lr = lr_decay_fn(lr, global_step)  # pylint: disable=I0011,C0103
        return lr
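_build_lr only requires that the object returned by configurable.factory be callable as lr_decay_fn(lr, global_step). Below is a minimal sketch of a compatible decay configurable built on TF 1.x's tf.train.exponential_decay; the class name and the parameter keys are illustrative, not the ones defined in dket.

import tensorflow as tf  # TF 1.x, matching the tf.contrib usage in these examples


class ExponentialDecayFn(object):
    """Hypothetical learning-rate decay configurable, callable as fn(lr, global_step)."""

    def __init__(self, mode, params):
        self._mode = mode
        self._params = params

    def __call__(self, lr, global_step):
        # Standard TF 1.x exponential decay schedule.
        return tf.train.exponential_decay(
            learning_rate=lr,
            global_step=global_step,
            decay_steps=self._params['decay.steps'],
            decay_rate=self._params['decay.rate'],
            staircase=self._params.get('decay.staircase', True))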
Example #4
    def _build_train_op(self):
        if self.mode != tf.contrib.learn.ModeKeys.TRAIN:
            logging.debug('mode is `%s`: skipping the train op construction.', self.mode)
            return

        opt_class = self._params[self.OPTIMIZER_CLASS_PK]
        opt_params = self._params[self.OPTIMIZER_PARAMS_PK]
        self._optimizer = configurable.factory(opt_class, self.mode, opt_params, train)
        self._train_op = self._optimizer.minimize(
            self._loss_op, global_step=self._global_step)
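_build_train_op only needs the configured optimizer to expose minimize(loss, global_step=...). A minimal sketch of a compatible wrapper around TF 1.x's tf.train.GradientDescentOptimizer is shown below; the class name SGD and the 'lr' parameter key are illustrative assumptions.

import tensorflow as tf  # TF 1.x


class SGD(object):
    """Hypothetical optimizer configurable exposing the minimize() interface used above."""

    def __init__(self, mode, params):
        self._mode = mode
        self._params = params
        self._opt = tf.train.GradientDescentOptimizer(
            learning_rate=params.get('lr', 0.1))

    def minimize(self, loss, global_step=None):
        # Delegate to the wrapped tf.train optimizer.
        return self._opt.minimize(loss, global_step=global_step)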
Example #5
    def _build_loss(self):
        if self.mode != tf.contrib.learn.ModeKeys.TRAIN:
            logging.debug('mode is `%s`: skipping the loss calculation.', self.mode)
            return

        targets = self.inputs.get(ModelInputs.FORMULA_KEY)
        lengths = self.inputs.get(ModelInputs.FORMULA_LENGTH_KEY)
        weights = tf.sequence_mask(lengths, dtype=tf.float32)
        predictions = self._predictions
        clz = self._params[self.LOSS_NAME_PK]
        loss = configurable.factory(clz, self.mode, {}, train)
        self._loss_op = loss.compute(targets, predictions, weights=weights)
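_build_loss expects the configured loss object to implement compute(targets, predictions, weights=...). The sketch below wraps TF 1.x's tf.losses.sparse_softmax_cross_entropy behind that interface; the class name is illustrative, and whether dket feeds logits or normalized probabilities at this point is an assumption.

import tensorflow as tf  # TF 1.x


class SoftmaxXentLoss(object):
    """Hypothetical loss configurable with the compute() interface used above."""

    def __init__(self, mode, params):
        self._mode = mode
        self._params = params

    def compute(self, targets, predictions, weights=None):
        # targets: [batch, time] int ids; predictions: [batch, time, num_classes];
        # weights: [batch, time] float mask, e.g. from tf.sequence_mask as above.
        return tf.losses.sparse_softmax_cross_entropy(
            labels=targets,
            logits=predictions,
            weights=weights if weights is not None else 1.0)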
Example #6
    def test_factory(self):
        """Test the configurable.factory method."""
        clz = __name__ + '.' + DummyConf.__name__
        module = sys.modules[__name__]
        mode = tf.contrib.learn.ModeKeys.TRAIN
        params = DummyConf.get_default_params()
        params['seed'] = 23
        params['label'] = 'ciaone proprio!'
        instance = configurable.factory(clz, mode, params, module)

        self.assertIsInstance(instance, DummyConf)
        self.assertEqual(mode, instance.mode)
        self.assertEqual(params, instance.get_params())
        self.assertEqual(1, len(DummyConf.get_create_call_args()))
        self.assertEqual((mode, params), DummyConf.call_args())
Example #7
    def _do_test(self, ctype):
        clz = ctype.__module__ + '.' + ctype.__name__
        params = ctype.get_default_params()
        instance = configurable.factory(clz, TRAIN, params)
        self.assertIsNotNone(instance)
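_do_test is a generic smoke test: it rebuilds the fully qualified class name from the type, grabs that type's default params and asserts that the factory returns an instance. A hedged example of how individual test methods might drive it; the configurable type names under dket's train module are illustrative, not necessarily the ones the project defines.

    def test_sgd(self):
        self._do_test(train.SGD)  # hypothetical configurable optimizer type

    def test_adam(self):
        self._do_test(train.AdamOptimizer)  # hypothetical configurable optimizer type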
Example #8
    def _build_graph(self):
        trainable = self.mode == tf.contrib.learn.ModeKeys.TRAIN
        words = self.inputs.get(self.inputs.WORDS_KEY)
        slengths = self.inputs.get(self.inputs.SENTENCE_LENGTH_KEY)
        targets = self.inputs.get(self.inputs.FORMULA_KEY)
        flengths = self.inputs.get(self.inputs.FORMULA_LENGTH_KEY)
        with self._graph.as_default():  # pylint: disable=E1129
            with tf.variable_scope('Embedding'):  # pylint: disable=E1129
                with tf.device('CPU:0'):
                    embedding_size = self._params['embedding_size']
                    vocabulary_size = self._params[self.INPUT_VOC_SIZE_PK]
                    embeddings = tf.get_variable(
                        'E', [vocabulary_size, embedding_size])
                    inputs = tf.nn.embedding_lookup(embeddings, words)

            batch_dim = utils.get_dimension(words, 0)
            with tf.variable_scope('Encoder'):  # pylint: disable=E1129
                encoder_params = self._params['encoder']
                encoder_cell_type = encoder_params['cell.type']
                encoder_cell_params = encoder_params['cell.params']
                encoder_cell = configurable.factory(encoder_cell_type,
                                                    self._mode,
                                                    encoder_cell_params, rnn)
                state = encoder_cell.zero_state(batch_dim, tf.float32)
                encoder_out, _ = tf.nn.dynamic_rnn(
                    cell=encoder_cell,
                    initial_state=state,
                    inputs=inputs,
                    sequence_length=slengths,
                    parallel_iterations=self._params['parallel_iterations'])

            with tf.variable_scope('Decoder'):  # pylint: disable=E1129
                decoder_params = self._params['decoder']
                decoder_cell_type = decoder_params['cell.type']
                decoder_cell_params = decoder_params['cell.params']
                decoder_cell = configurable.factory(decoder_cell_type,
                                                    self._mode,
                                                    decoder_cell_params, rnn)
                attention = layers.BahdanauAttention(
                    states=encoder_out,
                    inner_size=self._params['attention_size'],
                    trainable=trainable)
                location = layers.LocationSoftmax(attention=attention,
                                                  sequence_length=slengths)
                output = layers.PointingSoftmaxOutput(
                    shortlist_size=self._params[self.OUTPUT_VOC_SIZE_PK],
                    decoder_out_size=decoder_cell.output_size,
                    state_size=encoder_out.shape[-1].value,
                    trainable=trainable)

                self._decoder_inputs = None
                if trainable:
                    location_size = utils.get_dimension(words, 1)
                    output_size = self._params[
                        self.OUTPUT_VOC_SIZE_PK] + location_size
                    self._decoder_inputs = tf.one_hot(
                        targets,
                        output_size,
                        dtype=tf.float32,
                        name='decoder_training_input')

                ps_decoder = layers.PointingSoftmaxDecoder(
                    cell=decoder_cell,
                    location_softmax=location,
                    pointing_output=output,
                    input_size=self._params['feedback_size'],
                    decoder_inputs=self._decoder_inputs,
                    trainable=trainable)

                eos = None if trainable else self.EOS_IDX
                pad_to = None if trainable else utils.get_dimension(targets, 1)
                helper = layers.TerminationHelper(lengths=flengths, EOS=eos)
                decoder = layers.DynamicDecoder(
                    decoder=ps_decoder,
                    helper=helper,
                    pad_to=pad_to,
                    parallel_iterations=self._params['parallel_iterations'],
                    swap_memory=False)

                self._predictions, _ = decoder.decode()
Example #9
    def _build_inputs(self):
        clz = self._params[self.INPUT_CLASS_PK]
        params = self._params[self.INPUT_PARAMS_PK]
        self._inputs = configurable.factory(clz, self.mode, params)
Example #10
File: model.py Project: usman776/dket
    def _build_graph(self):
        trainable = self.mode == tf.contrib.learn.ModeKeys.TRAIN
        words = self.inputs.get(self.inputs.WORDS_KEY)
        slengths = self.inputs.get(self.inputs.SENTENCE_LENGTH_KEY)
        targets = self.inputs.get(self.inputs.FORMULA_KEY)
        flengths = self.inputs.get(self.inputs.FORMULA_LENGTH_KEY)
        with self._graph.as_default():  # pylint: disable=E1129
            if self._seed:
                tf.set_random_seed(self._seed)
            with tf.variable_scope('Embedding'):  # pylint: disable=E1129
                with tf.device('CPU:0'):
                    embedding_size = self._params['embedding_size']
                    vocabulary_size = self._params[self.INPUT_VOC_SIZE_PK]
                    embeddings = tf.get_variable(
                        'E', [vocabulary_size, embedding_size])
                    inputs = tf.nn.embedding_lookup(embeddings, words)

            batch_dim = utils.get_dimension(words, 0)
            with tf.variable_scope('Encoder'):  # pylint: disable=E1129
                encoder_params = self._params['encoder']
                encoder_cell_type = encoder_params['cell.type']
                encoder_cell_params = encoder_params['cell.params']
                encoder_cell = configurable.factory(encoder_cell_type, self._mode, encoder_cell_params, rnn)
                state = encoder_cell.zero_state(batch_dim, tf.float32)
                encoder_out, _ = tf.nn.dynamic_rnn(
                    cell=encoder_cell,
                    initial_state=state,
                    inputs=inputs,
                    sequence_length=slengths,
                    parallel_iterations=self._params['parallel_iterations'])

            with tf.variable_scope('Decoder'):  # pylint: disable=E1129
                decoder_params = self._params['decoder']
                decoder_cell_type = decoder_params['cell.type']
                decoder_cell_params = decoder_params['cell.params']
                decoder_cell = configurable.factory(decoder_cell_type, self._mode, decoder_cell_params, rnn)
                attention = layers.BahdanauAttention(
                    states=encoder_out,
                    inner_size=self._params['attention_size'],
                    trainable=trainable)
                location = layers.LocationSoftmax(
                    attention=attention,
                    sequence_length=slengths)
                output = layers.PointingSoftmaxOutput(
                    shortlist_size=self._params[self.OUTPUT_VOC_SIZE_PK],
                    decoder_out_size=decoder_cell.output_size,
                    state_size=encoder_out.shape[-1].value,
                    trainable=trainable)
                
                self._decoder_inputs = None
                if trainable:
                    location_size = utils.get_dimension(words, 1)
                    output_size = self._params[self.OUTPUT_VOC_SIZE_PK] + location_size
                    self._decoder_inputs = tf.one_hot(
                        targets, output_size, dtype=tf.float32,
                        name='decoder_training_input')
                
                ps_decoder = layers.PointingSoftmaxDecoder(
                    cell=decoder_cell,
                    location_softmax=location,
                    pointing_output=output,
                    input_size=self._params['feedback_size'],
                    decoder_inputs=self._decoder_inputs,
                    trainable=trainable)
                
                eos = None if trainable else self.EOS_IDX
                pad_to = None if trainable else utils.get_dimension(targets, 1)
                helper = layers.TerminationHelper(
                    lengths=flengths, EOS=eos)
                decoder = layers.DynamicDecoder(
                    decoder=ps_decoder, helper=helper, pad_to=pad_to,
                    parallel_iterations=self._params['parallel_iterations'],
                    swap_memory=False)
                
                self._predictions, _ = decoder.decode()
Example #11
File: model.py Project: usman776/dket
    def _build_inputs(self):
        clz = self._params[self.INPUT_CLASS_PK]
        params = self._params[self.INPUT_PARAMS_PK]
        self._inputs = configurable.factory(clz, self.mode, params)
Example #12
File: test_train.py Project: usman776/dket
    def _do_test(self, ctype):
        clz = ctype.__module__ + '.' + ctype.__name__
        params = ctype.get_default_params()
        instance = configurable.factory(clz, TRAIN, params)
        self.assertIsNotNone(instance)