Code example #1
    def __init__(self, is_training=True, is_predict=False):
        super(DecomposableNLI, self).__init__()

        self.is_training = is_training
        self.is_predict = is_predict

        #TODO move to melt.EmbeddingTrainerBase
        emb_dim = FLAGS.emb_dim
        init_width = 0.5 / emb_dim
        vocabulary.init()
        vocab_size = vocabulary.get_vocab_size()
        self.vocab_size = vocab_size

        # cpu for adagrad optimizer
        self.emb = embedding.get_or_restore_embedding_cpu()

        melt.visualize_embedding(self.emb, FLAGS.vocab)
        if is_training and FLAGS.monitor_level > 0:
            melt.monitor_embedding(self.emb, vocabulary.vocab, vocab_size)

        self._attention_output_size = 256
        self._comparison_output_size = 128

        self.scope = 'decomposable_nli'
        self.build_train_graph = self.build_graph
Code example #2
File: seq2seq.py  Project: tangqiqi123/hasky
    def __init__(self, is_training=True, is_predict=False):
        super(Seq2seq, self).__init__()

        self.is_training = is_training
        self.is_predict = is_predict

        emb = embedding.get_embedding('emb')
        if is_training and FLAGS.monitor_level > 0:
            melt.monitor_embedding(emb, vocabulary.vocab,
                                   vocabulary.vocab_size)

        self.encoder = seq2seq.rnn_encoder.RnnEncoder(is_training, is_predict)
        self.encoder.set_embedding(emb)

        #emb2 = embedding.get_embedding('emb2')
        if not FLAGS.experiment_rnn_decoder:
            self.decoder = seq2seq.rnn_decoder.RnnDecoder(
                is_training, is_predict)
        else:
            self.decoder = seq2seq.experiment.rnn_decoder.RnnDecoder(
                is_training, is_predict)
        self.decoder.set_embedding(emb)

        print('start_id', self.decoder.start_id)

        assert FLAGS.add_text_start is True
        assert self.decoder.start_id is not None
Code example #3
    def __init__(self, is_training=True, is_predict=False):
        super(DiscriminantTrainer, self).__init__()
        self.is_training = is_training
        self.is_predict = is_predict
        self.gen_text_feature = None

        emb_dim = FLAGS.emb_dim
        init_width = 0.5 / emb_dim
        vocabulary.init()
        vocab_size = vocabulary.get_vocab_size()
        self.vocab_size = vocab_size
        #if not placed on cpu (i.e. run on gpu) while using adagrad, this will fail  TODO check why
        #also this is safer, since the emb is large and might exceed gpu mem
        #with tf.device('/cpu:0'):
        #  #NOTICE if using a bidirectional rnn then effectively emb_dim is emb_dim / 2, because the fw and bw output vectors are depth-concatenated at the last step
        #  self.emb = melt.variable.get_weights_uniform('emb', [vocab_size, emb_dim], -init_width, init_width)
        self.emb = embedding.get_embedding_cpu('emb')
        melt.visualize_embedding(self.emb, FLAGS.vocab)
        if is_training and FLAGS.monitor_level > 0:
            melt.monitor_embedding(self.emb, vocabulary.vocab, vocab_size)

        self.activation = melt.activations[FLAGS.activation]

        self.weights_initializer = tf.random_uniform_initializer(
            -FLAGS.initializer_scale, FLAGS.initializer_scale)
        self.biases_initialzier = melt.slim.init_ops.zeros_initializer if FLAGS.bias else None

        if not FLAGS.pre_calc_image_feature:
            assert melt.apps.image_processing.image_processing_fn is not None, 'forget melt.apps.image_processing.init()'
            self.image_process_fn = functools.partial(
                melt.apps.image_processing.image_processing_fn,
                height=FLAGS.image_height,
                width=FLAGS.image_width)
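
The commented-out block in this constructor explains why the embedding is kept on CPU: Adagrad's sparse updates fail when the variable lives on GPU here, and a large vocabulary embedding may exceed GPU memory. Below is a minimal sketch (not the project's melt helper) of an equivalent CPU-pinned, uniform-initialized embedding in plain TensorFlow 1.x, assuming vocab_size and emb_dim are obtained as in the constructor:

import tensorflow as tf

def build_embedding_cpu(vocab_size, emb_dim):
    # Keep the (potentially very large) embedding on CPU so Adagrad's sparse
    # updates work and GPU memory is not exhausted.
    init_width = 0.5 / emb_dim
    with tf.device('/cpu:0'):
        return tf.get_variable(
            'emb', [vocab_size, emb_dim],
            initializer=tf.random_uniform_initializer(-init_width, init_width))
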
Code example #4
    def __init__(self, is_training=True, is_predict=False):
        super(ShowAndTell, self).__init__()

        if FLAGS.image_as_init_state:
            #just using the default method here is ok
            assert FLAGS.add_text_start is True, 'need to add text start for im2txt mode'
        else:
            #just for an experiment to match im2txt, but the result is worse
            assert FLAGS.add_text_start is False, 'normal mode must not add text start'

        self.is_training = is_training
        self.is_predict = is_predict
        self.is_evaluate = (not is_training) and (not is_predict)

        #if is_training:
        logging.info('num_sampled:{}'.format(FLAGS.num_sampled))
        logging.info('log_uniform_sample:{}'.format(FLAGS.log_uniform_sample))
        logging.info('keep_prob:{}'.format(FLAGS.keep_prob))
        logging.info('emb_dim:{}'.format(FLAGS.emb_dim))
        logging.info('add_text_start:{}'.format(FLAGS.add_text_start))
        logging.info('zero_as_text_start:{}'.format(FLAGS.zero_as_text_start))

        emb = self.emb = embedding.get_or_restore_embedding_cpu()
        if is_training and FLAGS.monitor_level > 0:
            melt.monitor_embedding(emb, vocabulary.vocab,
                                   vocabulary.vocab_size)

        self.emb_dim = FLAGS.emb_dim

        self.using_attention = FLAGS.image_encoder != 'ShowAndTell'

        ImageEncoder = deepiu.seq2seq.image_encoder.Encoders[
            FLAGS.image_encoder]
        #just for backward compatibility of scripts, TODO remove show_atten_tell
        if FLAGS.show_atten_tell:
            logging.info(
                'warning, show_atten_tell mode deprecated, just set --image_encoder='
            )
            ImageEncoder = deepiu.seq2seq.image_encoder.MemoryEncoder

        self.encoder = ImageEncoder(is_training, is_predict, self.emb_dim)

        self.decoder = deepiu.seq2seq.rnn_decoder.RnnDecoder(
            is_training, is_predict)
        self.decoder.set_embedding(emb)

        if not FLAGS.pre_calc_image_feature:
            assert melt.apps.image_processing.image_processing_fn is not None, 'forget melt.apps.image_processing.init()'
            self.image_process_fn = functools.partial(
                melt.apps.image_processing.image_processing_fn,
                height=FLAGS.image_height,
                width=FLAGS.image_width,
                trainable=FLAGS.finetune_image_model,
                is_training=is_training,
                random_crop=FLAGS.random_crop_image,
                finetune_end_point=FLAGS.finetune_end_point,
                distort=FLAGS.distort_image,
                feature_name=FLAGS.image_endpoint_feature_name)
        else:
            self.image_process_fn = None
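
The functools.partial call above pre-binds all image processing options, so the rest of the model can later call self.image_process_fn(image) with only the raw image. A minimal sketch of the same pattern; process_image here is a stand-in, not the real melt.apps.image_processing.image_processing_fn:

import functools

def process_image(image, height, width, is_training=False, trainable=False):
    # Stand-in: the real function resizes/augments the image and runs the CNN
    # feature extractor with the given options.
    return image

# Bind the configuration once; callers only pass the image.
image_process_fn = functools.partial(
    process_image, height=299, width=299, is_training=True, trainable=False)
features = image_process_fn('raw_image_tensor')
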
Code example #5
  def __init__(self, is_training=True, is_predict=False):
    super(Seq2seq, self).__init__()

    #assert FLAGS.rnn_output_method == 'all', 'attention need to encode all'

    self.is_training = is_training 
    self.is_predict = is_predict
    self.is_evaluate = (not is_training) and (not is_predict)

    emb = embedding.get_or_restore_embedding_cpu()
    if is_training and FLAGS.monitor_level > 0:
      melt.monitor_embedding(emb, vocabulary.vocab, vocabulary.vocab_size)

    self.encoder = seq2seq.rnn_encoder.RnnEncoder(is_training, is_predict)
    self.encoder.set_embedding(emb)

    #emb2 = embedding.get_embedding('emb2')
    if not FLAGS.experiment_rnn_decoder:
      self.decoder = seq2seq.rnn_decoder.RnnDecoder(is_training, is_predict)
    else:
      self.decoder = seq2seq.experiment.rnn_decoder.RnnDecoder(is_training, is_predict)
    self.decoder.set_embedding(emb)
    
    print('start_id', self.decoder.start_id)

    assert FLAGS.add_text_start is True 
    assert self.decoder.start_id is not None
Code example #6
    def __init__(self, encoder_type='bow', is_training=True, is_predict=False):
        super(DiscriminantTrainer, self).__init__()
        self.is_training = is_training
        self.is_predict = is_predict

        logging.info('emb_dim:{}'.format(FLAGS.emb_dim))
        logging.info('margin:{}'.format(FLAGS.margin))

        self.encoder = encoder_factory.get_encoder(encoder_type, is_training,
                                                   is_predict)
        self.encoder_type = encoder_type

        emb_dim = FLAGS.emb_dim
        init_width = 0.5 / emb_dim
        vocabulary.init()
        vocab_size = vocabulary.get_vocab_size()
        self.vocab_size = vocab_size
        self.emb = embedding.get_or_restore_embedding_cpu()

        melt.visualize_embedding(self.emb, vocabulary.vocab_path)
        if is_training and FLAGS.monitor_level > 0:
            melt.monitor_embedding(self.emb, vocabulary.vocab, vocab_size)

        self.activation = melt.activations[FLAGS.activation]

        #TODO can consider a global initializer like
        # with tf.variable_scope("Model", reuse=None, initializer=initializer)
        #https://github.com/tensorflow/models/blob/master/tutorials/rnn/ptb/ptb_word_lm.py
        self.weights_initializer = tf.random_uniform_initializer(
            -FLAGS.initializer_scale, FLAGS.initializer_scale)
        self.biases_initializer = melt.slim2.init_ops.zeros_initializer if FLAGS.bias else None

        self.image_process_fn = lambda x: x
        if not FLAGS.pre_calc_image_feature:
            assert melt.apps.image_processing.image_processing_fn is not None, 'forget melt.apps.image_processing.init()'
            self.image_process_fn = functools.partial(
                melt.apps.image_processing.image_processing_fn,
                height=FLAGS.image_height,
                width=FLAGS.image_width,
                trainable=FLAGS.finetune_image_model,
                is_training=is_training,
                random_crop=FLAGS.random_crop_image,
                finetune_end_point=FLAGS.finetune_end_point,
                distort=FLAGS.distort_image,
                feature_name=FLAGS.image_endpoint_feature_name)

        self.image_mlp_dims = [
            int(x) for x in FLAGS.image_mlp_dims.split(',')
        ] if FLAGS.image_mlp_dims != '0' else None
        self.text_mlp_dims = [
            int(x) for x in FLAGS.text_mlp_dims.split(',')
        ] if FLAGS.text_mlp_dims != '0' else None

        self.scope = 'image_text_sim'
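
The image_mlp_dims / text_mlp_dims expressions above turn a comma-separated flag such as '1024,512' into a list of MLP layer sizes, with '0' meaning no MLP at all. The same logic as a standalone, hypothetical helper:

def parse_mlp_dims(dims_str):
    # '1024,512' -> [1024, 512]; '0' (or empty) disables the MLP.
    if not dims_str or dims_str == '0':
        return None
    return [int(x) for x in dims_str.split(',')]

assert parse_mlp_dims('1024,512') == [1024, 512]
assert parse_mlp_dims('0') is None
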
Code example #7
    def __init__(self, is_training=True, is_predict=False):
        super(ShowAndTell, self).__init__()

        if FLAGS.image_as_init_state:
            #just using the default method here is ok
            assert FLAGS.add_text_start is True, 'need to add text start for im2txt mode'
        else:
            #just for an experiment to match im2txt, but the result is worse
            assert FLAGS.add_text_start is False, 'normal mode must not add text start'

        self.is_training = is_training
        self.is_predict = is_predict
        self.is_evaluate = (not is_training) and (not is_predict)

        #if is_training:
        logging.info('num_sampled:{}'.format(FLAGS.num_sampled))
        logging.info('log_uniform_sample:{}'.format(FLAGS.log_uniform_sample))
        logging.info('num_layers:{}'.format(FLAGS.num_layers))
        logging.info('keep_prob:{}'.format(FLAGS.keep_prob))
        logging.info('emb_dim:{}'.format(FLAGS.emb_dim))
        logging.info('add_text_start:{}'.format(FLAGS.add_text_start))
        logging.info('zero_as_text_start:{}'.format(FLAGS.zero_as_text_start))

        emb = self.emb = embedding.get_or_restore_embedding_cpu()
        if is_training and FLAGS.monitor_level > 0:
            melt.monitor_embedding(emb, vocabulary.vocab,
                                   vocabulary.vocab_size)

        self.decoder = deepiu.seq2seq.rnn_decoder.RnnDecoder(
            is_training, is_predict)
        self.decoder.set_embedding(emb)

        self.emb_dim = FLAGS.emb_dim

        self.initializer = tf.random_uniform_initializer(
            minval=-FLAGS.initializer_scale, maxval=FLAGS.initializer_scale)

        if not FLAGS.pre_calc_image_feature:
            assert melt.apps.image_processing.image_processing_fn is not None, 'forget melt.apps.image_processing.init()'
            self.image_process_fn = functools.partial(
                melt.apps.image_processing.image_processing_fn,
                height=FLAGS.image_height,
                width=FLAGS.image_width,
                trainable=FLAGS.finetune_image_model,
                is_training=is_training,
                random_crop=FLAGS.random_crop_image,
                finetune_end_point=FLAGS.finetune_end_point,
                distort=FLAGS.distort_image)
Code example #8
File: show_and_tell.py  Project: tangqiqi123/hasky
    def __init__(self, is_training=True, is_predict=False):
        super(ShowAndTell, self).__init__()

        assert FLAGS.add_text_start is False

        #---------should be show_and_tell/model_init_1
        #print('ShowAndTell init:', tf.get_variable_scope().name)
        #self.abcd = melt.init_bias(3)
        #print('ShowAndTell bias', self.abcd.name)

        self.is_training = is_training
        self.is_predict = is_predict

        #if is_training:
        logging.info('num_sampled:{}'.format(FLAGS.num_sampled))
        logging.info('use_neg:{}'.format(FLAGS.use_neg))
        logging.info('log_uniform_sample:{}'.format(FLAGS.log_uniform_sample))
        logging.info('num_layers:{}'.format(FLAGS.num_layers))
        logging.info('keep_prob:{}'.format(FLAGS.keep_prob))
        logging.info('emb_dim:{}'.format(FLAGS.emb_dim))
        logging.info('add_text_start:{}'.format(FLAGS.add_text_start))
        logging.info('zero_as_text_start:{}'.format(FLAGS.zero_as_text_start))

        emb = embedding.get_embedding('emb')
        if is_training and FLAGS.monitor_level > 0:
            melt.monitor_embedding(emb, vocabulary.vocab,
                                   vocabulary.vocab_size)

        self.decoder = deepiu.seq2seq.rnn_decoder.RnnDecoder(
            is_training, is_predict)
        self.decoder.set_embedding(emb)

        self.emb_dim = FLAGS.emb_dim
        #TODO to be safe, could add_text_start but add 0 and not calc its weight, or
        #do an additional cell(image_embedding, state) and pass the state with start_id as input, like im2txt

        self.initializer = tf.random_uniform_initializer(
            minval=-FLAGS.initializer_scale, maxval=FLAGS.initializer_scale)

        if not FLAGS.pre_calc_image_feature:
            assert melt.apps.image_processing.image_processing_fn is not None, 'forget melt.apps.image_processing.init()'
            self.image_process_fn = functools.partial(
                melt.apps.image_processing.image_processing_fn,
                height=FLAGS.image_height,
                width=FLAGS.image_width)
Code example #9
    def __init__(self, is_training=True, is_predict=False):
        super(Imtxt2txt, self).__init__()

        assert FLAGS.add_text_start is True

        self.is_training = is_training
        self.is_predict = is_predict

        #if is_training:
        logging.info('num_sampled:{}'.format(FLAGS.num_sampled))
        logging.info('use_neg:{}'.format(FLAGS.use_neg))
        logging.info('log_uniform_sample:{}'.format(FLAGS.log_uniform_sample))
        logging.info('num_layers:{}'.format(FLAGS.num_layers))
        logging.info('keep_prob:{}'.format(FLAGS.keep_prob))
        logging.info('emb_dim:{}'.format(FLAGS.emb_dim))
        logging.info('add_text_start:{}'.format(FLAGS.add_text_start))
        logging.info('zero_as_text_start:{}'.format(FLAGS.zero_as_text_start))

        emb = embedding.get_embedding('emb')
        if is_training and FLAGS.monitor_level > 0:
            melt.monitor_embedding(emb, vocabulary.vocab,
                                   vocabulary.vocab_size)

        self.encoder = seq2seq.rnn_encoder.RnnEncoder(is_training, is_predict)
        self.encoder.set_embedding(emb)

        #emb2 = embedding.get_embedding('emb2')
        self.decoder = seq2seq.rnn_decoder.RnnDecoder(is_training, is_predict)
        self.decoder.set_embedding(emb)

        print('start_id', self.decoder.start_id)

        self.emb_dim = FLAGS.emb_dim
        self.initializer = tf.random_uniform_initializer(
            minval=-FLAGS.initializer_scale, maxval=FLAGS.initializer_scale)

        if not FLAGS.pre_calc_image_feature:
            assert melt.apps.image_processing.image_processing_fn is not None, 'forget melt.apps.image_processing.init()'
            self.image_process_fn = functools.partial(
                melt.apps.image_processing.image_processing_fn,
                height=FLAGS.image_height,
                width=FLAGS.image_width)
Code example #10
    def __init__(self, is_training=True, is_predict=False):
        super(ShowAndTell, self).__init__()

        #---------should be show_and_tell/model_init_1
        #print('ShowAndTell init:', tf.get_variable_scope().name)
        #self.abcd = melt.init_bias(3)
        #print('ShowAndTell bias', self.abcd.name)

        self.is_training = is_training
        self.is_predict = is_predict

        #if is_training:
        logging.info('num_sampled:{}'.format(FLAGS.num_sampled))
        logging.info('use_neg:{}'.format(FLAGS.use_neg))
        logging.info('log_uniform_sample:{}'.format(FLAGS.log_uniform_sample))
        logging.info('num_layers:{}'.format(FLAGS.num_layers))
        logging.info('keep_prob:{}'.format(FLAGS.keep_prob))
        logging.info('emb_dim:{}'.format(FLAGS.emb_dim))
        logging.info('add_text_start:{}'.format(FLAGS.add_text_start))
        logging.info('zero_as_text_start:{}'.format(FLAGS.zero_as_text_start))

        emb = embedding.get_embedding('emb')
        if is_training and FLAGS.monitor_level > 0:
            melt.monitor_embedding(emb, vocabulary.vocab,
                                   vocabulary.vocab_size)

        self.decoder = deepiu.seq2seq.rnn_decoder.RnnDecoder(
            is_training, is_predict)
        self.decoder.set_embedding(emb)

        emb_dim = FLAGS.emb_dim
        self.encode_img_W = melt.variable.get_weights_uniform(
            'encode_img_W', [IMAGE_FEATURE_LEN, emb_dim],
            -FLAGS.initializer_scale, FLAGS.initializer_scale)
        self.encode_img_b = melt.variable.get_bias('encode_img_b', [emb_dim])

        #TODO to be safe, could add_text_start but add 0 and not calc its weight, or
        #do an additional cell(image_embedding, state) and pass the state with start_id as input, like im2txt
        assert FLAGS.add_text_start is False
Code example #11
    def __init__(self, encoder_type='bow', is_training=True, is_predict=False):
        super(DualTextsim, self).__init__()

        self.is_training = is_training
        self.is_predict = is_predict

        self.encoder = encoder_factory.get_encoder(encoder_type, is_training,
                                                   is_predict)
        self.encoder_type = encoder_type

        emb_dim = FLAGS.emb_dim
        init_width = 0.5 / emb_dim
        vocabulary.init()
        vocab_size = vocabulary.get_vocab_size()
        self.vocab_size = vocab_size

        # cpu for adagrad optimizer
        self.emb = embedding.get_or_restore_embedding_cpu()
        self.pos_emb = embedding.get_position_embedding_cpu()

        melt.visualize_embedding(self.emb, FLAGS.vocab)
        if is_training and FLAGS.monitor_level > 0:
            melt.monitor_embedding(self.emb, vocabulary.vocab, vocab_size)

        self.activation = melt.activations[FLAGS.activation]

        # TODO can consider a global initializer like
        # with tf.variable_scope("Model", reuse=None, initializer=initializer)
        # https://github.com/tensorflow/models/blob/master/tutorials/rnn/ptb/ptb_word_lm.py
        self.weights_initializer = tf.random_uniform_initializer(
            -FLAGS.initializer_scale, FLAGS.initializer_scale)
        self.biases_initialzier = melt.slim.init_ops.zeros_initializer if FLAGS.bias else None

        self.mlp_dims = [
            int(x) for x in FLAGS.mlp_dims.split(',')
        ] if FLAGS.mlp_dims != '0' else None

        #needed in build graph from PairwiseGraph
        self.scope = 'dual_textsim'
        self.build_train_graph = self.build_graph
Code example #12
    def __init__(self, is_training=True, is_predict=False):
        super(MilTrainer, self).__init__()
        self.is_training = is_training
        self.is_predict = is_predict

        logging.info('emb_dim:{}'.format(FLAGS.emb_dim))
        logging.info('margin:{}'.format(FLAGS.margin))

        vocabulary.init()
        vocab_size = vocabulary.get_vocab_size()
        self.vocab_size = vocab_size
        self.emb = embedding.get_or_restore_embedding_cpu()

        melt.visualize_embedding(self.emb, FLAGS.vocab)
        if is_training and FLAGS.monitor_level > 0:
            melt.monitor_embedding(self.emb, vocabulary.vocab, vocab_size)

        self.image_process_fn = lambda x: x
        if not FLAGS.pre_calc_image_feature:
            assert melt.apps.image_processing.image_processing_fn is not None, 'forget melt.apps.image_processing.init()'
            self.image_process_fn = functools.partial(
                melt.apps.image_processing.image_processing_fn,
                height=FLAGS.image_height,
                width=FLAGS.image_width,
                trainable=FLAGS.finetune_image_model,
                is_training=is_training,
                random_crop=FLAGS.random_crop_image,
                finetune_end_point=FLAGS.finetune_end_point,
                distort=FLAGS.distort_image,
                feature_name=FLAGS.image_endpoint_feature_name)

        ImageEncoder = deepiu.seq2seq.image_encoder.Encoders[
            FLAGS.image_encoder]
        self.image_encoder = ImageEncoder(is_training, is_predict,
                                          FLAGS.emb_dim)
        self.using_attention = FLAGS.image_encoder != 'ShowAndTell'
        assert self.using_attention

        with tf.variable_scope('text_encoder'):
            if FLAGS.text_encoder:
                self.text_encoder = encoder_factory.get_encoder(
                    FLAGS.text_encoder, is_training, is_predict)
            else:
                self.text_encoder = None

        self.weights_initializer = tf.random_uniform_initializer(
            -FLAGS.initializer_scale, FLAGS.initializer_scale)
        self.activation = melt.activations[FLAGS.activation]
        self.text_mlp_dims = [
            int(x) for x in FLAGS.text_mlp_dims.split(',')
        ] if FLAGS.text_mlp_dims != '0' else None
        self.biases_initializer = melt.slim2.init_ops.zeros_initializer if FLAGS.bias else None

        logging.info('mil text_encoder:{}'.format(self.text_encoder))

        if FLAGS.use_idf_weights:
            self.idf_weights = tf.constant(idf.get_idf())
        else:
            self.idf_weights = tf.constant(
                [0.] * NUM_RESERVED_IDS +
                [1.0 for id in range(NUM_RESERVED_IDS, vocab_size)])

        self.scope = FLAGS.trainer_scope or 'image_text_sim'
Code example #13
File: dual_textsim.py  Project: tangqiqi123/hasky
    def __init__(self, encoder_type='bow', is_training=True, is_predict=False):
        super(DualTextsim, self).__init__()

        self.is_training = is_training
        self.is_predict = is_predict

        self.encoder = encoder_factory.get_encoder(encoder_type, is_training,
                                                   is_predict)
        self.encoder_type = encoder_type

        emb_dim = FLAGS.emb_dim
        init_width = 0.5 / emb_dim
        vocabulary.init()
        vocab_size = vocabulary.get_vocab_size()
        self.vocab_size = vocab_size

        # cpu for adagrad optimizer
        if (not FLAGS.word_embedding_file) or glob.glob(FLAGS.model_dir +
                                                        '/model.ckpt*'):
            logging.info(
                'Word embedding random init or from model_dir :{} and finetune=:{}'
                .format(FLAGS.model_dir, FLAGS.finetune_word_embedding))
            self.emb = embedding.get_embedding_cpu(
                name='emb', trainable=FLAGS.finetune_word_embedding)
        else:
            # https://github.com/tensorflow/tensorflow/issues/1570
            # adagrad still requires the embedding on cpu..
            # if not finetuning the emb this is fine; if finetuning, will a restart be ok? must the word embedding file then not be used? judge by os.path.exists(FLAGS.model_dir)?
            # or will it still try to load from the checkpoint? TODO to be safe you could re-run with word_embedding_file set to None or ''
            logging.info(
                'Loading word embedding from :{} and finetune=:{}'.format(
                    FLAGS.word_embedding_file, FLAGS.finetune_word_embedding))
            self.emb = melt.load_constant_cpu(
                FLAGS.word_embedding_file,
                name='emb',
                trainable=FLAGS.finetune_word_embedding)

        if FLAGS.position_embedding:
            logging.info('Using position embedding')
            self.pos_emb = embedding.get_embedding_cpu(name='pos_emb',
                                                       height=TEXT_MAX_WORDS)
        else:
            self.pos_emb = None

        melt.visualize_embedding(self.emb, FLAGS.vocab)
        if is_training and FLAGS.monitor_level > 0:
            melt.monitor_embedding(self.emb, vocabulary.vocab, vocab_size)

        self.activation = melt.activations[FLAGS.activation]

        # TODO can consider a global initializer like
        # with tf.variable_scope("Model", reuse=None, initializer=initializer)
        # https://github.com/tensorflow/models/blob/master/tutorials/rnn/ptb/ptb_word_lm.py
        self.weights_initializer = tf.random_uniform_initializer(
            -FLAGS.initializer_scale, FLAGS.initializer_scale)
        self.biases_initialzier = melt.slim.init_ops.zeros_initializer if FLAGS.bias else None

        self.mlp_dims = [
            int(x) for x in FLAGS.mlp_dims.split(',')
        ] if FLAGS.mlp_dims != '0' else None

        self.scope = 'dual_textsim'

        self.build_train_graph = self.build_graph
Code example #14
    def __init__(self, encoder_type='bow', is_training=True, is_predict=False):
        super(DiscriminantTrainer, self).__init__()
        self.is_training = is_training
        self.is_predict = is_predict

        logging.info('emb_dim:{}'.format(FLAGS.emb_dim))
        logging.info('margin:{}'.format(FLAGS.margin))

        self.encoder = encoder_factory.get_encoder(encoder_type, is_training,
                                                   is_predict)
        self.encoder_type = encoder_type

        emb_dim = FLAGS.emb_dim
        init_width = 0.5 / emb_dim
        vocabulary.init()
        vocab_size = vocabulary.get_vocab_size()
        self.vocab_size = vocab_size
        #if not placed on cpu (i.e. run on gpu) while using adagrad, this will fail  TODO check why
        #also this is safer, since the emb is large and might exceed gpu mem
        #with tf.device('/cpu:0'):
        #  self.emb = melt.variable.get_weights_uniform('emb', [vocab_size, emb_dim], -init_width, init_width)
        if (not FLAGS.word_embedding_file) or glob.glob(FLAGS.model_dir +
                                                        '/model.ckpt*'):
            logging.info(
                'Word embedding random init or from model_dir :{} and finetune=:{}'
                .format(FLAGS.model_dir, FLAGS.finetune_word_embedding))
            self.emb = embedding.get_embedding_cpu(
                name='emb', trainable=FLAGS.finetune_word_embedding)
        else:
            #https://github.com/tensorflow/tensorflow/issues/1570
            #adagrad still requires the embedding on cpu..
            #if not finetuning the emb this is fine; if finetuning, will a restart be ok? must the word embedding file then not be used? judge by os.path.exists(FLAGS.model_dir)?
            #or will it still try to load from the checkpoint? TODO to be safe you could re-run with word_embedding_file set to None or ''
            logging.info(
                'Loading word embedding from :{} and finetune=:{}'.format(
                    FLAGS.word_embedding_file, FLAGS.finetune_word_embedding))
            self.emb = melt.load_constant_cpu(
                FLAGS.word_embedding_file,
                name='emb',
                trainable=FLAGS.finetune_word_embedding)

        melt.visualize_embedding(self.emb, FLAGS.vocab)
        if is_training and FLAGS.monitor_level > 0:
            melt.monitor_embedding(self.emb, vocabulary.vocab, vocab_size)

        self.activation = melt.activations[FLAGS.activation]

        #TODO can consider a global initializer like
        # with tf.variable_scope("Model", reuse=None, initializer=initializer)
        #https://github.com/tensorflow/models/blob/master/tutorials/rnn/ptb/ptb_word_lm.py
        self.weights_initializer = tf.random_uniform_initializer(
            -FLAGS.initializer_scale, FLAGS.initializer_scale)
        self.biases_initialzier = melt.slim.init_ops.zeros_initializer if FLAGS.bias else None

        if not FLAGS.pre_calc_image_feature:
            assert melt.apps.image_processing.image_processing_fn is not None, 'forget melt.apps.image_processing.init()'
            self.image_process_fn = functools.partial(
                melt.apps.image_processing.image_processing_fn,
                height=FLAGS.image_height,
                width=FLAGS.image_width)

        self.image_mlp_dims = [
            int(x) for x in FLAGS.image_mlp_dims.split(',')
        ] if FLAGS.image_mlp_dims != '0' else None
        self.text_mlp_dims = [
            int(x) for x in FLAGS.text_mlp_dims.split(',')
        ] if FLAGS.text_mlp_dims != '0' else None

        self.scope = 'image_text_sim'