def __init__(self, is_training=True, is_predict=False):
  super(DiscriminantTrainer, self).__init__()
  self.is_training = is_training
  self.is_predict = is_predict
  self.gen_text_feature = None

  emb_dim = FLAGS.emb_dim
  init_width = 0.5 / emb_dim
  vocabulary.init()
  vocab_size = vocabulary.get_vocab_size()
  self.vocab_size = vocab_size

  # If the embedding is not on CPU while running on GPU with Adagrad, it will fail. TODO: check why.
  # Keeping it on CPU is also safer, since the embedding is large and might exceed GPU memory.
  # with tf.device('/cpu:0'):
  #   # NOTICE: with a bidirectional RNN the effective emb_dim is emb_dim / 2, because the
  #   # forward and backward outputs are depth-concatenated at the last step.
  #   self.emb = melt.variable.get_weights_uniform('emb', [vocab_size, emb_dim], -init_width, init_width)
  self.emb = embedding.get_embedding_cpu('emb')
  melt.visualize_embedding(self.emb, FLAGS.vocab)
  if is_training and FLAGS.monitor_level > 0:
    melt.monitor_embedding(self.emb, vocabulary.vocab, vocab_size)

  self.activation = melt.activations[FLAGS.activation]

  self.weights_initializer = tf.random_uniform_initializer(
      -FLAGS.initializer_scale, FLAGS.initializer_scale)
  self.biases_initialzier = melt.slim.init_ops.zeros_initializer if FLAGS.bias else None

  if not FLAGS.pre_calc_image_feature:
    assert melt.apps.image_processing.image_processing_fn is not None, 'forgot to call melt.apps.image_processing.init()'
    self.image_process_fn = functools.partial(
        melt.apps.image_processing.image_processing_fn,
        height=FLAGS.image_height,
        width=FLAGS.image_width)
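# The comments in the constructor above note that the embedding should live on the CPU and
# be initialized uniformly in [-init_width, init_width] with init_width = 0.5 / emb_dim.
# A minimal stand-alone sketch of that pattern in plain TF 1.x; the helper name is
# illustrative and not part of melt's actual API:

import tensorflow as tf

def make_cpu_uniform_embedding(vocab_size, emb_dim, name='emb'):
  """Create an embedding variable pinned to CPU with uniform init of width 0.5 / emb_dim."""
  init_width = 0.5 / emb_dim
  with tf.device('/cpu:0'):
    return tf.get_variable(
        name, shape=[vocab_size, emb_dim],
        initializer=tf.random_uniform_initializer(-init_width, init_width))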
def __init__(self, encoder_type='bow', is_training=True, is_predict=False):
  super(DualTextsim, self).__init__()
  self.is_training = is_training
  self.is_predict = is_predict

  self.encoder = encoder_factory.get_encoder(encoder_type, is_training, is_predict)
  self.encoder_type = encoder_type

  emb_dim = FLAGS.emb_dim
  init_width = 0.5 / emb_dim
  vocabulary.init()
  vocab_size = vocabulary.get_vocab_size()
  self.vocab_size = vocab_size

  # keep the embedding on CPU for the Adagrad optimizer
  if (not FLAGS.word_embedding_file) or glob.glob(FLAGS.model_dir + '/model.ckpt*'):
    logging.info('Word embedding random init or from model_dir: {}, finetune: {}'.format(
        FLAGS.model_dir, FLAGS.finetune_word_embedding))
    self.emb = embedding.get_embedding_cpu(
        name='emb', trainable=FLAGS.finetune_word_embedding)
  else:
    # https://github.com/tensorflow/tensorflow/issues/1570
    # With Adagrad the embedding still must live on CPU.
    # If the embedding is not finetuned this is fine; if finetuning and restarting, must the
    # word embedding file be avoided? Check os.path.exists(FLAGS.model_dir)? Or will it still
    # try to load from the checkpoint? TODO: to be safe, re-run with word_embedding_file set
    # to None or ''.
    logging.info('Loading word embedding from: {}, finetune: {}'.format(
        FLAGS.word_embedding_file, FLAGS.finetune_word_embedding))
    self.emb = melt.load_constant_cpu(
        FLAGS.word_embedding_file, name='emb', trainable=FLAGS.finetune_word_embedding)

  if FLAGS.position_embedding:
    logging.info('Using position embedding')
    self.pos_emb = embedding.get_embedding_cpu(name='pos_emb', height=TEXT_MAX_WORDS)
  else:
    self.pos_emb = None

  melt.visualize_embedding(self.emb, FLAGS.vocab)
  if is_training and FLAGS.monitor_level > 0:
    melt.monitor_embedding(self.emb, vocabulary.vocab, vocab_size)

  self.activation = melt.activations[FLAGS.activation]

  # TODO: consider a global initializer like
  #   with tf.variable_scope("Model", reuse=None, initializer=initializer)
  # https://github.com/tensorflow/models/blob/master/tutorials/rnn/ptb/ptb_word_lm.py
  self.weights_initializer = tf.random_uniform_initializer(
      -FLAGS.initializer_scale, FLAGS.initializer_scale)
  self.biases_initialzier = melt.slim.init_ops.zeros_initializer if FLAGS.bias else None

  self.mlp_dims = [int(x) for x in FLAGS.mlp_dims.split(',')] if FLAGS.mlp_dims != '0' else None

  self.scope = 'dual_textsim'

  self.build_train_graph = self.build_graph
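# Note on the mlp_dims parsing above: the original code compared with `is not '0'`, which
# tests object identity rather than string equality and is unreliable for flag values, so it
# was changed to `!=`. A small stand-alone sketch of the intended parsing; the helper name is
# illustrative, not part of the codebase:

def parse_mlp_dims(dims_str):
  """Turn a comma-separated dims string like '512,256' into [512, 256]; '0' disables the MLP."""
  if not dims_str or dims_str == '0':
    return None
  return [int(x) for x in dims_str.split(',')]

# Example: parse_mlp_dims('512,256') -> [512, 256]; parse_mlp_dims('0') -> None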
def __init__(self, is_training=False, is_predict=False, emb_dim=None, initializer=None):
  ShowAndTellEncoder.__init__(self, is_training, is_predict, emb_dim, initializer)
  self.pos_emb = embedding.get_embedding_cpu(name='pos_emb', height=FLAGS.image_attention_size)
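# A position embedding of height FLAGS.image_attention_size is typically looked up by position
# index and combined with per-position features. A hedged illustration of that common pattern
# in plain TF 1.x; this is not taken from this codebase's actual usage:

import tensorflow as tf

def add_position_embedding(features, pos_emb):
  """features: [batch, num_positions, dim]; pos_emb: [num_positions, dim]."""
  num_positions = tf.shape(features)[1]
  pos_vectors = tf.nn.embedding_lookup(pos_emb, tf.range(num_positions))
  return features + pos_vectors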
def __init__(self, encoder_type='bow', is_training=True, is_predict=False):
  super(DiscriminantTrainer, self).__init__()
  self.is_training = is_training
  self.is_predict = is_predict

  logging.info('emb_dim: {}'.format(FLAGS.emb_dim))
  logging.info('margin: {}'.format(FLAGS.margin))

  self.encoder = encoder_factory.get_encoder(encoder_type, is_training, is_predict)
  self.encoder_type = encoder_type

  emb_dim = FLAGS.emb_dim
  init_width = 0.5 / emb_dim
  vocabulary.init()
  vocab_size = vocabulary.get_vocab_size()
  self.vocab_size = vocab_size

  # If the embedding is not on CPU while running on GPU with Adagrad, it will fail. TODO: check why.
  # Keeping it on CPU is also safer, since the embedding is large and might exceed GPU memory.
  # with tf.device('/cpu:0'):
  #   self.emb = melt.variable.get_weights_uniform('emb', [vocab_size, emb_dim], -init_width, init_width)
  if (not FLAGS.word_embedding_file) or glob.glob(FLAGS.model_dir + '/model.ckpt*'):
    logging.info('Word embedding random init or from model_dir: {}, finetune: {}'.format(
        FLAGS.model_dir, FLAGS.finetune_word_embedding))
    self.emb = embedding.get_embedding_cpu(
        name='emb', trainable=FLAGS.finetune_word_embedding)
  else:
    # https://github.com/tensorflow/tensorflow/issues/1570
    # With Adagrad the embedding still must live on CPU.
    # If the embedding is not finetuned this is fine; if finetuning and restarting, must the
    # word embedding file be avoided? Check os.path.exists(FLAGS.model_dir)? Or will it still
    # try to load from the checkpoint? TODO: to be safe, re-run with word_embedding_file set
    # to None or ''.
    logging.info('Loading word embedding from: {}, finetune: {}'.format(
        FLAGS.word_embedding_file, FLAGS.finetune_word_embedding))
    self.emb = melt.load_constant_cpu(
        FLAGS.word_embedding_file, name='emb', trainable=FLAGS.finetune_word_embedding)

  melt.visualize_embedding(self.emb, FLAGS.vocab)
  if is_training and FLAGS.monitor_level > 0:
    melt.monitor_embedding(self.emb, vocabulary.vocab, vocab_size)

  self.activation = melt.activations[FLAGS.activation]

  # TODO: consider a global initializer like
  #   with tf.variable_scope("Model", reuse=None, initializer=initializer)
  # https://github.com/tensorflow/models/blob/master/tutorials/rnn/ptb/ptb_word_lm.py
  self.weights_initializer = tf.random_uniform_initializer(
      -FLAGS.initializer_scale, FLAGS.initializer_scale)
  self.biases_initialzier = melt.slim.init_ops.zeros_initializer if FLAGS.bias else None

  if not FLAGS.pre_calc_image_feature:
    assert melt.apps.image_processing.image_processing_fn is not None, 'forgot to call melt.apps.image_processing.init()'
    self.image_process_fn = functools.partial(
        melt.apps.image_processing.image_processing_fn,
        height=FLAGS.image_height,
        width=FLAGS.image_width)

  self.image_mlp_dims = [int(x) for x in FLAGS.image_mlp_dims.split(',')] if FLAGS.image_mlp_dims != '0' else None
  self.text_mlp_dims = [int(x) for x in FLAGS.text_mlp_dims.split(',')] if FLAGS.text_mlp_dims != '0' else None

  self.scope = 'image_text_sim'
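# The branch above either random-initializes the embedding (no pretrained file given, or a
# checkpoint in model_dir will override it anyway) or loads a pretrained matrix via
# melt.load_constant_cpu. A rough stand-alone sketch of the "load pretrained, pin to CPU,
# optionally freeze" pattern in plain TF 1.x + numpy; this only approximates what
# melt.load_constant_cpu does, and the function name and .npy format are illustrative:

import numpy as np
import tensorflow as tf

def load_pretrained_embedding_cpu(npy_path, name='emb', trainable=False):
  """Load a saved [vocab_size, emb_dim] matrix and wrap it in a CPU-pinned variable."""
  weights = np.load(npy_path).astype(np.float32)
  with tf.device('/cpu:0'):
    return tf.get_variable(
        name, shape=weights.shape,
        initializer=tf.constant_initializer(weights),
        trainable=trainable)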