def __init__(self, batch, config, is_train=True):
    """Set up model state from a data batch and config, then build the graph.

    Loads the vocabulary, answer dictionary, and wordset dictionary pickles
    from ``config.data_dir``, constructs the embedding / GloVe word maps via
    ``modules``, and finally calls ``self.build()``.

    Args:
        batch: input batch tensors (project-defined structure — assumed to be
            consumed by ``self.build()``; TODO confirm against caller).
        config: configuration object; must provide ``data_cfg``, ``data_dir``,
            and ``expand_depth``.
        is_train: whether the graph is built in training mode.
    """
    self.batch = batch
    self.config = config
    self.data_cfg = config.data_cfg
    self.data_dir = config.data_dir
    self.is_train = is_train

    # Containers populated later during graph construction / reporting.
    self.losses = {}
    self.report = {}
    self.mid_result = {}
    self.vis_image = {}

    # Use context managers so pickle file handles are closed deterministically
    # (the original left them open, relying on GC).
    vocab_path = os.path.join(self.data_dir, 'vocab.pkl')
    with open(vocab_path, 'rb') as f:
        self.vocab = cPickle.load(f)

    answer_dict_path = os.path.join(self.data_dir, 'answer_dict.pkl')
    with open(answer_dict_path, 'rb') as f:
        self.answer_dict = cPickle.load(f)
    self.num_answer = len(self.answer_dict['vocab'])

    # Wordset dictionary filename encodes the expansion depth.
    ws_dict_path = os.path.join(
        self.data_dir,
        'wordset_dict5_depth{}.pkl'.format(int(config.expand_depth)))
    with open(ws_dict_path, 'rb') as f:
        self.ws_dict = cPickle.load(f)
    self.num_ws = len(self.ws_dict['vocab'])

    # Embedding maps: separate GloVe tables for the visual (V) and language
    # (L) pathways, plus an answer-word embedding.
    self.wordset_map = modules.learn_embedding_map(
        self.ws_dict, scope='wordset_map')
    self.v_word_map = modules.LearnGloVe(self.vocab, scope='V_GloVe')
    self.l_word_map = modules.LearnGloVe(self.vocab, scope='L_GloVe')
    self.l_answer_word_map = modules.LearnAnswerGloVe(self.answer_dict)

    self.build()
# Refuse to overwrite an existing experiment directory; create it otherwise.
if not os.path.exists(config.save_dir):
    log.warn('create directory: {}'.format(config.save_dir))
    os.makedirs(config.save_dir)
else:
    raise ValueError('Do not overwrite: {}'.format(config.save_dir))

# Load vocabulary and answer dictionary. Context managers close the pickle
# file handles deterministically (the original left them open).
vocab_path = os.path.join(config.data_dir, 'vocab.pkl')
with open(vocab_path, 'rb') as f:
    vocab = cPickle.load(f)
answer_dict_path = os.path.join(config.data_dir, 'answer_dict.pkl')
with open(answer_dict_path, 'rb') as f:
    answer_dict = cPickle.load(f)
num_answer = len(answer_dict['vocab'])

# GloVe embedding tables for the visual (V) and language (L) pathways, and
# answer-word embeddings.
v_word_map = modules.LearnGloVe(vocab, scope='V_GloVe')
l_word_map = modules.LearnGloVe(vocab, scope='L_GloVe')
l_answer_word_map = modules.LearnAnswerGloVe(answer_dict)

# Classifier output layer variables, created (or reused) under a fixed scope
# so they line up with checkpoints saved elsewhere.
with tf.variable_scope('classifier/fc', reuse=tf.AUTO_REUSE):
    # (float32_ref 2048x4000) [8192000, bytes: 32768000]
    class_weights = tf.get_variable(
        'weights', shape=[config.class_feat_dim, num_answer])
    # (float32_ref 4000) [4000, bytes: 16000]
    class_biases = tf.get_variable('biases', shape=[num_answer])

# Single-GPU session with soft placement and incremental GPU memory growth.
session_config = tf.ConfigProto(
    allow_soft_placement=True,
    gpu_options=tf.GPUOptions(allow_growth=True),
    device_count={'GPU': 1})
sess = tf.Session(config=session_config)

# Saver over every global variable, used to restore a checkpoint.
all_vars = tf.global_variables()
checkpoint_loader = tf.train.Saver(var_list=all_vars, max_to_keep=1)