    # Wordset variant: loads the vocabulary, answer, and wordset
    # dictionaries, then builds an embedding map for each.
    def __init__(self, batch, config, is_train=True):
        self.batch = batch
        self.config = config
        self.data_cfg = config.data_cfg
        self.data_dir = config.data_dir
        self.is_train = is_train

        self.losses = {}
        self.report = {}
        self.mid_result = {}
        self.vis_image = {}

        vocab_path = os.path.join(self.data_dir, 'vocab.pkl')
        self.vocab = cPickle.load(open(vocab_path, 'rb'))

        answer_dict_path = os.path.join(self.data_dir, 'answer_dict.pkl')
        self.answer_dict = cPickle.load(open(answer_dict_path, 'rb'))
        self.num_answer = len(self.answer_dict['vocab'])

        ws_dict_path = os.path.join(self.data_dir, 'wordset_dict5.pkl')
        self.ws_dict = cPickle.load(open(ws_dict_path, 'rb'))
        self.num_ws = len(self.ws_dict['vocab'])

        self.wordset_map = modules.learn_embedding_map(self.ws_dict,
                                                       scope='wordset_map')
        # [#vocab, W_DIM]
        self.v_word_map = modules.LearnGloVe(self.vocab, scope='V_GloVe')
        self.l_word_map = modules.LearnGloVe(self.vocab, scope='L_GloVe')
        # [#answer_vocab, W_DIM]
        self.l_answer_word_map = modules.LearnAnswerGloVe(self.answer_dict)

        self.build()
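
# `modules.learn_embedding_map` and `modules.LearnGloVe` are defined
# elsewhere in the repository. As a side note, a minimal sketch of an
# embedding map over a {'vocab': [...], 'dict': {...}} dictionary might look
# like the helper below; the body, default dimension, and initializer are
# assumptions for illustration, not the repository's implementation.
import tensorflow as tf

def learn_embedding_map_sketch(word_dict, dim=300, scope='embedding_map'):
    # One trainable row per vocabulary entry; rows are gathered by word id.
    with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
        return tf.get_variable(
            'weights', shape=[len(word_dict['vocab']), dim],
            initializer=tf.random_uniform_initializer(-0.01, 0.01))
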
    # Latent-loss variant: additionally tracks the global step, a fixed
    # latent-loss weight, and visualization histograms in the report.
    def __init__(self, batch, config, is_train=True):
        self.batch = batch
        self.config = config
        self.data_cfg = config.data_cfg
        self.data_dir = config.data_dir
        self.is_train = is_train

        self.losses = {}
        self.report = {}
        self.vis_hist = {}
        self.mid_result = {}
        self.vis_image = {}

        self.global_step = tf.train.get_or_create_global_step(graph=None)
        # Fixed scalar weight for the latent loss term.
        self.latent_loss_weight = tf.convert_to_tensor(0.1)
        self.report['model_step'] = self.global_step
        self.report['latent_loss_weight'] = self.latent_loss_weight

        vocab_path = os.path.join(self.data_dir, 'vocab.pkl')
        self.vocab = cPickle.load(open(vocab_path, 'rb'))

        answer_dict_path = os.path.join(self.data_dir, 'answer_dict.pkl')
        self.answer_dict = cPickle.load(open(answer_dict_path, 'rb'))
        self.num_answer = len(self.answer_dict['vocab'])

        # [#vocab, W_DIM]
        self.v_word_map = modules.LearnGloVe(self.vocab, scope='V_GloVe')
        self.l_word_map = modules.LearnGloVe(self.vocab, scope='L_GloVe')
        # [#answer_vocab, W_DIM]
        self.l_answer_word_map = modules.LearnAnswerGloVe(self.answer_dict)

        self.build()
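
# The fixed `latent_loss_weight` above is presumably folded into the total
# objective inside build(). A hedged sketch of that combination (the
# function name and arguments are assumptions for illustration):
def weighted_total_loss_sketch(task_loss, latent_loss, weight=0.1):
    # total = task loss + weight * latent loss, mirroring the 0.1 above.
    return task_loss + weight * latent_loss
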
    # Wikipedia-context (enwiki) variant: extends the wordset variant with a
    # preprocessed enwiki context dictionary and its embedding map.
    def __init__(self, batch, config, is_train=True):
        self.batch = batch
        self.config = config
        self.data_cfg = config.data_cfg
        self.data_dir = config.data_dir
        self.is_train = is_train

        self.losses = {}
        self.report = {}
        self.mid_result = {}
        self.vis_image = {}

        vocab_path = os.path.join(self.data_dir, 'vocab.pkl')
        self.vocab = cPickle.load(open(vocab_path, 'rb'))

        answer_dict_path = os.path.join(self.data_dir, 'answer_dict.pkl')
        self.answer_dict = cPickle.load(open(answer_dict_path, 'rb'))
        self.num_answer = len(self.answer_dict['vocab'])

        ws_dict_path = os.path.join(self.data_dir, 'wordset_dict5.pkl')
        self.ws_dict = cPickle.load(open(ws_dict_path, 'rb'))
        self.num_ws = len(self.ws_dict['vocab'])

        enwiki_dict_path = os.path.join(
            self.data_dir,
            'enwiki_context_dict_w3_p{}_n5.pkl'.format(config.enwiki_preprocessing))
        self.enwiki_dict = cPickle.load(open(enwiki_dict_path, 'rb'))
        self.num_context_vocab = len(self.enwiki_dict['context_word_vocab'])
        self.max_context_len = self.enwiki_dict['max_context_len']
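        # Repackage the enwiki vocabulary into the {'vocab': ..., 'dict': ...}
        # layout that learn_embedding_map below expects.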
        self.enwiki_vocab_dict = {
            'vocab': self.enwiki_dict['context_word_vocab'],
            'dict': self.enwiki_dict['context_word_dict'],
        }

        self.wordset_map = modules.learn_embedding_map(
            self.ws_dict, scope='wordset_map')
        self.enwiki_map = modules.learn_embedding_map(
            self.enwiki_vocab_dict, scope='enwiki_map')
        # [#vocab, W_DIM]
        self.v_word_map = modules.LearnGloVe(self.vocab, scope='V_GloVe')
        self.l_word_map = modules.LearnGloVe(self.vocab, scope='L_GloVe')
        # [#answer_vocab, W_DIM]
        self.l_answer_word_map = modules.LearnAnswerGloVe(self.answer_dict)

        self.build()
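
# The three constructors above belong to separate model variants. Typical
# usage would be along these lines (the class name `Model` is an assumption
# for illustration):
#
#     model = Model(batch, config, is_train=True)
#
# The module-level code below sets up a session and saver for loading a
# trained checkpoint.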
# Create a fresh save directory; refuse to overwrite an existing experiment.
if not os.path.exists(config.save_dir):
    log.warn('create directory: {}'.format(config.save_dir))
    os.makedirs(config.save_dir)
else:
    raise ValueError('Do not overwrite: {}'.format(config.save_dir))

vocab_path = os.path.join(config.data_dir, 'vocab.pkl')
vocab = cPickle.load(open(vocab_path, 'rb'))

answer_dict_path = os.path.join(config.data_dir, 'answer_dict.pkl')
answer_dict = cPickle.load(open(answer_dict_path, 'rb'))
num_answer = len(answer_dict['vocab'])

v_word_map = modules.LearnGloVe(vocab, scope='V_GloVe')
l_word_map = modules.LearnGloVe(vocab, scope='L_GloVe')
l_answer_word_map = modules.LearnAnswerGloVe(answer_dict)

with tf.variable_scope('classifier/fc', reuse=tf.AUTO_REUSE):
    # weights: float32 [class_feat_dim, num_answer],
    # e.g. 2048 x 4000 = 8192000 parameters (32768000 bytes)
    class_weights = tf.get_variable('weights',
                                    shape=[config.class_feat_dim, num_answer])
    # biases: float32 [num_answer], e.g. 4000 parameters (16000 bytes)
    class_biases = tf.get_variable('biases', shape=[num_answer])
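
# These variables form a linear classifier head. Applying it to a pooled
# feature would look like the following sketch (`feat` with shape
# [batch_size, class_feat_dim] is assumed):
#
#     logits = tf.nn.xw_plus_b(feat, class_weights, class_biases)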

# Single-GPU session; allocate GPU memory on demand rather than up front.
session_config = tf.ConfigProto(allow_soft_placement=True,
                                gpu_options=tf.GPUOptions(allow_growth=True),
                                device_count={'GPU': 1})
sess = tf.Session(config=session_config)

# Saver over every variable in the graph, used to restore a trained checkpoint.
all_vars = tf.global_variables()
checkpoint_loader = tf.train.Saver(var_list=all_vars, max_to_keep=1)
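
# Restoring a trained model then reduces to pointing the saver at a
# checkpoint file (the field name `config.checkpoint` is an assumption):
#
#     checkpoint_loader.restore(sess, config.checkpoint)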