def __init__(self, config, rng):
    """Build the trainer: data loader, normalized adjacency matrix, model,
    checkpoint saver(s), summary writers, and a supervised TF session.

    Args:
        config: experiment configuration object; fields read here include
            task, model_dir, gpu_memory_fraction, checkpoint_secs, log_step,
            num_epochs, data_dir, batch_size, num_time_steps, num_node.
        rng: random number generator, stored for use elsewhere in the trainer.
    """
    self.config = config
    self.rng = rng
    self.task = config.task
    self.model_dir = config.model_dir
    self.gpu_memory_fraction = config.gpu_memory_fraction
    self.checkpoint_secs = config.checkpoint_secs
    self.log_step = config.log_step
    self.num_epoch = config.num_epochs

    ## import data loader ##
    data_dir = config.data_dir
    dataset_name = config.task
    batch_size = config.batch_size
    num_time_steps = config.num_time_steps
    num_node = config.num_node
    self.data_loader = BatchLoader(data_dir, dataset_name, batch_size,
                                   num_time_steps, num_node)

    # Need to think about how we construct the adjacency matrix (W).
    # NOTE(review): W / W.max() is a plain rescaling of the adjacency,
    # not a graph Laplacian in the textbook sense -- confirm this is
    # what graph.lmax() and Model actually expect.
    W = self.data_loader.adj
    laplacian = W / W.max()
    laplacian = scipy.sparse.csr_matrix(laplacian, dtype=np.float32)
    # Largest eigenvalue of the (rescaled) matrix; presumably used by the
    # model for Chebyshev polynomial scaling -- TODO confirm in Model.
    lmax = graph.lmax(laplacian)

    ## define model ##
    self.model = Model(config, laplacian, lmax)

    ## model saver / summary writer ##
    self.saver = tf.train.Saver()
    self.model_saver = tf.train.Saver(self.model.model_vars)
    self.summary_train_writer = tf.summary.FileWriter(self.model_dir + '/train')
    self.summary_test_writer = tf.summary.FileWriter(self.model_dir + '/test')

    # Supervisor owns checkpointing (save_model_secs) and restores from
    # model_dir if a checkpoint exists; summary_op=None because summaries
    # are written manually through the writers above.
    sv = tf.train.Supervisor(logdir=self.model_dir,
                             is_chief=True,
                             saver=self.saver,
                             summary_op=None,
                             summary_writer=self.summary_train_writer,
                             save_summaries_secs=300,
                             save_model_secs=self.checkpoint_secs,
                             global_step=self.model.model_step)

    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=self.gpu_memory_fraction,
        allow_growth=True)  # seems to be not working
    sess_config = tf.ConfigProto(allow_soft_placement=True,
                                 gpu_options=gpu_options)
    self.sess = sv.prepare_or_wait_for_session(config=sess_config)
def __init__(self, config, rng):
    """Build the trainer: data loader, normalized adjacency matrix, model,
    checkpoint saver(s), a summary writer, and a supervised TF session.

    Args:
        config: experiment configuration object; fields read here include
            task, model_dir, gpu_memory_fraction, checkpoint_secs, log_step,
            num_epochs, data_dir, batch_size, num_time_steps.
        rng: random number generator, stored for use elsewhere in the trainer.
    """
    self.config = config
    self.rng = rng
    self.task = config.task
    self.model_dir = config.model_dir
    self.gpu_memory_fraction = config.gpu_memory_fraction
    self.checkpoint_secs = config.checkpoint_secs
    self.log_step = config.log_step
    self.num_epoch = config.num_epochs

    ## import data loader ##
    data_dir = config.data_dir
    dataset_name = config.task
    batch_size = config.batch_size
    num_time_steps = config.num_time_steps
    self.data_loader = BatchLoader(data_dir, dataset_name, batch_size,
                                   num_time_steps)

    ## Need to think about how we construct adj matrix(W)
    W = self.data_loader.adj
    laplacian = W / W.max()  # normalized; NOTE(review): a rescaled adjacency, not a textbook Laplacian -- confirm
    laplacian = scipy.sparse.csr_matrix(laplacian, dtype=np.float32)  # store the matrix in CSR format
    lmax = graph.lmax(laplacian)  # Q: purpose unknown -- presumably the largest eigenvalue, used by the model; verify

    ## define model ##
    self.model = Model(config, laplacian, lmax)

    ## model saver / summary writer ##
    self.saver = tf.train.Saver()
    self.model_saver = tf.train.Saver(self.model.model_vars)
    self.summary_writer = tf.summary.FileWriter(self.model_dir)

    # Supervisor owns checkpointing (save_model_secs) and restores from
    # model_dir if a checkpoint exists; summary_op=None because summaries
    # are written manually through summary_writer.
    sv = tf.train.Supervisor(logdir=self.model_dir,
                             is_chief=True,
                             saver=self.saver,
                             summary_op=None,
                             summary_writer=self.summary_writer,
                             save_summaries_secs=300,
                             save_model_secs=self.checkpoint_secs,
                             global_step=self.model.model_step)

    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=self.gpu_memory_fraction,
        allow_growth=True)  # seems to be not working
    sess_config = tf.ConfigProto(allow_soft_placement=True,
                                 gpu_options=gpu_options)
    self.sess = sv.prepare_or_wait_for_session(config=sess_config)