def run(dat, ac_config):
    """Run the hypnodensity model over segmented input and return its logits.

    Restores the latest checkpoint from ``ac_config.hypnodensity_model_dir``
    and evaluates the model batch by batch over the segmented data.

    Args:
        dat: Input feature array; the slicing below assumes shape
            (channels, time, features) -- TODO confirm with caller.
        ac_config: Configuration providing ``hypnodensity_model_dir``,
            ``num_hidden``, ``eval_nseg_atonce`` and ``segsize``.

    Returns:
        2-D array of per-segment model outputs with the padded (extra)
        segments trimmed from the end.
    """
    with tf.Graph().as_default() as g:
        m = SCModel(ac_config)
        s = tf.train.Saver(tf.global_variables())
        # print("AC config hypnodensity path", ac_config.hypnodensity_model_dir)
        with tf.Session(config=tf.ConfigProto(log_device_placement=False)) as session:
            ckpt = tf.train.get_checkpoint_state(ac_config.hypnodensity_model_dir)
            # For debugging
            # pdb.set_trace()
            s.restore(session, ckpt.model_checkpoint_path)

            # Zero initial RNN state (cell + hidden concatenated).
            state = np.zeros([1, ac_config.num_hidden * 2])

            dat, Nextra, prediction, num_batches = Hypnodensity.segment(dat, ac_config)

            # Loop-invariant: number of time samples evaluated per batch.
            batch_len = ac_config.eval_nseg_atonce * ac_config.segsize
            for i in range(num_batches):
                x = dat[:, i * batch_len:(i + 1) * batch_len, :]
                # NOTE(review): the final RNN state is discarded, so every
                # batch starts from the zero state. If state should carry
                # across batches (as in the compat.v1 variant), capture it.
                est, _ = session.run([m.logits, m.final_state], feed_dict={
                    m.features: x,
                    m.targets: np.ones([batch_len, 5]),
                    m.mask: np.ones(batch_len),
                    m.batch_size: np.ones([1]),
                    m.initial_state: state
                })
                prediction[i * ac_config.eval_nseg_atonce:
                           (i + 1) * ac_config.eval_nseg_atonce, :] = est

            # BUG FIX: when Nextra == 0 the original slice
            # `prediction[:-0, :]` evaluated to `prediction[:0, :]`,
            # i.e. an EMPTY array. Only trim when there is padding.
            trim = int(Nextra / ac_config.segsize)
            if trim:
                prediction = prediction[:-trim, :]
            return prediction
def run(dat, ac_config):
    """Profiled variant of ``run``: evaluates the hypnodensity model while
    collecting tfprof timing/memory traces for each session step.

    Profile dumps are written to a per-invocation directory under the
    system temp dir, indexed by ``Hypnodensity.run_count``.

    Args:
        dat: Input feature array; the slicing below assumes shape
            (channels, time, features) -- TODO confirm with caller.
        ac_config: Configuration providing ``hypnodensity_model_dir``,
            ``num_hidden``, ``eval_nseg_atonce`` and ``segsize``.

    Returns:
        2-D array of per-segment model outputs with the padded (extra)
        segments trimmed from the end.
    """
    # BUG FIX: `++Hypnodensity.run_count` was a C-style increment that is a
    # no-op in Python (double unary plus); the counter is used below to name
    # the profile directory, so actually increment it.
    Hypnodensity.run_count += 1

    # Cap segments-per-batch by what the data actually contains.
    once = min(ac_config.eval_nseg_atonce,
               int((dat.shape[1] / ac_config.segsize) - 1))

    # Leave one core free; MKL honours this env var for its thread pool.
    thread_count = max(multiprocessing.cpu_count() - 1, 1)
    os.environ["MKL_NUM_THREADS"] = str(thread_count)

    with tf.Graph().as_default() as g:
        m = SCModel(ac_config)
        s = tf.train.Saver(tf.global_variables())
        print("AC config hypnodensity path", ac_config.hypnodensity_model_dir)

        builder = tf.profiler.ProfileOptionBuilder
        opts = builder(builder.time_and_memory()).order_by('micros').build()
        # BUG FIX: plain string concatenation with gettempdir() produced a
        # SIBLING of the temp dir (e.g. "/tmpmlstages_profile_0"); join the
        # components so the profile dir lives inside the temp dir.
        profile_dir = os.path.join(
            gettempdir(), 'mlstages_profile_' + str(Hypnodensity.run_count))

        with tf.contrib.tfprof.ProfileContext(profile_dir) as pctx:
            config = tf.ConfigProto(log_device_placement=False,
                                    intra_op_parallelism_threads=thread_count,
                                    inter_op_parallelism_threads=thread_count)
            with tf.Session(config=config) as session:
                pctx.trace_next_step()
                pctx.dump_next_step()
                ckpt = tf.train.get_checkpoint_state(ac_config.hypnodensity_model_dir)
                pctx.profiler.profile_operations(options=opts)
                s.restore(session, ckpt.model_checkpoint_path)

                # Zero initial RNN state (cell + hidden concatenated).
                state = np.zeros([1, ac_config.num_hidden * 2])

                dat, Nextra, prediction, num_batches = Hypnodensity.segment(dat, ac_config)
                batch_len = once * ac_config.segsize
                for i in range(num_batches):
                    x = dat[:, i * batch_len:(i + 1) * batch_len, :]
                    pctx.trace_next_step()
                    pctx.dump_next_step()
                    est, _ = session.run(
                        [m.logits, m.final_state],
                        feed_dict={
                            m.features: x,
                            m.targets: np.ones([batch_len, 5]),
                            m.mask: np.ones(batch_len),
                            m.batch_size: np.ones([1]),
                            m.initial_state: state
                        },
                        options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE))
                    pctx.profiler.profile_operations(options=opts)
                    prediction[i * once:(i + 1) * once, :] = est

                # BUG FIX: when Nextra == 0 the original slice
                # `prediction[:-0, :]` evaluated to an EMPTY array.
                trim = int(Nextra / ac_config.segsize)
                if trim:
                    prediction = prediction[:-trim, :]
                return prediction
def run(dat, ac_config):
    """TF ``compat.v1`` variant of ``run``: evaluates the hypnodensity model
    batch by batch, carrying the RNN state across batches.

    Args:
        dat: Input feature array; the slicing below assumes shape
            (channels, time, features) -- TODO confirm with caller.
        ac_config: Configuration providing ``hypnodensity_model_dir``,
            ``num_hidden``, ``eval_nseg_atonce`` and ``segsize``.

    Returns:
        2-D array of per-segment model outputs with the padded (extra)
        segments trimmed from the end.
    """
    with tf.compat.v1.Graph().as_default() as g:
        m = SCModel(ac_config)
        s = tf.compat.v1.train.Saver(tf.compat.v1.global_variables())
        print("AC config hypnodensity path", ac_config.hypnodensity_model_dir)

        # config = tf.compat.v1.ConfigProto(log_device_placement=False, device_count={'GPU': 0})  # For cpu only operations
        config = tf.compat.v1.ConfigProto(log_device_placement=False)
        # Setting log_device_placement=True gives way too much output.
        # config.gpu_options.allow_growth = True  # See also: https://www.tensorflow.org/guide/using_gpu
        # config.gpu_options.per_process_gpu_memory_fraction = 1.0
        with tf.compat.v1.Session(config=config) as session:
            ckpt = tf.compat.v1.train.get_checkpoint_state(
                ac_config.hypnodensity_model_dir)
            s.restore(session, ckpt.model_checkpoint_path)

            # Zero initial RNN state (cell + hidden concatenated); updated
            # after each batch so state carries across the whole recording.
            state = np.zeros([1, ac_config.num_hidden * 2])

            dat, Nextra, prediction, num_batches = Hypnodensity.segment(
                dat, ac_config)

            # Loop-invariant: number of time samples evaluated per batch.
            batch_len = ac_config.eval_nseg_atonce * ac_config.segsize
            for i in range(num_batches):
                x = dat[:, i * batch_len:(i + 1) * batch_len, :]
                est, state = session.run(
                    [m.logits, m.final_state],
                    feed_dict={
                        m.features: x,
                        m.targets: np.ones([batch_len, 5]),
                        m.mask: np.ones(batch_len),
                        m.batch_size: np.ones([1]),
                        m.initial_state: state
                    })
                prediction[i * ac_config.eval_nseg_atonce:
                           (i + 1) * ac_config.eval_nseg_atonce, :] = est

            # BUG FIX: when Nextra == 0 the original slice
            # `prediction[:-0, :]` evaluated to `prediction[:0, :]`,
            # i.e. an EMPTY array. Only trim when there is padding.
            trim = int(Nextra / ac_config.segsize)
            if trim:
                prediction = prediction[:-trim, :]
            return prediction