def _dump_tensors(self):
    """Write the recorded tensor charts to disk as a custom-scalar layout.

    Builds one TensorBoard custom-scalar category per scope (one multiline
    chart per tensor name), merges it with any layout previously stored in
    ``self._layout_writer_dir``, and writes the result back as a summary.
    """
    if not self._has_recorded_tensor:
        return

    def write_layout(layout_proto):
        # Shared tail of both branches below: serialize the layout proto
        # and flush it to the layout writer directory immediately.
        self._layout_writer = tf.summary.FileWriter(self._layout_writer_dir)
        layout_summary = summary_lib.custom_scalar_pb(layout_proto)
        self._layout_writer.add_summary(layout_summary)
        self._layout_writer.close()

    layout_categories = []
    for scope, names in self._scope_tensor.items():
        # Each tag regex matches the named series while excluding the
        # auto-generated "margin" series.
        chart = [
            layout_pb2.Chart(
                title=name,
                multiline=layout_pb2.MultilineChartContent(
                    tag=[r'name(?!.*margin.*)'.replace('name', name)]))
            for name in names
        ]
        layout_categories.append(layout_pb2.Category(title=scope, chart=chart))

    if not layout_categories:
        return
    layout_proto_to_write = layout_pb2.Layout(category=layout_categories)
    try:
        # Load former layout_proto from self._layout_writer_dir.
        multiplexer = event_multiplexer.EventMultiplexer()
        multiplexer.AddRunsFromDirectory(self._layout_writer_dir)
        multiplexer.Reload()
        tensor_events = multiplexer.Tensors('.', metadata.CONFIG_SUMMARY_TAG)
        shutil.rmtree(self._layout_writer_dir)
        # Parse layout proto from disk.
        string_array = tf.make_ndarray(tensor_events[0].tensor_proto)
        # BUG FIX: np.asscalar() was removed in NumPy 1.23; ndarray.item()
        # is the documented replacement with identical behavior.
        content = string_array.item()
        layout_proto_from_disk = layout_pb2.Layout()
        layout_proto_from_disk.ParseFromString(tf.compat.as_bytes(content))
        # Merge the two layout protos at the JSON level.
        merged_layout_json = merge(
            json_format.MessageToJson(layout_proto_from_disk),
            json_format.MessageToJson(layout_proto_to_write))
        merged_layout_proto = layout_pb2.Layout()
        json_format.Parse(str(merged_layout_json), merged_layout_proto)
        write_layout(merged_layout_proto)
    except KeyError:
        # No layout stored yet: write the current layout proto as-is.
        write_layout(layout_proto_to_write)
def custom_board_generator(graph):
    """Build the custom-scalar board proto for TensorBoard.

    One multiline chart is generated per coefficient (and per scaled
    coefficient) of ``graph.PI_costs``, plus a fixed set of loss charts.
    """

    def multiline_chart(title, tags):
        # Small helper: a chart whose series are selected by tag patterns.
        return layout_pb2.Chart(
            title=title,
            multiline=layout_pb2.MultilineChartContent(tag=tags))

    # Coefficient charts must be built dynamically, one per PI cost term.
    coeff_chart = []
    coeff_scaled_chart = []
    for idx in range(len(graph.PI_costs)):
        coeff_chart.append(
            multiline_chart('Coeff_' + str(idx),
                            [r'Coeff_' + str(idx) + '_Comp_*']))
        coeff_scaled_chart.append(
            multiline_chart('Scaled_Coeff_' + str(idx),
                            [r'Scaled_Coeff_' + str(idx) + '_Comp_*']))

    training_charts = [
        multiline_chart('MSE_Losses', [r'MSE_cost_*']),
        multiline_chart('PI_Losses', [r'PI_cost_*']),
        multiline_chart('L1_Losses', [r'L1_cost_*']),
        multiline_chart('Total_cost', ['Total_cost']),
        multiline_chart('Gradloss', ['Loss_Grad']),
    ]

    # Assemble the board: training losses plus both coefficient groups.
    return custom_scalar_pb(
        layout_pb2.Layout(category=[
            layout_pb2.Category(title='Training', chart=training_charts),
            layout_pb2.Category(title='Coefficients', chart=coeff_chart),
            layout_pb2.Category(title='Scaled_Coefficients',
                                chart=coeff_scaled_chart),
        ]))
def finalize_autosummaries() -> None:
    """Create the necessary ops to include autosummaries in TensorBoard report.
    Note: This should be done only once per graph.
    """
    # NOTE(review): despite the '-> None' annotation, this function returns
    # the custom-scalar layout summary (or None) -- confirm the intended type.
    global _finalized
    tfutil.assert_tf_initialized()

    # Idempotence guard: summary ops must only be created once per graph.
    if _finalized:
        return None

    _finalized = True
    # Ensure all accumulator variables exist before they are read below.
    tfutil.init_uninitialized_vars([var for vars_list in _vars.values() for var in vars_list])

    # Create summary ops.
    # Detach from any surrounding device placement / control dependencies so
    # the summary subgraph does not inherit the caller's context.
    with tf.device(None), tf.control_dependencies(None):
        for name, vars_list in _vars.items():
            name_id = name.replace("/", "_")
            with tfutil.absolute_name_scope("Autosummary/" + name_id):
                # Sum the per-collector accumulators. Each holds a 3-vector
                # (reset below to tf.zeros(3)) -- presumably
                # [count, sum, sum-of-squares]: after dividing by the count,
                # moments[1] acts as the mean and moments[2] as the
                # mean-square (see std computation). TODO confirm layout.
                moments = tf.add_n(vars_list)
                moments /= moments[0]
                with tf.control_dependencies([moments]):  # read before resetting
                    reset_ops = [tf.assign(var, tf.zeros(3, dtype=_dtype)) for var in vars_list]
                    with tf.name_scope(None), tf.control_dependencies(reset_ops):  # reset before reporting
                        mean = moments[1]
                        std = tf.sqrt(moments[2] - tf.square(moments[1]))
                        tf.summary.scalar(name, mean)
                        if enable_custom_scalars:
                            # Extra series feeding the margin charts laid out below.
                            tf.summary.scalar("xCustomScalars/" + name + "/margin_lo", mean - std)
                            tf.summary.scalar("xCustomScalars/" + name + "/margin_hi", mean + std)

    # Setup layout for custom scalars.
    layout = None
    if enable_custom_scalars:
        # Bucket the series names into category/chart groups derived from
        # their slash-separated paths: "<cat>/<chart...>/<leaf>".
        cat_dict = OrderedDict()
        for series_name in sorted(_vars.keys()):
            p = series_name.split("/")
            cat = p[0] if len(p) >= 2 else ""
            chart = "/".join(p[1:-1]) if len(p) >= 3 else p[-1]
            if cat not in cat_dict:
                cat_dict[cat] = OrderedDict()
            if chart not in cat_dict[cat]:
                cat_dict[cat][chart] = []
            cat_dict[cat][chart].append(series_name)
        # Build one margin chart per bucket, wiring the lo/hi bounds to the
        # xCustomScalars series written above.
        categories = []
        for cat_name, chart_dict in cat_dict.items():
            charts = []
            for chart_name, series_names in chart_dict.items():
                series = []
                for series_name in series_names:
                    series.append(layout_pb2.MarginChartContent.Series(
                        value=series_name,
                        lower="xCustomScalars/" + series_name + "/margin_lo",
                        upper="xCustomScalars/" + series_name + "/margin_hi"))
                margin = layout_pb2.MarginChartContent(series=series)
                charts.append(layout_pb2.Chart(title=chart_name, margin=margin))
            categories.append(layout_pb2.Category(title=cat_name, chart=charts))
        layout = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=categories))
    return layout
def tb_layout():
    """Return the custom-scalar layout grouping training curves by topic."""
    # (category title, [(chart title, [tag regexes]), ...])
    chart_specs = [
        ('Episode Reward', [
            ("Shaped Reward", [r'shaping/eprewmean_true']),
            ("Episode Length", [r'eplenmean']),
            ("Sparse Reward", [r'shaping/epsparsemean']),
            ("Dense Reward", [r'shaping/epdensemean']),
            ("Dense Reward Annealing", [r'shaping/rew_anneal_c']),
            ("Unshaped Reward", [r'ep_rewmean']),
            ("Victim Action Noise", [r'shaping/victim_noise']),
        ]),
        ("Game Outcomes", [
            ("Agent 0 Win Proportion", [r'game_win0']),
            ("Agent 1 Win Proportion", [r'game_win1']),
            ("Tie Proportion", [r'game_tie']),
            ("# of games", [r'game_total']),
        ]),
        ("Training", [
            ("Policy Loss", [r'policy_loss']),
            ("Value Loss", [r'value_loss']),
            ("Policy Entropy", [r'policy_entropy']),
            ("Explained Variance", [r'explained_variance']),
            ("Approx KL", [r'approxkl']),
            ("Clip Fraction", [r'clipfrac']),
        ]),
        # Intentionally unused:
        # + serial_timesteps (just total_timesteps / num_envs)
        # + time_elapsed (TensorBoard already logs wall-clock time)
        # + nupdates (this is already logged as step)
        ("Time", [
            ("Total Timesteps", [r'total_timesteps']),
            ("FPS", [r'fps']),
        ]),
    ]
    categories = [
        layout_pb2.Category(title=title, chart=gen_multiline_charts(charts))
        for title, charts in chart_specs
    ]
    return summary_lib.custom_scalar_pb(layout_pb2.Layout(category=categories))
def get_layout_summary():
    """Return a layout pairing train/eval loss and RMSLE curves per chart."""
    chart_specs = (
        ("losses", ['train/loss', 'eval/loss']),
        ("rmsles", ['train/rmsle', 'eval/rmsle']),
    )
    charts = [
        layout_pb2.Chart(
            title=title,
            multiline=layout_pb2.MultilineChartContent(tag=tags))
        for title, tags in chart_specs
    ]
    metrics_category = layout_pb2.Category(title="metrics", chart=charts)
    return summary_lib.custom_scalar_pb(
        layout_pb2.Layout(category=[metrics_category]))
def layout_dashboard(writer):
    """Write the custom-scalar dashboard layout to *writer*.

    This only needs to happen once per log directory (not per step), so it
    is done here directly rather than through an op in the graph.
    """
    from tensorboard import summary
    from tensorboard.plugins.custom_scalar import layout_pb2

    losses_charts = [
        layout_pb2.Chart(
            title='losses',
            multiline=layout_pb2.MultilineChartContent(tag=[r'loss.*'])),
        layout_pb2.Chart(
            title='baz',
            margin=layout_pb2.MarginChartContent(series=[
                layout_pb2.MarginChartContent.Series(
                    value='loss/baz/scalar_summary',
                    lower='baz_lower/baz/scalar_summary',
                    upper='baz_upper/baz/scalar_summary'),
            ])),
    ]
    trig_charts = [
        layout_pb2.Chart(
            title='wave trig functions',
            multiline=layout_pb2.MultilineChartContent(
                tag=[r'trigFunctions/cosine', r'trigFunctions/sine'])),
        # The range of tangent is different. Let's give it its own chart.
        layout_pb2.Chart(
            title='tan',
            multiline=layout_pb2.MultilineChartContent(
                tag=[r'trigFunctions/tangent'])),
    ]
    layout_summary = summary.custom_scalar_pb(layout_pb2.Layout(category=[
        layout_pb2.Category(title='losses', chart=losses_charts),
        # This category we care less about; start it collapsed in the UI.
        layout_pb2.Category(title='trig functions', chart=trig_charts,
                            closed=True),
    ]))
    writer.add_summary(layout_summary)
def _init_custom_scalar_layout(self):
    """Register the per-actor multiline charts with the summary writer."""
    # (category title, chart title, tag regex matching every actor's series)
    specs = [
        ("mean rewards",
         "mean rewards per actor",
         r"actor-\d+/mean rewards"),
        ("mean episode lengths",
         "mean episode length per actor",
         r"actor-\d+/mean episode lengths"),
        ("mean fruits eaten",
         "mean fruits eaten per actor",
         r"actor-\d+/mean fruits eaten"),
    ]
    categories = []
    for category_title, chart_title, tag in specs:
        chart = layout_pb2.Chart(
            title=chart_title,
            multiline=layout_pb2.MultilineChartContent(tag=[tag]))
        categories.append(
            layout_pb2.Category(title=category_title, chart=[chart]))
    layout = layout_pb2.Layout(category=categories)
    self.writer.add_summary(summary.custom_scalar_pb(layout))
def add_custom_scalar(logdir):
    """Attach the 'Loss' custom-scalar layout to the writer for *logdir*."""
    # (chart title, tag pattern) pairs, all grouped under one 'Loss' category.
    chart_specs = [
        ('Loss', r'1_loss/*'),
        ('Loss Component', r'2_loss_component/*'),
        ('Discriminator Values', r'3_discriminator_values/*'),
        ('Variation of sequences', r'Stddev/*'),
        ('BLOMSUM45', r'Blast/*/BLOMSUM45'),
        ('Evalue', r'Blast/*/Evalue'),
        ('Identity', r'Blast/*/Identity'),
    ]
    charts = [
        layout_pb2.Chart(
            title=title,
            multiline=layout_pb2.MultilineChartContent(tag=[tag]))
        for title, tag in chart_specs
    ]
    layout_summary = summary.custom_scalar_pb(
        layout_pb2.Layout(category=[
            layout_pb2.Category(title='Loss', chart=charts),
        ]))
    summary_writer = SummaryWriterCache.get(logdir)
    summary_writer.add_summary(layout_summary)
saver.restore(sess, latest_checkpoint) print_term(' done!', run_id) else: print_term('No checkpoint found in: {}'.format(checkpoint_paths), run_id) # Actual training with epochs as iteration layout_summary = summary_lib.custom_scalar_pb( layout_pb2.Layout(category=[ layout_pb2.Category(title='losses', chart=[ layout_pb2.Chart(title='losses', multiline=layout_pb2. MultilineChartContent( tag=[ 'training_col', 'validation_col', 'training_fwd', 'validation_fwd', 'training_ref', 'validation_ref', ])) ]) ])) train_col_writer.add_summary(layout_summary) for epoch in range(epochs): print_term( 'Starting epoch: {} (total images {})'.format( epoch, total_train_images), run_id) # Training step
import argparse
from pathlib import Path

import tensorflow as tf
from tensorboard import summary as summary_lib
from tensorboard.plugins.custom_scalar import layout_pb2

# One 'losses' chart matching every loss tag except the upper/lower bounds.
_losses_chart = layout_pb2.Chart(
    title='losses',
    multiline=layout_pb2.MultilineChartContent(tag=[r'loss(?!.*bound.*)']))
layout_summary = summary_lib.custom_scalar_pb(
    layout_pb2.Layout(category=[
        layout_pb2.Category(title='losses', chart=[_losses_chart]),
    ]))

parser = argparse.ArgumentParser()
parser.add_argument('folder', help='The log folder to place the layout in')
args = parser.parse_args()

# Layouts live in a dedicated 'layout' subdirectory of the log folder.
folder = (Path(args.folder) / 'layout').expanduser().resolve()
with tf.summary.FileWriter(folder) as writer:
    writer.add_summary(layout_summary)
print('Layout saved to', folder)
# Chart that shows every loss series except the margin bounds.
_loss_chart = layout_pb2.Chart(
    title='losses',
    multiline=layout_pb2.MultilineChartContent(tag=[r'loss(?!.*margin.*)']))
# Margin chart for 'baz' with explicit lower/upper bound series.
_baz_chart = layout_pb2.Chart(
    title='baz',
    margin=layout_pb2.MarginChartContent(series=[
        layout_pb2.MarginChartContent.Series(
            value='loss/baz/scalar_summary',
            lower='loss/baz_lower_margin/scalar_summary',
            upper='loss/baz_upper_margin/scalar_summary'),
    ]))
_wave_chart = layout_pb2.Chart(
    title='wave trig functions',
    multiline=layout_pb2.MultilineChartContent(
        tag=[r'trigFunctions/cosine', r'trigFunctions/sine']))
# The range of tangent is different. Give it its own chart.
_tan_chart = layout_pb2.Chart(
    title='tan',
    multiline=layout_pb2.MultilineChartContent(tag=[r'trigFunctions/tangent']))

layout_summary = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=[
    layout_pb2.Category(title='losses', chart=[_loss_chart, _baz_chart]),
    # This category we care less about; start it collapsed in the UI.
    layout_pb2.Category(title='trig functions',
                        chart=[_wave_chart, _tan_chart],
                        closed=True),
]))
import argparse
from pathlib import Path

import tensorflow as tf
from tensorboard import summary as summary_lib
from tensorboard.plugins.custom_scalar import layout_pb2

_train_chart = layout_pb2.Chart(
    title='Train',
    multiline=layout_pb2.MultilineChartContent(
        tag=['loss/train/mse', 'loss/train/l1']))
# NOTE(review): the 'Val' chart includes 'loss/train/mse' rather than a
# val-only tag pair -- looks like a copy/paste slip; confirm intended tags.
_val_chart = layout_pb2.Chart(
    title='Val',
    multiline=layout_pb2.MultilineChartContent(
        tag=['loss/train/mse', 'loss/val/mse']))
layout_summary = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=[
    layout_pb2.Category(title='Losses', chart=[_train_chart, _val_chart]),
]))

parser = argparse.ArgumentParser()
parser.add_argument('folder', help='The log folder to place the layout in')
args = parser.parse_args()

# Layouts live in a dedicated 'layout' subdirectory of the log folder.
folder = (Path(args.folder) / 'layout').expanduser().resolve()
with tf.summary.FileWriter(folder) as writer:
    writer.add_summary(layout_summary)
print('Layout saved to', folder)
def run_model(self):
    """Train the model for self._iter_steps batches inside one TF session.

    Writes train/validation summaries (custom-scalar layout included) to
    TensorBoard, periodically evaluates via self.predict(), and finally
    saves metric history / prediction records to self._result_path.
    Framework 1 is the pretraining path ('finish pretrain'); framework 2
    is the training path ('finish train').
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = "0"  # gpu selection
    sess_config = tf.ConfigProto()
    sess_config.gpu_options.per_process_gpu_memory_fraction = 1  # 100% gpu
    sess_config.gpu_options.allow_growth = True  # dynamic growth
    #iter_steps = (self._train_data.shape[0] / self._batch_size) * self._epoch
    #epoch = 1
    tot = 0  # running count of examples consumed
    # history rows: [step, AAccuracy, Af1, UAccuracy, Uf1]
    history = np.empty([0, 5])
    with tf.Session(config=sess_config) as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        train_writer = tf.summary.FileWriter(self._log_path + '/train',
                                             graph=sess.graph)
        test_write = tf.summary.FileWriter(self._log_path + '/test',
                                           graph=sess.graph)
        # Custom-scalar layout: pair the train/val series of each metric
        # (activity and user accuracy / loss) on shared charts.
        custom_layout = summary_lib.custom_scalar_pb(
            layout_pb2.Layout(category=[
                layout_pb2.Category(
                    title='Accuracies',
                    chart=[
                        layout_pb2.Chart(
                            title='Activity',
                            multiline=layout_pb2.MultilineChartContent(tag=[
                                r'train_1/a_accuracy', r'val_1/a_accuracy'
                            ])),
                        layout_pb2.Chart(
                            title='User',
                            multiline=layout_pb2.MultilineChartContent(tag=[
                                r'train_1/u_accuracy', r'val_1/u_accuracy'
                            ])),
                    ]),
                layout_pb2.Category(
                    title='Losses',
                    chart=[
                        layout_pb2.Chart(
                            title='Activity',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'train_1/a_loss', r'val_1/a_loss'])),
                        layout_pb2.Chart(
                            title='User',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'train_1/u_loss', r'val_1/u_loss'])),
                    ])
            ]))
        train_writer.add_summary(custom_layout)
        # result_array = np.empty( [0, 2, len( self._test_data )] )
        # Per-evaluation stacks of (truth, prediction) rows for the
        # activity (LA*) and user (LU*) labels.
        LARecord = np.empty([0, 2, self._test_data.shape[0]])
        LURecord = np.empty([0, 2, self._test_data.shape[0]])
        for i in range(self._iter_steps):
            data, la, lu = self.next_batch()
            # Exponentially decayed learning rate between max_lr and min_lr.
            lr = self._min_lr + (self._max_lr - self._min_lr) * math.exp(
                -i / self._decay_speed)
            tot += data.shape[0]
            '''
            if self._framework == 1:
                summary, _, _, _, _, _ = sess.run([self._merged, self._update_ops, self._a_train_step, self._u_train_step, self._a_accuracy_op, self._u_accuracy_op],
                                                  feed_dict={
                                                      self._X: data,
                                                      self._YA: la,
                                                      self._YU: lu,
                                                      self._learning_rate: lr,
                                                      self._is_training: True})
            elif self._framework == 2:
                summary, _, _, = sess.run([self._merged, self._update_ops, self._a_train_step],
                                          feed_dict={
                                              self._X: data,
                                              self._YA: la,
                                              self._YU: lu,
                                              self._learning_rate: lr,
                                              self._is_training: True})
            else:
                print("model error")
                exit()
            '''
            # Framework 1 runs both the activity and user training steps;
            # framework 2 runs only the activity step.
            if self._framework == 1:
                _, _, _, a_acc_train, u_acc_train, _, _, a_loss, u_loss, _lr = sess.run(
                    [
                        self._update_ops, self._a_train_step,
                        self._u_train_step, self._a_accuracy_train,
                        self._u_accuracy_train, self._a_accuracy_op_train,
                        self._u_accuracy_op_train, self._a_loss_train,
                        self._u_loss_train, self._lr
                    ],
                    feed_dict={
                        self._X: data,
                        self._YA: la,
                        self._YU: lu,
                        self._learning_rate: lr,
                        self._is_training: True
                    })
            elif self._framework == 2:
                _, _, a_acc_train, u_acc_train, _, _, a_loss, u_loss, _lr = sess.run(
                    [
                        self._update_ops, self._a_train_step,
                        self._a_accuracy_train, self._u_accuracy_train,
                        self._a_accuracy_op_train, self._u_accuracy_op_train,
                        self._a_loss_train, self._u_loss_train, self._lr
                    ],
                    feed_dict={
                        self._X: data,
                        self._YA: la,
                        self._YU: lu,
                        self._learning_rate: lr,
                        self._is_training: True
                    })
            else:
                print("model error")
                exit()
            train_writer.add_summary(a_acc_train, i)
            train_writer.add_summary(u_acc_train, i)
            train_writer.add_summary(a_loss, i)
            train_writer.add_summary(u_loss, i)
            train_writer.add_summary(_lr, i)
            if i % self._print_interval == 0:
                # added reset for validation metrics
                # Re-initialize streaming val/ metric variables so each
                # evaluation starts from a clean accumulator.
                stream_vars_valid = [
                    v for v in tf.local_variables() if 'val/' in v.name
                ]
                sess.run(tf.variables_initializer(stream_vars_valid))
                a_acc_val, u_acc_val, a_loss, u_loss, LATruth, LAPreds, LUTruth, LUPreds = self.predict(
                    sess, lr)
                LARecord = np.append(LARecord,
                                     np.expand_dims(
                                         np.vstack((LATruth, LAPreds)), 0),
                                     axis=0)
                LURecord = np.append(LURecord,
                                     np.expand_dims(
                                         np.vstack((LUTruth, LUPreds)), 0),
                                     axis=0)
                AAccuracy = accuracy_score(LATruth, LAPreds,
                                           range(self._dataset._act_num))
                Af1 = f1_score(LATruth, LAPreds,
                               range(self._dataset._act_num),
                               average='macro')
                UAccuracy = accuracy_score(LUTruth, LUPreds,
                                           range(self._dataset._user_num))
                Uf1 = f1_score(LUTruth, LUPreds,
                               range(self._dataset._user_num),
                               average='macro')
                test_write.add_summary(a_acc_val, i)
                test_write.add_summary(u_acc_val, i)
                test_write.add_summary(a_loss, i)
                test_write.add_summary(u_loss, i)
                print(
                    "step: {}, AAccuracy: {}, Af1: {}, UAccuracy: {}, Uf1: {}"
                    .format(i, AAccuracy, Af1, UAccuracy, Uf1))
                history = np.concatenate(
                    (history,
                     np.array([[i, AAccuracy, Af1, UAccuracy, Uf1]])),
                    axis=0)
        if self._framework == 1:
            # Pretraining path: persist weights plus the metric history.
            self.save_paremeters(sess)
            np.savetxt(self._result_path +
                       'log_history_pre_train_{}.txt'.format(self._fold),
                       history,
                       header='Step AAaccuracy Af1 UAccuracy Uf1',
                       fmt='%d %1.4f %1.4f %1.4f %1.4f',
                       delimiter='\t')
            print('finish pretrain')
        if self._framework == 2:
            # save log of train to file
            np.savetxt(self._result_path +
                       'log_history_train_{}.txt'.format(self._fold),
                       history,
                       header='Step AAaccuracy Af1 UAccuracy Uf1',
                       fmt='%d %1.4f %1.4f %1.4f %1.4f',
                       delimiter='\t')
            # Timestamped dump of the raw truth/prediction records.
            LARecordFile = self._result_path + \
                "AR_fold{}_".format(
                    self._fold) + time.strftime('%Y%m%d%H%M%S',
                                                time.localtime(time.time()))
            LURecordFile = self._result_path + \
                "UR_fold{}_".format(
                    self._fold) + time.strftime('%Y%m%d%H%M%S',
                                                time.localtime(time.time()))
            np.save(LARecordFile, LARecord)
            np.save(LURecordFile, LURecord)
            print("finish train")
        tf.keras.backend.clear_session()
def graph_setup(self):
    """
    Set up the computation graph for the neural network based on the parameters set at initialization
    """
    with self.graph.as_default():

        #######################
        # Define placeholders #
        #######################
        self.gamma = tf.placeholder(tf.float32, shape=[], name='gamma')
        self.learning_rate = tf.placeholder(tf.float32, shape=[], name='learning_rate')
        self.reg_loss_factor = tf.placeholder(tf.float32, shape=[], name='reg_loss_factor')
        self.nloc_factor = tf.placeholder(tf.float32, shape=[], name='nloc_factor')
        self.total_latent_size = np.sum(self.latent_sizes)
        # One input placeholder per encoder; batch dimension left open.
        self.inputs = [tf.placeholder(tf.float32, [None, self.input_sizes[k]],
                                      name='input{}'.format(k))
                       for k in range(self.encoder_num)]
        self.question_inputs = [
            tf.placeholder(tf.float32, shape=[None, self.question_sizes[i]],
                           name='q_dec{}'.format(i))
            for i in range(self.decoder_num)
        ]
        # NOTE(review): these placeholders reuse the name 'q_dec{}' already
        # used by question_inputs above -- TF will uniquify with a suffix;
        # confirm that was intended.
        self.answers = [
            tf.placeholder(tf.float32, shape=[None, self.answer_sizes[i]],
                           name='q_dec{}'.format(i))
            for i in range(self.decoder_num)
        ]
        self.select_noise = [
            tf.placeholder(tf.float32, shape=[None, self.total_latent_size],
                           name='select_noise_{}'.format(i))
            for i in range(self.decoder_num)
        ]

        def fc_layer(in_layer, num_outputs, activation_fn, collection='std'):
            # Fully connected layer with L2-regularized weights/biases,
            # registered in the given variable collection.
            return fully_connected(in_layer, num_outputs, activation_fn,
                                   weights_regularizer=l2_regularizer(1.),
                                   biases_regularizer=l2_regularizer(1.),
                                   variables_collections=[collection])

        ##########################################
        # Set up variables and computation graph #
        ##########################################
        # Each encoder maps its input to its slice of the latent vector.
        self.individual_latent = []
        for k in range(self.encoder_num):
            with tf.variable_scope('encoder_{}'.format(k)):
                temp_layer = self.inputs[k]
                for n in self.encoder_num_units:
                    temp_layer = fc_layer(temp_layer, num_outputs=n,
                                          activation_fn=tf.nn.elu)
                self.individual_latent.append(
                    fc_layer(temp_layer, num_outputs=self.latent_sizes[k],
                             activation_fn=tf.identity))
        with tf.variable_scope('latent_layer'):
            # Full latent = concatenation of all encoder outputs; its
            # per-dimension std (over the batch) scales the noise below.
            self.full_latent = tf.concat(self.individual_latent, axis=1)
            latent_std = tf.math.sqrt(tf.nn.moments(self.full_latent, axes=[0])[1])
        # Per-decoder selection factors (log scale, init -10): each decoder
        # sees the latent plus noise scaled by exp(sf_log) * latent_std.
        self.select_logs = []
        self.dec_inputs = []
        for n in range(self.decoder_num):
            with tf.variable_scope('select_dec{}'.format(n)):
                selectors = tf.get_variable(
                    'sf_log',
                    initializer=tf.initializers.constant(-10.),
                    shape=self.total_latent_size,
                    collections=[tf.GraphKeys.GLOBAL_VARIABLES, 'sel'])
                self.select_logs.append(selectors)
                self.dec_inputs.append(
                    self.full_latent +
                    latent_std * tf.exp(selectors) * self.select_noise[n])
        # Decoders consume (noisy latent, question) and produce answers,
        # scaled by pi/2.
        self.outputs = []
        for n in range(self.decoder_num):
            with tf.variable_scope('dec{}'.format(n)):
                temp_layer = tf.concat(
                    [self.dec_inputs[n], self.question_inputs[n]],
                    axis=1, name='dec_in')
                for q in self.decoder_num_units:
                    temp_layer = fc_layer(temp_layer, num_outputs=q,
                                          activation_fn=tf.nn.elu)
                out = np.pi / 2. * fc_layer(temp_layer,
                                            num_outputs=self.answer_sizes[n],
                                            activation_fn=tf.identity)
                self.outputs.append(out)

        #####################
        # Cost and training #
        #####################
        with tf.name_scope('cost'):
            sel_cost_list = []
            ans_cost_list = []
            for n in range(self.decoder_num):
                # Selection cost rewards large (less negative) sf_log values;
                # answer cost is the per-example summed squared error.
                sel_cost_list.append(tf.reduce_mean(self.select_logs[n]))
                ans_cost_list.append(
                    tf.reduce_mean(
                        tf.reduce_sum(
                            tf.squared_difference(self.answers[n],
                                                  self.outputs[n]),
                            axis=1)))
            self.cost_select = (-1) * tf.add_n(sel_cost_list)
            # First half of the decoders count as "local", the rest as
            # "non-local".
            loc_cut = int(ceil(self.decoder_num / 2))
            self.cost_loc = tf.add_n(
                [ans_cost_list[i] for i in range(0, loc_cut)],
                name='cost_local')
            # NOTE(review): this op is also named 'cost_local' -- likely a
            # copy/paste slip (expected something like 'cost_nonlocal');
            # TF will uniquify the name. Confirm intent before renaming.
            self.cost_nloc = tf.add_n(
                [ans_cost_list[i] for i in range(loc_cut, self.decoder_num)],
                name='cost_local')
            # Weighted mix of local and non-local answer costs, normalized
            # so the weights sum to 1.
            self.weighted_cost = (self.cost_loc +
                                  self.nloc_factor * self.cost_nloc) / (1.
                                                                        + self.nloc_factor)
        with tf.name_scope('reg_loss'):
            self.reg_loss = tf.losses.get_regularization_loss()
        with tf.name_scope('optimizer'):
            optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
            self.training_op = self.train_op_from_loss(optimizer, self.weighted_cost)
            # Pretraining updates only the listed variable collections.
            self.pretraining_op = self.train_op_from_loss(
                optimizer, self.weighted_cost,
                collections=['std', 'loc_decoder', 'nloc_decoder'])

        #########################
        # Tensorboard summaries #
        #########################
        # One custom-scalar chart per decoder, collecting its sf_log series.
        chart = []
        for i in range(self.decoder_num):
            chart.append(layout_pb2.Chart(
                title='Decoder {}'.format(i),
                multiline=layout_pb2.MultilineChartContent(
                    tag=[r'^sf_log_{}'.format(i)]
                )
            ))
        layout_summary = summary_lib.custom_scalar_pb(
            layout_pb2.Layout(category=[
                layout_pb2.Category(
                    title='Select factors',
                    chart=chart)
            ])
        )
        # 'vd' collection: validation summaries; 'td': training summaries.
        tf.summary.scalar('cost_select', self.cost_select, collections=['vd'])
        tf.summary.scalar('cost', self.weighted_cost, collections=['vd'])
        tf.summary.scalar('cost_td', self.weighted_cost, collections=['td'])
        tf.summary.scalar('cost_loc', self.cost_loc, collections=['vd'])
        tf.summary.scalar('cost_nloc', self.cost_nloc, collections=['vd'])
        tf.summary.scalar('reg_loss', self.reg_loss, collections=['vd'])
        # Scalar per decoder per latent dimension for the selection factors.
        for i in range(self.decoder_num):
            for l in range(self.total_latent_size):
                tf.summary.scalar('sf_log_{}_{}'.format(i, l),
                                  self.select_logs[i][l], collections=['vd'])
        # Weight histograms; layer 0 has no index suffix in TF's naming.
        for i in range(len(self.decoder_num_units)):
            weight_id = '' if i == 0 else '_{}'.format(i)
            for j in range(self.decoder_num):
                tf.summary.histogram(
                    'dec{}_weight_{}'.format(j, i),
                    self.graph.get_tensor_by_name(
                        'dec{}/fully_connected{}/weights:0'.format(j, weight_id)),
                    collections=['vd'])
        for i in range(len(self.encoder_num_units)):
            weight_id = '' if i == 0 else '_{}'.format(i)
            for k in range(self.encoder_num):
                tf.summary.histogram(
                    'enc_weight_{}'.format(i),
                    self.graph.get_tensor_by_name(
                        'encoder_{}/fully_connected{}/weights:0'.format(k, weight_id)),
                    collections=['vd'])
        self.summary_writer = tf.summary.FileWriter(
            io.tf_log_path + self.name + '/', graph=self.graph)
        self.summary_writer.add_summary(layout_summary)
        self.summary_writer.flush()
        self.vd_summaries = tf.summary.merge_all(key='vd')
        self.td_summaries = tf.summary.merge_all(key='td')
def __init__(self, scalars=None, groups=None, pr_curve=None, tfpn=None,
             num_classes=None, val_generator=None, val_steps=None, **kwargs):
    """Constructor

    Args:
        scalars: A dict mapping strings to tensors. These tensors will be
            evaluated and show up as a scalar summary.
        groups: A dict that defines groups of scalars and the op names that
            they group. Accepts regex for op names.
            Example: {'category A': {'chart A1': ['op_name_1', r'.*acc.*']}}
        pr_curve: Evaluate the precision-recall curve.
        tfpn: Publish TP (True Positives), FP (False Positives),
            FN (False Negatives), F1 Score, Precision, Recall. (DEVEL).
        num_classes: The number of classes (dimension 1 of the data).
        val_generator: The PR curve callback only works with a static
            validation_data. Pass a generator here to generate the val data
            on the fly.
        val_steps: The number of steps to use for the val_generator.
        log_dir: the path of the directory where to save the log files to be
            parsed by TensorBoard.
        histogram_freq: frequency (in epochs) at which to compute activation
            and weight histograms for the layers of the model. If set to 0,
            histograms won't be computed. Validation data (or split) must be
            specified for histogram visualizations.
        write_graph: whether to visualize the graph in TensorBoard. The log
            file can become quite large when write_graph is set to True.
        write_grads: whether to visualize gradient histograms in TensorBoard.
            `histogram_freq` must be greater than 0.
        batch_size: size of batch of inputs to feed to the network for
            histograms computation.
        write_images: whether to write model weights to visualize as image in
            TensorBoard.
        embeddings_freq: frequency (in epochs) at which selected embedding
            layers will be saved. If set to 0, embeddings won't be computed.
            Data to be visualized in TensorBoard's Embedding tab must be
            passed as `embeddings_data`.
        embeddings_layer_names: a list of names of layers to keep an eye on.
            If None or an empty list, all embedding layers will be watched.
        embeddings_metadata: a dictionary which maps layer name to a file
            name in which metadata for this embedding layer is saved. See the
            [details](https://www.tensorflow.org/how_tos/embedding_viz/#metadata_optional)
            about metadata files format. If the same metadata file is used
            for all embedding layers, a string can be passed.
        embeddings_data: data to be embedded at layers specified in
            `embeddings_layer_names`. Numpy array (if the model has a single
            input) or list of Numpy arrays (if the model has multiple
            inputs). Learn [more about
            embeddings](https://www.tensorflow.org/programmers_guide/embedding)
        **kwargs: All keyword arguments are passed to the standard
            TensorBoard plugin.

    Raises:
        ValueError: if `scalars`/`groups` are not dicts (or None), or if
            `pr_curve` is requested without `num_classes`.
    """
    super().__init__(**kwargs)
    # Validate the optional configuration dicts before storing them.
    if not isinstance(scalars, (dict, type(None))):
        raise ValueError("scalars must be a dict mapping Strings to Tensors")
    self.scalars = scalars
    if not isinstance(groups, (dict, type(None))):
        raise ValueError("groups must be a dict like {'category A': {'chart A1': ['op_name_1', 'op_name_2']}}")
    self.groups = groups
    # The PR curve needs to know how many classes to evaluate.
    if pr_curve and num_classes is None:
        raise ValueError("pr_curve requires num_classes to be set.")
    self.pr_curve = pr_curve
    self.pr_summary = []
    self.num_classes = num_classes
    self.val_generator = val_generator
    self.val_steps = val_steps
    self.layout_summary = None
    if groups:
        # Translate the nested groups dict into a custom-scalar layout:
        # one Category per top-level key, one multiline Chart per sub-key.
        categories = []
        for category_name, chart in groups.items():
            chart_list = []
            for chart_name, op_list in chart.items():
                chart_list.append(
                    layout_pb2.Chart(
                        title=chart_name,
                        multiline=layout_pb2.MultilineChartContent(tag=op_list)))
            categories.append(
                layout_pb2.Category(title=category_name, chart=chart_list))
        self.layout_summary = summary.custom_scalar_pb(
            layout_pb2.Layout(category=categories))
    self.tfpn = tfpn
    # Placeholders for the tfpn summaries, populated elsewhere.
    self.precision_summary = None
    self.recall_summary = None
    self.f1_summary = None
    self.tp_summary = None
    self.fn_summary = None
    self.fp_summary = None
def run():
    """Run custom scalar demo and generate event files.

    Builds a small graph of tagged scalar summaries, writes a custom-scalar
    layout once, then logs 42 steps of summaries to LOGDIR.
    """
    step = tf.placeholder(tf.float32, shape=[])

    with tf.name_scope('loss'):
        # Specify 2 different loss values, each tagged differently.
        summary_lib.scalar('foo', tf.pow(0.9, step))
        summary_lib.scalar('bar', tf.pow(0.85, step + 2))

        # Log metric baz as well as upper and lower bounds for a margin chart.
        middle_baz_value = step + 4 * tf.random_uniform([]) - 2
        summary_lib.scalar('baz', middle_baz_value)
        summary_lib.scalar('baz_lower',
                           middle_baz_value - 6.42 - tf.random_uniform([]))
        summary_lib.scalar('baz_upper',
                           middle_baz_value + 6.42 + tf.random_uniform([]))

    with tf.name_scope('trigFunctions'):
        summary_lib.scalar('cosine', tf.cos(step))
        summary_lib.scalar('sine', tf.sin(step))
        summary_lib.scalar('tangent', tf.tan(step))

    merged_summary = tf.summary.merge_all()

    with tf.Session() as sess, tf.summary.FileWriter(LOGDIR) as writer:
        # We only need to specify the layout once (instead of per step).
        layout_summary = summary_lib.custom_scalar_pb(
            layout_pb2.Layout(category=[
                layout_pb2.Category(
                    title='losses',
                    chart=[
                        layout_pb2.Chart(
                            title='losses',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'loss(?!.*margin.*)'])),
                        layout_pb2.Chart(
                            title='baz',
                            margin=layout_pb2.MarginChartContent(series=[
                                layout_pb2.MarginChartContent.Series(
                                    value='loss/baz/scalar_summary',
                                    lower='loss/baz_lower/scalar_summary',
                                    upper='loss/baz_upper/scalar_summary'),
                            ])),
                    ]),
                layout_pb2.Category(
                    title='trig functions',
                    chart=[
                        layout_pb2.Chart(
                            title='wave trig functions',
                            multiline=layout_pb2.MultilineChartContent(tag=[
                                r'trigFunctions/cosine',
                                r'trigFunctions/sine',
                            ])),
                        # The range of tangent is different. Give it its own chart.
                        layout_pb2.Chart(
                            title='tan',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'trigFunctions/tangent'])),
                    ],
                    # This category we care less about. Make it initially closed.
                    closed=True),
            ]))
        writer.add_summary(layout_summary)

        # BUG FIX: 'xrange' was removed in Python 3 (NameError); 'range'
        # behaves identically for this loop and also works on Python 2.
        for i in range(42):
            step_summary = sess.run(merged_summary, feed_dict={step: i})
            writer.add_summary(step_summary, global_step=i)
def run():
    """Run custom scalar demo and generate event files.

    Builds a small graph of tagged scalar summaries, writes a custom-scalar
    layout once, then logs 42 steps of summaries to LOGDIR.
    """
    step = tf.placeholder(tf.float32, shape=[])

    with tf.name_scope('loss'):
        # Specify 2 different loss values, each tagged differently.
        summary_lib.scalar('foo', tf.pow(0.9, step))
        summary_lib.scalar('bar', tf.pow(0.85, step + 2))

        # Log metric baz as well as upper and lower bounds for a margin chart.
        middle_baz_value = step + 4 * tf.random_uniform([]) - 2
        summary_lib.scalar('baz', middle_baz_value)
        summary_lib.scalar('baz_lower',
                           middle_baz_value - 6.42 - tf.random_uniform([]))
        summary_lib.scalar('baz_upper',
                           middle_baz_value + 6.42 + tf.random_uniform([]))

    with tf.name_scope('trigFunctions'):
        summary_lib.scalar('cosine', tf.cos(step))
        summary_lib.scalar('sine', tf.sin(step))
        summary_lib.scalar('tangent', tf.tan(step))

    merged_summary = tf.summary.merge_all()

    with tf.Session() as sess, tf.summary.FileWriter(LOGDIR) as writer:
        # We only need to specify the layout once (instead of per step).
        layout_summary = summary_lib.custom_scalar_pb(
            layout_pb2.Layout(category=[
                layout_pb2.Category(
                    title='losses',
                    chart=[
                        layout_pb2.Chart(
                            title='losses',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'loss(?!.*margin.*)'])),
                        layout_pb2.Chart(
                            title='baz',
                            margin=layout_pb2.MarginChartContent(series=[
                                layout_pb2.MarginChartContent.Series(
                                    value='loss/baz/scalar_summary',
                                    lower='loss/baz_lower/scalar_summary',
                                    upper='loss/baz_upper/scalar_summary'),
                            ])),
                    ]),
                layout_pb2.Category(
                    title='trig functions',
                    chart=[
                        layout_pb2.Chart(
                            title='wave trig functions',
                            multiline=layout_pb2.MultilineChartContent(tag=[
                                r'trigFunctions/cosine',
                                r'trigFunctions/sine',
                            ])),
                        # The range of tangent is different. Give it its own chart.
                        layout_pb2.Chart(
                            title='tan',
                            multiline=layout_pb2.MultilineChartContent(
                                tag=[r'trigFunctions/tangent'])),
                    ],
                    # This category we care less about. Make it initially closed.
                    closed=True),
            ]))
        writer.add_summary(layout_summary)

        # BUG FIX: 'xrange' was removed in Python 3 (NameError); 'range'
        # behaves identically for this loop and also works on Python 2.
        for i in range(42):
            step_summary = sess.run(merged_summary, feed_dict={step: i})
            writer.add_summary(step_summary, global_step=i)