def slim_net_original(image, keep_prob):
  with arg_scope(
      [layers.conv2d, layers.fully_connected],
      biases_initializer=tf.random_normal_initializer(stddev=0.1)):
    # conv2d(inputs, num_outputs, kernel_size, stride=1, padding='SAME',
    #        activation_fn=nn.relu, normalizer_fn=None, normalizer_params=None,
    #        weights_initializer=initializers.xavier_initializer(),
    #        weights_regularizer=None,
    #        biases_initializer=init_ops.zeros_initializer,
    #        biases_regularizer=None, scope=None)
    net = layers.conv2d(
        image, 32, [5, 5], scope='conv1',
        weights_regularizer=regularizers.l1_regularizer(0.5))
    # max_pool(inputs, kernel_size, stride=2, padding='VALID', scope=None)
    net = layers.max_pool2d(net, 2, scope='pool1')
    net = layers.conv2d(
        net, 64, [5, 5], scope='conv2',
        weights_regularizer=regularizers.l2_regularizer(0.5))
    summaries.summarize_tensor(net, tag='conv2')
    net = layers.max_pool2d(net, 2, scope='pool2')
    net = layers.flatten(net, scope='flatten1')
    # fully_connected(inputs, num_outputs, activation_fn=nn.relu,
    #                 normalizer_fn=None, normalizer_params=None,
    #                 weights_initializer=initializers.xavier_initializer(),
    #                 weights_regularizer=None,
    #                 biases_initializer=init_ops.zeros_initializer,
    #                 biases_regularizer=None, scope=None)
    net = layers.fully_connected(net, 1024, scope='fc1')
    # dropout(inputs, keep_prob=0.5, is_training=True, scope=None)
    net = layers.dropout(net, keep_prob=keep_prob, scope='dropout1')
    net = layers.fully_connected(net, 10, scope='fc2')
    return net
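# For reference, a minimal sketch of what the arg_scope above amounts to at a
# single call site: every op listed in the scope picks up the shared
# biases_initializer as a default, while call-site arguments such as
# weights_regularizer are still passed through. This expansion is illustrative
# only, not part of the original snippet.
net = layers.conv2d(
    image, 32, [5, 5], scope='conv1',
    biases_initializer=tf.random_normal_initializer(stddev=0.1),
    weights_regularizer=regularizers.l1_regularizer(0.5))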
def test_summarize_multidim_tensor(self):
  with self.cached_session():
    tensor_var = variables.Variable([1, 2, 3])
    summary_op = summaries_lib.summarize_tensor(tensor_var)
    self.assertEquals(summary_op.op.type, 'HistogramSummary')
def test_summarize_scalar_tensor(self):
  with self.cached_session():
    scalar_var = variables.Variable(1)
    summary_op = summaries_lib.summarize_tensor(scalar_var)
    self.assertEquals(summary_op.op.type, 'ScalarSummary')
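# Together, the two tests above pin down summarize_tensor's dispatch: a rank-0
# variable produces a ScalarSummary op, while anything multidimensional
# produces a HistogramSummary op. A minimal sketch outside the test harness
# (the variable values and tags are illustrative):
weights = variables.Variable([1.0, 2.0, 3.0])                     # rank 1
hist_op = summaries_lib.summarize_tensor(weights, tag='weights')  # HistogramSummary
bias = variables.Variable(0.0)                                    # rank 0
scalar_op = summaries_lib.summarize_tensor(bias, tag='bias')      # ScalarSummary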
def evaluate(graph,
             output_dir,
             checkpoint_path,
             eval_dict,
             update_op=None,
             global_step_tensor=None,
             supervisor_master='',
             log_every_steps=10,
             feed_fn=None,
             max_steps=None):
  """Evaluate a model loaded from a checkpoint.

  Given `graph`, a directory to write summaries to (`output_dir`), a checkpoint
  to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval
  loop for `max_steps` steps.

  In each step of evaluation, all tensors in the `eval_dict` are evaluated, and
  every `log_every_steps` steps, they are logged. At the very end of
  evaluation, a summary is evaluated (finding the summary ops using
  `Supervisor`'s logic) and written to `output_dir`.

  Args:
    graph: A `Graph` to train. It is expected that this graph is not in use
      elsewhere.
    output_dir: A string containing the directory to write a summary to.
    checkpoint_path: A string containing the path to a checkpoint to restore.
      Can be `None` if the graph doesn't require loading any variables.
    eval_dict: A `dict` mapping string names to tensors to evaluate. It is
      evaluated in every logging step. The result of the final evaluation is
      returned. If `update_op` is None, then it's evaluated in every step.
    update_op: A `Tensor` which is run in every step.
    global_step_tensor: A `Variable` containing the global step. If `None`,
      one is extracted from the graph using the same logic as in `Supervisor`.
      Used to place eval summaries on training curves.
    supervisor_master: The master string to use when preparing the session.
    log_every_steps: Integer. Output logs every `log_every_steps` evaluation
      steps. The logs contain the `eval_dict` and timing information.
    feed_fn: A function that is called every iteration to produce a `feed_dict`
      passed to `session.run` calls. Optional.
    max_steps: Integer. Evaluate `eval_dict` this many times.

  Returns:
    A tuple `(eval_results, global_step)`:
    eval_results: A `dict` mapping `string` to numeric values (`int`, `float`)
      that are the result of running `eval_dict` in the last step. `None` if
      no eval steps were run.
    global_step: The global step this evaluation corresponds to.
  """
  global_step_tensor = contrib_variables.assert_or_get_global_step(
      graph, global_step_tensor)

  # Add scalar summaries for every tensor in evaluation dict if there is not
  # one existing already or it's a string.
  existing_tags = [
      tensor_util.constant_value(summary.op.inputs[0])
      for summary in ops.get_collection(ops.GraphKeys.SUMMARIES)
  ]
  for key, value in eval_dict.items():
    if key in existing_tags:
      continue
    if isinstance(value, ops.Tensor):
      summaries.summarize_tensor(value, tag=key)

  # Create or get summary op, global_step and saver.
  summary_op = logging_ops.get_summary_op()
  saver = _get_saver()
  local_init_op = _get_local_init_op()
  ready_op = _get_ready_op()

  session_manager = session_manager_lib.SessionManager(
      local_init_op=local_init_op, ready_op=ready_op)
  session, initialized = session_manager.recover_session(
      master=supervisor_master, saver=saver, checkpoint_dir=checkpoint_path)

  # Start queue runners.
  coord = coordinator.Coordinator()
  threads = _start_queue_runners(session, coord)

  with session:
    if not initialized:
      logging.warning('Failed to initialize from %s.', checkpoint_path)
      # TODO(ipolosukhin): This should be failing, but old code relies on that.
      session.run(variables.initialize_all_variables())
      if checkpoint_path:
        _restore_from_checkpoint(session, graph, checkpoint_path, saver)

    current_global_step = session.run(global_step_tensor)
    eval_results = None
    # TODO(amodei): Fix this to run through the eval set exactly once.
    step = 0
    feed_dict = None  # Ensure it is defined even if the loop body never runs.
    logging.info('Eval steps [%d,%s) for training step %d.', step,
                 'inf' if max_steps is None else str(max_steps),
                 current_global_step)
    try:
      try:
        while (max_steps is None) or (step < max_steps):
          start_time = time.time()
          feed_dict = feed_fn() if feed_fn is not None else None
          eval_results = None
          if update_op is not None:
            session.run(update_op, feed_dict=feed_dict)
          else:
            eval_results = _run_dict(session, eval_dict, feed_dict=feed_dict)
          # TODO(wicke): We should assert that the global step hasn't changed.
          step += 1
          if step % log_every_steps == 0:
            if eval_results is None:
              eval_results = _run_dict(session, eval_dict,
                                       feed_dict=feed_dict)
            duration = time.time() - start_time
            logging.info('Results after %d steps (%.3f sec/batch): %s.',
                         step, float(duration),
                         ', '.join('%s = %s' % (k, v)
                                   for k, v in eval_results.items()))
      finally:
        if eval_results is None:
          eval_results = _run_dict(session, eval_dict, feed_dict=feed_dict)
        # Stop queue runners.
        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=120)

        # Make our own summary writer and write a summary to the eval dir,
        # but only if feed_fn is not provided.
        # TODO(ipolosukhin): Convert evaluation to use streaming_metrics,
        # then we can save for non feed_fn as well.
        if summary_op is not None and feed_fn is None:
          summary_writer = None
          try:
            summary_writer = get_summary_writer(output_dir)
            summary_str = session.run(summary_op)
            if summary_str:
              summary_writer.add_summary(summary_str, current_global_step)
          finally:
            if summary_writer:
              summary_writer.close()
    # Catch OutOfRangeError, which is thrown when the queue is out of data
    # (and for other reasons as well).
    except errors.OutOfRangeError as e:
      if max_steps is None:
        logging.info('Input queue is exhausted.')
      else:
        logging.warn('Input queue is exhausted: %s.', e)
    # Catch StopIteration, which is thrown if the DataReader is out of data.
    except StopIteration as e:
      if max_steps is None:
        logging.info('Input iterator is exhausted.')
      else:
        logging.warn('Input iterator is exhausted: %s.', e)
  return eval_results, current_global_step
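# A minimal sketch of calling the evaluate() above, assuming an eval graph `g`
# that already contains an `accuracy` tensor and a global step; the names and
# the directory paths are illustrative, not from the original code.
eval_results, global_step = evaluate(
    graph=g,
    output_dir='/tmp/eval_logs',
    checkpoint_path='/tmp/train_logs',
    eval_dict={'accuracy': accuracy},
    max_steps=100)
logging.info('accuracy at global step %d: %s',
             global_step, eval_results['accuracy'])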
def evaluate(graph,
             output_dir,
             checkpoint_path,
             eval_dict,
             global_step_tensor=None,
             init_op=None,
             supervisor_master='',
             log_every_steps=10,
             feed_fn=None,
             max_steps=None):
  """Evaluate a model loaded from a checkpoint.

  Given `graph`, a directory to write summaries to (`output_dir`), a checkpoint
  to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval
  loop for `max_steps` steps.

  In each step of evaluation, all tensors in the `eval_dict` are evaluated, and
  every `log_every_steps` steps, they are logged. At the very end of
  evaluation, a summary is evaluated (finding the summary ops using
  `Supervisor`'s logic) and written to `output_dir`.

  Args:
    graph: A `Graph` to train. It is expected that this graph is not in use
      elsewhere.
    output_dir: A string containing the directory to write a summary to.
    checkpoint_path: A string containing the path to a checkpoint to restore.
      Can be `None` if the graph doesn't require loading any variables.
    eval_dict: A `dict` mapping string names to tensors to evaluate in every
      eval step.
    global_step_tensor: A `Variable` containing the global step. If `None`,
      one is extracted from the graph using the same logic as in `Supervisor`.
      Used to place eval summaries on training curves.
    init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
      default.
    supervisor_master: The master string to use when preparing the session.
    log_every_steps: Integer. Output logs every `log_every_steps` evaluation
      steps. The logs contain the `eval_dict` and timing information.
    feed_fn: A function that is called every iteration to produce a `feed_dict`
      passed to `session.run` calls. Optional.
    max_steps: Integer. Evaluate `eval_dict` this many times.

  Returns:
    A tuple `(eval_results, global_step)`:
    eval_results: A `dict` mapping `string` to numeric values (`int`, `float`)
      that are the eval results from the last step of the eval. `None` if no
      eval steps were run.
    global_step: The global step this evaluation corresponds to.
  """
  global_step_tensor = contrib_variables.assert_or_get_global_step(
      graph, global_step_tensor)

  # Add scalar summaries for every tensor in evaluation dict if there is not
  # one existing already or it's a string.
  existing_tags = [
      tensor_util.constant_value(summary.op.inputs[0])
      for summary in ops.get_collection(ops.GraphKeys.SUMMARIES)
  ]
  for key, value in eval_dict.items():
    if key in existing_tags:
      continue
    if isinstance(value, ops.Tensor):
      summaries.summarize_tensor(value, tag=key)

  # Create or get summary op.
  summary_op = logging_ops.get_summary_op()

  # TODO(wicke): Don't use supervisor here, or switch to output_dir=eval_dir.
  supervisor, session = _prepare_session(
      graph=graph,
      output_dir=None,  # Must be None to avoid writing an event file.
      start_services=False,
      global_step_tensor=global_step_tensor,
      init_op=init_op,
      supervisor_is_chief=True,
      supervisor_master=supervisor_master,
      supervisor_save_model_secs=None)
  global_step_tensor = supervisor.global_step

  with session:
    if checkpoint_path:
      _restore_from_checkpoint(session, graph, checkpoint_path,
                               supervisor.saver)

    current_global_step = session.run(global_step_tensor)
    eval_results = None
    # TODO(amodei): Fix this to run through the eval set exactly once.
    step = 0
    logging.info('Eval steps [%d,%s)', step,
                 'inf' if max_steps is None else str(max_steps))
    try:
      try:
        while not supervisor.ShouldStop() and ((max_steps is None) or
                                               (step < max_steps)):
          start_time = time.time()
          feed_dict = feed_fn() if feed_fn is not None else None
          eval_results = _run_dict(session, eval_dict, feed_dict=feed_dict)
          # TODO(wicke): We should assert that the global step hasn't changed.
          step += 1
          if step % log_every_steps == 0:
            duration = time.time() - start_time
            logging.info('Results after %d steps (%.3f sec/batch): %s.',
                         step, float(duration),
                         ', '.join('%s = %s' % (k, v)
                                   for k, v in eval_results.items()))
      finally:
        # Make our own summary writer and write a summary to the eval dir,
        # but only if feed_fn is not provided.
        # TODO(ipolosukhin): Convert evaluation to use streaming_metrics,
        # then we can save for non feed_fn as well.
        if summary_op is not None and feed_fn is None:
          summary_writer = None
          try:
            summary_writer = SummaryWriter(output_dir,
                                           graph_def=session.graph_def)
            summary_str = session.run(summary_op)
            if summary_str:
              summary_writer.add_summary(summary_str, current_global_step)
          finally:
            if summary_writer:
              summary_writer.close()

        # Call supervisor.Stop() from within a try block because it re-raises
        # exceptions thrown by the supervised threads.
        supervisor.Stop()
    # Catch OutOfRangeError, which is thrown when the queue is out of data
    # (and for other reasons as well).
    except errors.OutOfRangeError as e:
      logging.warn('Input queue is exhausted: %s.', e)
    # Catch StopIteration, which is thrown if the DataReader is out of data.
    except StopIteration as e:
      logging.info('Input iterator is exhausted: %s.', e)
  return eval_results, current_global_step
def evaluate(graph,
             output_dir,
             checkpoint_path,
             eval_dict,
             update_op=None,
             global_step_tensor=None,
             supervisor_master='',
             log_every_steps=10,
             feed_fn=None,
             max_steps=None):
  """Evaluate a model loaded from a checkpoint.

  Given `graph`, a directory to write summaries to (`output_dir`), a checkpoint
  to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval
  loop for `max_steps` steps.

  In each step of evaluation, all tensors in the `eval_dict` are evaluated, and
  every `log_every_steps` steps, they are logged. At the very end of
  evaluation, a summary is evaluated (finding the summary ops using
  `Supervisor`'s logic) and written to `output_dir`.

  Args:
    graph: A `Graph` to train. It is expected that this graph is not in use
      elsewhere.
    output_dir: A string containing the directory to write a summary to.
    checkpoint_path: A string containing the path to a checkpoint to restore.
      Can be `None` if the graph doesn't require loading any variables.
    eval_dict: A `dict` mapping string names to tensors to evaluate. It is
      evaluated in every logging step. The result of the final evaluation is
      returned. If `update_op` is None, then it's evaluated in every step.
    update_op: A `Tensor` which is run in every step.
    global_step_tensor: A `Variable` containing the global step. If `None`,
      one is extracted from the graph using the same logic as in `Supervisor`.
      Used to place eval summaries on training curves.
    supervisor_master: The master string to use when preparing the session.
    log_every_steps: Integer. Output logs every `log_every_steps` evaluation
      steps. The logs contain the `eval_dict` and timing information.
    feed_fn: A function that is called every iteration to produce a `feed_dict`
      passed to `session.run` calls. Optional.
    max_steps: Integer. Evaluate `eval_dict` this many times.

  Returns:
    A tuple `(eval_results, global_step)`:
    eval_results: A `dict` mapping `string` to numeric values (`int`, `float`)
      that are the result of running `eval_dict` in the last step. `None` if
      no eval steps were run.
    global_step: The global step this evaluation corresponds to.
  """
  global_step_tensor = contrib_variables.assert_or_get_global_step(
      graph, global_step_tensor)

  # Add scalar summaries for every tensor in evaluation dict if there is not
  # one existing already or it's a string.
  existing_tags = [
      tensor_util.constant_value(summary.op.inputs[0])
      for summary in ops.get_collection(ops.GraphKeys.SUMMARIES)
  ]
  for key, value in eval_dict.items():
    if key in existing_tags:
      continue
    if isinstance(value, ops.Tensor):
      summaries.summarize_tensor(value, tag=key)

  # Create or get summary op, global_step and saver.
  summary_op = logging_ops.get_summary_op()
  saver = _get_saver()
  local_init_op = _get_local_init_op()
  ready_op = _get_ready_op()

  session_manager = session_manager_lib.SessionManager(
      local_init_op=local_init_op, ready_op=ready_op)
  session, initialized = session_manager.recover_session(
      master=supervisor_master, saver=saver, checkpoint_dir=checkpoint_path)

  # Start queue runners.
  coord = coordinator.Coordinator()
  threads = _start_queue_runners(session, coord)

  with session:
    if not initialized:
      logging.warning('Failed to initialize from %s.', checkpoint_path)
      # TODO(ipolosukhin): This should be failing, but old code relies on that.
      session.run(variables.initialize_all_variables())
      if checkpoint_path:
        _restore_from_checkpoint(session, graph, checkpoint_path, saver)

    current_global_step = session.run(global_step_tensor)
    eval_results = None
    # TODO(amodei): Fix this to run through the eval set exactly once.
    step = 0
    feed_dict = None  # Ensure it is defined even if the loop body never runs.
    logging.info('Eval steps [%d,%s) for training step %d.', step,
                 'inf' if max_steps is None else str(max_steps),
                 current_global_step)
    try:
      try:
        while (max_steps is None) or (step < max_steps):
          start_time = time.time()
          feed_dict = feed_fn() if feed_fn is not None else None
          eval_results = None
          if update_op is not None:
            session.run(update_op, feed_dict=feed_dict)
          else:
            eval_results = _run_dict(session, eval_dict, feed_dict=feed_dict)
          # TODO(wicke): We should assert that the global step hasn't changed.
          step += 1
          if step % log_every_steps == 0:
            if eval_results is None:
              eval_results = _run_dict(session, eval_dict,
                                       feed_dict=feed_dict)
            duration = time.time() - start_time
            logging.info('Results after %d steps (%.3f sec/batch): %s.',
                         step, float(duration),
                         ', '.join('%s = %s' % (k, v)
                                   for k, v in eval_results.items()))
      finally:
        if eval_results is None:
          eval_results = _run_dict(session, eval_dict, feed_dict=feed_dict)
        # Stop queue runners.
        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=120)

        # Make our own summary writer and write a summary to the eval dir,
        # but only if feed_fn is not provided.
        # TODO(ipolosukhin): Convert evaluation to use streaming_metrics,
        # then we can save for non feed_fn as well.
        if summary_op is not None and feed_fn is None:
          summary_writer = None
          try:
            summary_writer = SummaryWriter(output_dir,
                                           graph_def=session.graph_def)
            summary_str = session.run(summary_op)
            if summary_str:
              summary_writer.add_summary(summary_str, current_global_step)
          finally:
            if summary_writer:
              summary_writer.close()
    # Catch OutOfRangeError, which is thrown when the queue is out of data
    # (and for other reasons as well).
    except errors.OutOfRangeError as e:
      if max_steps is None:
        logging.info('Input queue is exhausted.')
      else:
        logging.warn('Input queue is exhausted: %s.', e)
    # Catch StopIteration, which is thrown if the DataReader is out of data.
    except StopIteration as e:
      if max_steps is None:
        logging.info('Input iterator is exhausted.')
      else:
        logging.warn('Input iterator is exhausted: %s.', e)
  return eval_results, current_global_step
def evaluate(graph,
             output_dir,
             checkpoint_path,
             eval_dict,
             global_step_tensor=None,
             init_op=None,
             supervisor_master='',
             log_every_steps=10,
             feed_fn=None,
             max_steps=None):
  """Evaluate a model loaded from a checkpoint.

  Given `graph`, a directory to write summaries to (`output_dir`), a checkpoint
  to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval
  loop for `max_steps` steps.

  In each step of evaluation, all tensors in the `eval_dict` are evaluated, and
  every `log_every_steps` steps, they are logged. At the very end of
  evaluation, a summary is evaluated (finding the summary ops using
  `Supervisor`'s logic) and written to `output_dir`.

  Args:
    graph: A `Graph` to train. It is expected that this graph is not in use
      elsewhere.
    output_dir: A string containing the directory to write a summary to.
    checkpoint_path: A string containing the path to a checkpoint to restore.
      Can be `None` if the graph doesn't require loading any variables.
    eval_dict: A `dict` mapping string names to tensors to evaluate in every
      eval step.
    global_step_tensor: A `Variable` containing the global step. If `None`,
      one is extracted from the graph using the same logic as in `Supervisor`.
      Used to place eval summaries on training curves.
    init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
      default.
    supervisor_master: The master string to use when preparing the session.
    log_every_steps: Integer. Output logs every `log_every_steps` evaluation
      steps. The logs contain the `eval_dict` and timing information.
    feed_fn: A function that is called every iteration to produce a `feed_dict`
      passed to `session.run` calls. Optional.
    max_steps: Integer. Evaluate `eval_dict` this many times.

  Returns:
    A tuple `(eval_results, global_step)`:
    eval_results: A `dict` mapping `string` to numeric values (`int`, `float`)
      that are the eval results from the last step of the eval. `None` if no
      eval steps were run.
    global_step: The global step this evaluation corresponds to.
  """
  global_step_tensor = contrib_variables.assert_or_get_global_step(
      graph, global_step_tensor)

  # Add scalar summaries for every tensor in evaluation dict if there is not
  # one existing already or it's a string.
  existing_tags = [
      tensor_util.constant_value(summary.op.inputs[0])
      for summary in ops.get_collection(ops.GraphKeys.SUMMARIES)
  ]
  for key, value in eval_dict.items():
    if key in existing_tags:
      continue
    if isinstance(value, ops.Tensor):
      summaries.summarize_tensor(value, tag=key)

  # TODO(wicke): Don't use supervisor here, or switch to output_dir=eval_dir.
  supervisor, session = _prepare_session(
      graph=graph,
      output_dir=None,  # Must be None to avoid writing an event file.
      start_services=False,
      global_step_tensor=global_step_tensor,
      init_op=init_op,
      supervisor_is_chief=True,
      supervisor_master=supervisor_master,
      supervisor_save_model_secs=None,
      supervisor_save_summaries_secs=None)
  global_step_tensor = supervisor.global_step

  with session:
    if checkpoint_path:
      _restore_from_checkpoint(session, graph, checkpoint_path,
                               supervisor.saver)

    current_global_step = session.run(global_step_tensor)
    eval_results = None
    # TODO(amodei): Fix this to run through the eval set exactly once.
    step = 0
    logging.info('Eval steps [%d,%s)', step,
                 'inf' if max_steps is None else str(max_steps))
    try:
      try:
        while not supervisor.ShouldStop() and ((max_steps is None) or
                                               (step < max_steps)):
          start_time = time.time()
          feed_dict = feed_fn() if feed_fn is not None else None
          eval_results = _run_dict(session, eval_dict, feed_dict=feed_dict)
          # TODO(wicke): We should assert that the global step hasn't changed.
          step += 1
          if step % log_every_steps == 0:
            duration = time.time() - start_time
            logging.info('Results after %d steps (%.3f sec/batch): %s.',
                         step, float(duration),
                         ', '.join('%s = %s' % (k, v)
                                   for k, v in eval_results.items()))
      finally:
        # Make our own summary writer and write a summary to the eval dir.
        if supervisor.summary_op is not None:
          summary_writer = None
          try:
            summary_writer = SummaryWriter(output_dir,
                                           graph_def=session.graph_def)
            summary_str = session.run(supervisor.summary_op)
            if summary_str:
              summary_writer.add_summary(summary_str, current_global_step)
          finally:
            if summary_writer:
              summary_writer.close()

        # Call supervisor.Stop() from within a try block because it re-raises
        # exceptions thrown by the supervised threads.
        supervisor.Stop()
    # Catch OutOfRangeError, which is thrown when the queue is out of data
    # (and for other reasons as well).
    except errors.OutOfRangeError as e:
      logging.warn('Input queue exhausted: %s.', e)
  return eval_results, current_global_step
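# The feed_fn argument taken by the evaluate() variants above is a
# zero-argument callable returning a feed_dict for each step. A sketch of one
# way to build it from in-memory arrays; `images_ph`, `labels_ph`, and the
# batching scheme are assumptions for illustration, not from the original code.
def make_feed_fn(images, labels, images_ph, labels_ph, batch_size=100):
  state = {'offset': 0}
  def feed_fn():
    start = state['offset'] % len(images)
    state['offset'] += batch_size
    return {images_ph: images[start:start + batch_size],
            labels_ph: labels[start:start + batch_size]}
  return feed_fn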
y_pred = slim_net_original(tf.reshape(x, [-1, 28, 28, 1]), keep_prob)

with tf.name_scope('x_ent'):
  cross_entropy = tf.reduce_sum(
      tf.nn.softmax_cross_entropy_with_logits(y_pred, y_true))
  summaries.summarize_tensor(cross_entropy, tag='x_ent')

train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

with tf.name_scope('accuracy'):
  correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.argmax(y_true, 1))
  accuracy = tf.reduce_mean(tf.to_float(correct_prediction))
  summaries.summarize_tensor(accuracy, tag='acc')

sess = tf.Session()
merged = tf.merge_all_summaries()
writer = tf.train.SummaryWriter('/tmp/layers/run1', sess.graph)
sess.run(tf.initialize_all_variables())
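# A sketch of the training loop that would typically follow, assuming `mnist`
# is a loaded MNIST dataset handle with a train.next_batch() method (as in the
# classic TensorFlow tutorials); the batch size and step counts are
# illustrative, not from the original snippet.
for i in range(1000):
  batch_xs, batch_ys = mnist.train.next_batch(50)
  if i % 100 == 0:
    # Periodically record the merged summaries and check accuracy with
    # dropout disabled (keep_prob=1.0).
    summary_str, acc = sess.run(
        [merged, accuracy],
        feed_dict={x: batch_xs, y_true: batch_ys, keep_prob: 1.0})
    writer.add_summary(summary_str, i)
  sess.run(train_step,
           feed_dict={x: batch_xs, y_true: batch_ys, keep_prob: 0.5})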