Example #1
0
 def list_supported_ops(self, request):
   """Respond with the JSON list of potentially supported ops."""
   ops = lite_backend.get_potentially_supported_ops()
   return http_util.Respond(request, ops, "application/json")
Example #2
0
 def _serve_asset(self, path, gzipped_asset_bytes, request):
   """Serves a pre-gzipped static asset from the zip file."""
   # Fall back to a generic binary content type for unknown extensions.
   guessed = mimetypes.guess_type(path)[0]
   mimetype = guessed if guessed else 'application/octet-stream'
   return http_util.Respond(
       request, gzipped_asset_bytes, mimetype, content_encoding='gzip')
Example #3
0
    def serve_graph_op_info(self, request):
        """Serve information for an op inside a graph.

        The request identifies the run, the op's name, and the ID of the
        graph that contains the op.

        The JSON response contains the fields:
          - op_type
          - op_name
          - graph_ids: IDs of the graphs enclosing the op, from outermost
            to innermost. Always of length >= 1; length 1 if and only if
            the graph is an outermost graph.
          - num_outputs: Number of output tensors.
          - output_tensor_ids: Debugger-generated numeric IDs for the
            op's symbolic output tensors (an array of numbers).
          - host_name: Name of the host on which the op is created.
          - stack_trace: Stack frames of the op's creation.
          - inputs: One entry per immediate data input of the op; an
            empty list when the op has no inputs. Each entry has:
              - op_name: Name of the op that provides the input tensor.
              - output_slot: 0-based output slot index from which the
                input tensor emits.
              - data: This same schema, recursively. Unpopulated at the
                leaves of the recursion, and in the rare case wherein
                the data for an input cannot be retrieved properly
                (e.g., special internal op types).
          - consumers: One array per output tensor of this op (empty
            overall when the op has no outputs), each listing the
            immediate downstream consumers of that tensor via data
            edges; an empty array when a tensor has no consumers.
            Each entry has:
              - op_name: Name of the op that receives the output tensor
                as an input.
              - input_slot: 0-based input slot index at which the
                downstream op receives this output tensor.
              - data: This same schema, recursively. Unpopulated at the
                leaves of the recursion, and in the rare case wherein
                the data for a consumer op cannot be retrieved properly
                (e.g., special internal op types).
        """
        experiment = plugin_util.experiment_id(request.environ)
        run = request.args.get("run")
        if run is None:
            return _missing_run_error_response(request)
        tag_filter = debug_data_provider.graph_op_info_run_tag_filter(
            run, request.args.get("graph_id"), request.args.get("op_name"))
        sequences = self._data_provider.read_blob_sequences(
            experiment, self.plugin_name, run_tag_filter=tag_filter)
        tag = next(iter(tag_filter.tags))
        try:
            blob = self._data_provider.read_blob(
                sequences[run][tag][0].blob_key)
            return http_util.Respond(request, blob, "application/json")
        except errors.NotFoundError as e:
            return _error_response(request, str(e))
 def tags_route(self, request):
     """Serve the tag index for the request's experiment as JSON."""
     eid = plugin_util.experiment_id(request.environ)
     return http_util.Respond(
         request, self.index_impl(experiment=eid), "application/json")
Example #5
0
 def text_route(self, request):
     """Serve text data for the requested run/tag pair as JSON."""
     payload = self.text_impl(
         request.args.get('run'), request.args.get('tag'))
     return http_util.Respond(request, payload, 'application/json')
    def _serve_gated_grpc(self, request):
        """Serve requests about gated-gRPC debug tensors and breakpoints."""
        mode = request.args.get("mode")
        if mode in ("retrieve_all", "retrieve_device_names"):
            # 'retrieve_all': Retrieve all gated-gRPC debug tensors and the
            #   currently enabled breakpoints for the given run_key.
            # 'retrieve_device_names': Retrieve only the device names
            #   associated with the given run key.
            run_key = interactive_debugger_server_lib.RunKey(
                *json.loads(request.args.get("run_key")))
            # Maps device_name to GraphDef.
            graph_defs = self._debugger_data_server.get_graphs(
                run_key, debug=True)
            device_names = list(graph_defs.keys())
            if mode == "retrieve_device_names":
                return http_util.Respond(
                    request,
                    {"device_names": device_names},
                    "application/json",
                )

            # Items of each gated value and of breakpoints are
            # (node_name, output_slot, debug_op_name) tuples.
            gated = {
                device_name:
                self._debugger_data_server.get_gated_grpc_tensors(
                    run_key, device_name)
                for device_name in graph_defs
            }
            return http_util.Respond(
                request,
                {
                    "gated_grpc_tensors": gated,
                    "breakpoints": self._debugger_data_server.breakpoints,
                    "device_names": device_names,
                },
                "application/json",
            )
        if mode == "breakpoints":
            # Retrieve the currently enabled breakpoints.
            return http_util.Respond(
                request,
                self._debugger_data_server.breakpoints,
                "application/json",
            )
        if mode == "set_state":
            # Set the state of a gated-gRPC debug tensor, e.g., disable or
            # enable a breakpoint.
            node_name = request.args.get("node_name")
            output_slot = int(request.args.get("output_slot"))
            debug_op = request.args.get("debug_op")
            state = request.args.get("state")
            logger.debug("Setting state of %s:%d:%s to: %s" %
                         (node_name, output_slot, debug_op, state))
            if state == "disable":
                self._debugger_data_server.request_unwatch(
                    node_name, output_slot, debug_op)
            elif state == "watch":
                self._debugger_data_server.request_watch(
                    node_name, output_slot, debug_op, breakpoint=False)
            elif state == "break":
                self._debugger_data_server.request_watch(
                    node_name, output_slot, debug_op, breakpoint=True)
            else:
                return self._error_response(
                    request,
                    "Unrecognized new state for %s:%d:%s: %s" %
                    (node_name, output_slot, debug_op, state),
                )
            return http_util.Respond(
                request,
                {
                    "node_name": node_name,
                    "output_slot": output_slot,
                    "debug_op": debug_op,
                    "state": state,
                },
                "application/json",
            )
        return self._error_response(
            request,
            "Unrecognized mode for the gated_grpc route: %s" % mode)
Example #7
0
 def hosts_route(self, request):
     """Serve the hosts for the requested run and tool as JSON."""
     # The tool name arrives under the 'tag' query parameter.
     hosts = self.host_impl(
         request.args.get('run'), request.args.get('tag'))
     return http_util.Respond(request, hosts, 'application/json')
Example #8
0
 def _serve_sprite(self, request):
     """Serve the stored sprite image bytes as a PNG."""
     return http_util.Respond(request, self.sprite, 'image/png')
Example #9
0
    def _serve_health_pills_handler(self, request):
        """A (wrapped) werkzeug handler for serving health pills.

    Accepts POST requests and responds with health pills. The request accepts
    several POST parameters:

      node_names: (required string) A JSON-ified list of node names for which
          the client would like to request health pills.
      run: (optional string) The run to retrieve health pills for. Defaults to
          '.'. This data is sent via POST (not GET) since URL length is limited.
      step: (optional integer): The session run step for which to
          retrieve health pills. If provided, the handler reads the health pills
          of that step from disk (which is slow) and produces a response with
          only health pills at that step. If not provided, the handler returns a
          response with health pills at all steps sampled by the event
          multiplexer (the fast path). The motivation here is that, sometimes,
          one desires to examine health pills at a specific step (to say find
          the first step that causes a model to blow up with NaNs).
          get_plugin_apps must be called before this slower feature is used
          because that method passes the logdir (directory path) to this plugin.

    This handler responds with a JSON-ified object mapping from node names to a
    list (of size 1) of health pill event objects, each of which has these
    properties.

    {
        'wall_time': float,
        'step': int,
        'node_name': string,
        'output_slot': int,
        # A list of 12 floats that summarizes the elements of the tensor.
        'value': float[],
    }

    Node names for which there are no health pills to be found are excluded from
    the mapping.

    Args:
      request: The request issued by the client for health pills.

    Returns:
      A werkzeug BaseResponse object.
    """
        # Only POST is allowed: the list of node names can exceed URL length
        # limits, so it must travel in the request body.
        if request.method != 'POST':
            return wrappers.Response(
                response=('%s requests are forbidden by the debugger plugin.' %
                          request.method),
                status=405)

        # The node-name list is mandatory; reject the request without it.
        if _NODE_NAMES_POST_KEY not in request.form:
            return wrappers.Response(response=(
                'The %r POST key was not found in the request for health pills.'
                % _NODE_NAMES_POST_KEY),
                                     status=400)

        jsonified_node_names = request.form[_NODE_NAMES_POST_KEY]
        try:
            node_names = json.loads(tf.compat.as_text(jsonified_node_names))
        except Exception as e:  # pylint: disable=broad-except
            # Different JSON libs raise different exceptions, so we just do a
            # catch-all here. This problem is complicated by how Tensorboard might be
            # run in many different environments, as it is open-source.
            # TODO(@caisq, @chihuahua): Create platform-dependent adapter to catch
            # specific types of exceptions, instead of the broad catching here.
            tf.logging.error('Could not decode node name JSON string %r: %s',
                             jsonified_node_names, e)
            return wrappers.Response(status=400)

        # The decoded payload must be a JSON list, not any other JSON value.
        if not isinstance(node_names, list):
            tf.logging.error('%r is not a JSON list of node names:',
                             jsonified_node_names)
            return wrappers.Response(status=400)

        run = request.form.get(_RUN_POST_KEY, _DEFAULT_RUN)
        step_string = request.form.get(_STEP_POST_KEY, None)
        if step_string is None:
            # Use all steps sampled by the event multiplexer (Relatively fast).
            mapping = self._obtain_sampled_health_pills(run, node_names)
        else:
            # Read disk to obtain the health pills for that step (Relatively slow).
            # Make sure that the directory for the run exists.
            # Determine the directory of events file to read.
            events_directory = self._logdir
            if run != _DEFAULT_RUN:
                # Use the directory for the specific run.
                events_directory = os.path.join(events_directory, run)

            step = int(step_string)
            try:
                mapping = self._obtain_health_pills_at_step(
                    events_directory, node_names, step)
            except IOError as error:
                tf.logging.error(
                    'Error retrieving health pills for step %d: %s', step,
                    error)
                return wrappers.Response(status=404)

        # Convert event_accumulator.HealthPillEvents to JSON-able dicts.
        jsonable_mapping = {}
        for node_name, events in mapping.items():
            jsonable_mapping[node_name] = [e._asdict() for e in events]
        return http_util.Respond(request, jsonable_mapping, 'application/json')
 def _serve_ping(self, request):  # pylint: disable=unused-argument
     """Respond with a liveness signal as JSON."""
     return http_util.Respond(
         request, {"status": "alive"}, "application/json")
 def tags_route(self, request):
     """Serve the tag index for the given experiment as JSON."""
     # The experiment ID comes from the query string, defaulting to ''.
     eid = request.args.get('experiment', '')
     return http_util.Respond(
         request, self.index_impl(experiment=eid), 'application/json')
Example #12
0
    def _infer(self, request):
        """Run inference on the updated examples and return chart JSON.

        Args:
          request: A GET request that should contain 'inference_address',
            'model_name', 'model_type', 'model_version', 'model_signature'
            and 'label_vocab_path'.

        Returns:
          A JSON response holding 'inferences' and 'vocab' entries, or an
          error payload with an appropriate HTTP status code.
        """
        label_vocab = []
        vocab_path = request.args.get('label_vocab_path')
        if vocab_path:
            try:
                with tf.gfile.GFile(vocab_path, 'r') as f:
                    label_vocab = [line.rstrip('\n') for line in f]
            except tf.errors.NotFoundError as err:
                # A missing vocab file degrades to an empty vocabulary.
                tf.logging.error('error reading vocab file: %s', err)
                label_vocab = []

        try:
            if request.method != 'GET':
                tf.logging.error('%s requests are forbidden.', request.method)
                return http_util.Respond(request,
                                         {'error': 'invalid non-GET request'},
                                         'application/json',
                                         code=405)

            (addresses, names, versions,
             signatures) = self._parse_request_arguments(request)

            indices_to_infer = sorted(self.updated_example_indices)
            examples_to_infer = [self.examples[i] for i in indices_to_infer]
            infer_objs = []
            for idx, address in enumerate(addresses):
                bundle = inference_utils.ServingBundle(
                    address, names[idx],
                    request.args.get('model_type'), versions[idx],
                    signatures[idx],
                    request.args.get('use_predict') == 'true',
                    request.args.get('predict_input_tensor'),
                    request.args.get('predict_output_tensor'))

                # Call the model server, wrap the proto result, and convert
                # it to a JSON-able object for the response.
                result_proto = platform_utils.call_servo(
                    examples_to_infer, bundle)
                wrapped = inference_utils.wrap_inference_results(result_proto)
                infer_json = json_format.MessageToJson(
                    wrapped, including_default_value_fields=True)
                infer_objs.append(json.loads(infer_json))

            resp = {'indices': indices_to_infer, 'results': infer_objs}
            # All pending examples have now been inferred.
            self.updated_example_indices = set()
            return http_util.Respond(request, {
                'inferences': json.dumps(resp),
                'vocab': json.dumps(label_vocab)
            }, 'application/json')
        except common_utils.InvalidUserInputError as e:
            return http_util.Respond(request, {'error': e.message},
                                     'application/json',
                                     code=400)
        except AbortionError as e:
            return http_util.Respond(request, {'error': e.details},
                                     'application/json',
                                     code=400)
Example #13
0
 def info_route(self, request):
     """Serve plugin info for the request's experiment as JSON."""
     ctx = plugin_util.context(request.environ)
     eid = plugin_util.experiment_id(request.environ)
     return http_util.Respond(
         request, self.info_impl(ctx, eid), "application/json")
Example #14
0
 def _serve_file(self, file_path, request):
   """Serves a resource file that lives next to this module."""
   resource = os.path.join(os.path.dirname(__file__), file_path)
   # Guess the content type from the file extension.
   mimetype = mimetypes.guess_type(file_path)[0]
   with open(resource, "rb") as handle:
     return http_util.Respond(request, handle.read(), content_type=mimetype)
 def _serve_comm(self, request):
     """Long-poll for the outgoing debugger message at position `pos`."""
     # get_outgoing_message blocks until self._debugger_data_server puts
     # an item into its queue, which is what ends this HTTP long poll.
     position = int(request.args.get("pos"))
     message = self._debugger_data_server.get_outgoing_message(position)
     return http_util.Respond(request, message, "application/json")
Example #16
0
    def _serve_plugins_listing(self, request):
        """Serve a JSON object mapping each plugin name to its metadata.

        Args:
          request: The werkzeug.Request object.

        Returns:
          A werkzeug.Response object.
        """
        listing = collections.OrderedDict()
        for plugin in self._plugins:
            # The core plugin's existence is a backend implementation
            # detail, so it is excluded from the listing. The exact-type
            # check is intentional (subclasses are still listed).
            if type(plugin) is core_plugin.CorePlugin:  # pylint: disable=unidiomatic-typecheck
                continue
            start = time.time()
            is_active = plugin.is_active()
            logger.info(
                'Plugin listing: is_active() for %s took %0.3f seconds',
                plugin.plugin_name, time.time() - start)

            metadata = plugin.frontend_metadata()._asdict()
            if metadata['tab_name'] is None:
                metadata['tab_name'] = plugin.plugin_name
            metadata['enabled'] = is_active

            module_path = metadata.pop('es_module_path')
            element_name = metadata.pop('element_name')
            if element_name is not None and module_path is not None:
                # A plugin must be either legacy (custom element) or
                # iframed (ES module), never both.
                logger.error(
                    'Plugin %r declared as both legacy and iframed; skipping',
                    plugin.plugin_name,
                )
                continue
            if element_name is not None:
                mechanism = {
                    'type': 'CUSTOM_ELEMENT',
                    'element_name': element_name,
                }
            elif module_path is not None:
                mechanism = {
                    'type': 'IFRAME',
                    'module_path': ''.join([
                        self._path_prefix,
                        DATA_PREFIX,
                        PLUGIN_PREFIX,
                        '/',
                        plugin.plugin_name,
                        module_path,
                    ]),
                }
            else:
                # As a compatibility measure (for plugins that we don't
                # control), we'll pull it from the frontend registry for now.
                mechanism = {
                    'type': 'NONE',
                }
            metadata['loading_mechanism'] = mechanism

            listing[plugin.plugin_name] = metadata
        return http_util.Respond(request, listing, 'application/json')
 def _error_response(self, request, error_msg):
     """Log `error_msg` and serve it as a JSON error with HTTP 400."""
     logger.error(error_msg)
     return http_util.Respond(
         request, {"error": error_msg}, "application/json", 400)
Example #18
0
 def scalars_route(self, request):
     """Given a tag and single run, return array of ScalarEvents."""
     payload = self.scalars_impl(
         request.args.get("tag"), request.args.get("run"))
     return http_util.Respond(request, payload, "application/json")
Example #19
0
 def tools_route(self, request):
     """Serve the mapping from run name to available tools as JSON."""
     mapping = dict(self.generate_run_to_tools())
     return http_util.Respond(request, mapping, 'application/json')
 def train_route(self, request):
     """Sleep one second, then serve an empty JSON object.

     NOTE(review): the sleep appears to emulate a unit of training work
     for demo purposes -- confirm against the caller.
     """
     time.sleep(1)
     return http_util.Respond(request, {}, 'application/json')
Example #21
0
 def tags_route(self, request):
     """Serve the tag index as JSON."""
     return http_util.Respond(
         request, self.index_impl(), 'application/json')
Example #22
0
 def serve_embeddings(self, request):
     """Serve embedding data for the request's experiment as JSON."""
     ctx = plugin_util.context(request.environ)
     eid = plugin_util.experiment_id(request.environ)
     return http_util.Respond(
         request, self.embeddings_impl(ctx, experiment=eid),
         "application/json")
Example #23
0
 def tags_route(self, request):
     """Serve the result of `tags_impl` as JSON."""
     return http_util.Respond(
         request, self.tags_impl(), 'application/json')
 def logdir_route(self, request):
     """Serve this plugin's log directory as JSON."""
     payload = {'logdir': self.plugin_logdir}
     return http_util.Respond(request, payload, 'application/json')
Example #25
0
 def _send_404_without_logging(self, request):
   """Respond with a plain-text 404 without emitting a log entry."""
   return http_util.Respond(request, 'Not found', 'text/plain', code=404)
    def tools_route(self, request):
        """Serve the run-to-tools index as JSON."""
        return http_util.Respond(
            request, self.index_impl(), 'application/json')
Example #27
0
 def _serve_window_properties(self, request):
   """Serve a JSON object containing this TensorBoard's window properties."""
   # TODO(chihuahua): Remove this method once the frontend instead uses the
   # /data/environment route.
   payload = {'window_title': self._window_title}
   return http_util.Respond(request, payload, 'application/json')
 def _serve_ack(self, request):
     """Record a client acknowledgement, then serve an empty JSON reply."""
     # `True` is just used as a dummy value for the acknowledgement.
     self._debugger_data_server.put_incoming_message(True)
     return http_util.Respond(request, {}, "application/json")
Example #29
0
 def _serve_tags(self, request):
     """Serve the tag index for the request's experiment as JSON."""
     ctx = plugin_util.context(request.environ)
     eid = plugin_util.experiment_id(request.environ)
     return http_util.Respond(
         request, self._index_impl(ctx, eid), "application/json")
Example #30
0
 def list_saved_models(self, request):
   """Serve the SavedModel directories found under the logdir as JSON."""
   saved_model_dirs = lite_backend.get_saved_model_dirs(self._logdir)
   return http_util.Respond(
       request, json.dumps(saved_model_dirs), "application/json")