Example 1
    def _get_original_tf_node(lnf_node, sw_config, op=ops_pb2.CAST):
        g = tf.Graph()
        with g.as_default():
            with tf.Session(graph=g) as sess:
                dtype_tup = (lnf_node.inputs[0].dtype.t,
                             lnf_node.inputs[0].dtype.p)
                inp = tf.placeholder(
                    tf_saved_model_base_importer.ImportTFSavedModelBase.
                    REV_DATA_TYPE_MAP[dtype_tup],
                    shape=[
                        None if d < 0 else d
                        for d in lnf_node.inputs[0].shape.d
                    ],
                    name=lnf_node.inputs[0].name)
                dtype_tup = (lnf_node.outputs[0].dtype.t,
                             lnf_node.outputs[0].dtype.p)
                tf_cast = tf.cast(
                    inp,
                    tf_saved_model_base_importer.ImportTFSavedModelBase.
                    REV_DATA_TYPE_MAP[dtype_tup],
                    name=lnf_node.name)

                tmp_dir = py_file_utils.mkdtemp()
                saved_model_dir = os.path.join(tmp_dir, "saved_model")
                tf_graph_exporter.ExportTFSavedModel.save_model(
                    saved_model_dir, sess, [inp], [tf_cast])
        importer = tf_saved_model_importer.ImportTFSavedModel(
            saved_model_dir, sw_config)
        light_graph = importer.as_light_graph()

        tf_node = light_graph.get_node_by_name(lnf_node.name)
        return tf_node.original
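Below is a minimal, self-contained sketch of the SavedModel round trip this helper performs (build a tiny graph, export it to a temporary directory, re-import it, and look the cast op up by name). It uses only public TensorFlow 1.x-style APIs via tf.compat.v1; the placeholder shape, dtypes, and op names are illustrative and not taken from the library.

import os
import tempfile

import tensorflow as tf

tf.compat.v1.disable_eager_execution()

graph = tf.Graph()
with graph.as_default():
    with tf.compat.v1.Session(graph=graph) as sess:
        inp = tf.compat.v1.placeholder(tf.float32, shape=[None, 4], name="inp")
        out = tf.cast(inp, tf.float64, name="out")

        # Export the graph as a SavedModel under a temporary directory
        export_dir = os.path.join(tempfile.mkdtemp(), "saved_model")
        tf.compat.v1.saved_model.simple_save(
            sess, export_dir, inputs={"inp": inp}, outputs={"out": out})

# Re-import the SavedModel into a fresh graph and recover the cast op by name
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
    tf.compat.v1.saved_model.load(sess, ["serve"], export_dir)
    cast_op = sess.graph.get_operation_by_name("out")
    print(cast_op.type)  # "Cast"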
Example 2
    def _run_single_config_helper(self, performance_data):
        """Run the config and update performance_data"""
        sw_config = performance_data.config.sw_config
        hw_specs = performance_data.config.hw_specs
        sim_params = performance_data.config.sim_params

        # Use defaults from perf_sweep if necessary
        if sw_config.sweep_info.py_batch_size == 0:
            sw_config.sweep_info.py_batch_size = self.py_batch_size()

        sim_params.compiled_batch_size = self.compilation_batch_size()
        if sw_config.sweep_info.num_py_batches > 0:
            sim_params.compiled_batch_size = min(
                sw_config.sweep_info.num_py_batches * sw_config.sweep_info.py_batch_size,
                sim_params.compiled_batch_size)

        # Graph transformations
        if performance_data.config.do_transform:
            transform_hw_specs = self._copy_proto(hw_specs)
            transform_sw_config = self._copy_proto(sw_config)
            transform_sim_params = self._copy_proto(sim_params)

            transform_sw_config.debug_info.debug_dir = ""
            transform_sim_params.arch_params.arch_type = \
                sim_params_pb2.ArchitectureParams.VIRTUAL

            # Full graph pipeline
            calibration_data = self.get_calibration_inputs(transform_sw_config)
            tmp_dir = py_file_utils.mkdtemp()
            lgf_pb_path = os.path.join(tmp_dir, "modified_lgf.pb")

            full_graph_pipeline.main(self._graph_path,
                                     self._graph_type,
                                     lgf_pb_path,
                                     graph_types_pb2.LGFProtobuf,
                                     calibration_data,
                                     transform_hw_specs,
                                     transform_sw_config,
                                     transform_sim_params)

            # Read light graph
            light_graph = lgf_graph.LightGraph.lgf_pb_to_graph(
                lgf_graph.LightGraph.read_lgf_pb(lgf_pb_path))

            # Cleanup
            shutil.rmtree(tmp_dir)
        else:
            light_graph = self.read_graph(performance_data.config.sw_config)

        # Fine tuning
        if (performance_data.config.do_fine_tuning
                and sw_config.sweep_info.num_fine_tuning_epochs > 0):
            if self._fine_tuning_fn is None:
                raise ValueError("Must provide fine tuning function")

            num_fine_tuning_shards = self.num_fine_tuning_shards()
            tot_num_shards = int(sw_config.sweep_info.num_fine_tuning_epochs *
                                 num_fine_tuning_shards)
            # Get an ordered list of shards to be used for fine tuning
            shard_list = []
            while len(shard_list) < tot_num_shards:
                shard_list.extend(np.random.permutation(range(num_fine_tuning_shards)))
            shard_list = shard_list[:tot_num_shards]
            for i, shard_indx in enumerate(shard_list):
                fine_tuning_data = self.get_fine_tuning_inputs(
                    performance_data.config.sw_config,
                    shard_indx)
                fine_tuning_labels = self.get_fine_tuning_labels(
                    performance_data.config.sw_config,
                    shard_indx)
                light_graph = self._fine_tuning_fn(light_graph,
                                                   fine_tuning_data,
                                                   fine_tuning_labels,
                                                   performance_data.config.hw_specs,
                                                   performance_data.config.sw_config,
                                                   performance_data.config.sim_params,
                                                   self.logits_tensor_name())

        # Create debug_dir if necessary
        debug_dir = sw_config.debug_info.debug_dir
        if debug_dir:
            if os.path.exists(debug_dir):
                shutil.rmtree(debug_dir)
            os.makedirs(debug_dir)

        with graph_collection.GraphCollection() as graph_coll:
            # Initialize graph for running test data
            run_graph, runner_cls, debug_kwargs = self._init_graph_coll(
                light_graph, graph_coll, performance_data)

            # Run test data
            runner = runner_cls(light_graph, hw_specs, sw_config, sim_params, graph_coll)
            self._run_streamed_test_data(runner, performance_data)

            # Get extra information after running
            self._get_extra_debug_kwargs(debug_kwargs, graph_coll, performance_data)

            # Save simulation metrics
            performance_data.simulation_metrics.CopyFrom(
                graph_coll.simulation_metrics_collection().get_simulation_metrics())

        # Save graph and debug info
        performance_data.graph.CopyFrom(light_graph.as_lgf_pb())
        self._save_debug_info(performance_data, **debug_kwargs)
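As a standalone illustration, here is a small sketch of the shard-ordering logic used for fine tuning above: whole random permutations of the shard indices are drawn until the requested number of epochs is covered, then the list is truncated. The function name and explicit seeding are assumptions for the sketch.

import numpy as np

def build_shard_list(num_shards, num_epochs, seed=0):
    # Draw whole permutations so every shard appears once per epoch
    rng = np.random.RandomState(seed)
    total = int(num_epochs * num_shards)
    shard_list = []
    while len(shard_list) < total:
        shard_list.extend(rng.permutation(range(num_shards)))
    return shard_list[:total]

print(build_shard_list(num_shards=4, num_epochs=2))
# prints 8 shard indices; each epoch visits every shard exactly once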
Example 3
def main(ground_truth_dir,
         detection_results_dir,
         ignore=None,
         set_class_iou=None):
    # Defaults from args
    GT_PATH = ground_truth_dir
    DR_PATH = detection_results_dir
    specific_iou_flagged = False
    if set_class_iou is not None:
        specific_iou_flagged = True
    if ignore is None:
        ignore = []

    # Create a temp dir
    TEMP_FILES_PATH = py_file_utils.mkdtemp()

    # get a list with the ground-truth files
    ground_truth_files_list = glob.glob(GT_PATH + "/*.txt")
    if len(ground_truth_files_list) == 0:
        error("Error: No ground-truth files found!")
    ground_truth_files_list.sort()
    # dictionary with counter per class
    gt_counter_per_class = {}
    counter_images_per_class = {}

    for txt_file in ground_truth_files_list:
        file_id = txt_file.split(".txt", 1)[0]
        file_id = os.path.basename(os.path.normpath(file_id))
        # check if there is a corresponding detection-results file
        temp_path = os.path.join(DR_PATH, (file_id + ".txt"))
        if not os.path.exists(temp_path):
            error_msg = "Error. File not found: {}\n".format(temp_path)
            error_msg += "(You can avoid this error message by running"
            error_msg += "extra/intersect-gt-and-dr.py)"
            error(error_msg)
        lines_list = file_lines_to_list(txt_file)
        # create ground-truth dictionary
        bounding_boxes = []
        is_difficult = False
        already_seen_classes = []
        for line in lines_list:
            try:
                if "difficult" in line:
                    class_name, left, top, right, bottom, _difficult = line.split(
                    )
                    is_difficult = True
                else:
                    class_name, left, top, right, bottom = line.split()
            except ValueError:
                error_msg = "Error: File " + txt_file + " in the wrong format.\n"
                error_msg += " Expected: <class_name> <left> <top> <right> <bottom>"
                error_msg += "[\"difficult\"]\n"
                error_msg += " Received: " + line
                error_msg += "\n\nIf you have a <class_name> with spaces "
                error_msg += "between words you should remove them\n"
                error_msg += "by running the script \"remove_space.py\" or "
                error_msg += "rename_class.py\" in the \"extra/\" folder."
                error(error_msg)
            # check if class is in the ignore list, if yes skip
            if class_name in ignore:
                continue
            bbox = left + " " + top + " " + right + " " + bottom
            if is_difficult:
                bounding_boxes.append({
                    "class_name": class_name,
                    "bbox": bbox,
                    "used": False,
                    "difficult": True
                })
                is_difficult = False
            else:
                bounding_boxes.append({
                    "class_name": class_name,
                    "bbox": bbox,
                    "used": False
                })
                # count that object
                if class_name in gt_counter_per_class:
                    gt_counter_per_class[class_name] += 1
                else:
                    # if class didn"t exist yet
                    gt_counter_per_class[class_name] = 1

                if class_name not in already_seen_classes:
                    if class_name in counter_images_per_class:
                        counter_images_per_class[class_name] += 1
                    else:
                        # if class didn"t exist yet
                        counter_images_per_class[class_name] = 1
                    already_seen_classes.append(class_name)

        # dump bounding_boxes into a ".json" file
        with open(TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json",
                  "w") as outfile:
            json.dump(bounding_boxes, outfile)

    gt_classes = list(gt_counter_per_class.keys())
    # let"s sort the classes alphabetically
    gt_classes = sorted(gt_classes)
    n_classes = len(gt_classes)

    if specific_iou_flagged:
        n_args = len(set_class_iou)
        error_msg = \
            "\n --set-class-iou [class_1] [IoU_1] [class_2] [IoU_2] [...]"
        if n_args % 2 != 0:
            error("Error, missing arguments. Flag usage:" + error_msg)
        # [class_1] [IoU_1] [class_2] [IoU_2]
        # specific_iou_classes = ["class_1", "class_2"]
        specific_iou_classes = set_class_iou[::2]  # even
        # iou_list = ["IoU_1", "IoU_2"]
        iou_list = set_class_iou[1::2]  # odd
        if len(specific_iou_classes) != len(iou_list):
            error("Error, missing arguments. Flag usage:" + error_msg)
        for tmp_class in specific_iou_classes:
            if tmp_class not in gt_classes:
                error("Error, unknown class \"" + tmp_class +
                      "\". Flag usage:" + error_msg)
        for num in iou_list:
            if not is_float_between_0_and_1(num):
                error("Error, IoU must be between 0.0 and 1.0. Flag usage:" +
                      error_msg)

    # get a list with the detection-results files
    dr_files_list = glob.glob(DR_PATH + "/*.txt")
    dr_files_list.sort()

    for class_index, class_name in enumerate(gt_classes):
        bounding_boxes = []
        for txt_file in dr_files_list:
            # only on the first class, check that the corresponding ground-truth files exist
            file_id = txt_file.split(".txt", 1)[0]
            file_id = os.path.basename(os.path.normpath(file_id))
            temp_path = os.path.join(GT_PATH, (file_id + ".txt"))
            if class_index == 0:
                if not os.path.exists(temp_path):
                    error_msg = "Error. File not found: {}\n".format(temp_path)
                    error_msg += "(You can avoid this error message "
                    error_msg += "by running extra/intersect-gt-and-dr.py)"
                    error(error_msg)
            lines = file_lines_to_list(txt_file)
            for line in lines:
                try:
                    tmp_class_name, confidence, left, top, right, bottom = line.split(
                    )
                except ValueError:
                    error_msg = "Error: File " + txt_file
                    error_msg += " in the wrong format.\n"
                    error_msg += " Expected: <class_name> <confidence> <left>"
                    error_msg += " <top> <right> <bottom>\n"
                    error_msg += " Received: " + line
                    error(error_msg)
                if tmp_class_name == class_name:
                    bbox = left + " " + top + " " + right + " " + bottom
                    bounding_boxes.append({
                        "confidence": confidence,
                        "file_id": file_id,
                        "bbox": bbox
                    })

        # sort detection-results by decreasing confidence
        bounding_boxes.sort(key=lambda x: float(x["confidence"]), reverse=True)
        with open(TEMP_FILES_PATH + "/" + class_name + "_dr.json",
                  "w") as outfile:
            json.dump(bounding_boxes, outfile)

    # Calculate the AP for each class
    sum_AP = 0.0
    ap_dictionary = {}
    lamr_dictionary = {}
    count_true_positives = {}
    for class_index, class_name in enumerate(gt_classes):
        count_true_positives[class_name] = 0
        # Load detection-results of that class
        dr_file = TEMP_FILES_PATH + "/" + class_name + "_dr.json"
        with open(dr_file) as f:
            dr_data = json.load(f)

        # Assign detection-results to ground-truth objects
        nd = len(dr_data)
        tp = [0] * nd  # creates an array of zeros of size nd
        fp = [0] * nd
        for idx, detection in enumerate(dr_data):
            file_id = detection["file_id"]

            # assign detection-results to ground truth object if any
            # open ground-truth with that file_id
            gt_file = TEMP_FILES_PATH + "/" + file_id + "_ground_truth.json"
            with open(gt_file) as f:
                ground_truth_data = json.load(f)
            ovmax = -1
            gt_match = -1
            # load detected object bounding-box
            bb = [float(x) for x in detection["bbox"].split()]
            for obj in ground_truth_data:
                # look for a class_name match
                if obj["class_name"] == class_name:
                    bbgt = [float(x) for x in obj["bbox"].split()]
                    bi = [
                        max(bb[0], bbgt[0]),
                        max(bb[1], bbgt[1]),
                        min(bb[2], bbgt[2]),
                        min(bb[3], bbgt[3])
                    ]
                    iw = bi[2] - bi[0] + 1
                    ih = bi[3] - bi[1] + 1
                    if iw > 0 and ih > 0:
                        # compute overlap (IoU) = area of intersection / area of union
                        ua = ((bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1) +
                              (bbgt[2] - bbgt[0] + 1) *
                              (bbgt[3] - bbgt[1] + 1) - iw * ih)
                        ov = iw * ih / ua
                        if ov > ovmax:
                            ovmax = ov
                            gt_match = obj

            # assign detection as true positive/don"t care/false positive
            # set minimum overlap
            min_overlap = MINOVERLAP
            if specific_iou_flagged:
                if class_name in specific_iou_classes:
                    index = specific_iou_classes.index(class_name)
                    min_overlap = float(iou_list[index])
            if ovmax >= min_overlap:
                if "difficult" not in gt_match:
                    if not bool(gt_match["used"]):
                        # true positive
                        tp[idx] = 1
                        gt_match["used"] = True
                        count_true_positives[class_name] += 1
                        # update the ".json" file
                        with open(gt_file, "w") as f:
                            f.write(json.dumps(ground_truth_data))
                    else:
                        # false positive (multiple detection)
                        fp[idx] = 1
            else:
                # false positive
                fp[idx] = 1

        # compute precision/recall
        cumsum = 0
        for idx, val in enumerate(fp):
            fp[idx] += cumsum
            cumsum += val
        cumsum = 0
        for idx, val in enumerate(tp):
            tp[idx] += cumsum
            cumsum += val

        rec = tp[:]
        for idx, val in enumerate(tp):
            rec[idx] = float(tp[idx]) / gt_counter_per_class[class_name]

        prec = tp[:]
        for idx, val in enumerate(tp):
            prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])

        ap, mrec, mprec = voc_ap(rec[:], prec[:])
        sum_AP += ap
        ap_dictionary[class_name] = ap

        n_images = counter_images_per_class[class_name]
        lamr, mr, fppi = log_average_miss_rate(np.array(rec), np.array(fp),
                                               n_images)
        lamr_dictionary[class_name] = lamr

    mAP = sum_AP / n_classes
    text = "mAP = {0:.2f}%".format(mAP * 100)
    logging.info(text)

    # remove the temp_files directory
    shutil.rmtree(TEMP_FILES_PATH)

    return mAP
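For reference, a standalone sketch of the IoU computation used in the matching loop above, keeping the same inclusive-pixel (+1) convention; boxes are [left, top, right, bottom].

def iou(bb, bbgt):
    # Intersection rectangle between detection bb and ground truth bbgt
    bi = [max(bb[0], bbgt[0]), max(bb[1], bbgt[1]),
          min(bb[2], bbgt[2]), min(bb[3], bbgt[3])]
    iw = bi[2] - bi[0] + 1
    ih = bi[3] - bi[1] + 1
    if iw <= 0 or ih <= 0:
        return 0.0
    # Union area = area(bb) + area(bbgt) - intersection
    ua = ((bb[2] - bb[0] + 1) * (bb[3] - bb[1] + 1)
          + (bbgt[2] - bbgt[0] + 1) * (bbgt[3] - bbgt[1] + 1)
          - iw * ih)
    return iw * ih / ua

print(iou([10, 10, 50, 50], [10, 10, 50, 50]))  # 1.0 for identical boxes
print(iou([10, 10, 50, 50], [30, 30, 70, 70]))  # partial overlap, well below 0.5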
Example 4
 def setUp(self):
     self._delete_tmp_dir = True
     self.tmp_dir = py_file_utils.mkdtemp()
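A hedged sketch of the tearDown that would typically pair with this setUp; the _delete_tmp_dir flag and tmp_dir attribute come from the setUp above, while the shutil.rmtree cleanup itself is an assumption.

 def tearDown(self):
     # Remove the temporary directory created in setUp (assumed cleanup)
     if self._delete_tmp_dir:
         shutil.rmtree(self.tmp_dir)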
Example 5
    def run(self, inputs, output_edges=None):
        """
        Params:
            inputs: an inference_pb2.BatchedInferenceInput() protobuf
            output_edges: a list of lgf_pb2.EdgeInfo() protobufs; if None, will use
                self._light_graph.output_edges()

        Returns:
            outputs: an inference_pb2.BatchedInferenceOutput() protobuf object, such that
                outputs.batches[i].results[j] corresponds to the edge output_edges[j]
                from batch inputs.batches[i]
        """
        # Check inputs and get output_edges
        self._check_inputs(inputs)
        if output_edges is None:
            output_edges = self._light_graph.output_edges()

        # Prune graph
        input_edges = [nt.edge_info for nt in inputs.batches[0].inputs]
        light_graph = self._light_graph.prune_graph(input_edges=input_edges,
                                                    output_edges=output_edges,
                                                    include_inputs=False)

        # Collapse graph if it is not fully supported
        if not self.is_fully_supported(light_graph):
            light_graph = graph_exporter.ExportGraph.get_collapsed_light_graph(
                light_graph)

        # Create exporter
        graph_type = self.get_graph_type(light_graph)
        exporter = graph_exporter_map.GRAPH_EXPORTER_MAP[graph_type](
            light_graph,
            self._hw_spec,
            self._sw_config,
            self._sim_params,
            graph_coll=self._graph_coll)

        # Export the graph
        tmp_dir = py_file_utils.mkdtemp()
        graph_path = os.path.join(tmp_dir, "graph_path")
        exporter.export_graph(graph_path)

        # External graph runner
        external_runner = external_graph_runner_map.EXTERNAL_GRAPH_RUNNER_MAP[
            graph_type](graph_path,
                        self._hw_spec,
                        self._sw_config,
                        self._sim_params,
                        graph_coll=self._graph_coll)

        # Run inference
        outputs = inference_pb2.BatchedInferenceOutput()
        for inf_inp in inputs.batches:
            outputs.batches.add().CopyFrom(external_runner.run(inf_inp))

        # Clean up
        shutil.rmtree(tmp_dir)

        # Re-align outputs because outputs of the collapsed graph may have
        # different names than outputs of the original graph
        output_map = self._create_output_map(output_edges, light_graph)
        return self._get_aligned_outputs(outputs, output_edges, output_map)
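A hedged usage sketch of the run() API described in the docstring, iterating the returned proto with the batches[i].results[j] layout it documents; the runner, inputs, and output_edges objects are assumed to be constructed elsewhere.

outputs = runner.run(inputs, output_edges=output_edges)
for i, batch in enumerate(outputs.batches):
    for j, result in enumerate(batch.results):
        # result j of batch i corresponds to output_edges[j] for inputs.batches[i]
        print(i, output_edges[j].name, result)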
Example 6
 def init_new_config(self):
     # Need to write ground truth and detection results to disk to compute mAP
     self._ground_truth_dir = py_file_utils.mkdtemp()
     self._detection_results_dir = py_file_utils.mkdtemp()
     self._image_indx = 0  # tracks image index over stream of test outputs
Example 7
def main(light_graph_pb_path,
         reduce_unsupported=False,
         reduce_while_nodes=True,
         list_of_pb_histogram_folder_paths=None,
         workload=None,
         error_output_dir=None):
    """
    Converts a light graph protobuf file to a JSON-compatible dictionary,
    adds dummy nodes, and calculates appropriate positions for nodes for visualization.

    :param light_graph_pb_path: path to the light graph proto file
    :return: graph dictionary which can be converted to JSON format.
    """

    graph_transform = expand_subgraph_nodes.ExpandSubgraphNodes(
        hw_spec_pb2.HardwareSpecs(), sw_config_pb2.SoftwareConfig(),
        sim_params_pb2.SimulationParams())

    # Transform light graph protobuf to light graph
    light_graph = graph_transform.process_transforms(
        lgf_graph.LightGraph.lgf_pb_to_graph(
            lgf_graph.LightGraph.read_lgf_pb(light_graph_pb_path)))

    # Nodes and link is a structure required by d3.js
    list_of_links = []
    list_of_nodes = []
    # Used to store non-dummy nodes that are inputs to the graph.
    input_nodes = set()
    node_set = set()

    # Simultaneously creating nx graph to get absolute node positions to space
    # them appropriately.
    nxGraph = nx.Graph()
    dummy_nodes_counter = 0

    for node in light_graph.nodes():
        for ctr, input_edge in enumerate(node.inputs):
            if light_graph.has_node(input_edge.name):
                """ To check if incoming node is input node by checking if it is in present
                in graph. """
                source = input_edge.name
            else:
                source = "Dummy" + str(dummy_nodes_counter)
                dummy_nodes_counter += 1
            target = node.name

            linkname = input_edge.name + str(input_edge.port)
            # Add other attributes for link here and append as argument
            if not reduce_unsupported or (
                    light_graph.get_node_by_name(source).supported
                    or light_graph.get_node_by_name(target).supported):
                # Add link only if reduce flag is not set or if
                # either src/tar is added (supported)
                if (not reduce_while_nodes or "/while/" not in source
                        and "/while/" not in target):
                    # Checking if while keyword in node names
                    node_set.add(target)
                    node_set.add(source)
                    list_of_links.append((source, target, linkname))

        for input_name in node.control_inputs:
            if not light_graph.has_node(input_name):
                raise ValueError(
                    "Could not find control input {0}".format(input_name))
            if not reduce_unsupported or light_graph.get_node_by_name(
                    input_name).supported:
                if not reduce_while_nodes or "/while/" not in node.name:
                    node_set.add(input_name)
                    list_of_links.append((input_name, node.name, "ctrl_input"))

    for edge in light_graph.output_edges():
        source = edge.name
        target = "Dummy" + str(dummy_nodes_counter)
        dummy_nodes_counter += 1
        linkname = edge.name + str(edge.port)
        # Add other attributes for link here and append as argument
        if not reduce_unsupported or light_graph.get_node_by_name(
                source).supported:
            list_of_links.append((source, target, linkname))
            node_set.add(source)
            node_set.add(target)

    for node in light_graph.input_edges():
        input_nodes.add(node.name)

    # list provides a better layout than set
    list_of_nodes = list(node_set)

    # Adding nodes and edges to nxGraph
    for node in list_of_nodes:
        nxGraph.add_node(node)

    for link in list_of_links:
        nxGraph.add_edge(link[0], link[1])

    # Get positions from the spectral layout, which is faster than other layouts.
    positions_dict = nx.spectral_layout(nxGraph)

    graph = {}
    graph["nodes"] = []
    graph["links"] = []
    graph["directories"] = ["Graph"]
    graph["workload"] = []
    graph["op_type"] = []
    graph["all_opu_type_names"] = []

    node_name_to_error_dict = collections.defaultdict(int)
    if workload:
        make_tmp_dir = not error_output_dir
        if make_tmp_dir:
            error_output_dir = py_file_utils.mkdtemp()
        fname = os.path.join(error_output_dir, "relative_error_data.pb")

        if not os.path.exists(fname):
            # Writes the pb
            plot_layer_error.main(workload, error_output_dir, fix_bns=False)

        # Reads the pb
        errors_pb = performance_data_pb2.RelativeErrorData()
        with open(fname, "rb") as f:
            errors_pb.ParseFromString(f.read())
        node_name_to_error_dict = collections.defaultdict(
            int, dict(errors_pb.errors_dict))
        for name, error in node_name_to_error_dict.items():
            graph["workload"].append({"name": name, "error": error})

        if make_tmp_dir:
            shutil.rmtree(error_output_dir)

    opu_type_set = set()
    for index, node in enumerate(node_set):
        group = -1
        hover_text = ""
        error_value = 0
        opu_type_name = light_graph.get_node_by_name(node).WhichOneof(
            "node") if not node.startswith("Dummy") else "Dummy"
        opu_type_set.add(opu_type_name)

        if not node.startswith("Dummy"):
            group = graph_transform.node_name_to_subgraph_id(node) \
                if (light_graph.get_node_by_name(node).supported) else -1
            if group is None:
                group = -1
            lnf_node = light_graph.get_node_by_name(node)
            hover_text = text_list_to_hover_text(
                message_to_text_list(
                    getattr(lnf_node, lnf_node.WhichOneof("node"))))
            error_value = node_name_to_error_dict[node]

        graph["nodes"].append({
            "name": node,
            "node_info": opu_type_name,
            "group": group + 1,
            "hover_text": hover_text,
            "error": error_value
        })
        # Take position from the layout algorithm.
        graph["nodes"][index]["x"] = positions_dict[node][0]
        graph["nodes"][index]["y"] = positions_dict[node][1]

    graph["all_opu_type_names"] = list(opu_type_set)
    name_to_hist_dict = collections.defaultdict(list)

    if list_of_pb_histogram_folder_paths:
        for folder_path in list_of_pb_histogram_folder_paths:
            graph["directories"].append(folder_path)

        # If path exists add the histogram.pb files as hist arrays to dict
        fetch_all_histograms(list_of_pb_histogram_folder_paths,
                             name_to_hist_dict)

    # Excluding dummy nodes since no info is associated with dummy nodes
    for link in list_of_links:
        histograms_per_edge = []
        # Unsupported edges have port as -1
        port = int(link[2][-1]) if not link[2].startswith("ctrl_input") else -1

        if not link[0].startswith("Dummy"):
            # Add histogram data if the source node is a const node.
            if get_tensor_data_from_const_node(
                    light_graph.get_node_by_name(link[0])):
                histograms_per_edge.append(
                    get_tensor_data_from_const_node(
                        light_graph.get_node_by_name(link[0])))

            # check in dictionary if current blank histograms are supposed to be filled
            if not histograms_per_edge:
                node_file_name = (convert_to_debug_mode.ConvertToDebugMode.
                                  file_friendly_name(link[2][:-1]))
                names_to_try = [node_file_name]
                if "_0_cast" in node_file_name:
                    names_to_try.append(node_file_name.strip("_0_cast"))
                for n in names_to_try:
                    histograms_per_edge = name_to_hist_dict[n + ":" +
                                                            link[2][-1]]
                    if histograms_per_edge:
                        break

        graph["links"].append({
            "source":
            link[0],
            "target":
            link[1],
            "linkname":
            link[0] + ":" + str(port),
            "hover_info":
            get_hover_text_link(link, light_graph, port),
            "edge_hist_data":
            histograms_per_edge or [],
            "port":
            port
        })

    graph["input_nodes"] = []
    for node in input_nodes:
        graph["input_nodes"].append({"name": node})

    # Adding all histogram data from folder in the graph.

    # To print the graph (dictionary) use this: print(json.dumps(graph))
    return graph
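Following the hint in the final comment, a hedged usage sketch that serializes the returned dictionary to JSON for the d3.js front end; the file paths are illustrative.

import json

graph = main("/path/to/light_graph.pb", reduce_unsupported=True)
with open("graph.json", "w") as f:
    json.dump(graph, f)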