コード例 #1
0
 def _CompressHistogram(self, histo_ev):
   """Callback for _ProcessHistogram: wrap a compressed histogram event.

   Args:
     histo_ev: an event carrying wall_time, step and a histogram_value proto.

   Returns:
     A CompressedHistogramEvent with the histogram compressed at
     self._compression_bps basis points.
   """
   compressed = compressor.compress_histogram_proto(
       histo_ev.histogram_value, self._compression_bps)
   return CompressedHistogramEvent(histo_ev.wall_time, histo_ev.step,
                                   compressed)
コード例 #2
0
 def _CompressHistogram(self, histo_ev):
   """Callback for _ProcessHistogram.

   Compresses histo_ev.histogram_value at self._compression_bps and packs
   the result, together with the event's wall time and step, into a
   CompressedHistogramEvent.
   """
   wall_time = histo_ev.wall_time
   step = histo_ev.step
   histogram = compressor.compress_histogram_proto(
       histo_ev.histogram_value, self._compression_bps)
   return CompressedHistogramEvent(wall_time, step, histogram)
コード例 #3
0
def aggregate(path, run_wanted=0):
    """Aggregate compressed-histogram values for selected tags from the
    first TensorBoard event file found under *path*.

    Also prints a pgfplots '\\addplot3' snippet built from a hand-picked
    subset of events (indices chosen empirically for a specific run).

    Args:
        path: directory expected to contain an "events.out.tfevents.*" file.
        run_wanted: kept for interface compatibility; currently unused
            (the run filter is disabled).

    Returns:
        dict mapping each tag prefix in ``keys`` to a list of
        compressed-histogram value lists, or None when no event file exists.
        (The original returned an inconsistent ``None, None`` tuple on the
        missing-file path versus a single dict on success; this is now a
        single value on both paths.)
    """
    # Other tags previously used here: 'Eval/Reward', 'Eval/Length',
    # 'Eval/MCTS_Confidence', 'Train/AvgLoss', 'Actor/Sample_length',
    # 'Train/Samples'.
    keys = [
        'Train/Shark_Speed_H'
    ]

    agg_keys = {k: [] for k in keys}

    event_files = glob.glob(os.path.join(path, "events.out.tfevents.*"))
    if not event_files:
        # Nothing to aggregate.
        return None
    event_file = event_files[0]

    # Imported lazily so the missing-file early exit never pays the cost.
    import tensorflow as tf

    event_idx = 0
    plot_idx = 0
    tikz = ''
    for event in my_summary_iterator(event_file):
        values = event.summary.value
        if not values:
            continue
        tag = values[0].tag
        for key in keys:
            if tag.startswith(key):
                compressed = compress_histogram_proto(values[0].histo)
                agg_keys[key].append([pt.value for pt in compressed])
                # Emit pgfplots coordinates for a few hard-coded event
                # indices; presumably cherry-picked steps for a figure —
                # TODO confirm against the paper/plot they feed.
                if event_idx in (1, 744, 988, 2288, 2971):
                    arr = tf.make_ndarray(values[0].tensor)
                    # Midpoint of the first two columns vs. third column.
                    mid = (arr[:, 0] + arr[:, 1]) / 2.
                    coords = np.vstack([mid, arr[:, 2]]).T.tolist()
                    tikz += '\n\\addplot3 [area plot] coordinates {'
                    for c in coords:
                        tikz += str((c[0], plot_idx, c[1]))
                    tikz += '};\n'
                    plot_idx += 1
                event_idx += 1
    # Print the generated LaTeX snippet (deliberate output, kept).
    print(tikz)
    # BUG FIX: removed the leftover `import pdb; pdb.set_trace()` debugger
    # breakpoint that halted every successful run before returning.
    return agg_keys
コード例 #4
0
ファイル: dataproc.py プロジェクト: yueyedeai/rltf
    def _read_tb_data(self):
        """Read data for ``self.tb_tag`` from TensorBoard event files.

        The first matching value determines the mode: scalar
        (``simple_value``) or histogram (``histo``); histograms are converted
        to lists of compressed-histogram values.

        Returns:
          dict with keys ``x`` (steps, sorted ascending), ``y`` (values
          aligned with ``x``) and ``i`` (always None).

        Raises:
          ValueError: if the tag's values are neither simple_value nor histo.
          AssertionError: if no matching values were found in any file.
        """
        files = self._get_tb_files()
        x, y = [], []

        # None until the first matching value fixes the data mode.
        scalar = None

        # Traverse every summary value in every event of every TB file.
        for file in files:
            for e in tf.train.summary_iterator(file):
                for v in e.summary.value:
                    if self.tb_tag == v.tag:
                        if scalar is None:
                            if v.HasField("simple_value"):
                                scalar = True
                            elif v.HasField("histo"):
                                scalar = False
                            else:
                                raise ValueError(
                                    "Only simple_value and histo data can be processed from TB file"
                                )

                        x.append(e.step)
                        if scalar:
                            y.append(v.simple_value)
                        else:
                            # Convert to compressed histogram
                            y.append([
                                chv.value
                                for chv in compress_histogram_proto(v.histo)
                            ])

        # Check for correct parsing.
        # BUG FIX: the formatted diagnostics used to be a separate, dead
        # string-expression statement on the next line, so the assert showed
        # only the bare "Parsing TB incorrect:" prefix.
        assert len(x) > 0 and len(y) > 0, (
            "Parsing TB incorrect: len(x)=%d, len(y)=%d\nx: %s\ny: %s"
            % (len(x), len(y), x, y))

        # Sort both lists by step (x), keeping them aligned.
        x, y = (list(t) for t in zip(*sorted(zip(x, y), key=lambda t: t[0])))

        return dict(x=x, y=y, i=None)
コード例 #5
0
ファイル: gen_vector_plot.py プロジェクト: instance01/GRAB0
def aggregate(path, run_wanted=0):
    """Aggregate compressed-histogram values per tag for one run.

    Reads the "event.tfevents" file under *path* and, for every summary
    whose first value's tag starts with one of ``keys`` and whose tag
    suffix (after the last '/') equals *run_wanted*, appends the
    compressed-histogram values for that event.

    Args:
        path: directory containing an "event.tfevents" file.
        run_wanted: run number to keep; compared against the numeric tag
            suffix (e.g. "Train/Samples/3" → run 3).

    Returns:
        dict mapping each key to a list of compressed-histogram value lists.
    """
    # Other tags previously used here: 'Eval/Reward', 'Eval/Length',
    # 'Eval/MCTS_Confidence', 'Train/AvgLoss'.
    keys = [
        'Actor/Sample_length',
        'Train/Samples'
    ]

    agg_keys = {k: [] for k in keys}

    path = os.path.join(path, "event.tfevents")
    for event in my_summary_iterator(path):
        values = event.summary.value
        # BUG FIX: skip value-less summaries — indexing [0] below raised
        # IndexError on them (the sibling aggregate() already guards this).
        if not values:
            continue
        tag = values[0].tag
        for key in keys:
            if tag.startswith(key):
                # NOTE(review): assumes the tag always ends in an integer
                # run suffix; int() would raise otherwise — confirm.
                run = tag[tag.rfind('/') + 1:]
                if int(run) != run_wanted:
                    continue
                compressed = compress_histogram_proto(values[0].histo)
                agg_keys[key].append([pt.value for pt in compressed])
    return agg_keys