Code example #1
def stream_tfevents(path, file_api, run, step=0, namespace=""):
    """Parses and streams a tfevents file to the server"""
    last_step = 0
    row = {}
    buffer = []
    last_row = {}
    global_step_key = namespaced_tag("global_step", namespace)
    try:
        for summary in summary_iterator(path):
            parsed = tf_summary_to_dict(summary, namespace=namespace)
            if parsed is None:
                continue
            if last_step != parsed[global_step_key]:
                last_step = parsed[global_step_key]
                if len(row) > 3:  # Must have more than _timestamp, _step, and global_step
                    step += 1
                    row["_step"] = step
                    last_row = history_dict_to_json(run, deepcopy(row))
                    file_api.push("wandb-history.jsonl", util.json_dumps_safer_history(last_row))
                    row = {}
            row.update(parsed)
    except tf.errors.DataLossError:
        wandb.termwarn("Found a truncated record in tfevents file, stopping parse")
    step += 1
    row["_step"] = step
    last_row = history_dict_to_json(run, deepcopy(row))
    file_api.push("wandb-history.jsonl", util.json_dumps_safer_history(last_row))
    return last_row
Code example #2
    def max_val_acc(self, event_file_name):
        """Return the largest 'val_acc' value recorded in the event file."""
        max_val_acc = 0
        for e in summary_iterator(event_file_name):
            for v in e.summary.value:
                if v.tag == 'val_acc':
                    if v.simple_value > max_val_acc:
                        max_val_acc = v.simple_value

        return max_val_acc
Code example #3
def get_actual_step(log_file):
    """
    Return the next step to log, i.e. one past the last step recorded
    in the event file.
    """
    actual_step = 0
    for e in summary_iterator(log_file):
        if hasattr(e, 'step'):
            actual_step = e.step

    return actual_step + 1
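A minimal usage sketch for the helper above, assuming `summary_iterator` is imported from its usual TensorFlow location (in TF 2.x it is also exposed as `tf.compat.v1.train.summary_iterator`); the event-file path is hypothetical:

from tensorflow.python.summary.summary_iterator import summary_iterator

# Hypothetical event file produced by an earlier run.
log_file = "runs/exp1/events.out.tfevents.1234567890.host"

# get_actual_step returns one past the last recorded step, so training can
# resume its step counter without overlapping the existing log.
next_step = get_actual_step(log_file)
print("resuming at step", next_step)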
Code example #4
    def __init__(self,
                 filepath,
                 score=None,
                 tensorboard_logfile=None,
                 monitor='val_loss',
                 verbose=0,
                 save_best_only=True,
                 save_weights_only=False,
                 mode='auto',
                 period=1):
        """
        :param filepath: Keras' filepath argument. "filepath: string, path to save the model file."
        :param score: float, the score of the existing model you wish to use (either score or
            tensorboard_logfile must be provided).
        :param tensorboard_logfile: string, path to the tensorboard log file you wish to pull
            score metrics from (either score or tensorboard_logfile must be provided).
        :param monitor: Keras' monitor argument. "monitor: quantity to monitor."
        :param verbose: Keras' verbose argument. "verbose: verbosity mode, 0 or 1."
        :param save_best_only: Keras' save_best_only argument, NOTE: This is defaulted to True: "save_best_only:
            if `save_best_only=True`, the latest best model according to the quantity monitored will not be
            overwritten".
        :param save_weights_only: Keras' save_weights_only argument: "save_weights_only: if True,
            then only the model's weights will be saved (`model.save_weights(filepath)`), else the full model
            is saved (`model.save(filepath)`)."
        :param mode: Keras' mode argument: "mode: one of {auto, min, max}. If `save_best_only=True`, the decision
            to overwrite the current save file is made based on either the maximization or the
            minimization of the monitored quantity. For `val_acc`, this should be `max`, for `val_loss` this should
            be `min`, etc. In `auto` mode, the direction is automatically inferred from the name of the monitored
            quantity."
        :param period: Keras' period argument: "Interval (number of epochs) between checkpoints".
        """
        super(MemoryModelCheckpoint,
              self).__init__(filepath, monitor, verbose, save_best_only,
                             save_weights_only, mode, period)

        if (score is None and tensorboard_logfile is None) or (
                score is not None and tensorboard_logfile is not None):
            raise Exception("Exactly one of score or tensorboard_logfile must be provided.")
        elif score is not None:
            self.best = score
        elif tensorboard_logfile is not None:
            scores = []
            # NOTE: scores are read from the 'loss' tag regardless of `monitor`.
            for x in summary_iterator(tensorboard_logfile):
                for val in x.summary.value:
                    if val.tag == 'loss':
                        scores.append(val.simple_value)

            if self.monitor_op == np.greater:
                self.best = np.max(scores)
            else:
                self.best = np.min(scores)
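A hedged construction sketch for the callback above, assuming `MemoryModelCheckpoint` extends Keras' `ModelCheckpoint` as the `super().__init__` call suggests; the paths are hypothetical and the monitored quantity is set to the 'loss' tag the constructor actually reads:

# Hypothetical paths; the callback seeds self.best from the previous run's
# TensorBoard log so save_best_only continues from the earlier best score.
checkpoint = MemoryModelCheckpoint(
    filepath="checkpoints/model-{epoch:02d}.h5",
    tensorboard_logfile="logs/events.out.tfevents.1234567890.host",
    monitor="loss",
    mode="min",
)
# The callback is then passed to model.fit(..., callbacks=[checkpoint]).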
Code example #5
def load_log(log_file, tags=['train/acc', 'val/acc', 'train/iou', 'val/iou']):
    """
    return best metrics
    """
    best_metrics = {}

    for e in summary_iterator(log_file):
        for v in e.summary.value:
            if v.tag in tags:
                if v.tag not in best_metrics:
                    best_metrics[v.tag] = v.simple_value
                elif v.tag.find('loss') >= 0:
                    best_metrics[v.tag] = min(best_metrics[v.tag],
                                              v.simple_value)
                else:
                    best_metrics[v.tag] = max(best_metrics[v.tag],
                                              v.simple_value)

    return best_metrics
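A short usage sketch for `load_log`, with a hypothetical event-file path and a tag list that also exercises the 'loss' branch:

# Hypothetical event file and tags; loss tags take the minimum, others the maximum.
best = load_log("logs/run1/events.out.tfevents.1234567890.host",
                tags=['train/acc', 'val/acc', 'train/loss', 'val/loss'])
for tag, value in best.items():
    print(f"{tag}: {value:.4f}")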
Code example #6
def get_rewards(paths, smoothing=None, extend=True):
    """Aggregate evaluation returns across runs.

    Reads the Ray Tune event file found under each path and returns the step
    axis, the mean reward at each step, and the standard error across runs.
    """
    all_steps = None
    all_rewards = None
    for path in paths:
        if isdir(path):
            files = [f for f in listdir(path) if isfile(join(path, f))]

            for f in sorted(files):
                if "events.out.tfevents" in f:
                    path = join(path, f)
                    break
        steps = []
        rewards = []
        for summary in summary_iterator(path):
            step = None
            reward = None
            for v in summary.summary.value:
                if v.tag == "ray/tune/train-steps":
                    step = v.simple_value
                if v.tag == "ray/tune/evaluation/return-average":
                    reward = v.simple_value
            if step is not None and reward is not None:
                if len(steps) > 0 and step < steps[-1]:
                    print("detected backwards time")
                    break
                steps.append(step)
                rewards.append(reward)
        if all_steps is None:
            all_steps = steps
            all_rewards = [rewards]
        else:
            all_rewards.append(rewards)
            if len(all_steps) < len(steps):
                all_steps = steps
        print(path, len(rewards))

    if not extend:
        # Truncate every run to the shortest one so the arrays can be stacked.
        min_length = int(min([len(r) for r in all_rewards]))
        for i in range(len(all_rewards)):
            all_rewards[i] = all_rewards[i][:min_length]
    else:
        # Pad every run with its final reward so all runs reach the longest one.
        max_length = int(max([len(r) for r in all_rewards]))
        print("max_length:", max_length)
        for i in range(len(all_rewards)):
            print("len before:", len(all_rewards[i]))
            all_rewards[i].extend([all_rewards[i][-1]] *
                                  (max_length - len(all_rewards[i])))
            print("len after:", len(all_rewards[i]))

        min_length = int(min([len(r) for r in all_rewards]))

    if smoothing is not None:
        # Smooth rewards and steps together so their lengths stay consistent.
        all_rewards = [moving_average(r, n=smoothing) for r in all_rewards]
        all_steps = moving_average(all_steps, n=smoothing)
        min_length = int(min([len(r) for r in all_rewards]))
    all_steps = np.array(all_steps[:min_length])
    all_rewards = np.array(all_rewards)

    print("all_steps", all_steps.shape)
    print("all_rewards:", all_rewards.shape)

    mean_rewards = np.mean(all_rewards, axis=0)
    std_dev = np.std(all_rewards, axis=0)
    std_error = std_dev / np.sqrt(all_rewards.shape[0])

    print("mean_rewards:", mean_rewards.shape)
    print("std_error:", std_error.shape)

    return all_steps, mean_rewards, std_error
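A hedged sketch of how the aggregation above might be plotted, assuming matplotlib is available and that `moving_average` is defined elsewhere in the same module; the run directories are hypothetical:

import matplotlib.pyplot as plt

# Hypothetical Ray Tune result directories, one per seed.
runs = ["results/seed0", "results/seed1", "results/seed2"]
steps, mean_rewards, std_error = get_rewards(runs, smoothing=10)

# Mean evaluation return with a standard-error band across seeds.
plt.plot(steps, mean_rewards, label="mean return")
plt.fill_between(steps, mean_rewards - std_error,
                 mean_rewards + std_error, alpha=0.3)
plt.xlabel("training steps")
plt.ylabel("evaluation return")
plt.legend()
plt.show()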