Example #1
    def update(self, experiment, seed, step):
        """updates the scatter plot.
        This method is called by the model picker """
        self.experiment = experiment
        self.seed = seed
        self.step = step
        self.failures_indices = DataReader.get_episodes_with_outcome(
            experiment, seed, step, 0)

        features = DimensionalityReduction.get_model_failing_features(
            experiment, seed, step)

        states = DataReader.get_model_states(experiment, seed, step)
        # Debug output: sanity-check how many episodes were loaded and the
        # largest failing-episode index.
        print('number of episode states', len(states))
        print('max failure index', max(self.failures_indices))

        res = self.DimensionalityReduction.transform(features)
        colors = ['gray'] * res.shape[0]
        opacities = [0.3] * res.shape[0]

        classes = self.DimensionalityReduction.cluster(features)

        category = bq.colorschemes.CATEGORY10[2:]
        # Each row of `res` corresponds to one failing episode; color it by
        # its cluster label and make it stand out against the gray default.
        for f in range(len(features)):
            if f < len(classes):
                colors[f] = category[classes[f]]
            else:
                colors[f] = 'red'
            opacities[f] = 0.8

        self.scatter.x = res[:, 0]
        self.scatter.y = res[:, 1]
        self.scatter.colors = colors
        self.scatter.opacity = opacities
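
For reference, a minimal self-contained sketch of the reduce-cluster-color pattern that update implements, using scikit-learn's PCA and KMeans as stand-ins for the project's DimensionalityReduction helper and a synthetic feature matrix (every name below is illustrative, not part of the original code):

    import numpy as np
    from sklearn.cluster import KMeans
    from sklearn.decomposition import PCA

    rng = np.random.default_rng(0)
    features = rng.random((50, 33))     # one row per failing episode (synthetic)

    # 2-D embedding for the scatter positions, cluster labels for the colors.
    res = PCA(n_components=2).fit_transform(features)
    classes = KMeans(n_clusters=3, n_init=10).fit_predict(features)

    palette = ['#2ca02c', '#d62728', '#9467bd']   # one color per cluster
    colors = [palette[c] for c in classes]
    opacities = [0.8] * len(colors)
    # A bqplot scatter would then be fed res[:, 0], res[:, 1], colors, opacities.

The actual DimensionalityReduction class may use a different embedding or clustering algorithm; the point here is only the mapping from cluster labels to per-point colors and opacities.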
Example #2
    def update2(self, experiment, seed, step):
        # Used during development; not currently called.
        self.experiment = experiment
        self.seed = seed
        self.step = step
        features = DimensionalityReduction.get_model_failing_features(
            experiment, seed, step)
        self.failures_indices = DataReader.get_episodes_with_outcome(
            experiment, seed, step, 0)

        failures = self.failures_indices
        failure_features = features[np.array(failures[:-1]) - 1]

        res = self.DimensionalityReduction.transform(features)
        colors = ['gray'] * res.shape[0]
        opacities = [0.3] * res.shape[0]

        classes = self.DimensionalityReduction.cluster(failure_features)

        category = bq.colorschemes.CATEGORY20[2:]
        # Failure indices are 1-based episode numbers; map each onto the
        # 0-based color/opacity lists, skipping indices past the end of the
        # plotted points, and color it by its cluster label.
        for i, f in enumerate(failures):
            if f - 1 < len(colors):
                if i < len(classes):
                    colors[f - 1] = category[classes[i]]
                else:
                    colors[f - 1] = 'red'
                opacities[f - 1] = 0.8

        self.scatter.x = res[:, 0]
        self.scatter.y = res[:, 1]
        self.scatter.colors = colors
        self.scatter.opacity = opacities
Example #3
    def get_model_failing_features(experiment, seed, checkpoint):
        """ Get features for one model
        This is used for dimensionality reduction, which is later used for
        scatter plotting.
        """
        speeds = DataReader.get_model_speeds(experiment, seed, checkpoint)
        costs = DataReader.get_model_costs(experiment, seed, checkpoint)
        states = DataReader.get_model_states(experiment, seed, checkpoint)
        failing = DataReader.get_episodes_with_outcome(experiment, seed,
                                                       checkpoint, 0)

        data = []

        history_size = 10

        for fail in failing:
            # Last `history_size` frames of speed plus the two cost signals.
            columns_to_save = ["lane_cost", "pixel_proximity_cost"]
            features = [speeds[fail - 1][-history_size:]]
            for c in columns_to_save:
                features.append(costs[fail - 1][c][-history_size:])
            # Left-pad shorter histories with zeros so every row has the same
            # fixed length before stacking and flattening.
            for i in range(len(features)):
                features[i] = np.pad(
                    features[i],
                    (history_size - features[i].shape[0], 0),
                    "constant",
                )
            features = np.stack(features)
            features = features.flatten()
            # Append the terminal collision and arrived-to-destination values,
            # the first two components of the final state, and the last stored
            # gradient for this episode.
            length = costs[fail - 1]["collisions_per_frame"].shape[0]
            features = np.append(
                features,
                [costs[fail - 1]["collisions_per_frame"][length - 1]])
            features = np.append(
                features, [costs[fail - 1]["arrived_to_dst"][length - 1]])
            features = np.append(features, [states[fail - 1][length - 1][0]])
            features = np.append(features, [states[fail - 1][length - 1][1]])
            features = np.append(
                features,
                DataReader.get_last_gradient(experiment, seed, checkpoint,
                                             fail),
            )
            data.append(features)

        data = np.stack(data)
        # Standardize each feature column (zero mean, unit variance).
        data = sklearn.preprocessing.scale(data)

        return data
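
The padding loop above is what guarantees a fixed-length feature vector per failing episode before everything is stacked and standardized. A minimal sketch of that step with synthetic variable-length histories (the array contents are made up; only the np.pad / np.stack / scale pattern mirrors the code above):

    import numpy as np
    import sklearn.preprocessing

    history_size = 10
    rng = np.random.default_rng(0)
    episodes = [rng.random(rng.integers(3, 15)) for _ in range(5)]  # varying lengths

    rows = []
    for ep in episodes:
        hist = ep[-history_size:]        # keep at most the last `history_size` frames
        hist = np.pad(hist, (history_size - hist.shape[0], 0), "constant")  # left-pad with zeros
        rows.append(hist)

    data = sklearn.preprocessing.scale(np.stack(rows))  # zero mean, unit variance per column

Standardizing at the end puts the speed, cost, state, and gradient features on a comparable scale before they are embedded and clustered.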