Example #1
0
def plot_param_sweep(results, n, params, param_name, param_logscale):
    """Plot accuracy versus parameter.

    Parameters
    ----------

    results: list[float]
        The accuracy or loss for a number of experiments, each of which used
        different parameters.
    n: int
        The number of test samples used for each experiment.
    params: list[float]
        The parameter values that changed during each experiment.
    param_name: str
        The name of the parameter that varied.
    param_logscale: bool
        Whether to plot the parameter axis in log-scale.
    """

    from snntoolbox.utils.utils import wilson_score

    # Wilson-score confidence interval for each experiment, used as error bars.
    ci = [wilson_score(q, n) for q in results]
    if param_logscale:
        # The keyword clipping non-positive x-values was renamed from
        # ``nonposx`` to ``nonpositive`` in matplotlib 3.3 and the old name
        # was removed in 3.5; try the modern spelling first and fall back
        # for legacy matplotlib installations.
        try:
            plt.xscale('log', nonpositive='clip')
        except (TypeError, ValueError):
            plt.xscale('log', nonposx='clip')
    plt.errorbar(params, results, yerr=ci, fmt='x-')
    plt.title('Accuracy vs Hyperparameter')
    plt.xlabel(param_name)
    plt.ylabel('accuracy')
    # Pad the x-range slightly beyond the swept values. When the first
    # parameter is negative, a factor below 1 would move the limit inward,
    # so bump the factor above 1 instead.
    fac = 0.9
    if params[0] < 0:
        fac += 0.2
    plt.xlim(fac * params[0], 1.1 * params[-1])
    plt.ylim(0, 1)
Example #2
0
    def set_spikestats(self):
        """Load per-batch simulation results from ``self.dirname`` and compute
        error and operation-count statistics across all samples.

        Expects ``self.dirname`` to contain files ``0.npz`` .. ``{B-1}.npz``,
        each holding arrays ``top1err_b_t``, ``top5err_b_t`` and
        ``synaptic_operations_b_t`` of shape (batch_size, num_timesteps), plus
        the scalars ``operations_ann``, ``top1err_ann`` and ``top5err_ann``.
        Returns early (setting nothing) when the directory is empty.
        """
        from snntoolbox.utils.utils import wilson_score

        num_batches = len(os.listdir(self.dirname))
        if num_batches == 0:
            return
        batch_size, num_timesteps = np.load(os.path.join(
            self.dirname, '0.npz'))['top1err_b_t'].shape
        self.time = np.arange(num_timesteps)
        self.num_samples = num_batches * batch_size
        # Per-sample top-1 / top-5 error [%] and operation count over time.
        e1 = np.empty((self.num_samples, num_timesteps))
        e5 = np.empty((self.num_samples, num_timesteps))
        operations_d_t = np.empty((self.num_samples, num_timesteps))

        # Load each batch file exactly once and fill all three buffers
        # (the original code re-opened every .npz up to three times).
        for batch_idx in range(num_batches):
            data = np.load(os.path.join(self.dirname,
                                        str(batch_idx) + '.npz'))
            lo = batch_idx * batch_size
            hi = lo + batch_size
            operations_d_t[lo:hi] = \
                data['synaptic_operations_b_t'] / self.scale
            e1[lo:hi] = np.multiply(100, data['top1err_b_t'])
            e5[lo:hi] = np.multiply(100, data['top5err_b_t'])

        self.mean_computations_t = np.mean(operations_d_t, 0)
        self.std_computations_t = np.std(operations_d_t, 0)

        # ANN reference values are stored (redundantly) in every batch file;
        # read them once from the last one, as before.
        last = np.load(os.path.join(self.dirname,
                                    str(num_batches - 1) + '.npz'))
        self.operations_ann = float(last['operations_ann'] / self.scale)
        self.e1_ann = float(last['top1err_ann']) * 100
        self.e5_ann = float(last['top5err_ann']) * 100

        # Averaged across samples, shape (num_timesteps,)
        self.e1_mean = np.mean(e1, axis=0)
        self.e1_std = np.std(e1, axis=0)
        self.e5_mean = np.mean(e5, axis=0)
        self.e5_std = np.std(e5, axis=0)
        # 95 % Wilson-score confidence interval of the mean accuracy,
        # converted back to percent.
        self.e1_confidence95 = np.array([wilson_score(1-e/100, self.num_samples)
                                         for e in self.e1_mean]) * 100
        self.e5_confidence95 = np.array([wilson_score(1-e/100, self.num_samples)
                                         for e in self.e5_mean]) * 100

        # Get the operation count at which the error is minimal or 1 % above
        # the min.
        self.e1_0 = min(self.e1_mean)
        self.op1_0 = get_op_at_err(self.mean_computations_t, self.e1_mean,
                                   self.e1_0)
        self.e1_1 = min(self.e1_mean) + 1
        self.op1_1 = get_op_at_err(self.mean_computations_t, self.e1_mean,
                                   self.e1_1)
        self.e1_1 = get_err_at_op(self.e1_mean, self.mean_computations_t,
                                  self.op1_1)
        self.e1_2 = get_err_at_op(self.e1_mean, self.mean_computations_t,
                                  self.operations_ann)
        self.op1_2 = get_op_at_err(self.mean_computations_t, self.e1_mean,
                                   self.e1_2)
        self.e5_0 = min(self.e5_mean)
        self.op5_0 = get_op_at_err(self.mean_computations_t, self.e5_mean,
                                   self.e5_0)
        self.e5_1 = min(self.e5_mean) + 1
        self.op5_1 = get_op_at_err(self.mean_computations_t, self.e5_mean,
                                   self.e5_1)
        self.e5_1 = get_err_at_op(self.e5_mean, self.mean_computations_t,
                                  self.op5_1)
        # NOTE(review): the top-5 path refines op5_1 once more after e5_1,
        # but the top-1 path above does not refine op1_1 — possibly an
        # intentional difference, possibly an oversight; preserved as-is.
        self.op5_1 = get_op_at_err(self.mean_computations_t, self.e5_mean,
                                   self.e5_1)
        self.e5_2 = get_err_at_op(self.e5_mean, self.mean_computations_t,
                                  self.operations_ann)
        self.op5_2 = get_op_at_err(self.mean_computations_t, self.e5_mean,
                                   self.e5_2)

        self.e1_std_ann = get_std(self.e1_ann)
        self.e5_std_ann = get_std(self.e5_ann)
        self.e1_confidence95_ann = wilson_score(1 - self.e1_ann / 100,
                                                self.num_samples) * 100
        self.e5_confidence95_ann = wilson_score(1 - self.e5_ann / 100,
                                                self.num_samples) * 100

        self.e1_optimal, self.op1_optimal = get_minimal_err_and_op(
            self.mean_computations_t, self.e1_mean)

        self.e5_optimal, self.op5_optimal = get_minimal_err_and_op(
            self.mean_computations_t, self.e5_mean)