Example #1
def get_topology():
    from modules.topologies import SmallWorldTopology

    return SmallWorldTopology(
        SmallWorldTopology.Configuration(
            minicolumn_shape=(4, 4, 4),
            macrocolumn_shape=(2, 2, 2),
            minicolumn_spacing=1460,
            p_max=0.11,
            spectral_radius_norm=False,
            intracolumnar_sparseness=635,
            neuron_spacing=40,
            inhibitory_init_weight_range=(0.1, 0.3),
            excitatory_init_weight_range=(0.2, 0.5),
        )
    )
Example #2
def main(seed=0x1B):
    np.random.seed(seed)

    topology = SmallWorldTopology(
        SmallWorldTopology.Configuration(
            minicolumn_shape=(4, 4, 4),
            macrocolumn_shape=(2, 2, 2),
            inhibitory_prob=0.2,
            minicolumn_spacing=1460,
            p_max=0.11,
            spectral_radius_norm=False,
            intracolumnar_sparseness=635,
            neuron_spacing=40,
            inhibitory_init_weight_range=(0.1, 0.3),
            excitatory_init_weight_range=(0.2, 0.5),
        ))

    freqs = [10, 15, 30, 50]  # in Hz
    duration = 4080 * 2
    np.save("freqs.npy", freqs)
    input_dim = topology.number_of_nodes() // 3

    with NxPCritical(
            topology,
            input_dim=input_dim,
            tau_v=60 * ms,
            tau_i=2 * ms,
            tau_v_pair=1 * ms,
            tau_i_pair=1 * ms,
            debug=False,
    ) as model:
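        # Build one Poisson spike train per frequency: rate of freq Hz (assuming 1 ms
        # simulation steps), clipped to binary spikes of shape (input_dim, duration).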
        in_spikes = [
            np.random.poisson(lam=freq / 1000.0,
                              size=(input_dim, duration)).clip(0, 1)
            for i, freq in enumerate(freqs)
        ]
        in_spikes = np.asarray(in_spikes)

        bins = model(spike_train=in_spikes)

        weights = model.read_weights()

    np.save("weights.npy", weights)
Example #3
def get_topology():
    if os.path.exists(TOPOLOGY_CACHE):
        adj_matrix = np.load(TOPOLOGY_CACHE)
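        # NOTE: netx.from_numpy_matrix was removed in NetworkX 3.0; on newer versions
        # netx.from_numpy_array(adj_matrix, create_using=netx.DiGraph) is the equivalent call.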
        return netx.from_numpy_matrix(adj_matrix, create_using=netx.DiGraph())

    from modules.topologies import SmallWorldTopology

    return SmallWorldTopology(
        SmallWorldTopology.Configuration(
            minicolumn_shape=(4, 4, 4),
            macrocolumn_shape=(2, 2, 2),
            minicolumn_spacing=1460,
            p_max=0.11,
            spectral_radius_norm=False,
            intracolumnar_sparseness=635,
            neuron_spacing=40,
            inhibitory_init_weight_range=(0.1, 0.3),
            excitatory_init_weight_range=(0.2, 0.5),
        ))
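
The cached branch above only ever reads TOPOLOGY_CACHE. A minimal companion sketch for populating that cache (an assumption for illustration; the original project may persist the topology elsewhere) could look like:

import os

import networkx as netx
import numpy as np

topology = get_topology()
if not os.path.exists(TOPOLOGY_CACHE):
    # Persist the weighted adjacency matrix so later runs can skip the expensive build.
    with open(TOPOLOGY_CACHE, "wb") as f:
        np.save(f, netx.to_numpy_array(topology))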
Example #4
def run_ntidigits(
    nb_iters: int,
    plasticity: bool = True,
    spectral_radius_norm: bool = False,
    weight_decay: float = 0.0,
    readout_layer_type: ReadoutType = ReadoutType.TIME_BINNING,
    debug: bool = False,
):
    _logger.info("Starting N-TIDIGITS classification experiment")

    reporter.log_tags(["N-TIDIGITS", "-".join(readout_layer_type.value.split(" "))])
    reporter.log_parameters(
        {
            "dt": dt,
            "plasticity": plasticity,
            "nb_iters": nb_iters,
            "spectral_radius_norm": spectral_radius_norm,
        }
    )

    topology = SmallWorldTopology(
        reporter.log_parameters(
            SmallWorldTopology.Configuration(
                minicolumn_shape=(4, 4, 4),
                macrocolumn_shape=(2, 2, 2),
                minicolumn_spacing=1460,
                p_max=0.11,
                spectral_radius_norm=spectral_radius_norm,
                intracolumnar_sparseness=635,
                neuron_spacing=40,
                inhibitory_init_weight_range=(0.1, 0.3),
                excitatory_init_weight_range=(0.2, 0.5),
            )
        )
    )

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _logger.info("Using device type %s", str(device))

    batch_size = 32
    reporter.log_parameter("batch_size", batch_size)
    data_loader_parameters = {
        "batch_size": batch_size,
        "num_workers": 4,
        "pin_memory": True,
        "timeout": 120,
        "collate_fn": collate_fn,
    }
    train_set = NTidigits(
        DATASET_PATH,
        is_train=True,
        transforms=rec_array_to_spike_train,
        only_single_digits=True,
    )
    val_set = NTidigits(
        DATASET_PATH,
        is_train=False,
        transforms=rec_array_to_spike_train,
        only_single_digits=True,
    )

    pcritical_configs: dict = reporter.log_parameters(
        {
            "alpha": 1e-2,
            "stochastic_alpha": False,
            "beta": 1e-3,
            "tau_v": 30 * ms,
            "tau_i": 5 * ms,
            "tau_v_pair": 5 * ms,
            "tau_i_pair": 0 * ms,
            "v_th": 1,
        }
    )

    n_neurons = topology.number_of_nodes()
    model = torch.nn.Sequential(
        OneToNLayer(N=2, dim_input=n_features, dim_output=n_neurons),
        PCritical(1, topology, dt=dt, **pcritical_configs),
    ).to(device)
    model[1].plasticity = plasticity

    if readout_layer_type == ReadoutType.TIME_BINNING:
        bin_size = 60  # ms
        reporter.log_parameter("Time bin size", bin_size * ms)
        convert_layer = TimeBinningLayer(
            bin_size, max_duration=2464, nb_of_neurons=n_neurons
        ).to(device)
    elif readout_layer_type == ReadoutType.EXPONENTIAL_FILTER:
        exp_tau = 60
        reporter.log_parameter("Exp filter tau", exp_tau * dt)
        convert_layer = ExponentialFilterLayer(tau=exp_tau, nb_of_neurons=n_neurons).to(
            device
        )
    elif readout_layer_type == ReadoutType.REVERSE_EXPONENTIAL_FILTER:
        reverse_exp_tau = 60
        reporter.log_parameter("Reverse exp filter tau", reverse_exp_tau * dt)
        convert_layer = ReverseExponentialFilterLayer(
            tau=reverse_exp_tau, nb_of_neurons=n_neurons
        ).to(device)

    linear_classifier = LinearWithBN(convert_layer.number_of_features(), n_classes).to(
        device
    )
    loss_fn = torch.nn.CrossEntropyLoss()
    lr = 0.001
    reporter.log_parameters(
        {"optimizer": "Adam", "weight_decay": weight_decay, "lr": lr}
    )
    optimizer = torch.optim.Adam(
        linear_classifier.parameters(), lr=lr, weight_decay=weight_decay
    )

    train_accuracy_for_iters = []
    val_accuracy_for_iters = []

    if debug:
        nb_of_debug_steps = 5000
        spike_recorder = SpikeRecorder(
            "pcritical-tidigits-spike-recording.h5",
            model[0].W_t.t(),
            topology,
            nb_of_debug_steps,
        )
        weight_recorder = StateRecorder(
            "pcritical-tidigits-weight-recording.h5",
            nb_of_debug_steps,
            ("reservoir_weights", (n_neurons, n_neurons)),
        )
        debug_progress_bar = tqdm(total=nb_of_debug_steps, disable=not debug)

    def input_and_reservoir_layers(x):
        """
        Compute post-reservoir state-space for input batch x
        NOTE: If x is a batch, plasticity will be merged during iterations
        For more accurate readings, process one sample at a time

        :param x: Input sample
        :return: x => input layer => reservoir layer => convert layer
        """
        x = x.to(device)
        current_batch_size = x.shape[0]  # 1 if unbatchifier active

        if not debug:
            model[1].batch_size = current_batch_size  # Will also reset neuron states (mem pot, cur)
        duration = x.shape[-1]
        convert_layer.reset()

        for t in range(duration):
            out_spikes = model(x[:, :, t])
            lsm_output = convert_layer(spikes=out_spikes, time=t, duration=duration)

            if debug:
                exit_early = not spike_recorder(x[:, :, t], out_spikes)
                exit_early &= not weight_recorder(model[1].W_rec)
                if exit_early:
                    exit(0)
                debug_progress_bar.update(1)

        return lsm_output

    def train_batch(x, y):
        optimizer.zero_grad()
        reservoir_out = unbatchifier(x, input_and_reservoir_layers)
        net_out = linear_classifier(reservoir_out)
        preds = torch.argmax(net_out.detach(), dim=1).cpu()
        loss = loss_fn(net_out, y.to(device))
        loss.backward()
        optimizer.step()
        return loss.cpu().detach().item(), torch.sum(preds == y).item()

    def validate_batch(x, y):
        reservoir_out = unbatchifier(x, input_and_reservoir_layers)
        net_out = linear_classifier(reservoir_out)
        preds = torch.argmax(net_out, dim=1).cpu()
        return torch.sum(preds == y).item()

    for iter_nb in range(nb_iters):
        reporter.log_metric("iteration", iter_nb)

        # -------- TRAINING PHASE --------
        train_generator = torch_data.DataLoader(
            train_set, shuffle=True, **data_loader_parameters
        )
        progress_bar = tqdm(
            train_generator, desc=f"train iter {iter_nb} / {nb_iters}", disable=debug
        )
        total_accurate = 0
        total_elems = 0
        for x, y in progress_bar:
            loss_value, train_acc = train_batch(x, y)
            total_elems += len(y)
            total_accurate += train_acc
            progress_bar.set_postfix(
                loss=loss_value, cur_acc=total_accurate / total_elems
            )
            reporter.log_metric("loss", loss_value)

        _logger.info(
            "Final train accuracy at iter %i: %.4f",
            iter_nb,
            total_accurate / total_elems,
        )
        reporter.log_metric("train_accuracy", total_accurate / total_elems)
        train_accuracy_for_iters.append(total_accurate / total_elems)

        # -------- VALIDATION PHASE --------
        val_gen = torch_data.DataLoader(
            val_set, shuffle=False, **data_loader_parameters
        )
        total_accurate = 0
        total_elems = 0
        progress_bar = tqdm(
            val_gen, desc=f"val iter {iter_nb} / {nb_iters}", disable=debug
        )
        with torch.no_grad():
            for x, y in progress_bar:
                nb_accurate = validate_batch(x, y)
                total_accurate += nb_accurate
                total_elems += len(y)
                progress_bar.set_postfix(cur_acc=total_accurate / total_elems)

        if isinstance(linear_classifier[0], torch.nn.BatchNorm1d):
            # Reset the batch-norm running stats so statistics accumulated during
            # validation are not carried over into the next training phase
            linear_classifier[0].reset_running_stats()

        _logger.info(
            "Final accuracy at iter %i: %.4f", iter_nb, total_accurate / total_elems
        )
        reporter.log_metric("accuracy", total_accurate / total_elems)
        val_accuracy_for_iters.append(total_accurate / total_elems)

    return train_accuracy_for_iters, val_accuracy_for_iters
Example #5
def run_experiment(
    freq: int,
    display: bool = False,
    debug: bool = False,
):
    plt.ioff()
    sns.set()
    topology = SmallWorldTopology(
        SmallWorldTopology.Configuration(
            minicolumn_shape=(4, 4, 4),
            macrocolumn_shape=(1, 1, 1),
            # minicolumn_spacing=1460,
            p_max=0.17,
            intracolumnar_sparseness=635.0,
            neuron_spacing=40.0,
            inhibitory_init_weight_range=(0.1, 0.3),
            excitatory_init_weight_range=(0.2, 0.5),
        ))

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    pcritical_configs: dict = {
        "alpha": 1e-2,
        "stochastic_alpha": False,
        "beta": 1e-3,
        "tau_v": 30 * ms,
        "tau_i": 1 * ms,
        "tau_v_pair": 5 * ms,
        "tau_i_pair": 0 * ms,
        "v_th": 1,
    }

    n_neurons = topology.number_of_nodes()
    n_input = 8
    model = torch.nn.Sequential(
        OneToNLayer(N=1, dim_input=n_input, dim_output=n_neurons),
        PCritical(1, topology, dt=dt, **pcritical_configs),
    ).to(device)

    freq = freq / 1000
    duration = 5000
    input_spike_train = torch.from_numpy(
        np.random.poisson(lam=freq, size=(1, n_input, duration))).float()

    # Set up figure
    fig = plt.figure(constrained_layout=True, figsize=(16, 10))
    gs = fig.add_gridspec(3, 2)

    reservoir_spikes_ax = fig.add_subplot(gs[1, :])
    reservoir_spikes_ax.set_title("P-CRITICAL enabled reservoir spike train")
    reservoir_spikes_ax.set_xlim(0, duration)
    reservoir_spikes_ax.set_ylim(-0.5, n_neurons + 0.5)

    in_spikes_ax = fig.add_subplot(gs[0, :], sharex=reservoir_spikes_ax)
    in_spikes_ax.set_title(
        f"Input spike train of {n_input} neurons poisson-sampled at {freq * 1000} Hz"
    )
    in_spikes_ax.set_ylim(-0.5, n_input + 0.5)
    for label in in_spikes_ax.get_xticklabels():
        label.set_visible(False)

    weight_hist_ax = fig.add_subplot(gs[2, 0])
    weight_hist_ax.set_ylim([0.0, 0.3])
    weight_hist_ax.set_title("Excitatory Weights Probability Density Function")

    relative_weights_ax = fig.add_subplot(gs[2, 1])
    relative_weights_ax.set_ylim([0.0, 300.0])
    relative_weights_ax.set_xlim([-1, 1])
    relative_weights_ax.set_title(
        "Relative Excitatory Weights Adaptation (current - initial) / initial")

    # Set up video recorder
    out = cv2.VideoWriter(
        f"{freq*1000:.0f}hz_spike_analysis.avi",
        cv2.VideoWriter_fourcc(*"MP42"),
        1000.0,
        (800 * 2, 1000),
    )

    # Iterate over time
    excitatory_mask = model[1].W_rec.numpy() > 0
    initial_excitatory_weights = model[1].W_rec[excitatory_mask].numpy()

    blit = True
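    # Blitting: cache the static backgrounds of the two histogram axes once, then redraw
    # only the artists that change each frame instead of doing a full canvas draw per step.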

    if blit:
        fig.canvas.draw()
        backgrounds = [
            fig.canvas.copy_from_bbox(weight_hist_ax.bbox),
            fig.canvas.copy_from_bbox(relative_weights_ax.bbox),
        ]

    for t in tqdm(range(duration)):
        input_spikes = input_spike_train[:, :, t]
        reservoir_spikes = model(input_spikes).numpy()
        input_spikes = input_spikes.numpy()
        adj_matrix = model[1].W_rec.numpy()
        excitatory_weights = adj_matrix[excitatory_mask]
        # pair_spikes = model[1].S_paired.numpy()

        # Input spikes plot
        neurons_that_spikes = np.flatnonzero(input_spikes[0])
        if len(neurons_that_spikes) > 0:
            raster = [[] if i not in neurons_that_spikes else [t]
                      for i in range(n_input)]
            input_spikes_ax_data = in_spikes_ax.eventplot(raster)
        else:
            input_spikes_ax_data = []

        # Reservoir spikes plot
        neurons_that_spikes = np.flatnonzero(reservoir_spikes[0])
        if len(neurons_that_spikes) > 0:
            raster = [[] if i not in neurons_that_spikes else [t]
                      for i in range(n_neurons)]
            reservoir_spikes_ax_data = reservoir_spikes_ax.eventplot(raster)
        else:
            reservoir_spikes_ax_data = []

        # Weight hist plot
        _, _, weight_hist_ax_data = weight_hist_ax.hist(
            excitatory_weights,
            bins=np.arange(0.0, 1.0, 0.01),
            weights=np.ones_like(excitatory_weights) / len(excitatory_weights),
            color=sns.color_palette()[0],
        )

        # Relative hist plot
        _, _, relative_weights_ax_data = relative_weights_ax.hist(
            np.divide(
                excitatory_weights - initial_excitatory_weights,
                initial_excitatory_weights,
            ),
            bins=np.arange(-1.0, 1.0, 0.0125),
            color=sns.color_palette()[1],
        )

        # Compute frame to video
        if blit:
            # reservoir_spikes_ax.draw_artist(reservoir_spikes_ax.patch)
            for i in reservoir_spikes_ax_data:
                reservoir_spikes_ax.draw_artist(i)
            # in_spikes_ax.draw_artist(in_spikes_ax.patch)
            for i in input_spikes_ax_data:
                in_spikes_ax.draw_artist(i)

            for background in backgrounds:
                fig.canvas.restore_region(background)

            for i in weight_hist_ax_data:
                weight_hist_ax.draw_artist(i)
            for i in relative_weights_ax_data:
                relative_weights_ax.draw_artist(i)
            fig.canvas.update()
        else:
            fig.canvas.draw()

        fig.canvas.flush_events()

        img = np.frombuffer(fig.canvas.tostring_argb(), dtype=np.uint8)
        img = img.reshape(fig.canvas.get_width_height()[::-1] + (4, ))
        img = img[:, :, ::-1]  # argb to bgra
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2BGR)  # Removing alpha channel

        if display:
            cv2.imshow("Analysis of the weights", img)
            cv2.waitKey(1)

        out.write(img)

        # Clear data from plot
        for d in weight_hist_ax_data:
            d.remove()
        weight_hist_ax_data.clear()
        for d in relative_weights_ax_data:
            d.remove()
        relative_weights_ax_data.clear()

    for _ in range(15):  # Write the last frame 15 more times so the video ends on a held frame
        out.write(img)

    out.release()
Example #6
def run_roshambo():
    seed = 0x1B
    random.seed(seed)
    np.random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False

    neptune.set_property("seed", seed)
    neptune.append_tag("ROSHAMBO")

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    _logger.info("Using device type %s", str(device))

    reduction_factor = 5  # Reduce dimension axis by this factor
    neptune.set_property("reduction_factor", reduction_factor)

    width = 240 // reduction_factor
    height = 180 // reduction_factor
    n_features = width * height * 2
    batch_size = 5
    neptune.set_property("batch_size", batch_size)

    dt = 1 * ms
    neptune.set_property("dt", dt)

    bin_size = 50 * ms
    neptune.set_property("bin_size", bin_size)

    bin_steps = rescale(bin_size, dt, int)
    duration_per_sample = 500 * ms
    neptune.set_property("duration_per_sample", duration_per_sample)

    number_of_steps = rescale(duration_per_sample, dt, int)

    topology = SmallWorldTopology(
        SmallWorldTopology.Configuration(
            minicolumn_shape=(7, 7, 7),
            macrocolumn_shape=(3, 3, 3),
            minicolumn_spacing=300,
            p_max=0.025,
            sparse_init=True,
        )
    )
    n_neurons = topology.number_of_nodes()
    nb_of_bins = 1 + number_of_steps // bin_steps
    linear_readout = LinearWithBN(n_neurons * nb_of_bins, 3).to(device)
    loss_fn = torch.nn.CrossEntropyLoss()

    optimizer = torch.optim.Adam(linear_readout.parameters(), lr=0.001)
    neptune.set_property("adam.lr", 0.001)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=2, gamma=0.1)
    neptune.set_property("steplr.gamma", 0.1)
    neptune.set_property("steplr.step_size", 2)

    p_critical_configs = {
        "alpha": 0.0025,
        "beta": 0.00025,
        "tau_v": 50 * ms,
        "tau_i": 5 * ms,
        "v_th": 1.0,
    }

    for k, v in p_critical_configs.items():
        neptune.set_property(k, v)

    model = PCritical(
        n_features, batch_size, topology, dt=dt, **p_critical_configs,
    ).to(device)

    all_transforms = Compose(
        [
            ScaleDown(240, 180, factor=reduction_factor),
            ToDense(width, height, duration_per_sample, dt=dt),
            Flatten(),
        ]
    )

    label_dict = {
        "scissors": 0,
        "paper": 1,
        "rock": 2,
    }

    data = INIRoshambo(
        os.getenv("ROSHAMBO_DATASET_LOCATION_500ms_subsamples"),
        transforms=all_transforms,
    )
    train_data, val_data = split_per_user(data, train_ratio=0.85)
    _logger.info(
        "Keeping %i samples for training and %i for validation",
        len(train_data),
        len(val_data),
    )

    def labels_to_tensor(labels):
        return torch.tensor([label_dict[l] for l in labels])

    def run_batch(X, y):
        current_batch_size = len(y)
        model.batch_size = current_batch_size
        bins = torch.zeros(current_batch_size, n_neurons, nb_of_bins, device=device)
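        # Accumulate the reservoir output spikes into coarse time bins (bin_steps simulation
        # steps per bin); the flattened bins are the features fed to the linear readout.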
        for t in range(number_of_steps):
            out_spikes = model.forward(X[:, :, t])
            bins[:, :, t // bin_steps] += out_spikes
        return bins

    for iter_nb in range(10):
        train_generator = torch_data.DataLoader(
            train_data,
            batch_size=batch_size,
            shuffle=True,
            num_workers=2,
            pin_memory=True,
            timeout=120,
        )
        for i, (X, labels) in enumerate(tqdm(train_generator)):
            if i >= 20:
                break

            neptune.log_metric("iteration", i)
            X, y = X.to(device), labels_to_tensor(labels).to(device)

            # fig, axs = plt.subplots()
            # display_spike_train(axs, X[0])
            # plt.show()
            # print(X.shape)
            # exit(0)

            bins = run_batch(X, y)

            # fig, axs = plt.subplots()
            # activity = bins[0].sum(dim=0)
            # axs.plot(np.arange(nb_of_bins), activity.cpu().numpy())
            # plt.show()

            optimizer.zero_grad()
            out = linear_readout(bins.view(len(y), -1))
            loss = loss_fn(out, y)
            loss.backward()
            optimizer.step()
            loss_val = loss.cpu().detach().item()
            _logger.info("Loss: %.3f", loss_val)
            neptune.log_metric("loss", loss_val)

        total_accurate = 0
        total_elems = 0
        val_generator = torch_data.DataLoader(
            val_data,
            batch_size=batch_size,
            shuffle=False,
            num_workers=2,
            pin_memory=True,
            timeout=120,
        )
        for i, (X, labels) in enumerate(tqdm(val_generator)):
            if i >= 10:
                break
            X, y = X.to(device), labels_to_tensor(labels).to(device)
            bins = run_batch(X, y)
            out = linear_readout(bins.view(len(y), -1))
            preds = torch.argmax(out, dim=1)
            total_accurate += torch.sum(preds == y).cpu().float().item()
            total_elems += len(y)
            _logger.info("Current accuracy: %.4f", total_accurate / total_elems)
            neptune.log_metric("current_accuracy", total_accurate / total_elems)

        scheduler.step()

        _logger.info(
            "Final accuracy at iter %i: %.4f", iter_nb, total_accurate / total_elems
        )
        neptune.log_metric("final_accuracy", total_accurate / total_elems)
Example #7
def run_power_ntidigits():
    topology = SmallWorldTopology(
        SmallWorldTopology.Configuration(
            minicolumn_shape=(4, 4, 4),
            macrocolumn_shape=(2, 2, 2),
            minicolumn_spacing=1460,
            p_max=0.11,
            spectral_radius_norm=False,
            intracolumnar_sparseness=635,
            neuron_spacing=40,
            inhibitory_init_weight_range=(0.1, 0.3),
            excitatory_init_weight_range=(0.2, 0.5),
        ))

    pcritical_configs = {
        "alpha": 1e-2,
        "stochastic_alpha": True,
        "beta": 1e-5,
        "tau_v": 30 * ms,
        "tau_i": 5 * ms,
        "tau_v_pair": 5 * ms,
        "tau_i_pair": 0 * ms,
        "v_th": 1,
    }

    model = torch.nn.Sequential(
        OneToNLayer(N=2,
                    dim_input=n_features,
                    dim_output=topology.number_of_nodes()),
        PCritical(1, topology, dt=dt, **pcritical_configs),
    )

    train_set = NTidigits(
        DATASET_PATH,
        train=True,
        transforms=rec_array_to_spike_train,
        only_single_digits=True,
    )
    data_loader_parameters = {
        "batch_size":
        len(train_set),  # Load all in memory before starting socwatch
        "num_workers": 1,
        "pin_memory": True,
        "timeout": 120,
        "collate_fn": collate_fn,
    }
    generator = torch_data.DataLoader(train_set,
                                      shuffle=False,
                                      **data_loader_parameters)
    data, _ = next(iter(generator))
    data = data.view(data.shape[0], 1, *data.shape[1:])

    duration = data.shape[-1]

    # First get idle power
    process = subprocess.Popen(
        [
            os.path.join(SOCWATCH_PATH, "socwatch"),
            "-m",
            "-t",
            "60",
            "-f",
            "cpu-cstate",
            "-f",
            "cpu-pstate",
            "-f",
            "pkg-pwr",
            "-o",
            "/opt/socwatch/results/ntidigits_idle",
        ],
        stdout=sys.stdout,
        stdin=sys.stdin,
        stderr=sys.stderr,
        shell=False,
    )
    process.wait()

    # Start SoC watch for dynamic power
    process = subprocess.Popen(
        [
            os.path.join(SOCWATCH_PATH, "socwatch"),
            "-m",
            "-f",
            "cpu-cstate",
            "-f",
            "cpu-pstate",
            "-f",
            "pkg-pwr",
            "-o",
            "/opt/socwatch/results/ntidigits_running",
        ],
        stdout=sys.stdout,
        stdin=sys.stdin,
        stderr=sys.stderr,
        shell=False,
    )
    for spike_train in data:
        for t in range(duration):
            model(spike_train[:, :, t])

    os.kill(process.pid, signal.SIGINT)  # Stop SoC watch
    process.wait()
Example #8
def run_power_ntidigits():
    topology = SmallWorldTopology(
        SmallWorldTopology.Configuration(
            minicolumn_shape=(4, 4, 4),
            macrocolumn_shape=(2, 2, 2),
            minicolumn_spacing=1460,
            p_max=0.11,
            spectral_radius_norm=False,
            intracolumnar_sparseness=635,
            neuron_spacing=40,
            inhibitory_init_weight_range=(0.1, 0.3),
            excitatory_init_weight_range=(0.2, 0.5),
        ))

    pcritical_configs = {
        "alpha": 1e-2,
        "stochastic_alpha": True,
        "beta": 1e-5,
        "tau_v": 30 * ms,
        "tau_i": 5 * ms,
        "tau_v_pair": 5 * ms,
        "tau_i_pair": 0 * ms,
        "v_th": 1,
    }

    model = torch.nn.Sequential(
        OneToNLayer(N=2,
                    dim_input=n_features,
                    dim_output=topology.number_of_nodes()),
        PCritical(1, topology, dt=dt, **pcritical_configs),
    )

    train_set = NTidigits(
        DATASET_PATH,
        train=True,
        transforms=rec_array_to_spike_train,
        only_single_digits=True,
    )
    data_loader_parameters = {
        "batch_size":
        len(train_set),  # Load all in memory before starting socwatch
        "num_workers": 1,
        "pin_memory": True,
        "timeout": 120,
        "collate_fn": collate_fn,
    }
    generator = torch_data.DataLoader(train_set,
                                      shuffle=False,
                                      **data_loader_parameters)
    data, _ = next(iter(generator))
    data = data.view(data.shape[0], 1, *data.shape[1:])

    duration = data.shape[-1]

    start = time.time()
    for spike_train in data:
        for t in range(duration):
            model(spike_train[:, :, t])
    end = time.time()
    output = f"{start=} s., {end=} s., total={end-start} s., nb_of_samples={len(data)}, avg={(end-start)/len(data)} s.\n"
    print(output)
    with open("ntidigits-execution-time.txt", "a") as f:
        f.write(output)
Example #9
File: Agent.py  Project: nimamox/DQN_SNN
    def _build_net_PG(self):
        if self.agent_id == 0:
            print("Policy Gradient")
            print('Creating Regressors:', REGRESSOR)

        if USE_LSM:
            topology = SmallWorldTopology(
                SmallWorldTopology.Configuration(
                    minicolumn_shape=minicol,
                    macrocolumn_shape=macrocol,
                    p_max=PMAX,
                    # minicolumn_spacing=1460,
                    # intracolumnar_sparseness=635.0,
                    # neuron_spacing=40.0,
                    spectral_radius_norm=SpecRAD,
                    inhibitory_init_weight_range=(0.1, 0.3),
                    excitatory_init_weight_range=(0.2, 0.5),
                ))
            lsm_N = topology.number_of_nodes()
            N_inputs = 5
            if CONV_TYPE == 3:
                N_inputs = 6
            self.reservoir = PCritical(1, topology, alpha=ALPHA).to(device)
            #self.lsm = torch.nn.Sequential(OneToNLayer(1, N_inputs, lsm_N), self.reservoir).to(device)
            self.lsm = torch.nn.Sequential(
                InputLayer(1, N_inputs, lsm_N), self.reservoir,
                ReadoutLayer(lsm_N, readout_inp, readout_out)).to(device)
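            # LSM pipeline: InputLayer projects the N_inputs observation channels onto the
            # reservoir, PCritical runs the P-CRITICAL reservoir dynamics, and ReadoutLayer
            # reduces the reservoir activity to readout_out features for the regressor below.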

        if REGRESSOR == 'LinReg':
            self.policy_net = LinReg(self.n_features, self.n_actions)
            self.optimizer = optim.Adam(self.policy_net.parameters(),
                                        lr=self.lr)

        elif REGRESSOR == 'MLP':
            self.policy_net = MLP(self.n_features, self.n_actions, hidden)
            self.optimizer = optim.Adam(self.policy_net.parameters(),
                                        lr=self.lr)
            #if SCENARIO == 'SSSC':
            #self.power_nets = []
            #self.power_nets_opts = []

        elif REGRESSOR == 'SurrGrad':
            self.snn_params = {}
            if USE_LSM:
                self.snn_params['dim_in'] = readout_out
            else:
                self.snn_params['dim_in'] = 5
                if CONV_TYPE == 3:
                    self.snn_params['dim_in'] = 6

            self.snn_params['T_sim'] = 10
            self.policy_net, self.surr_alpha, self.surr_beta = init_model(
                self.snn_params['dim_in'], hidden, self.n_actions, .05)
            self.optimizer = optim.Adam(self.policy_net,
                                        lr=self.lr,
                                        betas=(0.9,
                                               0.999))  #TODO: learning rate
            self.all_obs_spikes = []

        elif REGRESSOR.startswith('SNN'):
            self.snn_params = {
                'seed': 1337,
                'Rd': 5.0e3,  # device resistance, manually set for a smaller leak current
                'Cm': 3.0e-6,  # the real device capacitance is larger than this value
                'Rs': 1.0,  # series resistance, manually set for a larger injected current
                'Vth': 0.8,  # the real device threshold voltage
                'V_reset': 0.0,
                'dt': 1.0e-6,  # time step of the first-order neuron differential equation
                'T_sim': 10,  # controls the total number of spikes collected
                'dim_in': 5,
                'dim_h': hidden,
                'dim_out': self.n_actions,
                'epoch': 10,
                'W_std1': 1.0,
                'W_std2': 1.0,
            }

            if USE_LSM:
                self.snn_params['dim_in'] = readout_out
            else:
                self.snn_params['dim_in'] = 5
                if CONV_TYPE == 3:
                    self.snn_params['dim_in'] = 6

            self.policy_net = Three_Layer_SNN(self.snn_params)
            self.optimizer = optim.Adam(self.policy_net.parameters(),
                                        lr=self.lr)
            self.all_obs_spikes = []
        else:
            raise Exception('Invalid regressor')
Example #10
def main(seed=0x1B, pool_size=6, num_threads_per_cpu=2):
    torch.manual_seed(seed)
    np.random.seed(seed)
    device = torch.device(
        "cuda") if torch.cuda.is_available() else torch.device("cpu")

    topology = SmallWorldTopology(
        SmallWorldTopology.Configuration(
            minicolumn_shape=(6, 6, 5),
            macrocolumn_shape=(4, 4, 3),
            minicolumn_spacing=1460,
            p_max=0.11,
            intracolumnar_sparseness=635,
            neuron_spacing=40,
            inhibitory_init_weight_range=(0.1, 0.3),
            excitatory_init_weight_range=(0.2, 0.5),
        ))

    dt = 1 * ms
    transforms = Compose([
        ToDense(dt=dt),
        Flatten(),
    ])

    N_features = 34 * 34 * 2

    params = {
        "batch_size": 4,
        "collate_fn": collate_fn,
        "shuffle": True,
        "num_workers": 2,
    }

    pcritical_configs = {
        "alpha": 1e-2,
        "stochastic_alpha": False,
        "beta": 1e-3,
        "tau_v": 30 * ms,
        "tau_i": 5 * ms,
        "tau_v_pair": 5 * ms,
        "tau_i_pair": 0 * ms,
        "v_th": 1,
    }

    reservoir = PCritical(params["batch_size"], topology,
                          **pcritical_configs).float()
    model = torch.nn.Sequential(
        OneToNLayer(1, N_features, topology.number_of_nodes()), reservoir)
    model = model.to(device)
    model[0].W_t = model[0].W_t.to_dense()

    torch.set_num_threads(num_threads_per_cpu)
    with Pool(pool_size) as p:
        training_set = NMnist(NMNIST_PATH,
                              is_train=True,
                              transforms=transforms)
        training_generator = data.DataLoader(training_set, **params)

        train_samples = map(lambda args: (*args, model, "TRAIN", device),
                            enumerate(training_generator))

        pbar = tqdm(total=len(training_generator))
        for _ in p.imap(process_batch, train_samples):
            pbar.update(1)
        pbar.close()

        test_set = NMnist(NMNIST_PATH, is_train=False, transforms=transforms)
        test_generator = data.DataLoader(test_set, **params)

        test_samples = map(lambda args: (*args, model, "TEST", device),
                           enumerate(test_generator))
        pbar = tqdm(total=len(test_generator))
        for _ in p.imap(process_batch, test_samples):
            pbar.update(1)
        pbar.close()
Example #11
import os
import random

import matplotlib.pyplot as plt
import numpy as np
import torch

from modules.pcritical import PCritical
from modules.utils import OneToNLayer
from modules.topologies import SmallWorldTopology


if __name__ == "__main__":
    seed = 0x1B
    random.seed(seed)
    np.random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    torch.manual_seed(seed)
    device = torch.device("cpu")

    topology = SmallWorldTopology(
        SmallWorldTopology.Configuration(
            minicolumn_shape=[4, 4, 4], macrocolumn_shape=[2, 2, 2], p_max=0.02,
        )
    )
    for u, v in topology.edges:
        topology.edges[u, v]["weight"] = np.clip(
            np.random.normal(loc=0.08, scale=0.2), -0.2, 0.6
        )

    N = topology.number_of_nodes()
    N_inputs = N // 3

    fig = plt.figure(figsize=(18, 10))
    ax = fig.add_subplot(1, 1, 1)
    duration = 1000
    freqs = [10, 15, 30, 50]  # in Hz
    mean_weights_output = []