Example #1
def main(gpu, batch, epochs, x_max, y_max, t_max, dense, theta, forced_dep,
         w_init, bias, step, leak, neurons, winners, decay, train_path,
         test_path, model_path, **kwargs):
    if torch.cuda.is_available():
        dev = f'cuda:{gpu}'
    else:
        dev = 'cpu'
    device = torch.device(dev)

    train_data_loader = DataLoader(NMnistSampled(train_path,
                                                 x_max,
                                                 y_max,
                                                 t_max,
                                                 device=device),
                                   shuffle=True,
                                   batch_size=batch)
    test_data_loader = DataLoader(NMnistSampled(test_path,
                                                x_max,
                                                y_max,
                                                t_max,
                                                device=device),
                                  batch_size=batch)

    model = FullColumn(x_max * y_max,
                       neurons,
                       input_channel=2,
                       output_channel=10,
                       step=step,
                       leak=leak,
                       bias=bias,
                       winners=winners,
                       fodep=forced_dep,
                       w_init=w_init,
                       theta=theta,
                       dense=dense).to(device)

    for epoch in range(epochs):
        model.train(mode=True)
        print(f"epoch: {epoch}")
        train_data_iterator = tqdm(train_data_loader)
        with Interrupter():
            for data, label in train_data_iterator:
                input_spikes = data.reshape(-1, 2, x_max * y_max, t_max)
                output_spikes = model.forward(input_spikes,
                                              label.to(device),
                                              bias_decay=decay)
                # output_spikes: batch, channel, neuron, time
                accurate = (output_spikes.sum((-3, -2, -1)) > 0).logical_and(
                    output_spikes.sum((
                        -2, -1)).argmax(-1) == label.to(device)).sum()
                train_data_iterator.set_description(
                    f'{model.describe()}; {output_spikes.sum().int()}, {accurate}'
                )

        model.train(mode=False)
        torch.save(model.state_dict(), f'{model_path}_epoch{epoch}')

        tracer = SpikesTracer(10)
        with Interrupter():
            test_data_iterator = tqdm(test_data_loader)
            for data, label in test_data_iterator:
                input_spikes = data.reshape(-1, 2, x_max * y_max, t_max)
                output_spikes = model.forward(input_spikes)

                y_preds = tracer.get_predict(output_spikes)
                tracer.add_sample(label.numpy(), y_preds)

                test_data_iterator.set_description('; '.join(
                    f'{k}: {v}' for k, v in tracer.describe_batch_spikes(
                        output_spikes).items()))
        tracer.describe()

    return 0
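
The accuracy check in the training loop counts a sample as correct only when it emitted at least one spike anywhere and the output channel with the most spikes matches the label. A minimal, self-contained sketch of that logic on a dummy tensor (the shapes and values here are illustrative, not from the repository):

import torch

# dummy output: batch=4, channel=10 (classes), neuron=3, time=5
output_spikes = torch.zeros(4, 10, 3, 5)
label = torch.tensor([2, 7, 0, 5])
output_spikes[0, 2, 0, 1] = 1  # sample 0 spikes in its true channel
output_spikes[1, 3, 1, 2] = 1  # sample 1 spikes in a wrong channel
# samples 2 and 3 emit no spikes at all

has_spikes = output_spikes.sum((-3, -2, -1)) > 0    # any spike at all?
predicted = output_spikes.sum((-2, -1)).argmax(-1)  # busiest channel
accurate = has_spikes.logical_and(predicted == label).sum()
print(accurate.item())  # 1: sample 2 would match by argmax alone, but it never spiked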
Example #2
def main(gpu, batch, epochs, supervised, x_max, y_max, t_max, neurons, winners,
         step, leak, forced_dep, dense, w_init, bias, capture, backoff, search,
         decay, train_path, test_path, model_path, **kwargs):
    if torch.cuda.is_available():
        dev = f'cuda:{gpu}'
    else:
        dev = 'cpu'
    device = torch.device(dev)

    print(f'Device: {device}, Batch: {batch}, Supervised: {supervised}')
    print(f'Forced Dep: {forced_dep}, Dense: {dense}')
    print(f'Capture: {capture}, Backoff: {backoff}, Search: {search}')

    train_data_loader = DataLoader(NMnistSampled(train_path,
                                                 x_max,
                                                 y_max,
                                                 t_max,
                                                 device=device),
                                   shuffle=True,
                                   batch_size=batch)
    test_data_loader = DataLoader(NMnistSampled(test_path,
                                                x_max,
                                                y_max,
                                                t_max,
                                                device=device),
                                  batch_size=batch)

    model = FullDualColumn(x_max * y_max,
                           neurons,
                           input_channel=2,
                           output_channel=10,
                           winners=winners,
                           step=step,
                           leak=leak,
                           bias=bias,
                           dense=dense,
                           fodep=forced_dep,
                           w_init=w_init).to(device)

    def descriptor():
        max_print = 10
        s = f"{','.join(f'{x*100:.0f}' for x in model.weight.mean(axis=1)[:max_print])}; "
        s += f"{','.join(f'{x*100:.0f}' for x in model.bias[:max_print])}; "

        return s

    for epoch in range(epochs):
        model.train(mode=True)
        print(f"epoch: {epoch}")
        train_data_iterator = tqdm(train_data_loader)
        train_data_iterator.set_description(descriptor())
        with Interrupter():
            for data, label in train_data_iterator:
                input_spikes = data.reshape(-1, 2, x_max * y_max, t_max)
                if supervised:
                    output_spikes = model.forward(input_spikes,
                                                  label.to(device),
                                                  mu_capture=capture,
                                                  mu_backoff=backoff,
                                                  mu_search=search,
                                                  beta_decay=decay)
                else:
                    output_spikes = model.forward(input_spikes, bias=0.5)
                # output_spikes: batch, channel, neuron, time
                accurate = (output_spikes.sum((-3, -2, -1)) > 0).logical_and(
                    output_spikes.sum((
                        -2, -1)).argmax(-1) == label.to(device)).sum()
                train_data_iterator.set_description(
                    f'{descriptor()}; {output_spikes.sum()}, {accurate}')

        model.train(mode=False)
        torch.save(model.state_dict(), model_path)

        spikes_tracer = SpikesTracer()
        with Interrupter():
            for data, label in tqdm(test_data_loader):
                input_spikes = data.reshape(-1, 2, x_max * y_max, t_max)
                output_spikes = model.forward(input_spikes)

                has_spikes = output_spikes.sum((-3, -2, -1)) > 0
                y_preds = spikes_tracer.get_predict(output_spikes)
                spikes_tracer.add_sample(label.numpy(), y_preds)

        spikes_tracer.describe_print_clear()

    return 0
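
The descriptor above compresses the first ten per-output mean weights and biases (scaled by 100) into a short progress-bar string. A hedged sketch with random tensors standing in for the model's weight and bias attributes (FullDualColumn's real shapes may differ):

import torch

weight = torch.rand(16, 100)  # hypothetical (output, input) weight matrix
bias = torch.rand(16)         # hypothetical per-output bias

max_print = 10
s = f"{','.join(f'{x * 100:.0f}' for x in weight.mean(axis=1)[:max_print])}; "
s += f"{','.join(f'{x * 100:.0f}' for x in bias[:max_print])}; "
print(s)  # e.g. "52,47,49,...; 13,88,...; "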
Example #3
def main(
    gpu, batch, epochs, supervised,
    x_max, y_max, t_max,
    neurons, channels,
    step, leak,
    theta, dense, forced_dep, w_init,
    train_path, test_path, model_path,
    **kwargs
):
    if torch.cuda.is_available():
        dev = f'cuda:{gpu}'
    else:
        dev = 'cpu'
    device = torch.device(dev)

    print(
        f'Device: {device}, Batch: {batch}, Epochs: {epochs}, Supervised: {supervised}')
    print(f'Forced Dep: {forced_dep}, Dense: {dense}, Weight Init: {w_init}')

    train_data_loader = DataLoader(NMnistSampled(
        train_path, x_max, y_max, t_max, device=device), shuffle=True, batch_size=batch)
    test_data_loader = DataLoader(NMnistSampled(
        test_path, x_max, y_max, t_max, device=device), batch_size=batch)

    model = StackFullColumn(
        [x_max * y_max, *neurons], [2, *channels], kernel=2,
        step=step, leak=leak,
        theta=theta, dense=dense, fodep=forced_dep, w_init=w_init
    ).to(device)

    def descriptor():
        return ','.join('{:.0f}'.format(c.weight.sum()) for c in model.columns)

    for epoch in range(epochs):
        model.train(mode=True)
        print(f"epoch: {epoch}")
        train_data_iterator = tqdm(train_data_loader)
        train_data_iterator.set_description(descriptor())
        with Interrupter():
            for data, label in train_data_iterator:
                input_spikes = data.reshape(-1, 2, x_max * y_max, t_max)
                if supervised:
                    output_spikes = model.forward(
                        input_spikes, label.to(device))
                else:
                    output_spikes = model.forward(input_spikes)
                accurate = (output_spikes.sum((-3, -2, -1)) > 0).logical_and(
                    output_spikes.sum((-3, -1)).argmax(-1) == label.to(device)).sum()
                train_data_iterator.set_description(
                    f'{descriptor()}; {output_spikes.sum()}, {accurate}')

        model.train(mode=False)
        auto_matcher = AutoMatchingMatrix(10, 10)
        with Interrupter():
            for data, label in tqdm(test_data_loader):
                input_spikes = data.reshape(-1, 2, x_max * y_max, t_max)
                output_spikes = model.forward(input_spikes)

                has_spikes = output_spikes.sum((-3, -2, -1)) > 0
                y_preds = output_spikes.sum((-3, -1)).argmax(-1)

                for has_spike, y_pred, y_true in zip(has_spikes.cpu().numpy(), y_preds.cpu().numpy(), label.numpy()):
                    if has_spike:
                        auto_matcher.add_sample(y_true, y_pred)

        print(auto_matcher.mat)
        print(
            f'Coverage: {auto_matcher.mat.sum() / len(test_data_loader.dataset)}')
        auto_matcher.describe_print_clear()
        torch.save(model.state_dict(), model_path)

    return 0
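
AutoMatchingMatrix accumulates (true, predicted) pairs for the samples that actually spiked, so coverage is the fraction of test samples that produced any output at all. A rough numpy stand-in for that bookkeeping (AutoMatchingMatrix itself is defined in the repository; this only mirrors the counting):

import numpy as np

mat = np.zeros((10, 10), dtype=int)  # rows: true label, columns: prediction
samples = [(3, 3, True), (3, 5, True), (7, 7, False)]  # (y_true, y_pred, has_spike)

for y_true, y_pred, has_spike in samples:
    if has_spike:  # silent samples are skipped entirely
        mat[y_true, y_pred] += 1

print('coverage:', mat.sum() / len(samples))           # 2/3 of samples spiked
print('accuracy:', np.trace(mat) / max(mat.sum(), 1))  # among covered samples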
Example #4
def main(
    gpu, batch, epochs, supervised,
    x_max, y_max, t_max,
    step, leak,
    forced_dep, dense, w_init, channel,
    capture, backoff, search,
    train_path, test_path, model_path,
    **kwargs
):
    if torch.cuda.is_available():
        dev = f'cuda:{gpu}'
    else:
        dev = 'cpu'
    device = torch.device(dev)

    print(
        f'Device: {device}, Batch: {batch}, Epochs: {epochs}, Supervised: {supervised}')
    print(f'Forced Dep: {forced_dep}, Dense: {dense}, Weight Init: {w_init}')

    train_data_loader = DataLoader(NMnistSampled(
        train_path, x_max, y_max, t_max, device=device), shuffle=True, batch_size=batch)
    test_data_loader = DataLoader(NMnistSampled(
        test_path, x_max, y_max, t_max, device=device), batch_size=batch)

    model = ConvColumn(
        input_channel=2, output_channel=channel,
        kernel=3, stride=2,
        step=step, leak=leak,
        dense=dense, fodep=forced_dep, w_init=w_init
    ).to(device)

    def descriptor():
        return ','.join('{:.0f}'.format(x) for x in model.weight.sum((1, 2, 3)).detach())

    def orthogonal():
        oc, ic, x, y = model.weight.shape
        w = model.weight.reshape(oc, -1)
        w = w / (w ** 2).sum(1, keepdim=True).sqrt()
        return (((w @ w.T) ** 2).mean() - 1 / oc).sqrt()

    for epoch in range(epochs):
        print(f"epoch: {epoch}")

        model.train(mode=True)
        train_data_iterator = tqdm(train_data_loader)
        with Interrupter():
            for data, label in train_data_iterator:
                input_spikes = data
                output_spikes = model.forward(
                    input_spikes, mu_capture=capture, mu_backoff=backoff, mu_search=search)
                train_data_iterator.set_description(
                    f'weight sum:{descriptor()}; '
                    f'weight orthogonal:{orthogonal():.4f}; '
                    f'total spikes:{output_spikes.sum().int()}; '
                    f'time coverage:{(output_spikes.sum((1, 2, 3)) > 0).float().mean() * 100:.2f}')

        model.train(mode=False)
        torch.save(model.state_dict(), model_path)

        features = []
        labels = []
        with Interrupter():
            for data, label in tqdm(train_data_loader):
                output_spikes = model.forward(data)
                feature = output_spikes.sum((-1, -2, -3)).cpu().numpy()
                features.append(feature)
                labels.append(label.numpy())
        X_train = np.vstack(features)
        Y_train = np.hstack(labels)

        features = []
        labels = []
        with Interrupter():
            for data, label in tqdm(test_data_loader):
                output_spikes = model.forward(data)
                feature = output_spikes.sum((-1, -2, -3)).cpu().numpy()
                features.append(feature)
                labels.append(label.numpy())
        X_test = np.vstack(features)
        Y_test = np.hstack(labels)

        tester = GradientBoostingClassifier()
        tester.fit(X_train, Y_train)
        Y_pred = tester.predict(X_test)
        print('accuracy: ', accuracy_score(Y_test, Y_pred))
        print(confusion_matrix(Y_test, Y_pred))

    return 0
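
The orthogonal() helper measures how far the unit-normalized weight rows are from mutually orthogonal: for unit rows the Gram matrix w @ w.T has ones on its diagonal, which contribute exactly 1/oc to the mean of its squares, so subtracting 1/oc isolates the off-diagonal overlap. A self-contained check on the two extreme cases:

import torch

def orthogonality(weight):
    # weight: (out_channels, in_channels, kx, ky), as in ConvColumn
    oc = weight.shape[0]
    w = weight.reshape(oc, -1)
    w = w / (w ** 2).sum(1, keepdim=True).sqrt()  # unit-normalize each row
    return (((w @ w.T) ** 2).mean() - 1 / oc).sqrt()

eye = torch.eye(4).reshape(4, 1, 2, 2)  # perfectly orthogonal filters
same = torch.ones(4, 1, 2, 2)           # four identical filters
print(orthogonality(eye))   # ~0: no overlap between filters
print(orthogonality(same))  # ~0.866 = sqrt(1 - 1/4): maximal overlap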
Example #5
def main(gpu, batch, epochs, x_max, y_max, t_max, train_path, test_path,
         model_path, model, **kwargs):
    if torch.cuda.is_available():
        dev = f'cuda:{gpu}'
    else:
        dev = 'cpu'
    device = torch.device(dev)

    train_data_loader = DataLoader(NMnistSampled(train_path, x_max, y_max,
                                                 t_max),
                                   shuffle=True,
                                   batch_size=batch)
    test_data_loader = DataLoader(NMnistSampled(test_path, x_max, y_max,
                                                t_max),
                                  batch_size=batch)

    if model == 'linear':
        model = LinearModel(x_max * y_max, 10).to(device)

        def transform(data):
            batch = data.shape[0]
            return data.reshape(batch, 2, x_max * y_max,
                                t_max).to(device).sum(axis=(-1, -3)).float()
    elif model == 'linear_t':
        model = LinearModel(2 * x_max * y_max * t_max, 10).to(device)

        def transform(data):
            batch = data.shape[0]
            return data.reshape(batch,
                                2 * x_max * y_max * t_max).to(device).float()
    elif model == 'conv':
        model = ConvModel(2, 16, (x_max, y_max), 10).to(device)

        def transform(data):
            batch = data.shape[0]
            return data.reshape(batch, 2, x_max, y_max,
                                t_max).to(device).sum(axis=-1).float()
    elif model == 'conv_gru_t':
        model = ConvGRUModel(2, 16, (x_max, y_max), 10).to(device)

        def transform(data):
            batch = data.shape[0]
            return data.reshape(batch, 2, x_max, y_max,
                                t_max).to(device).float()
    else:
        return 255

    optimizer = torch.optim.Adam(model.parameters())
    error = nn.CrossEntropyLoss()
    model.train()

    for epoch in range(epochs):
        print(f"epoch: {epoch}")
        training = tqdm(train_data_loader)
        for data, labels in training:
            data = transform(data)
            labels = labels.to(device)
            optimizer.zero_grad()
            output = model.forward(data)
            loss = error(output, labels)  # labels were already moved to the selected device
            training.set_description(f'loss={loss.detach().cpu().numpy():.4f}')
            loss.backward()
            optimizer.step()

        auto_matcher = AutoMatchingMatrix(10, 10)
        for data, labels in tqdm(test_data_loader):
            data = transform(data)
            output = model.forward(data).argmax(axis=-1).cpu()
            for y_pred, y_true in zip(output, labels):
                auto_matcher.add_sample(y_true, y_pred)

        print(auto_matcher.mat)
        auto_matcher.describe_print_clear()
        torch.save(model.state_dict(), model_path)

    return 0
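
Each transform flattens the sampled event volume to match its model's input: the 'linear' variant, for instance, sums away both the time axis and the two polarity channels, leaving one event count per pixel. A quick shape check with a dummy batch (the 34x34 grid and 64 time steps are illustrative N-MNIST-like values, not taken from the code):

import torch

batch, x_max, y_max, t_max = 8, 34, 34, 64
data = torch.randint(0, 2, (batch, 2, x_max, y_max, t_max))

# 'linear': sum over time (-1) and polarity (-3), one count per pixel
flat = data.reshape(batch, 2, x_max * y_max, t_max).sum(axis=(-1, -3)).float()
print(flat.shape)  # torch.Size([8, 1156]), matching LinearModel(x_max * y_max, 10)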
Example #6
def main(gpu, batch, epochs, x_max, y_max, t_max, step, leak, winners,
         forced_dep, dense, w_init, channel, pooling_kernel, capture, backoff,
         search, fc_capture, fc_backoff, fc_search, fc_neuron, fc_winners,
         fc_step, fc_leak, fc_dense, fc_theta, fc_w_init, fc_bias, depth_start,
         train_path, test_path, model_path, **kwargs):
    if torch.cuda.is_available():
        dev = f'cuda:{gpu}'
    else:
        dev = 'cpu'
    device = torch.device(dev)

    cv_stack_model_path = os.path.join(model_path, "cv")
    if not os.path.isdir(cv_stack_model_path):
        os.makedirs(cv_stack_model_path)

    print(f'Device: {device}, Batch: {batch}, Epochs: {epochs}')
    print(f'Forced Dep: {forced_dep}, Dense: {dense}, Weight Init: {w_init}')

    train_data_loader = DataLoader(NMnistSampled(train_path,
                                                 x_max,
                                                 y_max,
                                                 t_max,
                                                 device=device),
                                   shuffle=True,
                                   batch_size=batch)
    test_data_loader = DataLoader(NMnistSampled(test_path,
                                                x_max,
                                                y_max,
                                                t_max,
                                                device=device),
                                  batch_size=batch)

    model = StackCV(channels=[2] + channel,
                    step=step,
                    leak=leak,
                    bias=0.5,
                    winners=winners,
                    pooling_kernel=pooling_kernel,
                    fodep=forced_dep,
                    w_init=w_init,
                    dense=dense).to(device)

    def descriptor(depth):
        return ','.join('{:.0f}'.format(x)
                        for x in model.columns[depth].weight.sum((1, 2,
                                                                  3)).detach())

    def tester_descriptor():
        max_print = 10
        s = f"{','.join(f'{x*100:.0f}' for x in tester.weight.mean(axis=1)[:max_print])}; "
        s += f"{','.join(f'{x*100:.0f}' for x in tester.bias[:max_print])}; "

        return s

    def orthogonal(depth):
        weight = model.columns[depth].weight
        oc, ic, x, y = weight.shape
        w = weight.reshape(oc, -1)
        w = w / (w**2).sum(1, keepdim=True).sqrt()
        return (((w @ w.T)**2).mean() - 1 / oc).sqrt()

    if depth_start != -1:
        print("Starting from ", depth_start)
        depth_i_model_path = os.path.join(model_path, str(depth_start - 1))
        model.load_state_dict(torch.load(depth_i_model_path))
        print("Finished loading ", depth_i_model_path)

        # just to get the shape of output_spikes
        model.eval()
        for data, label in train_data_loader:
            input_spikes = data
            output_spikes = model.forward(input_spikes,
                                          depth_start - 1,
                                          mu_capture=capture,
                                          mu_backoff=backoff,
                                          mu_search=search)
            break

    else:
        depth_start = 0
        print("Fresh train from 0")

    for epoch in range(epochs):
        model.train(mode=True)
        for depth in range(depth_start, model.num_spikes):
            print(f"train epoch: {epoch}, depth: {depth}")
            train_data_iterator = tqdm(train_data_loader)
            with Interrupter():
                for data, label in train_data_iterator:

                    input_spikes = data
                    output_spikes = model.forward(input_spikes,
                                                  depth,
                                                  mu_capture=capture,
                                                  mu_backoff=backoff,
                                                  mu_search=search)

                    train_data_iterator.set_description(
                        f'weight sum:{descriptor(depth)}; '
                        f'weight orthogonal:{orthogonal(depth):.4f}; '
                        f'total spikes:{output_spikes.sum().int()}; '
                        f'time coverage:{(output_spikes.sum((1, 2, 3)) > 0).float().mean() * 100:.2f}'
                    )

            depth_i_model_path = os.path.join(model_path, str(depth))
            print("saving", depth_i_model_path)
            torch.save(model.state_dict(), depth_i_model_path)

    model.train(mode=False)

    # build tester
    batch, channel, synapses_x, synapses_y, time = output_spikes.shape
    fc_fodep = time + fc_step + fc_leak
    tester = FullDualColumn(
        synapses_x * synapses_y,
        fc_neuron,
        input_channel=channel,
        output_channel=10,
        step=fc_step,
        leak=fc_leak,
        winners=fc_winners,
        fodep=fc_fodep,
        w_init=fc_w_init,
        dense=fc_dense,
        theta=fc_theta,
        bias=fc_bias,
    ).to(device)

    for epoch in range(epochs):
        print(f"tester epoch: {epoch}")
        tester.train(mode=True)
        with Interrupter():
            train_data_iterator = tqdm(train_data_loader)
            for data, label in train_data_iterator:
                output_spikes = model.forward(data)
                output_spikes = output_spikes.reshape(-1, channel,
                                                      synapses_x * synapses_y,
                                                      time)
                output_spikes = tester.forward(output_spikes,
                                               labels=label.to(device),
                                               mu_capture=fc_capture,
                                               mu_backoff=fc_backoff,
                                               mu_search=fc_search)

                y_preds = output_spikes.sum((-2, -1)).argmax(-1)
                accurate = (output_spikes.sum((-3, -2, -1)) > 0).logical_and(
                    output_spikes.sum((
                        -2, -1)).argmax(-1) == label.to(device)).sum()
                train_data_iterator.set_description(
                    f'{tester_descriptor()}; {output_spikes.sum()}, {accurate}'
                )

        # make prediction
        tester.train(mode=False)
        tester_model_path = os.path.join(model_path, "fc")
        torch.save(tester.state_dict(), tester_model_path)  # save the fc tester, not the frozen stack

        spikes_tracer = SpikesTracer()
        with Interrupter():
            for data, label in tqdm(test_data_loader):
                output_spikes = model.forward(data)
                output_spikes = output_spikes.reshape(-1, channel,
                                                      synapses_x * synapses_y,
                                                      time)

                output_spikes = tester.forward(output_spikes)

                y_preds = spikes_tracer.get_predict(output_spikes)
                spikes_tracer.add_sample(label.numpy(), y_preds)

        spikes_tracer.describe_print_clear()

    return 0
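
This last example trains the convolutional stack greedily, one depth at a time, writing a checkpoint after each depth so a later run can resume from depth_start. A minimal sketch of that resume-and-continue pattern with a generic torch module (StackCV and its per-depth forward() are specific to the repository; this only illustrates the checkpoint flow):

import os
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 4), nn.Linear(4, 4))  # stand-in stack
model_path = 'checkpoints'
os.makedirs(model_path, exist_ok=True)

# first run: train depth by depth, checkpointing after each one
for depth in range(len(model)):
    # ... train only model[depth] here ...
    torch.save(model.state_dict(), os.path.join(model_path, str(depth)))

# later run: resume by loading the checkpoint of the previous depth
depth_start = 1
model.load_state_dict(torch.load(os.path.join(model_path, str(depth_start - 1))))
for depth in range(depth_start, len(model)):
    # ... continue training the remaining depths ...
    torch.save(model.state_dict(), os.path.join(model_path, str(depth)))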