Example 1

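This snippet assumes that network, input_layer, conv_layer, conv_conn, time, args, and train_dataloader have already been defined. A minimal setup sketch using BindsNET's Network, Input, LIFNodes, and Conv2dConnection is given below; the kernel size, filter count, and simulation time are illustrative assumptions, and train_dataloader is assumed to wrap a dataset built with an image encoder (e.g. a PoissonEncoder) so that each batch carries an encoded_image entry.

from bindsnet.analysis.pipeline_analysis import MatplotlibAnalyzer, TensorboardAnalyzer
from bindsnet.network import Network
from bindsnet.network.nodes import Input, LIFNodes
from bindsnet.network.topology import Conv2dConnection
from tqdm import tqdm

time = 250        # simulation time per example (illustrative)
kernel_size = 12  # convolution kernel size (illustrative)
stride = 4
n_filters = 25
conv_size = (28 - kernel_size) // stride + 1  # 5 with the values above

network = Network()

# Input layer: a single channel of 28x28 encoded pixels.
input_layer = Input(n=784, shape=(1, 28, 28), traces=True)

# Convolutional layer of leaky integrate-and-fire neurons.
conv_layer = LIFNodes(
    n=n_filters * conv_size * conv_size,
    shape=(n_filters, conv_size, conv_size),
    traces=True,
)

# Convolutional connection whose kernels are plotted during training.
conv_conn = Conv2dConnection(
    input_layer, conv_layer, kernel_size=kernel_size, stride=stride
)
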
network.add_layer(input_layer, name="X")
network.add_layer(conv_layer, name="Y")
network.add_connection(conv_conn, source="X", target="Y")

# Train the network.
print("Begin training.\n")

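# Choose a visualization back end: TensorBoard writer or on-screen Matplotlib plots.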
if args.tensorboard:
    analyzer = TensorboardAnalyzer("logs/conv")
else:
    analyzer = MatplotlibAnalyzer()

for step, batch in enumerate(tqdm(train_dataloader)):
    # batch contains image, label, encoded_image since an image_encoder
    # was provided

    # batch["encoded_image"] is in BxTxCxHxW format
    inputs = {"X": batch["encoded_image"]}

    # Run the network on the input; input_time_dim specifies which axis of
    # the input tensor holds the time dimension.
    network.run(inputs=inputs, time=time, input_time_dim=1)

    network.reset_state_variables()  # Reset state variables.

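    # Visualize the current convolutional kernels for this training step.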
    analyzer.plot_conv2d_weights(conv_conn.w, step=step)

    analyzer.finalize_step()
Example 2
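This fragment sits inside the training loop of a network with two connections, C1 and C2 (their construction, and that of the network itself, is not shown). It assumes voltage monitors and plot handles have already been set up; a minimal sketch of that assumed setup follows, with an illustrative simulation time and an input layer assumed to be named "X".

import matplotlib.pyplot as plt
from bindsnet.analysis.plotting import plot_voltages, plot_weights
from bindsnet.network.monitors import Monitor
from bindsnet.utils import get_square_weights

time = 250  # simulation time per example (illustrative)

# Record the membrane voltage "v" of every non-input layer.
voltages = {}
for layer in set(network.layers) - {"X"}:
    voltages[layer] = Monitor(network.layers[layer], state_vars=["v"], time=time)
    network.add_monitor(voltages[layer], name="%s_voltages" % layer)

# Plot handles start as None and are updated in place on each iteration.
voltage_ims, voltage_axes = None, None
weights_im, weights_im2 = None, None
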
        voltage_ims, voltage_axes = plot_voltages(
            {
                layer: voltages[layer].get("v").view(-1, time)
                for layer in voltages
            },
            ims=voltage_ims,
            axes=voltage_axes,
        )
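        # get_square_weights tiles each neuron's 28x28 input weights into a 23x23 grid.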
        weights_im = plot_weights(get_square_weights(C1.w, 23, 28),
                                  im=weights_im,
                                  wmin=-2,
                                  wmax=2)
        weights_im2 = plot_weights(C2.w, im=weights_im2, wmin=-2, wmax=2)

        plt.pause(1e-8)
    network.reset_state_variables()


import torch
import torch.nn as nn


# Define logistic regression model using PyTorch.
class NN(nn.Module):
    def __init__(self, input_size, num_classes):
        super(NN, self).__init__()
        # h = int(input_size/2)
        self.linear_1 = nn.Linear(input_size, num_classes)
        # self.linear_1 = nn.Linear(input_size, h)
        # self.linear_2 = nn.Linear(h, num_classes)

    def forward(self, x):
        out = torch.sigmoid(self.linear_1(x.float().view(-1)))
        # out = torch.sigmoid(self.linear_2(out))
        return out
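

A minimal training sketch for this readout is shown below, processing one sample at a time (the forward pass flattens its input with view(-1), so batching would need a different reshape). The input size, class count, random spike-count features, and one-hot target are placeholders rather than values from the original example.

input_size, num_classes = 100, 10
model = NN(input_size, num_classes)
criterion = nn.BCELoss()  # sigmoid outputs pair with binary cross-entropy
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

features = torch.rand(input_size)  # stand-in for per-neuron spike counts
target = torch.zeros(num_classes)
target[3] = 1.0                    # one-hot label for class 3

optimizer.zero_grad()
output = model(features)           # shape: (num_classes,)
loss = criterion(output, target)
loss.backward()
optimizer.step()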