Example #1
def __init__(self):
    # Layers
    self.conv1 = l.Convolution("conv1", 3, 6, 5, 1)
    self.conv2 = l.Convolution("conv2", 6, 16, 5, 1)
    self.relu = l.ReLU("relu")
    self.pool = l.Maxpooling("pooling", 2, 2)
    self.dense1 = l.Dense("dense1", 16 * 5 * 5, 120)
    self.dense2 = l.Dense("dense2", 120, 84)  # name fixed: was a duplicated "dense1"
    self.dense3 = l.Dense("dense3", 84, 10)   # name fixed: was a duplicated "dense1"
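The constructor above only declares the layers; no forward pass appears in the snippet. A minimal sketch of the LeNet-style data flow these shapes imply (a 3-channel 32x32 input makes the 16 * 5 * 5 flatten size work out), assuming each layer object is callable; `forward` here is an assumption, not part of the source:

def forward(self, x):
    # conv -> relu -> pool twice: 3x32x32 -> 6x28x28 -> 6x14x14 -> 16x10x10 -> 16x5x5
    x = self.pool(self.relu(self.conv1(x)))
    x = self.pool(self.relu(self.conv2(x)))
    x = x.reshape(x.shape[0], -1)  # flatten to 16 * 5 * 5 = 400 features
    x = self.relu(self.dense1(x))
    x = self.relu(self.dense2(x))
    return self.dense3(x)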
Example #2
    def test_conv(self):

        x = torch.ones(1, 4, 3, 3)

        c = layers.Convolution((4, 3, 3), 4, k=2, rprop=.5, gadditional=2, radditional=2)

        print(c(x))
Example #3
def __init__(self, inputSize, outputSize):
    super().__init__()
    self.outputSize = outputSize
    self.conv1 = layers.Convolution(5, 1)
    self.Pool1 = layers.Pool(5, stride=2)
    self.conv2 = layers.Convolution(10, 5)
    self.Pool2 = layers.Pool(10, stride=2)
    self.Flatten = layers.Flatten()
    self.fc1 = layers.FullyConnected(7 * 7 * 10,
                                     50,
                                     activation=activate.Relu())
    self.fc2 = layers.FullyConnected(50,
                                     outputSize,
                                     activation=activate.SoftMax())
    self.inputLayer = self.conv1
    self.outputLayer = self.fc2
    # wire the layers into a chain: conv1 -> Pool1 -> conv2 -> Pool2 -> Flatten -> fc1 -> fc2
    self.conv1.addSon(self.Pool1)
    self.Pool1.addSon(self.conv2)
    self.conv2.addSon(self.Pool2)
    self.Pool2.addSon(self.Flatten)
    self.Flatten.addSon(self.fc1)
    self.fc1.addSon(self.fc2)
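The snippet links the layers through addSon but does not show how the chain is executed. A plausible traversal, assuming each layer stores its children in a `sons` list and exposes `forward()` (both assumptions; the source's API is not shown), would walk from `inputLayer`:

def forward(self, x):
    layer = self.inputLayer
    while layer is not None:
        x = layer.forward(x)
        # follow the first (here, only) child; stop after the output layer
        layer = layer.sons[0] if getattr(layer, "sons", None) else None
    return x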
Example #4
    def __init__(self, input_size, output_size, init_weight_std=0.01, filter_num=5, filter_size=3, pool_size=2):
        # input = [C, h, w]
        size = input_size[1]
        conv_out_size = (size - filter_size + 1)
        # assumes pool_size == 2 with stride 2, halving each spatial dimension
        pool_out_size = int((conv_out_size / 2) ** 2 * filter_num)

        self.params = dict()
        # randn (not rand): init_weight_std scales a standard-normal draw
        self.params["W1"] = init_weight_std * np.random.randn(filter_num, input_size[0], filter_size, filter_size)
        self.params["b1"] = np.zeros(filter_num)
        self.params["W2"] = init_weight_std * np.random.randn(pool_out_size, output_size)
        self.params["b2"] = np.zeros(output_size)

        self.layers = OrderedDict()
        self.layers["Conv"] = L.Convolution(self.params["W1"], self.params["b1"])
        self.layers["Activation1"] = L.Relu()
        self.layers["Pooling"] = L.Pooling(pool_size, pool_size, stride=2)
        self.layers["Affine1"] = L.Affine(self.params["W2"], self.params["b2"])
        self.layers["Activation2"] = L.Relu()

        self.output_layer = L.Softmax_with_loss()
        self.y = None
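Only the constructor is shown; in the "Deep Learning from Scratch" style this OrderedDict implies, inference chains each layer's forward() and the loss feeds the result through output_layer. A minimal sketch under that assumption (predict and loss are not in the original snippet):

    def predict(self, x):
        # run x through every layer in insertion order
        for layer in self.layers.values():
            x = layer.forward(x)
        return x

    def loss(self, x, t):
        # cache the raw scores, then apply softmax + cross-entropy
        self.y = self.predict(x)
        return self.output_layer.forward(self.y, t)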
Example #5
def compose_layers(num_classes, layer_dims, shape_input):
    embed_layer = layers.SelfInteractionSimple(layer_dims[0])
    input_layer = layers.Input()
    
    model_layers = []
    for dim in layer_dims[1:]:
        model_layers.append(layers.Convolution())
        model_layers.append(layers.Concatenation())
        model_layers.append(layers.SelfInteraction(dim))
        model_layers.append(layers.Nonlinearity())
    output_layer = layers.Output(num_classes)

    x, rbf, rij = input_layer(shape_input)
    input_tensor_list = {0: [embed_layer(x)]}
    
    for layer in model_layers:
        if isinstance(layer, layers.Convolution):
            input_tensor_list = layer([input_tensor_list, rbf, rij])
        else:
            input_tensor_list = layer(input_tensor_list)
    output = output_layer(input_tensor_list)
    return output
Example #6
train_x = ds_train.data.reshape([-1, 1, 28, 28]).numpy().astype(np.float64) / 255  # the np.float alias was removed in NumPy 1.24
train_y = ds_train.targets.numpy()
train_x, valid_x = train_x[:55000], train_x[55000:]
train_y, valid_y = train_y[:55000], train_y[55000:]
test_x = ds_test.data.reshape([-1, 1, 28, 28]).numpy().astype(np.float64) / 255
test_y = ds_test.targets.numpy()
train_mean = train_x.mean()
train_x, valid_x, test_x = (x - train_mean for x in (train_x, valid_x, test_x))
train_y, valid_y, test_y = (dense_to_one_hot(y, 10)
                            for y in (train_y, valid_y, test_y))

weight_decay = config['weight_decay']
net = []
regularizers = []
inputs = np.random.randn(config['batch_size'], 1, 28, 28)
net += [layers.Convolution(inputs, 16, 5, "conv1")]
regularizers += [
    layers.L2Regularizer(net[-1].weights, weight_decay, 'conv1_l2reg')
]
net += [layers.MaxPooling(net[-1], "pool1")]
net += [layers.ReLU(net[-1], "relu1")]
net += [layers.Convolution(net[-1], 32, 5, "conv2")]
regularizers += [
    layers.L2Regularizer(net[-1].weights, weight_decay, 'conv2_l2reg')
]
net += [layers.MaxPooling(net[-1], "pool2")]
net += [layers.ReLU(net[-1], "relu2")]
## 7x7
net += [layers.Flatten(net[-1], "flatten3")]
net += [layers.FC(net[-1], 512, "fc3")]
regularizers += [
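The "## 7x7" comment records the spatial size reaching the flatten layer: assuming the Convolution layers pad to preserve spatial size ('same' padding) and each MaxPooling halves it, a 28x28 MNIST input shrinks 28 -> 14 -> 7, so fc3 receives 32 * 7 * 7 = 1568 features.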
Example #7
    grad_b_num = eval_numerical_gradient(func, b, grad_out)
    grads = layer.backward_params(grad_out)
    grad_w = grads[0][1]
    grad_b = grads[1][1]
    print("Check weights:")
    print("Relative error = ", rel_error(grad_w_num, grad_w))
    print("Error norm = ", np.linalg.norm(grad_w_num - grad_w))
    print("Check biases:")
    print("Relative error = ", rel_error(grad_b_num, grad_b))
    print("Error norm = ", np.linalg.norm(grad_b_num - grad_b))


print("Convolution")
x = np.random.randn(4, 3, 5, 5)
grad_out = np.random.randn(4, 2, 5, 5)
conv = layers.Convolution(x, 2, 3, "conv1")
print("Check grad wrt input")
check_grad_inputs(conv, x, grad_out)
print("Check grad wrt params")
check_grad_params(conv, x, conv.weights, conv.bias, grad_out)

print("\nMaxPooling")
x = np.random.randn(5, 4, 8, 8)
grad_out = np.random.randn(5, 4, 4, 4)
pool = layers.MaxPooling(x, "pool", 2, 2)
print("Check grad wrt input")
check_grad_inputs(pool, x, grad_out)

print("\nReLU")
x = np.random.randn(4, 3, 5, 5)
grad_out = np.random.randn(4, 3, 5, 5)
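rel_error and eval_numerical_gradient are used above but defined elsewhere. rel_error conventionally means the maximum elementwise relative error with a guard against division by zero; a minimal sketch of that convention (an assumption, not the source's definition):

def rel_error(x, y):
    # max over elements of |x - y| / max(1e-8, |x| + |y|)
    return np.max(np.abs(x - y) / np.maximum(1e-8, np.abs(x) + np.abs(y)))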
Example #8
np.set_printoptions(threshold=np.inf)

print((1, 2) + (3, ))

# c = np.array([[3,2,4,5,6],[1,2,3,4,5]])
# print(c)
# print(c.shape)
# a = np.zeros([3,3,4])
img = np.array([[[0, 1, 2, 3], [10, 11, 12, 13], [20, 21, 22, 23],
                 [30, 31, 32, 33]],
                [[100, 101, 102, 103], [110, 111, 112, 113],
                 [120, 121, 122, 123], [130, 131, 132, 133]]])
# [[100,101,102,103],[110,111,112,113],[120,121,122,123],[130,131,132,133]]])
filter = np.array([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])

c = L.Convolution(W=filter, b=np.zeros(filter.shape), stride=1)
print(c.forward(img))

# print(b)
#
# filter_h, filter_w = filter.shape
# stride = 1
#
# N, H, W = b.shape
# print(N)
# out_h = (H - filter_h) // stride + 1
# out_w = (W - filter_w) // stride + 1
#
#
# col = np.zeros((N, filter_h, filter_w, out_h, out_w))
# # col[0,0,:,:] = b[0:5,0:5]
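The commented-out scaffolding above computes output sizes and allocates a (N, filter_h, filter_w, out_h, out_w) buffer, i.e. the beginning of an im2col. A minimal single-channel im2col that completes the idea (a sketch; the L module's own implementation is not shown):

import numpy as np

def im2col_single_channel(imgs, filter_h, filter_w, stride=1):
    # imgs: (N, H, W) -> one row per output position, filter_h*filter_w columns
    N, H, W = imgs.shape
    out_h = (H - filter_h) // stride + 1
    out_w = (W - filter_w) // stride + 1
    col = np.zeros((N, filter_h, filter_w, out_h, out_w))
    for y in range(filter_h):
        for x in range(filter_w):
            # gather every patch's (y, x) element with one strided slice
            col[:, y, x, :, :] = imgs[:, y:y + stride * out_h:stride, x:x + stride * out_w:stride]
    return col.transpose(0, 3, 4, 1, 2).reshape(N * out_h * out_w, -1)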
Example #9
n_examples = len(images)

cost_values = []

network_name = "AlexNet"

graph = ComputationalGraph(network_name,
                           loss_function="cross-entropy",
                           lr=1e-4,
                           reg=1e-5)

graph.add_layer(
    layers.Convolution("conv1",
                       kernel_size=11,
                       kernel_channels=3,
                       n_kernels=96,
                       stride=4,
                       same_padding=False,
                       logging=False))
graph.add_layer(layers.Activation(activation="relu", logging=False))
graph.add_layer(
    layers.MaxPooling(window_size=3,
                      window_channels=96,
                      stride=2,
                      logging=False))

graph.add_layer(
    layers.Convolution("conv2",
                       kernel_size=5,
                       kernel_channels=96,
                       n_kernels=256,
Example #10
valid_x = dataset.validation.images
valid_x = valid_x.reshape([-1, 1, 28, 28])
valid_y = dataset.validation.labels
test_x = dataset.test.images
test_x = test_x.reshape([-1, 1, 28, 28])
test_y = dataset.test.labels
train_mean = train_x.mean()
train_x -= train_mean
valid_x -= train_mean
test_x -= train_mean

net = []
inputs = np.random.randn(
    config['batch_size'], 1, 28, 28
)  # why a normal distribution, some placeholder for the real data? just for the shape?
net += [layers.Convolution(inputs, 16, 5,
                           "conv1")]  # num filters = 16, kernel size = 5
net += [layers.MaxPooling(net[-1], "pool1")]  # the argument is the layer's input
net += [layers.ReLU(net[-1], "relu1")]
net += [layers.Convolution(net[-1], 32, 5, "conv2")]
net += [layers.MaxPooling(net[-1], "pool2")]
net += [layers.ReLU(net[-1], "relu2")]
# out = 7x7
net += [layers.Flatten(net[-1], "flatten3")]
net += [layers.FC(net[-1], 512, "fc3")]
net += [layers.ReLU(net[-1], "relu3")]
net += [layers.FC(net[-1], 10, "logits")]

loss = layers.SoftmaxCrossEntropyWithLogits()

nn.train(train_x, train_y, valid_x, valid_y, net, loss, config)
nn.evaluate("Test", test_x, test_y, net, loss, config)
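layers.SoftmaxCrossEntropyWithLogits is instantiated but its implementation is not shown. The standard numerically stable formulation subtracts the per-row maximum before exponentiating; a minimal NumPy sketch of that formulation (an illustration, not this library's code):

import numpy as np

def softmax_cross_entropy_with_logits(logits, onehot_targets):
    # shift by the row max so exp() cannot overflow
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_probs = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    # mean negative log-likelihood of the true classes
    return -np.mean(np.sum(onehot_targets * log_probs, axis=1))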
Example #11
        #X = X[np.newaxis, :, :, :]
        images.append(X)

    return np.stack(images, axis=0)


X = load_images(["billie.png"], IMG_SIZE=50)
print(X.shape)

T = np.array([1, 0, 0, 0, 0])
T = T.T  # note: .T is a no-op on a 1-D array

######################################################################################################################
conv = layers.Convolution(kernel_size=3,
                          kernel_channels=3,
                          n_kernels=10,
                          stride=1,
                          same_padding=True)
X = conv.forward(X)
print("[+] CONVOLVED_shape:", X.shape)
print()

activation = layers.Activation(activation="relu")
X = activation.forward(X)
print("[+] ACTIVATION_shape:", X.shape)
dActivation = activation.backward(np.ones_like(X))
print("dActivation_SHAPE:", dActivation.shape)
print()

maxpool = layers.MaxPooling(window_size=2, window_channels=10, stride=2)
X = maxpool.forward(X)
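A quick shape check of the pipeline above: with IMG_SIZE=50 and same_padding=True, the stride-1 convolution keeps the 50x50 extent and emits 10 channels, and the 2x2 max-pool with stride 2 halves it to 25x25; whether that prints as (1, 10, 25, 25) or (1, 25, 25, 10) depends on this library's layout, which the snippet's own shape prints reveal.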
Example #12
def __init__(self):
    super(CapsNet, self).__init__()
    self.conv_layer = layers.Convolution()
    self.primary_caps = layers.PrimaryCaps()
    self.digit_caps = layers.DigitCaps()
    self.decoder = layers.Decoder()
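Only the constructor appears in the snippet; in the standard CapsNet architecture (Sabour et al., 2017) these modules compose in order, with the decoder reconstructing the input from the digit capsules. A sketch of that conventional data flow (`forward` is assumed, not taken from the source):

def forward(self, x):
    # conv features -> primary capsules -> digit capsules
    x = self.conv_layer(x)
    x = self.primary_caps(x)
    digit_caps = self.digit_caps(x)
    # the decoder reconstructs the input image from the capsule vectors
    reconstruction = self.decoder(digit_caps)
    return digit_caps, reconstruction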