def test_conv2d_forward_batch_pycuda():
    """Compare PyCudaHandler.conv2d_forward_batch against the reference
    _conv2d_forward_batch over a grid of image sizes, batch sizes, channel
    counts, kernel shapes, strides and paddings.
    """
    from brainstorm.handlers import PyCudaHandler
    _h = PyCudaHandler()
    for input_shape in ((3, 3), (5, 4), (4, 9)):
        for nr_images in (1, 4):
            for nr_input_maps in (1, 3):
                for nr_filters in (1, 3):
                    for kernel_shape in ((1, 1), (2, 2), (3, 2)):
                        for stride in ((1, 1), (2, 2), (1, 2)):
                            for padding in (0, 1):

                                # Arrays are laid out
                                # (images, height, width, maps).
                                inputs = np.random.rand(
                                    nr_images, input_shape[0], input_shape[1],
                                    nr_input_maps).astype(dtype)
                                weights = np.random.rand(
                                    nr_filters, kernel_shape[0],
                                    kernel_shape[1],
                                    nr_input_maps).astype(dtype)
                                bias = np.zeros(nr_filters).astype(dtype)

                                # Standard convolution output size.  Floor
                                # division is required: true division yields
                                # a float, which np.zeros rejects as a shape
                                # dimension.
                                output_height = \
                                    (input_shape[0] + 2 * padding -
                                     kernel_shape[0]) // stride[0] + 1
                                output_width = \
                                    (input_shape[1] + 2 * padding -
                                     kernel_shape[1]) // stride[1] + 1

                                outputs = np.zeros((nr_images, output_height,
                                                    output_width, nr_filters),
                                                   dtype=dtype)
                                true_outputs = np.zeros_like(outputs)

                                # CPU reference result.
                                _conv2d_forward_batch(inputs, weights, bias,
                                                      true_outputs, padding,
                                                      stride)

                                # GPU result via the handler.
                                i_dev = _h.create_from_numpy(inputs)
                                w_dev = _h.create_from_numpy(weights)
                                b_dev = _h.create_from_numpy(bias)
                                o_dev = _h.create_from_numpy(outputs)
                                _h.conv2d_forward_batch(
                                    i_dev, w_dev, b_dev, o_dev, padding,
                                    stride)
                                outputs = _h.get_numpy_copy(o_dev)
                                passed = np.allclose(outputs, true_outputs)
                                if not passed:
                                    # Dump the failing configuration before
                                    # asserting, to ease debugging.
                                    print("Checking Inputs:", (nr_images, ) +
                                          input_shape + (nr_input_maps, ))
                                    print("Filters:", (nr_filters, ) +
                                          kernel_shape + (nr_input_maps, ))
                                    print("Stride: ", stride, "padding: ",
                                          padding)
                                    print("Expected:\n", true_outputs)
                                    print("Obtained:\n", outputs)
                                assert passed
Example #2
0
    def set_up_network(self):
        """Build the MLP (input -> dropout -> [FC -> dropout] * N -> output),
        move it to the GPU unless CUDA is disabled, and initialize weights.
        """
        num_output_classes = 4  # up, right, down, left
        inp, fc = bs.tools.get_in_out_layers('classification',
                                             self.input_shape,
                                             num_output_classes,
                                             projection_name='FC')

        # Input dropout, then one FullyConnected + Dropout stage per
        # configured hidden layer.
        spec = inp >> bs.layers.Dropout(
            drop_prob=self.args.dropout_probabilities[0])
        for idx in range(self.args.num_hidden_layers):
            spec = spec >> bs.layers.FullyConnected(
                self.args.hidden_layer_sizes[idx],
                name='Hid' + str(idx + 1),
                activation=self.args.activation_functions[idx])
            spec = spec >> bs.layers.Dropout(
                drop_prob=self.args.dropout_probabilities[idx + 1])

        self.network = bs.Network.from_layer(spec >> fc)
        if not self.args.disable_cuda:
            from brainstorm.handlers import PyCudaHandler
            self.network.set_handler(PyCudaHandler())
        self.network.initialize(bs.initializers.Gaussian(0.01))
        self.network.set_weight_modifiers(
            {"FC": bs.value_modifiers.ConstrainL2Norm(1)})
def test_conv2d_forward_batch_pycuda():
    """Compare PyCudaHandler.conv2d_forward_batch against the reference
    _conv2d_forward_batch over a grid of image sizes, batch sizes, channel
    counts, kernel shapes, strides and paddings.
    """
    from brainstorm.handlers import PyCudaHandler
    _h = PyCudaHandler()
    for input_shape in ((3, 3), (5, 4), (4, 9)):
        for nr_images in (1, 4):
            for nr_input_maps in (1, 3):
                for nr_filters in (1, 3):
                    for kernel_shape in ((1, 1), (2, 2), (3, 2)):
                        for stride in ((1, 1), (2, 2), (1, 2)):
                            for padding in (0, 1):

                                # Arrays are laid out
                                # (images, height, width, maps).
                                inputs = np.random.rand(
                                    nr_images, input_shape[0], input_shape[1],
                                    nr_input_maps).astype(dtype)
                                weights = np.random.rand(
                                    nr_filters, kernel_shape[0],
                                    kernel_shape[1], nr_input_maps).astype(
                                    dtype)
                                bias = np.zeros(nr_filters).astype(dtype)

                                # Standard convolution output size.  Floor
                                # division is required: true division yields
                                # a float, which np.zeros rejects as a shape
                                # dimension.
                                output_height = \
                                    (input_shape[0] + 2 * padding -
                                     kernel_shape[0]) // stride[0] + 1
                                output_width = \
                                    (input_shape[1] + 2 * padding -
                                     kernel_shape[1]) // stride[1] + 1

                                outputs = np.zeros((nr_images,
                                                    output_height,
                                                    output_width,
                                                    nr_filters), dtype=dtype)
                                true_outputs = np.zeros_like(outputs)

                                # CPU reference result.
                                _conv2d_forward_batch(inputs, weights,
                                                      bias, true_outputs,
                                                      padding, stride)

                                # GPU result via the handler.
                                i_dev = _h.create_from_numpy(inputs)
                                w_dev = _h.create_from_numpy(weights)
                                b_dev = _h.create_from_numpy(bias)
                                o_dev = _h.create_from_numpy(outputs)
                                _h.conv2d_forward_batch(i_dev, w_dev,
                                                        b_dev, o_dev,
                                                        padding, stride)
                                outputs = _h.get_numpy_copy(o_dev)
                                passed = np.allclose(outputs, true_outputs)
                                if not passed:
                                    # Dump the failing configuration before
                                    # asserting, to ease debugging.
                                    print("Checking Inputs:", (nr_images,) +
                                          input_shape + (nr_input_maps,))
                                    print("Filters:",
                                          (nr_filters,) + kernel_shape +
                                          (nr_input_maps,))
                                    print("Stride: ", stride, "padding: ",
                                          padding)
                                    print("Expected:\n", true_outputs)
                                    print("Obtained:\n", outputs)
                                assert passed
def test_strided_elementwise_inplace():
    """Check PyCUDA strided in-place activations against NumPy references.

    Three random arrays are stacked and reshaped so the final axis holds
    three strided slots; 'logistic' is applied to slot 1 and 'tanh' to
    slot 0, while slot 2 must remain untouched.
    """
    from brainstorm.handlers import PyCudaHandler
    _h = PyCudaHandler()
    rdm = np.random.RandomState(1345)

    def get_rdm_array(shape):
        # Draw a standard-normal array with the given 2- to 4-d shape.
        return rdm.randn(*shape)

    for dims in range(2, 5):
        for _ in range(10):
            shape = rdm.randint(1, 17, dims)
            a1 = np.float32(get_rdm_array(shape))
            a2 = np.float32(get_rdm_array(shape))
            a3 = np.float32(get_rdm_array(shape))
            a = np.vstack([a1, a2, a3])
            original_shape = a.shape
            # Fold the stacked axis into a trailing slot axis of size 3.
            a = a.reshape([original_shape[0] // 3] +
                          list(original_shape[1:]) + [3])
            A = _h.create_from_numpy(a)

            _h.strided_elementwise_inplace(A, 1, 'logistic')
            _h.strided_elementwise_inplace(A, 0, 'tanh')
            outputs = _h.get_numpy_copy(A).reshape(original_shape)

            c1 = np.tanh(a1)
            # NOTE(review): this reference is 1/(1+exp(a2)) == sigmoid(-a2);
            # the usual logistic is 1/(1+exp(-x)) -- confirm against the
            # handler's kernel definition.
            c2 = 1. / (1. + np.exp(a2))
            c3 = a3  # slot 2 was never touched
            c = np.vstack([c1, c2, c3])

            assert np.allclose(outputs, c)
Example #5
0
 def initialize_network(self):
     """Load the network from ``self.network_filename`` and move it to the
     GPU handler unless CUDA has been disabled via the CLI arguments."""
     self.network = bs.Network.from_hdf5(self.network_filename)
     if self.args.disable_cuda:
         return
     from brainstorm.handlers import PyCudaHandler
     self.network.set_handler(PyCudaHandler())
Example #6
0
#!/usr/bin/env python
# coding=utf-8
from __future__ import division, print_function, unicode_literals

import numpy as np

from brainstorm.handlers import default_handler
from brainstorm.optional import has_pycuda
from brainstorm.value_modifiers import ConstrainL2Norm

# Additional handlers to exercise in the tests besides the default (NumPy)
# one; the GPU handler is included only when PyCUDA is installed.
non_default_handlers = []
if has_pycuda:
    from brainstorm.handlers import PyCudaHandler
    non_default_handlers.append(PyCudaHandler())


def test_limit_incoming_weights_squared():
    """ConstrainL2Norm must rescale each unit's incoming weight vector so
    that its L2 norm does not exceed the limit, leaving smaller rows alone.
    """
    for orig in (np.random.rand(4, 5), np.random.randn(3, 5, 4, 6)):
        for limit in [0.00001, 1, 10, 10000]:
            # Flatten all but the first axis: one row per output unit.
            x = orig.reshape(orig.shape[0],
                             int(orig.size / orig.shape[0])).copy()
            # Scale factor is > 1 only where the row norm exceeds the limit.
            divisor = (x * x).sum(axis=1, keepdims=True) ** 0.5 / limit
            divisor[divisor < 1] = 1
            out = (x / divisor).reshape(orig.shape)

            y = orig.copy()
            mod = ConstrainL2Norm(limit)
            mod(default_handler, y)
            assert np.allclose(y, out)

            for handler in non_default_handlers:
                y = handler.create_from_numpy(orig)
                # Apply the modifier on the device and compare on the host.
                # (The original loop created `y` but asserted nothing.)
                mod(handler, y)
                assert np.allclose(handler.get_numpy_copy(y), out)
Example #7
0
inp, jis = bs.tools.get_in_out_layers('classification', (63, 64, 1), 320,
                                      projection_name='JIS')

# Input -> dropout(0.2) -> three [FC(3000, rel) -> dropout(0.5)] stages
# -> output projection.
spec = inp >> bs.layers.Dropout(drop_prob=0.2)
for hidden_name in ('HiddenLayer_1', 'HiddenLayer_2', 'HiddenLayer_3'):
    spec = spec >> bs.layers.FullyConnected(3000, name=hidden_name,
                                            activation='rel')
    spec = spec >> bs.layers.Dropout(drop_prob=0.5)
network = bs.Network.from_layer(spec >> jis)

# (A previously saved network could be loaded with bs.Network.from_hdf5
# instead of building one from scratch.)

network.set_handler(PyCudaHandler())
network.initialize(bs.initializers.Gaussian(0.01))
network.set_weight_modifiers({"JIS": bs.value_modifiers.ConstrainL2Norm(1)})

##################################
trainer = bs.Trainer(
    bs.training.MomentumStepper(learning_rate=0.1, momentum=0.9))
trainer.add_hook(bs.hooks.ProgressBar())
scorers = [bs.scorers.Accuracy(out_name='Output.outputs.predictions')]
trainer.add_hook(
    bs.hooks.MonitorScores('valid_getter', scorers, name='validation'))
trainer.add_hook(
    bs.hooks.SaveBestNetwork('validation.Accuracy',
                             filename='C1_best_network.hdf5',
                             name='best weights',
                             criterion='max'))
Example #8
0
from __future__ import division, print_function, unicode_literals

import os

import h5py
import numpy as np
from sklearn.metrics import adjusted_mutual_info_score

import brainstorm as bs
from brainstorm import optional as opt
from brainstorm.tools import create_net_from_spec
from sacred import Experiment

# Prefer the GPU handler when PyCUDA is installed; otherwise fall back to
# the default (NumPy) handler.
if opt.has_pycuda:
    from brainstorm.handlers import PyCudaHandler
    HANDLER = PyCudaHandler()
else:
    from brainstorm.handlers import default_handler
    HANDLER = default_handler

# Sacred experiment object; its configuration is supplied via @ex.config.
ex = Experiment('binding_dae')


@ex.config
def cfg():
    dataset = {
        'name': 'corners',
        'salt_n_pepper': 0.5,
        'train_set': 'train_single'  # train_multi or train_single
    }
    training = {'learning_rate': 0.01, 'patience': 10, 'max_epochs': 500}