Example #1
def test_clone():
    from nolearn.lasagne import NeuralNet
    from nolearn.lasagne import BatchIterator
    from nolearn.lasagne import objective
    # Further imports this test relies on:
    from lasagne.layers import InputLayer, DenseLayer
    from lasagne.nonlinearities import softmax
    from lasagne.updates import nesterov_momentum
    from lasagne.objectives import categorical_crossentropy
    from sklearn.base import clone
    import theano.tensor as T

    params = dict(
        layers=[
            ('input', InputLayer),
            ('hidden', DenseLayer),
            ('output', DenseLayer),
            ],
        input_shape=(100, 784),
        output_num_units=10,
        output_nonlinearity=softmax,

        more_params={
            'hidden_num_units': 100,
            },
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,

        regression=False,
        objective=objective,
        objective_loss_function=categorical_crossentropy,
        batch_iterator_train=BatchIterator(batch_size=100),
        y_tensor_type=T.ivector,
        use_label_encoder=False,
        on_epoch_finished=None,
        on_training_finished=None,
        max_epochs=100,
        eval_size=0.1,  # BBB
        check_input=True,
        verbose=0,
        )
    nn = NeuralNet(**params)

    nn2 = clone(nn)
    params1 = nn.get_params()
    params2 = nn2.get_params()

    for ignore in (
        'batch_iterator_train',
        'batch_iterator_test',
        'output_nonlinearity',
        'loss',
        'objective',
        'train_split',
        'eval_size',
        'X_tensor_type',
        'on_epoch_finished',
        'on_batch_finished',
        'on_training_started',
        'on_training_finished',
        'custom_scores',
            ):
        for par in (params, params1, params2):
            par.pop(ignore, None)

    assert params == params1 == params2
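
These clone tests exercise the scikit-learn estimator contract: sklearn.base.clone must be able to rebuild an unfitted NeuralNet from its constructor parameters alone. A minimal sketch of what that enables, assuming clone-compatibility holds (the grid search below is illustrative, not part of the test):

from sklearn.model_selection import GridSearchCV

# Meta-estimators clone the net once per hyperparameter combination
# and cross-validation fold, so the clone() round-trip must be exact.
search = GridSearchCV(
    nn,                                                   # a NeuralNet as above
    param_grid={'update_learning_rate': [0.01, 0.001]},
    cv=3,
)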
Example #2
def test_clone():
    from nolearn.lasagne import NeuralNet
    from nolearn.lasagne import BatchIterator
    from nolearn.lasagne import objective
    # Further imports this test relies on:
    from lasagne.layers import InputLayer, DenseLayer
    from lasagne.nonlinearities import softmax
    from lasagne.updates import nesterov_momentum
    from lasagne.objectives import categorical_crossentropy
    from sklearn.base import clone
    import theano.tensor as T

    params = dict(
        layers=[("input", InputLayer), ("hidden", DenseLayer), ("output", DenseLayer)],
        input_shape=(100, 784),
        output_num_units=10,
        output_nonlinearity=softmax,
        more_params={"hidden_num_units": 100},
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        regression=False,
        objective=objective,
        objective_loss_function=categorical_crossentropy,
        batch_iterator_train=BatchIterator(batch_size=100),
        y_tensor_type=T.ivector,
        use_label_encoder=False,
        on_epoch_finished=None,
        on_training_finished=None,
        max_epochs=100,
        eval_size=0.1,  # BBB
        verbose=0,
    )
    nn = NeuralNet(**params)

    nn2 = clone(nn)
    params1 = nn.get_params()
    params2 = nn2.get_params()

    for ignore in (
        "batch_iterator_train",
        "batch_iterator_test",
        "output_nonlinearity",
        "loss",
        "objective",
        "train_split",
        "eval_size",
        "X_tensor_type",
        "on_epoch_finished",
        "on_batch_finished",
        "on_training_started",
        "on_training_finished",
        "custom_score",
    ):
        for par in (params, params1, params2):
            par.pop(ignore, None)

    assert params == params1 == params2
Example #3
def test_clone():
    from nolearn.lasagne import NeuralNet
    from nolearn.lasagne import negative_log_likelihood
    from nolearn.lasagne import BatchIterator
    # Further imports this test relies on:
    from lasagne.layers import InputLayer, DenseLayer
    from lasagne.nonlinearities import softmax
    from lasagne.updates import nesterov_momentum
    from sklearn.base import clone
    import theano.tensor as T

    params = dict(
        layers=[
            ('input', InputLayer),
            ('hidden', DenseLayer),
            ('output', DenseLayer),
            ],
        input_shape=(100, 784),
        output_num_units=10,
        output_nonlinearity=softmax,

        more_params={
            'hidden_num_units': 100,
            },
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,

        regression=False,
        loss=negative_log_likelihood,
        batch_iterator_train=BatchIterator(batch_size=100),
        X_tensor_type=T.matrix,
        y_tensor_type=T.ivector,
        use_label_encoder=False,
        on_epoch_finished=None,
        on_training_finished=None,
        max_epochs=100,
        eval_size=0.1,
        verbose=0,
        )
    nn = NeuralNet(**params)

    nn2 = clone(nn)
    params1 = nn.get_params()
    params2 = nn2.get_params()

    for ignore in (
        'batch_iterator_train',
        'batch_iterator_test',
        'output_nonlinearity',
        ):
        for par in (params, params1, params2):
            par.pop(ignore, None)

    assert params == params1 == params2
Example #4
def test_clone():
    from nolearn.lasagne import NeuralNet
    from nolearn.lasagne import negative_log_likelihood
    from nolearn.lasagne import BatchIterator
    # Further imports this test relies on:
    from lasagne.layers import InputLayer, DenseLayer
    from lasagne.nonlinearities import softmax
    from lasagne.updates import nesterov_momentum
    from sklearn.base import clone
    import theano.tensor as T

    params = dict(
        layers=[
            ('input', InputLayer),
            ('hidden', DenseLayer),
            ('output', DenseLayer),
        ],
        input_shape=(100, 784),
        output_num_units=10,
        output_nonlinearity=softmax,
        more_params={
            'hidden_num_units': 100,
        },
        update=nesterov_momentum,
        update_learning_rate=0.01,
        update_momentum=0.9,
        regression=False,
        loss=negative_log_likelihood,
        batch_iterator_train=BatchIterator(batch_size=100),
        X_tensor_type=T.matrix,
        y_tensor_type=T.ivector,
        use_label_encoder=False,
        on_epoch_finished=None,
        on_training_finished=None,
        max_epochs=100,
        eval_size=0.1,
        verbose=0,
    )
    nn = NeuralNet(**params)

    nn2 = clone(nn)
    params1 = nn.get_params()
    params2 = nn2.get_params()

    for ignore in (
            'batch_iterator_train',
            'batch_iterator_test',
            'output_nonlinearity',
    ):
        for par in (params, params1, params2):
            par.pop(ignore, None)

    assert params == params1 == params2
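
All four variants check the same invariant. Roughly, sklearn.base.clone performs a round-trip like the sketch below (simplified; the real implementation adds parameter validation):

def clone_sketch(estimator):
    # Read the constructor parameters back off the instance ...
    params = estimator.get_params(deep=False)
    # ... then build a fresh, unfitted instance of the same class.
    return type(estimator)(**params)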
Example #5
# Assumed imports for this snippet; formatData and EarlyStopping are
# project-level helpers that are not shown here.
import numpy as np
import theano
import theano.tensor
from nolearn.lasagne import NeuralNet
from lasagne.layers import (InputLayer, Conv2DLayer, DropoutLayer,
                            ReshapeLayer, GlobalPoolLayer, ConcatLayer,
                            DenseLayer)
from lasagne.nonlinearities import softmax


def float32(k):
    # Common helper for theano shared hyperparameters (cast to float32).
    return np.cast['float32'](k)


class CNN:
    def __init__(self, subject):
        # Placeholder net; the real one is built in fit() once the input
        # shape is known.
        self.convnet = NeuralNet(layers=[])
        self.subject = subject

    def make_cnn(self, X, y):
        NUM_FILTERS1 = 16
        NUM_FILTERS2 = 32

        # The first convolution spans the full height of the input; the
        # second spans the first layer's filter bank (after the reshape).
        FSIZE1 = (X.shape[2], 1)
        FSIZE2 = (NUM_FILTERS1, 2)

        # Candidate pooling statistics for the GlobalPoolLayers below.
        def geom_mean(x, axis=None):
            # Geometric mean along `axis`: exp(mean(log(x))), built from
            # theano ops so it can be used as a pool_function.
            x = theano.tensor.as_tensor_variable(x)
            return theano.tensor.exp(
                theano.tensor.mean(theano.tensor.log(x), axis=axis))

        def l2_norm(x, axis=None):
            # L2 norm along `axis`.
            x = theano.tensor.as_tensor_variable(x)
            return x.norm(2, axis=axis)

        def me(x, axis=None):
            # Plain mean along `axis`.
            x = theano.tensor.as_tensor_variable(x)
            return theano.tensor.mean(x, axis=axis)

        learning_rate = .00001
        convnet = NeuralNet(
            layers=[
                (InputLayer, {'shape': (None, 1, X.shape[2], X.shape[3])}),

                (Conv2DLayer, {'num_filters': NUM_FILTERS1,
                               'filter_size': FSIZE1}),
                (DropoutLayer, {'p': .75}),
                (ReshapeLayer, {'shape': ([0], [2], [1], [3])}),
                (Conv2DLayer, {'name': 'conv2',
                               'num_filters': NUM_FILTERS2,
                               'filter_size': FSIZE2}),

                # Concatenate several global pooling statistics over conv2.
                # (A third conv block with 256 filters and geom_mean/l2_norm
                # pools were tried as well, but are disabled here.)
                (GlobalPoolLayer, {'name': 'g1', 'incoming': 'conv2',
                                   'pool_function': me}),
                (GlobalPoolLayer, {'name': 'g2', 'incoming': 'conv2',
                                   'pool_function': theano.tensor.max}),
                (GlobalPoolLayer, {'name': 'g3', 'incoming': 'conv2',
                                   'pool_function': theano.tensor.min}),
                (GlobalPoolLayer, {'name': 'g4', 'incoming': 'conv2',
                                   'pool_function': theano.tensor.var}),
                (ConcatLayer, {'incomings': ['g1', 'g2', 'g3', 'g4']}),

                (DenseLayer, {'num_units': 256}),
                (DropoutLayer, {'p': .5}),
                (DenseLayer, {'num_units': 256}),
                (DenseLayer, {'num_units': 2, 'nonlinearity': softmax}),
            ],
            update_learning_rate=theano.shared(float32(learning_rate)),
            update_momentum=theano.shared(float32(0.9)),
            verbose=1,
            max_epochs=100000,
            on_epoch_finished=[
                EarlyStopping(patience=100),
            ],
        )
        return convnet

    def fit(self, X, y, xt, yt):
        X, y, xt, yt = formatData(X, y=y, Xt=xt, yt=yt)
        self.convnet = self.make_cnn(X, y)
        print("shape", X.shape)
        self.convnet.fit(X, y, xt, yt)

    def predict_proba(self, X):
        X, _, _, _ = formatData(X)
        return self.convnet.predict_proba(X)

    def predict(self, X):
        X, _, _, _ = formatData(X)
        return self.convnet.predict(X)

    def get_params(self, deep=True):
        return self.convnet.get_params()

    def load_params_from(self, net):
        return self.convnet.load_params_from(net)
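
A hypothetical usage of the class above; the subject label, array shapes, and dtypes are assumptions for illustration (formatData's actual conventions are not shown in the snippet):

import numpy as np

# Random stand-in data: (samples, channels, height, width) plus int labels.
X = np.random.rand(100, 1, 16, 64).astype('float32')
y = np.random.randint(0, 2, size=100).astype('int32')
Xt = np.random.rand(20, 1, 16, 64).astype('float32')
yt = np.random.randint(0, 2, size=20).astype('int32')

cnn = CNN('patient_1')
cnn.fit(X, y, Xt, yt)            # builds the net from X's shape, then trains
proba = cnn.predict_proba(Xt)    # softmax probabilities for the two classes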