Example #1
def BuildNetwork(self):
    # Reshape the flat action input to (batch, 1, 10, 12) so it can be
    # concatenated with the (batch, 10, 10, 12) state input along axis 1.
    ActVar = T.reshape(self.ActVar, (-1, 1, 10, 12))
    ActInputLayer = lasagne.layers.InputLayer(shape=(None, 1, 10, 12), input_var=ActVar, name='0')
    StateInputLayer = lasagne.layers.InputLayer(shape=(None, 10, 10, 12), input_var=self.StateVar, name='0')

    network = lasagne.layers.ConcatLayer([ActInputLayer, StateInputLayer], axis=1)

    # Batch-normalized dense stack tapering 2000 -> 1000 -> 500 -> 40 -> 5 -> 1;
    # BN is the batch-normalization helper imported elsewhere in the file, and
    # dropout was left commented out in the original.
    network = lasagne.layers.DenseLayer(network, num_units=2000, W=lasagne.init.GlorotUniform(gain='relu'), nonlinearity=lasagne.nonlinearities.leaky_rectify, name='1')
    network = BN.batch_norm(network, name='1')
    #network = lasagne.layers.DropoutLayer(network)

    network = lasagne.layers.DenseLayer(network, num_units=1000, W=lasagne.init.GlorotUniform(gain='relu'), nonlinearity=lasagne.nonlinearities.leaky_rectify, name='2')
    network = BN.batch_norm(network, name='2')
    #network = lasagne.layers.DropoutLayer(network)

    network = lasagne.layers.DenseLayer(network, num_units=500, W=lasagne.init.GlorotUniform(gain='relu'), nonlinearity=lasagne.nonlinearities.leaky_rectify, name='3')
    network = BN.batch_norm(network, name='3')
    #network = lasagne.layers.DropoutLayer(network)

    network = lasagne.layers.DenseLayer(network, num_units=40, W=lasagne.init.GlorotUniform(gain='relu'), nonlinearity=lasagne.nonlinearities.leaky_rectify, name='4')
    network = BN.batch_norm(network, name='4')
    #network = lasagne.layers.DropoutLayer(network)

    network = lasagne.layers.DenseLayer(network, num_units=5, W=lasagne.init.GlorotUniform(gain='relu'), nonlinearity=lasagne.nonlinearities.leaky_rectify, name='5')
    network = BN.batch_norm(network, name='5')
    #network = lasagne.layers.DropoutLayer(network)

    network = lasagne.layers.DenseLayer(network, num_units=1, W=lasagne.init.GlorotUniform(gain='relu'), nonlinearity=lasagne.nonlinearities.leaky_rectify, name='6')
    network = BN.batch_norm(network, name='6')

    return network
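
A minimal usage sketch of this builder (the instance name `agent` and the batch arrays are assumptions; only BuildNetwork, ActVar, and StateVar come from the snippet):

import theano
import lasagne

network = agent.BuildNetwork()   # `agent` is a hypothetical instance of the owning class
# deterministic=True makes the batch-norm layers use their running averages
prediction = lasagne.layers.get_output(network, deterministic=True)
predict_fn = theano.function([agent.ActVar, agent.StateVar], prediction)
values = predict_fn(action_batch, state_batch)   # hypothetical numpy batches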
Example #2
	def build(height, width, depth, classes):
		inputShape = (height, width, depth)
		chanDim = -1
		# if we are using "channel first", update the input shape
		if K.image_data_format() == "channels_first":
			inputShape = (depth, height, width)
			chanDim = 1

		# initialize the model
		model = Sequential()

		# CONV => RELU => POOL
		model.add(Conv2D(32, (3, 3), padding="same",
			input_shape=inputShape))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(MaxPooling2D(pool_size=(3, 3)))
		model.add(Dropout(0.25))

		# (CONV => RELU) * 2 => POOL
		model.add(Conv2D(64, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(Conv2D(64, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(0.25))

		# (CONV => RELU) * 2 => POOL
		model.add(Conv2D(128, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(Conv2D(128, (3, 3), padding="same"))
		model.add(Activation("relu"))
		model.add(BatchNormalization(axis=chanDim))
		model.add(MaxPooling2D(pool_size=(2, 2)))
		model.add(Dropout(0.25))

		# first (and only) set of FC => RELU layers
		model.add(Flatten())
		model.add(Dense(1024))
		model.add(Activation("relu"))
		model.add(BatchNormalization())
		model.add(Dropout(0.5))

		# softmax classifier
		model.add(Dense(classes))
		# model.add(Activation("softmax"))
		model.add(Softmax())

		# return the constructed network architecture
		return model
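
A minimal usage sketch for this builder (the class name MiniVGG, the input size, and the optimizer choice are assumptions, not from the original; only the first layer of a Sequential model needs input_shape, so the redundant copies on the later Conv2D calls above have been dropped):

from keras.optimizers import SGD

model = MiniVGG.build(height=64, width=64, depth=3, classes=10)
model.compile(loss="categorical_crossentropy", optimizer=SGD(lr=0.01),
              metrics=["accuracy"])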

Example #3
    def __init__(self,
                 input_shape,
                 filter_shape,
                 border_mode="valid",
                 BN=True):

        # input_shape : shape of the input / (minibatch size, input channel num, image height, image width)
        # filter_shape : shape of the filter / (num of new channels to make, input channel num, filter height, filter width)
        # BN : whether to apply batch normalization

        self.BN = BN
        self.input_shape = input_shape
        self.filter_shape = filter_shape
        self.border_mode = border_mode

        # initialize W (weight) randomly
        rng = np.random.RandomState(int(time.time()))
        w_bound = math.sqrt(filter_shape[1] * filter_shape[2] *
                            filter_shape[3])
        self.W = theano.shared(np.asarray(rng.uniform(low=-1.0 / w_bound,
                                                      high=1.0 / w_bound,
                                                      size=filter_shape),
                                          dtype=theano.config.floatX),
                               name='W',
                               borrow=True)
        # initialize b (bias) with zeros
        self.b = theano.shared(np.asarray(np.zeros(filter_shape[0], ),
                                          dtype=theano.config.floatX),
                               name='b',
                               borrow=True)

        if BN:
            # calculate appropriate input_shape
            new_shape = list(input_shape)
            new_shape[1] = filter_shape[0]
            if border_mode == "valid":
                new_shape[2] -= (filter_shape[2] - 1)
                new_shape[3] -= (filter_shape[3] - 1)
            elif border_mode == "full":
                new_shape[2] += (filter_shape[2] - 1)
                new_shape[3] += (filter_shape[3] - 1)
            new_shape = tuple(new_shape)
            self.BNlayer = BatchNormalization.BatchNormalization(new_shape,
                                                                 mode=1)

        # save the parameters of this layer for back-prop convenience;
        # when BN is applied, the bias is omitted (the BN layer's own
        # shift parameter makes b redundant)
        if BN:
            self.params = [self.W] + self.BNlayer.params
        else:
            self.params = [self.W, self.b]

        insize = input_shape[1] * input_shape[2] * input_shape[3]
        self.paramins = [insize, insize]
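
A hypothetical instantiation (ConvLayer is an assumed name for the class this __init__ belongs to), showing the shape bookkeeping done for the BN layer:

# minibatch of 128 three-channel 32x32 images, 64 filters of size 5x5
layer = ConvLayer(input_shape=(128, 3, 32, 32),
                  filter_shape=(64, 3, 5, 5),
                  border_mode="valid",
                  BN=True)
# "valid" convolution shrinks each spatial axis by (filter size - 1):
# 32 - (5 - 1) = 28, so the BN layer is built for shape (128, 64, 28, 28)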
Example #4
def BuildNetwork(self):
    network = lasagne.layers.InputLayer(shape=(None, 10, 10, 12), input_var=self.InputVar, name='0')

    # Batch-normalized dense stack tapering 1500 -> 1000 -> 500 -> 300 -> 120
    network = lasagne.layers.DenseLayer(network, num_units=1500, W=lasagne.init.GlorotUniform(gain='relu'), nonlinearity=lasagne.nonlinearities.leaky_rectify, name='1')
    network = BN.batch_norm(network, name='1')
    #network = lasagne.layers.DropoutLayer(network, 0.8)

    network = lasagne.layers.DenseLayer(network, num_units=1000, W=lasagne.init.GlorotUniform(gain='relu'), nonlinearity=lasagne.nonlinearities.leaky_rectify, name='2')
    network = BN.batch_norm(network, name='2')
    #network = lasagne.layers.DropoutLayer(network)

    network = lasagne.layers.DenseLayer(network, num_units=500, W=lasagne.init.GlorotUniform(gain='relu'), nonlinearity=lasagne.nonlinearities.leaky_rectify, name='3')
    network = BN.batch_norm(network, name='3')
    #network = lasagne.layers.DropoutLayer(network)

    network = lasagne.layers.DenseLayer(network, num_units=300, W=lasagne.init.GlorotUniform(gain='relu'), nonlinearity=lasagne.nonlinearities.leaky_rectify, name='4')
    network = BN.batch_norm(network, name='4')
    #network = lasagne.layers.DropoutLayer(network)

    # Sigmoid output of 120 units, reshaped to (batch, 10, 12); [0] keeps the batch dimension
    network = lasagne.layers.DenseLayer(network, num_units=120, W=lasagne.init.GlorotUniform(gain=1.0), nonlinearity=lasagne.nonlinearities.sigmoid, name='5')
    network = BN.batch_norm(network, name='5')

    network = lasagne.layers.ReshapeLayer(network, shape=([0], 10, 12), name='5')
    return network
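
Because every layer shape is declared up front, the builder's output shape can be verified with Lasagne's static shape inference (a sketch; `network` is the layer returned above):

import lasagne
print(lasagne.layers.get_output_shape(network))  # (None, 10, 12)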
Example #5
    def __init__(self, input_shape, hidden_num, output_num):
        # input_shape : shape of input / (mini-batch size, vector length)
        # hidden_num : number of hidden layer nodes
        # output_num : number of output layer nodes, in CIFAR-10 case : 10

        input_num = input_shape[1]

        # initialize W1, W2 (input->hidden, hidden->output) randomly
        rng = np.random.RandomState(int(time.time()))
        w1_bound = math.sqrt(input_num)
        w2_bound = math.sqrt(hidden_num)

        self.W1 = theano.shared(np.asarray(rng.uniform(low=-1.0 / w1_bound,
                                                       high=1.0 / w1_bound,
                                                       size=(input_num,
                                                             hidden_num)),
                                           dtype=theano.config.floatX),
                                name='W1',
                                borrow=True)
        self.W2 = theano.shared(np.asarray(rng.uniform(low=-1.0 / w2_bound,
                                                       high=1.0 / w2_bound,
                                                       size=(hidden_num,
                                                             output_num)),
                                           dtype=theano.config.floatX),
                                name='W2',
                                borrow=True)

        # initialize b1, b2 (input->hidden, hidden->output) with zeros
        self.b1 = theano.shared(np.asarray(np.zeros(hidden_num, ),
                                           dtype=theano.config.floatX),
                                name='b1',
                                borrow=True)
        self.b2 = theano.shared(np.asarray(np.zeros(output_num, ),
                                           dtype=theano.config.floatX),
                                name='b2',
                                borrow=True)

        # BNlayer definition
        self.BNlayer = BN.BatchNormalization(input_shape, mode=0)

        # save the parameters of this layer for back-prop convenience
        self.params = [self.W2, self.b2, self.W1, self.b1] + self.BNlayer.params
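
A hypothetical instantiation for CIFAR-10 (HiddenLayer is an assumed class name; 32x32 RGB images flatten to 3 * 32 * 32 = 3072-dimensional vectors):

layer = HiddenLayer(input_shape=(128, 3072),  # minibatch of 128 flattened images
                    hidden_num=500,
                    output_num=10)            # the 10 CIFAR-10 classes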
Example #6
X_train /= 255
X_test /= 255
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)

model = Sequential()

model.add(Convolution2D(nb_filters, nb_conv, nb_conv,
                        border_mode='valid', input_shape=(1, img_rows, img_cols)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Convolution2D(nb_filters, nb_conv, nb_conv))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))
model.add(Dropout(0.25))

model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
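
Note that this snippet uses the Keras 1 API (Convolution2D, border_mode, the nb_* names, and a channels-first input shape). Under Keras 2 the first convolution would read roughly as follows (a sketch, not part of the original):

from keras.layers import Conv2D

model.add(Conv2D(nb_filters, (nb_conv, nb_conv), padding='valid',
                 input_shape=(1, img_rows, img_cols)))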

model.compile(loss='categorical_crossentropy',