Esempio n. 1
0
    def __init__(self, outputShape, testData, modelSaver):
        """Build a two-hidden-layer dense softmax classifier and register
        this model with the given model saver."""
        self.set_network_specific_settings()
        modelSaver.model = self

        # Learning rate and momentum live in theano shared variables so the
        # AdjustVariable callbacks can anneal them between epochs.
        learning_rate = theano.shared(
            utils.to_float32(Settings.NN_START_LEARNING_RATE))
        momentum = theano.shared(
            utils.to_float32(Settings.NN_START_MOMENTUM))

        # Per-epoch callbacks: hyper-parameter annealing, history logging,
        # early stopping, and model checkpointing.
        epoch_callbacks = [
            AdjustVariable('update_learning_rate',
                           start=Settings.NN_START_LEARNING_RATE,
                           stop=0.0001),
            AdjustVariable('update_momentum',
                           start=Settings.NN_START_MOMENTUM,
                           stop=0.999),
            TrainingHistory("?", str(self), [], modelSaver),
            EarlyStopping(150),
            modelSaver,
        ]

        self.net = NeuralNet(
            # Architecture: input -> two dense layers -> softmax output.
            layers=[
                ('input', layers.InputLayer),
                ('hidden1', layers.DenseLayer),
                ('hidden2', layers.DenseLayer),
                ('output', layers.DenseLayer),
            ],

            # Variable batch size, single row shape.
            input_shape=(None, Settings.NN_CHANNELS,
                         Settings.NN_INPUT_SHAPE[0],
                         Settings.NN_INPUT_SHAPE[1]),
            hidden1_num_units=500,
            hidden2_num_units=50,
            output_num_units=outputShape,
            output_nonlinearity=lasagne.nonlinearities.softmax,

            # Optimization: Nesterov momentum with annealed hyper-parameters.
            update=nesterov_momentum,
            update_learning_rate=learning_rate,
            update_momentum=momentum,

            # Lazy iterators stream batches; training batches are augmented.
            batch_iterator_train=AugmentingLazyBatchIterator(
                Settings.NN_BATCH_SIZE, testData, "train", True,
                loadingSize=(50, 50)),
            batch_iterator_test=LazyBatchIterator(
                Settings.NN_BATCH_SIZE, testData, "valid", False,
                newSegmentation=False),
            train_split=TrainSplit(eval_size=0.0),  # we cross validate on our own

            regression=False,  # classification problem
            on_epoch_finished=epoch_callbacks,
            max_epochs=Settings.NN_EPOCHS,
            verbose=1,
        )
Esempio n. 2
0
	def __init__(self, outputShape, modelSaver):
		"""Assemble a three-stage conv/pool network topped by two dense
		layers and a softmax classifier; register this model with modelSaver."""
		self.set_network_specific_settings()
		modelSaver.model = self

		# Shared variables so the AdjustVariable callbacks can anneal the
		# optimizer hyper-parameters after each epoch.
		learning_rate = theano.shared(utils.to_float32(Settings.NN_START_LEARNING_RATE))
		momentum = theano.shared(utils.to_float32(Settings.NN_START_MOMENTUM))

		self.net = NeuralNet(
			layers=[
				('input', layers.InputLayer),
				('conv1', layers.Conv2DLayer),
				('pool1', layers.MaxPool2DLayer),
				('conv2', layers.Conv2DLayer),
				('pool2', layers.MaxPool2DLayer),
				('conv3', layers.Conv2DLayer),
				('pool3', layers.MaxPool2DLayer),
				('hidden4', layers.DenseLayer),
				('hidden5', layers.DenseLayer),
				('output', layers.DenseLayer),
			],

			# Variable batch size; channels and spatial extent from settings.
			input_shape=(None, Settings.NN_CHANNELS,
			             Settings.NN_INPUT_SHAPE[0], Settings.NN_INPUT_SHAPE[1]),

			# Convolution/pooling stack.
			conv1_num_filters=32,
			conv1_filter_size=(3, 3),
			pool1_pool_size=(2, 2),
			conv2_num_filters=64,
			conv2_filter_size=(2, 2),
			pool2_pool_size=(2, 2),
			conv3_num_filters=128,
			conv3_filter_size=(2, 2),
			pool3_pool_size=(2, 2),

			# Fully-connected head and softmax classifier.
			hidden4_num_units=500,
			hidden5_num_units=500,
			output_num_units=outputShape,
			output_nonlinearity=lasagne.nonlinearities.softmax,

			# Optimization: Nesterov momentum with annealed hyper-parameters.
			update=nesterov_momentum,
			update_learning_rate=learning_rate,
			update_momentum=momentum,

			regression=False,  # classification problem
			on_epoch_finished=[
				AdjustVariable('update_learning_rate', start=Settings.NN_START_LEARNING_RATE, stop=0.0001),
				AdjustVariable('update_momentum', start=Settings.NN_START_MOMENTUM, stop=0.999),
				TrainingHistory("?", str(self), [1], modelSaver),
				EarlyStopping(30),
				modelSaver,
			],
			max_epochs=Settings.NN_EPOCHS,
			verbose=1,
		)
Esempio n. 3
0
	def __init__(self, outputShape, testData, modelSaver):
		"""Assemble a Krizhevsky-style five-conv-layer CNN with two
		dropout-regularized dense layers and a softmax output; register
		this model with modelSaver."""
		self.set_network_specific_settings()
		modelSaver.model = self

		# Shared variables so the AdjustVariable callbacks can anneal the
		# optimizer hyper-parameters after each epoch.
		learning_rate = theano.shared(utils.to_float32(Settings.NN_START_LEARNING_RATE))
		momentum = theano.shared(utils.to_float32(Settings.NN_START_MOMENTUM))

		self.net = NeuralNet(
			layers=[
				('input', layers.InputLayer),
				('conv1', layers.Conv2DLayer),
				('pool1', layers.MaxPool2DLayer),
				('conv2', layers.Conv2DLayer),
				('pool2', layers.MaxPool2DLayer),
				('conv3', layers.Conv2DLayer),
				('conv4', layers.Conv2DLayer),
				('conv5', layers.Conv2DLayer),
				('pool3', layers.MaxPool2DLayer),
				('hidden6', layers.DenseLayer),
				('dropout1', layers.DropoutLayer),
				('hidden7', layers.DenseLayer),
				('dropout2', layers.DropoutLayer),
				('output', layers.DenseLayer),
			],

			# Variable batch size; channels and spatial extent from settings.
			input_shape=(None, Settings.NN_CHANNELS,
			             Settings.NN_INPUT_SHAPE[0], Settings.NN_INPUT_SHAPE[1]),

			# Convolution/pooling stack; conv3-5 are padded 3x3 layers
			# stacked back-to-back before the final pool.
			conv1_num_filters=96,
			conv1_filter_size=(11, 11),
			conv1_stride=(4, 4),
			pool1_pool_size=(5, 5),
			conv2_num_filters=256,
			conv2_filter_size=(5, 5),
			pool2_pool_size=(3, 3),
			conv3_num_filters=384,
			conv3_filter_size=(3, 3),
			conv3_pad=(1, 1),
			conv4_num_filters=384,
			conv4_filter_size=(3, 3),
			conv4_pad=(1, 1),
			conv5_num_filters=256,
			conv5_filter_size=(3, 3),
			conv5_pad=(1, 1),
			pool3_pool_size=(2, 2),

			# Fully-connected head with dropout, then softmax classifier.
			hidden6_num_units=4096,
			dropout1_p=0.5,
			hidden7_num_units=4096,
			dropout2_p=0.5,
			output_num_units=outputShape,
			output_nonlinearity=lasagne.nonlinearities.softmax,

			# Optimization: Nesterov momentum with annealed hyper-parameters.
			update=nesterov_momentum,
			update_learning_rate=learning_rate,
			update_momentum=momentum,

			# Lazy iterators stream batches; training batches are augmented.
			batch_iterator_train=AugmentingLazyBatchIterator(
				Settings.NN_BATCH_SIZE, testData, "train", False,
				newSegmentation=False, loadingSize=(256, 256)),
			batch_iterator_test=LazyBatchIterator(
				Settings.NN_BATCH_SIZE, testData, "valid", False,
				newSegmentation=False, loadingInputShape=Settings.NN_INPUT_SHAPE),
			train_split=TrainSplit(eval_size=0.0),  # we cross validate on our own

			regression=False,  # classification problem
			on_epoch_finished=[
				AdjustVariable('update_learning_rate', start=Settings.NN_START_LEARNING_RATE, stop=0.0001),
				AdjustVariable('update_momentum', start=Settings.NN_START_MOMENTUM, stop=0.999),
				TrainingHistory("Krizhevsky", str(self), [], modelSaver),
				EarlyStopping(150),
				modelSaver,
			],
			max_epochs=Settings.NN_EPOCHS,
			verbose=1,
		)
Esempio n. 4
0
    def __init__(self, outputShape, testData, modelSaver):
        """Build a two-conv-block CNN with dropout regularization and a
        softmax output, and register this model with the given model saver.

        NOTE(review): testData is currently unused — the custom lazy batch
        iterators that would consume it are commented out below.
        """
        super(net_cifar_cnn_do, self).__init__(outputShape)
        modelSaver.model = self

        # Shared variables so the AdjustVariable callbacks can anneal the
        # optimizer hyper-parameters after each epoch.
        learning_rate = theano.shared(
            utils.to_float32(Settings.NN_START_LEARNING_RATE))
        momentum = theano.shared(
            utils.to_float32(Settings.NN_START_MOMENTUM))

        self.net = NeuralNet(
            layers=[
                ('input', layers.InputLayer),
                ('conv2d1', layers.Conv2DLayer),
                ('maxpool1', layers.MaxPool2DLayer),
                ('conv2d2', layers.Conv2DLayer),
                ('maxpool2', layers.MaxPool2DLayer),
                ('dropout1', layers.DropoutLayer),
                ('dense', layers.DenseLayer),
                ('dropout2', layers.DropoutLayer),
                ('output', layers.DenseLayer),
            ],

            # Variable batch size; channels and spatial extent from settings.
            input_shape=(None, Settings.NN_CHANNELS,
                         Settings.NN_INPUT_SHAPE[0],
                         Settings.NN_INPUT_SHAPE[1]),

            # First conv block: 20 padded 5x5 ReLU filters + 2x2 max-pool.
            conv2d1_num_filters=20,
            conv2d1_filter_size=(5, 5),
            conv2d1_stride=(1, 1),
            conv2d1_pad=(2, 2),
            conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
            maxpool1_pool_size=(2, 2),

            # Second conv block: same shape as the first.
            conv2d2_num_filters=20,
            conv2d2_filter_size=(5, 5),
            conv2d2_stride=(1, 1),
            conv2d2_pad=(2, 2),
            conv2d2_nonlinearity=lasagne.nonlinearities.rectify,
            maxpool2_pool_size=(2, 2),

            # Dropout-regularized dense head and softmax classifier.
            dropout1_p=0.5,
            dense_num_units=1000,
            dense_nonlinearity=lasagne.nonlinearities.rectify,
            dropout2_p=0.5,
            output_num_units=outputShape,
            output_nonlinearity=lasagne.nonlinearities.softmax,

            # Optimization: Nesterov momentum with annealed hyper-parameters.
            update=nesterov_momentum,
            update_learning_rate=learning_rate,
            update_momentum=momentum,

            # Disabled custom data pipeline (kept for reference):
            #batch_iterator_train=AugmentingLazyBatchIterator(Settings.NN_BATCH_SIZE, testData, "train", False, newSegmentation=False, loadingSize=(120,120)),
            #batch_iterator_test=LazyBatchIterator(Settings.NN_BATCH_SIZE, testData, "valid", False, newSegmentation=False, loadingInputShape=Settings.NN_INPUT_SHAPE),
            #train_split=TrainSplit(eval_size=0.0), # we cross validate on our own

            regression=False,  # classification problem
            on_epoch_finished=[
                AdjustVariable('update_learning_rate',
                               start=Settings.NN_START_LEARNING_RATE,
                               stop=0.0001),
                AdjustVariable('update_momentum',
                               start=Settings.NN_START_MOMENTUM,
                               stop=0.999),
                TrainingHistory("Cifar10-do1", str(self), [1], modelSaver),
                EarlyStopping(300),
                modelSaver,
            ],
            max_epochs=Settings.NN_EPOCHS,
            verbose=1,
        )