Example #1
0
    def __call__(self, nn, train_history):
        """Anneal the shared variable named ``self.name`` on ``nn`` after each epoch.

        Schedule (epoch counts are 1-based):
          * epochs 1-50:  linear descent from ``self.start`` down to ``stop``
          * epochs 51-100: hold at ``stop``
          * epochs 101+:  linear descent from ``stop`` towards ``stop2``

        Args:
            nn: the net being trained; must expose ``max_epochs`` and, under
                the attribute named ``self.name``, a shared variable with a
                ``set_value`` method.
            train_history: list of per-epoch info dicts; only the last
                entry's ``'epoch'`` key is read.
        """
        epoch = train_history[-1]['epoch']

        # NOTE(review): 10e-2 == 0.1 and 10e-4 == 1e-3. If 1e-2 / 1e-4 were
        # intended, these targets are 10x larger than planned -- confirm.
        stop = self.start * 10e-2 * 2    # plateau value reached at epoch 50
        stop2 = stop * 10e-4 * 2         # final target for the second ramp

        # Two linear ramps: epochs 1-50, then epochs 101..max_epochs.
        ls = np.linspace(self.start, stop, 50)
        ls2 = np.linspace(stop, stop2, nn.max_epochs - 100)

        if epoch <= 50:
            new_value = utils.to_float32(ls[epoch - 1])
        elif epoch <= 100:
            new_value = utils.to_float32(ls[-1])  # hold at the plateau
        else:
            new_value = utils.to_float32(ls2[epoch - 1 - 100])

        getattr(nn, self.name).set_value(new_value)
Example #2
0
    def __init__(self, outputShape, testData, modelSaver):
        """Build a two-hidden-layer dense softmax classifier and register
        it with the supplied model saver."""
        self.set_network_specific_settings()
        modelSaver.model = self

        # Network topology, input -> output.
        architecture = [
            ('input', layers.InputLayer),
            ('hidden1', layers.DenseLayer),
            ('hidden2', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ]

        # Per-epoch callbacks: hyperparameter annealing, history logging,
        # early stopping and checkpointing.
        epoch_callbacks = [
            AdjustVariable('update_learning_rate',
                           start=Settings.NN_START_LEARNING_RATE,
                           stop=0.0001),
            AdjustVariable('update_momentum',
                           start=Settings.NN_START_MOMENTUM,
                           stop=0.999),
            TrainingHistory("?", str(self), [], modelSaver),
            EarlyStopping(150),
            modelSaver,
        ]

        self.net = NeuralNet(
            layers=architecture,
            # variable batch size, single row shape
            input_shape=(None, Settings.NN_CHANNELS,
                         Settings.NN_INPUT_SHAPE[0],
                         Settings.NN_INPUT_SHAPE[1]),
            hidden1_num_units=500,
            hidden2_num_units=50,
            output_num_units=outputShape,
            output_nonlinearity=lasagne.nonlinearities.softmax,
            # Optimization: Nesterov momentum; learning rate and momentum
            # live in shared variables so the callbacks can mutate them.
            update=nesterov_momentum,
            update_learning_rate=theano.shared(
                utils.to_float32(Settings.NN_START_LEARNING_RATE)),
            update_momentum=theano.shared(
                utils.to_float32(Settings.NN_START_MOMENTUM)),
            batch_iterator_train=AugmentingLazyBatchIterator(
                Settings.NN_BATCH_SIZE, testData, "train", True,
                loadingSize=(50, 50)),
            batch_iterator_test=LazyBatchIterator(
                Settings.NN_BATCH_SIZE, testData, "valid", False,
                newSegmentation=False),
            train_split=TrainSplit(eval_size=0.0),  # we cross validate on our own
            regression=False,  # classification problem
            on_epoch_finished=epoch_callbacks,
            max_epochs=Settings.NN_EPOCHS,
            verbose=1,
        )
Example #3
0
	def __init__(self, outputShape, modelSaver):
		"""Build a three-stage conv/pool network with two dense layers and
		a softmax output; register it with the supplied model saver."""
		self.set_network_specific_settings()
		modelSaver.model = self

		# Network topology, input -> output.
		topology = [
			('input', layers.InputLayer),
			('conv1', layers.Conv2DLayer),
			('pool1', layers.MaxPool2DLayer),
			('conv2', layers.Conv2DLayer),
			('pool2', layers.MaxPool2DLayer),
			('conv3', layers.Conv2DLayer),
			('pool3', layers.MaxPool2DLayer),
			('hidden4', layers.DenseLayer),
			('hidden5', layers.DenseLayer),
			('output', layers.DenseLayer),
		]

		self.net = NeuralNet(
			layers=topology,
			# variable batch size, 3 color shape row shape
			input_shape=(None, Settings.NN_CHANNELS,
				Settings.NN_INPUT_SHAPE[0], Settings.NN_INPUT_SHAPE[1]),
			# Conv/pool stages: filter counts grow as resolution shrinks.
			conv1_num_filters=32,
			conv1_filter_size=(3, 3),
			pool1_pool_size=(2, 2),
			conv2_num_filters=64,
			conv2_filter_size=(2, 2),
			pool2_pool_size=(2, 2),
			conv3_num_filters=128,
			conv3_filter_size=(2, 2),
			pool3_pool_size=(2, 2),
			# Fully connected head.
			hidden4_num_units=500,
			hidden5_num_units=500,
			output_num_units=outputShape,
			output_nonlinearity=lasagne.nonlinearities.softmax,
			# Optimization: Nesterov momentum with mutable shared hyperparameters.
			update=nesterov_momentum,
			update_learning_rate=theano.shared(utils.to_float32(Settings.NN_START_LEARNING_RATE)),
			update_momentum=theano.shared(utils.to_float32(Settings.NN_START_MOMENTUM)),
			regression=False,  # classification problem
			on_epoch_finished=[
				AdjustVariable('update_learning_rate', start=Settings.NN_START_LEARNING_RATE, stop=0.0001),
				AdjustVariable('update_momentum', start=Settings.NN_START_MOMENTUM, stop=0.999),
				TrainingHistory("?", str(self), [1], modelSaver),
				EarlyStopping(30),
				modelSaver,
			],
			max_epochs=Settings.NN_EPOCHS,
			verbose=1,
		)
Example #4
0
    def __init__(self, outputShape):
        """Build a three-stage conv/pool network with two dense layers and a
        softmax output; training batches are augmented on the fly."""
        self.set_network_specific_settings()

        # Network topology, input -> output.
        topology = [
            ('input', layers.InputLayer),
            ('conv1', layers.Conv2DLayer),
            ('pool1', layers.MaxPool2DLayer),
            ('conv2', layers.Conv2DLayer),
            ('pool2', layers.MaxPool2DLayer),
            ('conv3', layers.Conv2DLayer),
            ('pool3', layers.MaxPool2DLayer),
            ('hidden4', layers.DenseLayer),
            ('hidden5', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ]

        self.net = NeuralNet(
            layers=topology,
            # variable batch size, 3 color shape row shape
            input_shape=(None, Settings.NN_CHANNELS,
                         Settings.NN_INPUT_SHAPE[0],
                         Settings.NN_INPUT_SHAPE[1]),
            # Conv/pool stages (one line per stage; filters grow as resolution shrinks).
            conv1_num_filters=32, conv1_filter_size=(3, 3), pool1_pool_size=(2, 2),
            conv2_num_filters=64, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),
            conv3_num_filters=128, conv3_filter_size=(2, 2), pool3_pool_size=(2, 2),
            # Fully connected head.
            hidden4_num_units=500,
            hidden5_num_units=500,
            output_num_units=outputShape,
            output_nonlinearity=lasagne.nonlinearities.softmax,
            # Optimization: Nesterov momentum with mutable shared hyperparameters.
            update=nesterov_momentum,
            update_learning_rate=theano.shared(
                utils.to_float32(Settings.NN_START_LEARNING_RATE)),
            update_momentum=theano.shared(
                utils.to_float32(Settings.NN_START_MOMENTUM)),
            regression=False,  # classification problem
            batch_iterator_train=AugmentingBatchIterator(
                batch_size=Settings.NN_BATCH_SIZE),
            max_epochs=Settings.NN_EPOCHS,
            verbose=1,
        )
Example #5
0
    def __init__(self, outputShape):
        """Build a LeNet-style CNN: two padded 5x5 conv + maxpool stages,
        a 1000-unit dense layer, and a softmax output."""
        self.set_network_specific_settings()

        # Network topology, input -> output.
        topology = [
            ('input', layers.InputLayer),
            ('conv2d1', layers.Conv2DLayer),
            ('maxpool1', layers.MaxPool2DLayer),
            ('conv2d2', layers.Conv2DLayer),
            ('maxpool2', layers.MaxPool2DLayer),
            ('dense', layers.DenseLayer),
            ('output', layers.DenseLayer),
        ]

        self.net = NeuralNet(
            layers=topology,
            # variable batch size, 3 color shape row shape
            input_shape=(None, Settings.NN_CHANNELS,
                         Settings.NN_INPUT_SHAPE[0],
                         Settings.NN_INPUT_SHAPE[1]),
            # Stage 1: 20 padded 5x5 filters, stride 1, ReLU, then 2x2 pooling.
            conv2d1_num_filters=20,
            conv2d1_filter_size=(5, 5),
            conv2d1_stride=(1, 1),
            conv2d1_pad=(2, 2),
            conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
            maxpool1_pool_size=(2, 2),
            # Stage 2: same configuration as stage 1.
            conv2d2_num_filters=20,
            conv2d2_filter_size=(5, 5),
            conv2d2_stride=(1, 1),
            conv2d2_pad=(2, 2),
            conv2d2_nonlinearity=lasagne.nonlinearities.rectify,
            maxpool2_pool_size=(2, 2),
            # Fully connected head.
            dense_num_units=1000,
            dense_nonlinearity=lasagne.nonlinearities.rectify,
            output_num_units=outputShape,
            output_nonlinearity=lasagne.nonlinearities.softmax,
            # Optimization: Nesterov momentum with mutable shared hyperparameters.
            update=nesterov_momentum,
            update_learning_rate=theano.shared(
                utils.to_float32(Settings.NN_START_LEARNING_RATE)),
            update_momentum=theano.shared(
                utils.to_float32(Settings.NN_START_MOMENTUM)),
            regression=False,  # classification problem
            max_epochs=Settings.NN_EPOCHS,
            verbose=1,
        )
Example #6
0
	def __init__(self, outputShape, testData, modelSaver):
		"""Build a Krizhevsky (AlexNet-style) network: five conv layers with
		three pooling stages and two 4096-unit dropout-regularised dense
		layers; register it with the supplied model saver."""
		self.set_network_specific_settings()
		modelSaver.model = self

		# Network topology, input -> output.
		topology = [
			('input', layers.InputLayer),
			('conv1', layers.Conv2DLayer),
			('pool1', layers.MaxPool2DLayer),
			('conv2', layers.Conv2DLayer),
			('pool2', layers.MaxPool2DLayer),
			('conv3', layers.Conv2DLayer),
			('conv4', layers.Conv2DLayer),
			('conv5', layers.Conv2DLayer),
			('pool3', layers.MaxPool2DLayer),
			('hidden6', layers.DenseLayer),
			('dropout1', layers.DropoutLayer),
			('hidden7', layers.DenseLayer),
			('dropout2', layers.DropoutLayer),
			('output', layers.DenseLayer),
		]

		# Per-epoch callbacks: hyperparameter annealing, history logging,
		# early stopping and checkpointing.
		epoch_callbacks = [
			AdjustVariable('update_learning_rate', start=Settings.NN_START_LEARNING_RATE, stop=0.0001),
			AdjustVariable('update_momentum', start=Settings.NN_START_MOMENTUM, stop=0.999),
			TrainingHistory("Krizhevsky", str(self), [], modelSaver),
			EarlyStopping(150),
			modelSaver,
		]

		self.net = NeuralNet(
			layers=topology,
			# variable batch size, 3 color shape row shape
			input_shape=(None, Settings.NN_CHANNELS, Settings.NN_INPUT_SHAPE[0], Settings.NN_INPUT_SHAPE[1]),
			# Conv/pool stages.
			conv1_num_filters=96, conv1_filter_size=(11, 11), conv1_stride=(4, 4),
			pool1_pool_size=(5, 5),
			conv2_num_filters=256, conv2_filter_size=(5, 5),
			pool2_pool_size=(3, 3),
			conv3_num_filters=384, conv3_filter_size=(3, 3), conv3_pad=(1, 1),
			conv4_num_filters=384, conv4_filter_size=(3, 3), conv4_pad=(1, 1),
			conv5_num_filters=256, conv5_filter_size=(3, 3), conv5_pad=(1, 1),
			pool3_pool_size=(2, 2),
			# Fully connected head with dropout after each dense layer.
			hidden6_num_units=4096,
			dropout1_p=0.5,
			hidden7_num_units=4096,
			dropout2_p=0.5,
			output_num_units=outputShape,
			output_nonlinearity=lasagne.nonlinearities.softmax,
			# Optimization: Nesterov momentum with mutable shared hyperparameters.
			update=nesterov_momentum,
			update_learning_rate=theano.shared(utils.to_float32(Settings.NN_START_LEARNING_RATE)),
			update_momentum=theano.shared(utils.to_float32(Settings.NN_START_MOMENTUM)),
			batch_iterator_train=AugmentingLazyBatchIterator(Settings.NN_BATCH_SIZE, testData, "train", False, newSegmentation=False, loadingSize=(256, 256)),
			batch_iterator_test=LazyBatchIterator(Settings.NN_BATCH_SIZE, testData, "valid", False, newSegmentation=False, loadingInputShape=Settings.NN_INPUT_SHAPE),
			train_split=TrainSplit(eval_size=0.0),  # we cross validate on our own
			regression=False,  # classification problem
			on_epoch_finished=epoch_callbacks,
			max_epochs=Settings.NN_EPOCHS,
			verbose=1,
		)
Example #7
0
    def __init__(self, outputShape, testData, modelSaver):
        """Build a dropout-regularised CNN (two padded 5x5 conv/pool stages,
        one 1000-unit dense layer) and register it with the model saver.

        Args:
            outputShape: number of output classes (softmax units).
            testData: currently unused -- the lazy batch iterators that
                consumed it are disabled; kept for interface compatibility.
            modelSaver: checkpoint callback; also receives a back-reference
                to this model via ``modelSaver.model``.
        """
        super(net_cifar_cnn_do, self).__init__(outputShape)
        modelSaver.model = self
        self.net = NeuralNet(
            layers=[
                ('input', layers.InputLayer),
                ('conv2d1', layers.Conv2DLayer),
                ('maxpool1', layers.MaxPool2DLayer),
                ('conv2d2', layers.Conv2DLayer),
                ('maxpool2', layers.MaxPool2DLayer),
                ('dropout1', layers.DropoutLayer),
                ('dense', layers.DenseLayer),
                ('dropout2', layers.DropoutLayer),
                ('output', layers.DenseLayer),
            ],
            input_shape=(None, Settings.NN_CHANNELS,
                         Settings.NN_INPUT_SHAPE[0], Settings.NN_INPUT_SHAPE[1]
                         ),  # variable batch size, 3 color shape row shape
            # Stage 1: 20 padded 5x5 filters, stride 1, ReLU, 2x2 pooling.
            conv2d1_num_filters=20,
            conv2d1_filter_size=(5, 5),
            conv2d1_stride=(1, 1),
            conv2d1_pad=(2, 2),
            conv2d1_nonlinearity=lasagne.nonlinearities.rectify,
            maxpool1_pool_size=(2, 2),
            # Stage 2: same configuration as stage 1.
            conv2d2_num_filters=20,
            conv2d2_filter_size=(5, 5),
            conv2d2_stride=(1, 1),
            conv2d2_pad=(2, 2),
            conv2d2_nonlinearity=lasagne.nonlinearities.rectify,
            maxpool2_pool_size=(2, 2),
            # Dropout before and after the dense layer.
            dropout1_p=0.5,
            dense_num_units=1000,
            dense_nonlinearity=lasagne.nonlinearities.rectify,
            dropout2_p=0.5,
            output_num_units=outputShape,
            output_nonlinearity=lasagne.nonlinearities.softmax,

            # Optimization: Nesterov momentum; learning rate and momentum
            # live in shared variables so the callbacks below can anneal them.
            update=nesterov_momentum,
            update_learning_rate=theano.shared(
                utils.to_float32(Settings.NN_START_LEARNING_RATE)),
            update_momentum=theano.shared(
                utils.to_float32(Settings.NN_START_MOMENTUM)),
            regression=False,  # classification problem
            on_epoch_finished=[
                AdjustVariable('update_learning_rate',
                               start=Settings.NN_START_LEARNING_RATE,
                               stop=0.0001),
                AdjustVariable('update_momentum',
                               start=Settings.NN_START_MOMENTUM,
                               stop=0.999),
                TrainingHistory("Cifar10-do1", str(self), [1], modelSaver),
                EarlyStopping(300),
                modelSaver,
            ],
            max_epochs=Settings.NN_EPOCHS,
            verbose=1,
        )