# Example 1
# CNN classifier for 28x28 single-channel images (MNIST-like input shape).
# Architecture: two conv/max-pool stages, then two 2000-unit dense layers,
# dropout after pool2 and after each dense layer, softmax over 10 classes.
# The learning rate and momentum are theano shared variables so the
# AdjustVariable callbacks can anneal them in place after every epoch.
net1 = nn(
    layers=[
        ('input', layers.InputLayer),
        ('conv1', layers.Conv2DLayer),
        ('pool1', layers.MaxPool2DLayer),
        ('conv2', layers.Conv2DLayer),
        ('pool2', layers.MaxPool2DLayer),
        ('dropout2', layers.DropoutLayer),
        ('hidden3', layers.DenseLayer),
        ('dropout3', layers.DropoutLayer),
        ('hidden4', layers.DenseLayer),
        ('dropout4', layers.DropoutLayer),
        ('output', layers.DenseLayer),
    ],

    # (batch, channels, rows, cols); batch size left variable.
    input_shape=(None, 1, 28, 28),

    # First conv stage: 128 filters of 5x5, 2x2 max-pooling.
    conv1_num_filters=128, conv1_filter_size=(5, 5), pool1_pool_size=(2, 2),
    conv1_W=lasagne.init.GlorotUniform(),
    conv1_nonlinearity=leaky_rectify,

    # Second conv stage: 64 filters of 3x3, 2x2 max-pooling, 40% dropout.
    conv2_num_filters=64, conv2_filter_size=(3, 3), pool2_pool_size=(2, 2),
    conv2_W=lasagne.init.GlorotUniform(),
    conv2_nonlinearity=leaky_rectify,
    dropout2_p=0.4,

    # Fully-connected head: two 2000-unit layers, 50% dropout each.
    hidden3_num_units=2000, hidden4_num_units=2000,
    hidden3_nonlinearity=leaky_rectify,
    hidden4_nonlinearity=leaky_rectify,
    dropout3_p=0.5,
    dropout4_p=0.5,

    batch_iterator_train=BatchIterator(batch_size=256),
    output_num_units=10,           # 10-way classification
    output_nonlinearity=softmax,
    regression=False,              # classification mode
    use_label_encoder=True,
    max_epochs=30,

    update=nesterov_momentum,
    # Shared so the on_epoch_finished schedules below can mutate them.
    update_learning_rate=theano.shared(float32(0.1)),
    update_momentum=theano.shared(float32(0.9)),

    on_epoch_finished=[
        # Anneal from start to stop over training; start matches the
        # shared variables' initial values above.
        AdjustVariable('update_learning_rate', start=0.1, stop=0.001),
        AdjustVariable('update_momentum', start=0.9, stop=0.9999),
        EarlyStopping(patience=6),
    ],

    verbose=1,
)
# Example 2
# CNN regressor for 96x96 single-channel images producing 30 real-valued
# outputs (facial-keypoint-style task: regression=True, linear output).
# Three conv/pool/dropout stages with increasing filter counts and
# increasing dropout, then two dense layers. Training batches are
# augmented by FlipBatchIterator (presumably horizontal flips — defined
# elsewhere in the project).
net9 = nn(
    layers=[
        ('input', layers.InputLayer),
        ('conv1', layers.Conv2DLayer),
        ('pool1', layers.MaxPool2DLayer),
        ('dropout1', layers.DropoutLayer),
        ('conv2', layers.Conv2DLayer),
        ('pool2', layers.MaxPool2DLayer),
        ('dropout2', layers.DropoutLayer),
        ('conv3', layers.Conv2DLayer),
        ('pool3', layers.MaxPool2DLayer),
        ('dropout3', layers.DropoutLayer),
        ('hidden4', layers.DenseLayer),
        ('dropout4', layers.DropoutLayer),
        ('hidden5', layers.DenseLayer),
        ('output', layers.DenseLayer),
    ],

    # (batch, channels, rows, cols); batch size left variable.
    input_shape=(None, 1, 96, 96),

    # Conv stages: filter count doubles while filter size shrinks;
    # dropout rate grows with depth (0.1 -> 0.2 -> 0.3).
    conv1_num_filters=64, conv1_filter_size=(5, 5), pool1_pool_size=(2, 2),
    conv1_nonlinearity=leaky_rectify,
    dropout1_p=0.1,
    conv2_num_filters=128, conv2_filter_size=(3, 3), pool2_pool_size=(2, 2),
    conv2_nonlinearity=leaky_rectify,
    dropout2_p=0.2,
    conv3_num_filters=256, conv3_filter_size=(2, 2), pool3_pool_size=(2, 2),
    conv3_nonlinearity=leaky_rectify,
    dropout3_p=0.3,

    # Dense head.
    hidden4_num_units=2048,
    hidden4_nonlinearity=leaky_rectify,
    dropout4_p=0.5,
    hidden5_num_units=1000,
    hidden5_nonlinearity=leaky_rectify,

    # Linear output for regression targets.
    output_num_units=30, output_nonlinearity=None,

    # Shared so the on_epoch_finished schedules below can mutate them.
    # Initialized to the schedules' start values (was 0.03, which made the
    # first epoch train at 3x the scheduled learning rate before the
    # AdjustVariable callback reset it to 0.01).
    update_learning_rate=theano.shared(float32(0.01)),
    update_momentum=theano.shared(float32(0.9)),

    regression=True,
    batch_iterator_train=FlipBatchIterator(batch_size=128),
    on_epoch_finished=[
        AdjustVariable('update_learning_rate', start=0.01, stop=0.0007),
        AdjustVariable('update_momentum', start=0.9, stop=0.999),
        EarlyStopping(patience=20),
    ],
    max_epochs=5000,
    verbose=1,
)
# Example 3
# CNN classifier for 32x32 RGB images (CIFAR-10-like input shape).
# Two conv/pool/dropout stages, then two 4096-unit dense layers with 50%
# dropout, softmax over 10 classes. 8% of the training data is held out
# for validation via TrainSplit. Learning rate and momentum are theano
# shared variables annealed per-epoch by the AdjustVariable callbacks.
net = nn(
    layers=[
        ('input', layers.InputLayer),
        ('conv1', layers.Conv2DLayer),
        ('pool1', layers.MaxPool2DLayer),
        ('dropout1', layers.DropoutLayer),
        ('conv2', layers.Conv2DLayer),
        ('pool2', layers.MaxPool2DLayer),
        ('dropout2', layers.DropoutLayer),
        ('hidden5', layers.DenseLayer),
        ('dropout5', layers.DropoutLayer),
        ('hidden6', layers.DenseLayer),
        ('dropout6', layers.DropoutLayer),
        ('output', layers.DenseLayer),
    ],

    # (batch, channels, rows, cols); 3-channel colour input.
    input_shape=(None, 3, 32, 32),

    # First conv stage: 256 filters of 5x5, 2x2 max-pooling, 20% dropout.
    conv1_num_filters=256, conv1_filter_size=(5, 5), pool1_pool_size=(2, 2),
    conv1_W=lasagne.init.GlorotUniform(),
    conv1_nonlinearity=LeakyRectify(leakiness=.03),
    dropout1_p=0.2,

    # Second conv stage: 128 filters of 2x2, 2x2 max-pooling, 30% dropout.
    conv2_num_filters=128, conv2_filter_size=(2, 2), pool2_pool_size=(2, 2),
    conv2_W=lasagne.init.GlorotUniform(),
    conv2_nonlinearity=LeakyRectify(leakiness=.03),
    dropout2_p=0.3,

    # Fully-connected head: two 4096-unit layers, 50% dropout each.
    hidden5_num_units=4096,
    hidden5_nonlinearity=LeakyRectify(leakiness=.03),
    dropout5_p=0.5,
    hidden6_num_units=4096,
    hidden6_nonlinearity=LeakyRectify(leakiness=.03),
    dropout6_p=0.5,

    batch_iterator_train=BatchIterator(batch_size=128),
    train_split=TrainSplit(eval_size=0.08),  # 8% validation hold-out
    output_num_units=10,           # 10-way classification
    output_nonlinearity=softmax,
    regression=False,              # classification mode
    use_label_encoder=True,
    max_epochs=10000,

    update=nesterov_momentum,
    # Shared so the on_epoch_finished schedules below can mutate them.
    # Initialized to the schedules' start values (was 0.03, which made the
    # first epoch train at 3x the scheduled learning rate before the
    # AdjustVariable callback reset it to 0.01).
    update_learning_rate=theano.shared(float32(0.01)),
    update_momentum=theano.shared(float32(0.9)),

    on_epoch_finished=[
        AdjustVariable('update_learning_rate', start=0.01, stop=0.0001),
        AdjustVariable('update_momentum', start=0.9, stop=0.9999),
        EarlyStopping(patience=200),
    ],

    verbose=1,
)