def __init__(self, num_features=93, num_classes=9):
    """Set up the 'nl_simp' model: one deep feed-forward net and a clone.

    Builds ``self.nn`` — input dropout, four dense+dropout hidden stages
    with leaky-ReLU activations and He-normal init, softmax output —
    trained with adagrad and a decaying shared learning rate, then
    duplicates it into ``self.nn2`` via ``deepcopy``.

    Parameters
    ----------
    num_features : int
        Width of the input layer.
    num_classes : int
        Number of softmax output units.
    """
    self.prefix = 'nl_simp'
    self.num_features = num_features
    self.num_classes = num_classes

    # Topology: input -> 4 x (dense + dropout) -> softmax output.
    self.layers = [
        ('input', InputLayer),
        ('dropouti', DropoutLayer),
        ('dense0', DenseLayer),
        ('dropout0', DropoutLayer),
        ('dense1', DenseLayer),
        ('dropout1', DropoutLayer),
        ('dense2', DenseLayer),
        ('dropout2', DropoutLayer),
        ('dense3', DenseLayer),
        ('dropout3', DropoutLayer),
        ('output', DenseLayer),
    ]

    self.early_stopping = EarlyStopping(patience=25)

    def leaky():
        # Fresh activation object for each layer, as in the original setup.
        return LeakyRectify(leakiness=0.002)

    self.nn = My_NeuralNet(
        layers=self.layers,
        input_shape=(None, self.num_features),
        dropouti_p=0.12,
        dense0_num_units=900, dense0_W=HeNormal(), dense0_nonlinearity=leaky(),
        dropout0_p=0.35,
        dense1_num_units=600, dense1_W=HeNormal(), dense1_nonlinearity=leaky(),
        dropout1_p=0.2,
        dense2_num_units=400, dense2_W=HeNormal(), dense2_nonlinearity=leaky(),
        dropout2_p=0.1,
        dense3_num_units=300, dense3_W=HeNormal(), dense3_nonlinearity=leaky(),
        dropout3_p=0.1,
        output_num_units=num_classes,
        output_nonlinearity=softmax,
        update=adagrad,
        # Learning rate lives in a theano shared so AdjustVariable can decay it.
        update_learning_rate=shared(float32(0.01)),
        batch_iterator_train=BatchIterator(batch_size=512),
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=0.01, stop=0.005),
            self.early_stopping,
        ],
        eval_size=0.,
        verbose=1,
        max_epochs=300,
    )
    # Second net starts out as an exact copy of the first.
    self.nn2 = deepcopy(self.nn)
def __init__(self, num_features=93, num_classes=9):
    """Set up the 'nl_2lv_cal' model: three nets with differing output widths.

    Builds ``self.nn0`` (2 output units, four hidden stages), ``self.nn1``
    (3 output units, three hidden stages) and ``self.nn2`` (6 output units,
    same three-stage topology as ``nn1``). All use He-normal init,
    leaky-ReLU activations, adagrad with a decaying shared learning rate,
    batch size 256, and early stopping with patience 20.
    NOTE(review): the prefix suggests a two-level scheme where nn0 gates
    between the 3-class and 6-class nets — confirm against the caller.

    Parameters
    ----------
    num_features : int
        Width of the input layer.
    num_classes : int
        Stored on the instance; the output widths below are fixed (2/3/6).
    """
    self.prefix = 'nl_2lv_cal'
    self.num_features = num_features
    self.num_classes = num_classes

    def leaky():
        # Fresh activation object for each layer, as in the original setup.
        return LeakyRectify(leakiness=0.002)

    # ---- net 0: 2-way output, four hidden stages -----------------------
    self.layers0 = [
        ('input', InputLayer),
        ('dropoutn', DropoutLayer),
        ('dense0', DenseLayer),
        ('dropout0', DropoutLayer),
        ('dense1', DenseLayer),
        ('dropout1', DropoutLayer),
        ('dense2', DenseLayer),
        ('dropout2', DropoutLayer),
        ('dense3', DenseLayer),
        ('dropout3', DropoutLayer),
        ('output', DenseLayer),
    ]
    self.early_stopping0 = EarlyStopping(patience=20)
    self.nn0 = My_NeuralNet(
        layers=self.layers0,
        input_shape=(None, self.num_features),
        dropoutn_p=0.12,
        dense0_num_units=900, dense0_W=HeNormal(), dense0_nonlinearity=leaky(),
        dropout0_p=0.35,
        dense1_num_units=600, dense1_W=HeNormal(), dense1_nonlinearity=leaky(),
        dropout1_p=0.2,
        dense2_num_units=400, dense2_W=HeNormal(), dense2_nonlinearity=leaky(),
        dropout2_p=0.1,
        dense3_num_units=300, dense3_W=HeNormal(), dense3_nonlinearity=leaky(),
        dropout3_p=0.1,
        output_num_units=2,
        output_nonlinearity=softmax,
        update=adagrad,
        update_learning_rate=shared(float32(0.01)),
        batch_iterator_train=BatchIterator(batch_size=256),
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=0.01, stop=0.001),
            self.early_stopping0,
        ],
        eval_size=0,
        verbose=1,
        max_epochs=36,
    )

    # ---- net 1: 3-way output, three hidden stages ----------------------
    self.layers1 = [
        ('input', InputLayer),
        ('dropoutn', DropoutLayer),
        ('dense0', DenseLayer),
        ('dropout0', DropoutLayer),
        ('dense1', DenseLayer),
        ('dropout1', DropoutLayer),
        ('dense2', DenseLayer),
        ('dropout2', DropoutLayer),
        ('output', DenseLayer),
    ]
    self.early_stopping1 = EarlyStopping(patience=20)
    self.nn1 = My_NeuralNet(
        layers=self.layers1,
        input_shape=(None, self.num_features),
        dropoutn_p=0.18,
        dense0_num_units=900, dense0_W=HeNormal(), dense0_nonlinearity=leaky(),
        dropout0_p=0.35,
        dense1_num_units=600, dense1_W=HeNormal(), dense1_nonlinearity=leaky(),
        dropout1_p=0.15,
        dense2_num_units=400, dense2_W=HeNormal(), dense2_nonlinearity=leaky(),
        dropout2_p=0.05,
        output_num_units=3,
        output_nonlinearity=softmax,
        update=adagrad,
        update_learning_rate=shared(float32(0.01)),
        batch_iterator_train=BatchIterator(batch_size=256),
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=0.01, stop=0.001),
            self.early_stopping1,
        ],
        eval_size=0,
        verbose=1,
        max_epochs=60,
    )

    # ---- net 2: 6-way output, same three-stage topology as net 1 -------
    self.layers2 = [
        ('input', InputLayer),
        ('dropoutn', DropoutLayer),
        ('dense0', DenseLayer),
        ('dropout0', DropoutLayer),
        ('dense1', DenseLayer),
        ('dropout1', DropoutLayer),
        ('dense2', DenseLayer),
        ('dropout2', DropoutLayer),
        ('output', DenseLayer),
    ]
    self.early_stopping2 = EarlyStopping(patience=20)
    self.nn2 = My_NeuralNet(
        layers=self.layers2,
        input_shape=(None, self.num_features),
        dropoutn_p=0.13,
        dense0_num_units=900, dense0_W=HeNormal(), dense0_nonlinearity=leaky(),
        dropout0_p=0.35,
        dense1_num_units=600, dense1_W=HeNormal(), dense1_nonlinearity=leaky(),
        dropout1_p=0.15,
        dense2_num_units=400, dense2_W=HeNormal(), dense2_nonlinearity=leaky(),
        dropout2_p=0.05,
        output_num_units=6,
        output_nonlinearity=softmax,
        update=adagrad,
        update_learning_rate=shared(float32(0.01)),
        batch_iterator_train=BatchIterator(batch_size=256),
        on_epoch_finished=[
            AdjustVariable('update_learning_rate', start=0.01, stop=0.001),
            self.early_stopping2,
        ],
        eval_size=0,
        verbose=1,
        max_epochs=31,
    )