Example #1
0
# Transpose the target matrices — the network presumably expects a
# class-by-sample layout (verify against Trainer's conventions).
Y_train = Y_train.T
Y_val = Y_val.T

# network settings
num_epoch = 100
minibatch_size = 64

# create network & trainer: 2-input / 2-class MLP with two 100-unit ReLU
# hidden layers and He weight initialization, trained with Adam (lr=0.01).
network = FCNN_SoftmaxCE(2, [100, 100], 2, ['Relu', 'Relu'],
                         weight_init_std='he')
trainer = Trainer(network,
                  X_train, Y_train,
                  X_val, Y_val,
                  num_epoch, minibatch_size,
                  'adam', {'lr': 0.01},
                  verbose=True,
                  LossAccInterval=10)

# start training network
(train_loss_list, val_loss_list,
 train_acc_list, val_acc_list, x, lrs) = trainer.train()
#network training finished

#feed the trained final network, whole moon data in order to visualize the result
draw_border(network,
            X_train,
            Y_train,
            X_val,
import os

# Fix the RNG seed so weight initialization / minibatch shuffling are
# reproducible across runs.
np.random.seed(0)

# Directory where the trained network parameters are persisted.
DIR = os.path.dirname(os.path.abspath(__file__)) + '/SavedNetwork/MnistCNN/'

# flatten=False keeps the 2-D image shape, as required by the CNN input.
X_train, Y_train, X_val, Y_val, Y_train_label, Y_val_label = loadMnist(
    flatten=False)

# training hyper-parameters
num_epoch = 1
minibatch_size = 50
save_network = True
learning_rate = 0.01
optimizer_type = 'nesterov'

print('network created')
network = CNN_Simple()
print('network setting finished')
trainer = Trainer(network,
                  X_train, Y_train,
                  X_val, Y_val,
                  num_epoch, minibatch_size,
                  optimizer_type, {'lr': learning_rate},
                  verbose=True,
                  LossAccInterval=50)
(train_loss_list, val_loss_list,
 train_acc_list, val_acc_list, x_axis, lrs) = trainer.train()

# BUG FIX: save_network was defined but never consulted — the network was
# saved unconditionally. Honor the flag, matching the other examples that
# guard networkSaver on it (no behavior change while the flag is True).
if save_network:
    networkSaver(network, DIR)
                          weight_init_std='he')
# Two identically configured 784 -> 256 -> 256 -> 10 ReLU classifiers with
# He initialization, kept separate for side-by-side comparison runs.
network9 = FCNN_SoftmaxCE(784, [256, 256], 10, ['Relu', 'Relu'],
                          weight_init_std='he')
network10 = FCNN_SoftmaxCE(784, [256, 256], 10, ['Relu', 'Relu'],
                           weight_init_std='he')

# SGD with a triangular (cyclical) learning-rate schedule. The fixed
# 'lr': -1 is presumably a placeholder overridden by the scheduler each
# iteration — confirm in Trainer.
triangular_params = {
    'stepsize': stepsize,
    'base_lr': 0.0001,
    'max_lr': 0.35
}
trainer1 = Trainer(network1,
                   X_train, Y_train,
                   X_val, Y_val,
                   num_epoch, minibatch_size,
                   'sgd', {'lr': -1},
                   lr_scheduler_type='triangular',
                   lr_scheduler_params=triangular_params,
                   verbose=True,
                   LossAccInterval=10)
trainer2 = Trainer(network2,
                   X_train,
                   Y_train,
                   X_val,
                   Y_val,
                   num_epoch,
                   minibatch_size,
                   'sgd', {'lr': -1},
Example #4
0
# MNIST autoencoder example: the trainer below is fed X_train/X_val as both
# input AND target, so the network learns to reconstruct its own input.
X_train, Y_train, X_val, Y_val, Y_train_label, Y_val_label = loadMnist()
num_epoch = 20
minibatch_size = 256

# 784 -> 1000 -> 500 -> 100 -> 500 -> 1000 -> 784 architecture with MSE
# loss, He initialization, and a final Sigmoid activation.
network = FCNN_MSE(784, [1000, 500, 100, 500, 1000], 784,
                   ['Relu', 'Relu', 'Relu', 'Relu', 'Relu', 'Sigmoid'],
                   weight_init_std='he',
                   use_dropout=False,
                   keep_probs=[0.9, 0.9, 0.9, 0.9, 0.9],
                   use_batchnorm=False)
trainer = Trainer(network,
                  X_train, X_train,  # input == target (autoencoder)
                  X_val, X_val,
                  num_epoch, minibatch_size,
                  'adam', {'lr': 0.0004},
                  verbose=True,
                  LossAccInterval=10000,
                  lr_scheduler_type='exp_decay',
                  lr_scheduler_params={'k': 0.00001})
(train_loss_list, val_loss_list,
 train_acc_list, val_acc_list, x, lrs) = trainer.train()

# Take 25 validation digits and reconstruct them with the trained network.
visualize_example = X_val.T[:25].T  #(784, 25)
reconstructed_example = network.predict(visualize_example)  #(784, 25)

DIR = os.path.dirname(
    os.path.abspath(__file__)) + '/SavedNetwork/MnistAutoencoder/'
networkSaver(network, DIR)
Example #5
0
save_network = False
learning_rate = 0.01
optimizer_type = 'adam'

# First network: batchnorm_prev=True (presumably batchnorm placed before
# the activation — confirm in FCNN_SoftmaxCE). The tiny [1, 1] hidden
# layers are kept exactly as in the original comparison setup.
network_prev = FCNN_SoftmaxCE(784, [1, 1], 10, ['Relu', 'Relu'],
                              weight_init_std='he',
                              use_dropout=True,
                              use_batchnorm=True,
                              keep_probs=[0.9, 0.9],
                              batchnorm_prev=True)
trainer_prev = Trainer(network_prev,
                       X_train, Y_train,
                       X_val, Y_val,
                       num_epoch, minibatch_size,
                       optimizer_type, {'lr': learning_rate},
                       verbose=True,
                       LossAccInterval=5)
(train_loss_list_prev, val_loss_list_prev, train_acc_list_prev,
 val_acc_list_prev, x_axis, lrs) = trainer_prev.train()

# Counterpart network with batchnorm_prev=False for the comparison.
network_after = FCNN_SoftmaxCE(784, [256, 256], 10, ['Relu', 'Relu'],
                               weight_init_std='he',
                               use_dropout=True,
                               use_batchnorm=True,
                               keep_probs=[0.9, 0.9],
                               batchnorm_prev=False)
trainer_after = Trainer(network_after,
learning_rate = 0.001
optimizer_type = 'adam'

# 784 -> 256 x 4 -> 10 ReLU classifier with dropout and batchnorm,
# He initialization, trained with Adam (explicit epsilon).
network = FCNN_SoftmaxCE(784, [256, 256, 256, 256], 10,
                         ['Relu', 'Relu', 'Relu', 'Relu'],
                         weight_init_std='he',
                         use_dropout=True,
                         use_batchnorm=True,
                         keep_probs=[0.9, 0.9, 0.9, 0.9])
adam_params = {'lr': learning_rate, 'epsilon': 1e-8}
trainer = Trainer(network,
                  X_train, Y_train,
                  X_val, Y_val,
                  num_epoch, minibatch_size,
                  optimizer_type, adam_params,
                  verbose=True,
                  LossAccInterval=200,
                  LossAccOnNum='whole')
(train_loss_list, val_loss_list,
 train_acc_list, val_acc_list, x_axis, lrs) = trainer.train()

# Persist the trained parameters only when explicitly requested.
if save_network:
    networkSaver(network, DIR)
trainLoss = go.Scatter(x=x_axis,
                       y=train_loss_list,
                       mode='lines',
# Two identical 4-hidden-layer dropout networks: one trained on the
# augmented data, one on the size-extended data, so the two dataset
# strategies can be compared under the same architecture and optimizer.
network_augment = FCNN_SoftmaxCE(784, [256, 256, 256, 256], 10,
                                 ['Relu', 'Relu', 'Relu', 'Relu'],
                                 weight_init_std='he',
                                 use_dropout=True,
                                 keep_probs=[0.9, 0.9, 0.9, 0.9])
network_increase = FCNN_SoftmaxCE(784, [256, 256, 256, 256], 10,
                                  ['Relu', 'Relu', 'Relu', 'Relu'],
                                  weight_init_std='he',
                                  use_dropout=True,
                                  keep_probs=[0.9, 0.9, 0.9, 0.9])

trainer_augment = Trainer(network_augment,
                          X_train_aug, Y_train_aug,
                          X_val, Y_val,
                          num_epoch, minibatch_size,
                          'adam', {'lr': 0.001},
                          verbose=True,
                          LossAccInterval=100)
trainer_increase = Trainer(network_increase,
                           X_train_extended, Y_train_extended,
                           X_val, Y_val,
                           num_epoch, minibatch_size,
                           'adam', {'lr': 0.001},
                           verbose=True,
                           LossAccInterval=100)
Example #8
0
learning_rate = 0.05
# Three optimizers to be compared on identically configured networks.
optimizer_type1 = 'sgd'
optimizer_type2 = 'momentum'
optimizer_type3 = 'nesterov'

# 5-hidden-layer ReLU classifier with dropout + batchnorm, He init.
network1 = FCNN_SoftmaxCE(784, [256, 256, 256, 256, 256], 10,
                          ['Relu', 'Relu', 'Relu', 'Relu', 'Relu'],
                          weight_init_std='he',
                          use_dropout=True,
                          use_batchnorm=True,
                          keep_probs=[0.9, 0.9, 0.9, 0.9, 0.9])
trainer1 = Trainer(network1,
                   X_train, Y_train,
                   X_val, Y_val,
                   num_epoch, minibatch_size,
                   optimizer_type1, {'lr': learning_rate},
                   verbose=True,
                   LossAccInterval=20)
(train_loss_list1, val_loss_list1,
 train_acc_list1, val_acc_list1, x_axis, lrs) = trainer1.train()

# Same architecture again, to be trained with the second optimizer.
network2 = FCNN_SoftmaxCE(784, [256, 256, 256, 256, 256], 10,
                          ['Relu', 'Relu', 'Relu', 'Relu', 'Relu'],
                          weight_init_std='he',
                          use_dropout=True,
                          use_batchnorm=True,
                          keep_probs=[0.9, 0.9, 0.9, 0.9, 0.9])
trainer2 = Trainer(network2,
                   X_train,
minibatch_size = 256
# One scheduler step per iteration. With stepsize equal to the full
# iteration count, the triangular cycle spans the entire run
# (cycle_num evaluates to 0.5).
iteration_num = math.ceil(X_train.shape[1] / minibatch_size) * num_epoch
stepsize = iteration_num
cycle_num = iteration_num / (stepsize * 2)

network = FCNN_SoftmaxCE(784, [256, 256], 10, ['Relu', 'Relu'],
                         weight_init_std='he')
# 'lr': -1 is presumably a placeholder overridden each iteration by the
# triangular scheduler — confirm in Trainer.
trainer = Trainer(network,
                  X_train, Y_train,
                  X_val, Y_val,
                  num_epoch, minibatch_size,
                  'sgd', {'lr': -1},
                  lr_scheduler_type='triangular',
                  lr_scheduler_params={
                      'stepsize': stepsize,
                      'base_lr': 0.0001,
                      'max_lr': 0.5
                  },
                  verbose=True,
                  LossAccInterval=10)
(train_loss_list, val_loss_list,
 train_acc_list, val_acc_list, x_axis, lrs) = trainer.train()

trainLoss = go.Scatter(x=x_axis, y=train_loss_list,
                       mode='lines', name='training loss')
valLoss = go.Scatter(x=x_axis,
Example #10
0
save_network = True
learning_rate = 0.001
optimizer_type = 'adam'

# 5-hidden-layer ReLU classifier with dropout + batchnorm, trained with
# Adam under an exponentially decaying learning-rate schedule.
network = FCNN_SoftmaxCE(784, [256, 256, 256, 256, 256], 10,
                         ['Relu', 'Relu', 'Relu', 'Relu', 'Relu'],
                         weight_init_std='he',
                         use_dropout=True,
                         use_batchnorm=True,
                         keep_probs=[0.9, 0.9, 0.9, 0.9, 0.9])
trainer = Trainer(network,
                  X_train, Y_train,
                  X_val, Y_val,
                  num_epoch, minibatch_size,
                  optimizer_type, {'lr': learning_rate},
                  verbose=True,
                  LossAccInterval=20,
                  lr_scheduler_type='exp_decay',
                  lr_scheduler_params={'k': 0.0001})
(train_loss_list, val_loss_list,
 train_acc_list, val_acc_list, x_axis, lrs) = trainer.train()

# Persist the trained parameters only when requested.
if save_network:
    networkSaver(network, DIR)

trainLoss = go.Scatter(x=x_axis, y=train_loss_list,
                       mode='lines', name='training loss')