Example #1
0
def get_vals(lr,hidden_dims1,hidden_dims2,lr_decay,reg,drop):
    """Train a two-hidden-layer net with the given hyper-parameters and log the result.

    Args:
        lr: exponent of the learning rate; the rate actually used is 10 ** lr.
        hidden_dims1, hidden_dims2: hidden layer sizes (cast to int below).
        lr_decay: per-epoch multiplicative learning-rate decay.
        reg: L2 regularisation strength.
        drop: dropout parameter passed to FullyConnectedNet.

    Returns:
        The best validation accuracy reached by the solver.

    Side effects: writes one JSON record (plus a trailing ',\\n') to each of
    the global file handles ``json_log2`` (summary values) and ``json_log``
    (full histories), and saves a solver checkpoint. ``data`` is read from
    the enclosing module scope.
    """
    global json_log2,json_log
    # Caller passes the exponent; convert it to the actual learning rate.
    lr = 10 ** lr
    model = FullyConnectedNet(hidden_dims=[int(hidden_dims1),int(hidden_dims2)], input_dim=48 * 48 * 1, reg=reg, num_classes=7, dtype=np.float64,dropout=drop)
    solver = Solver(model, data,
                    update_rule='sgd_momentum',
                    optim_config={'learning_rate': lr,}, lr_decay = lr_decay,
                    num_epochs=50, batch_size=70,
                    print_every=1000000)

    solver.train()
    solver._save_checkpoint()  # NOTE(review): relies on a Solver-private method

    #SAVE THE VALUES TO A FILE
    val_acc = solver.best_val_acc
    acc = max(solver.train_acc_history)
    loss = min(solver.loss_history)
    # Summary log: single best/extreme value per metric.
    json_log2.write(json.dumps({'Learning Rate': lr,
                               'accuracy': acc, 'val_acc': val_acc ,
                                'loss': loss,"lr_decay":lr_decay,
                                'dropout':drop,'reg':reg,
                                'layer_1': hidden_dims1,'layer_2': hidden_dims2}) + ',\n')
    
    # Full log: complete accuracy/loss histories for this run.
    json_log.write(json.dumps({'Learning Rate': lr,
                               'accuracy': solver.train_acc_history,
                                'val_acc': solver.val_acc_history,
                                'loss': solver.loss_history,"lr_decay":lr_decay,
                                'dropout':drop,'reg':reg,
                               'layer_1': hidden_dims1,'layer_2': hidden_dims2}) + ',\n')
    return solver.best_val_acc
Example #2
0
def train_FER2013():
    """Train a FullyConnectedNet on FER2013.

    With ``optim`` set to True, repeatedly samples learning rates in
    [0.001, 0.003) until one trial reaches ``threshold`` validation
    accuracy; otherwise trains once with a previously found rate.
    """
    ###########################################################################
    #                           BEGIN OF YOUR CODE                            #
    ###########################################################################
    #pickle_FER() #used to load and store the FER2013 dataset for faster performance
    optim = False  # use for fine tuning learning rate
    out = get_FER(num_training=10000)
    data = {
        'X_train': out['X_train'],  # training data
        'y_train': out['y_train'],  # training labels
        'X_val': out['X_val'],  # validation data
        'y_val': out['y_val']  # validation labels
    }

    def _make_model():
        # Fresh, time-seeded weights for every training run.
        return FullyConnectedNet(input_dim=48 * 48 * 1,
                                 hidden_dims=[40],
                                 num_classes=7,
                                 dropout=0,
                                 reg=0,
                                 seed=int(time.time()))

    if optim:
        count = 1
        reached = False
        threshold = 0.35  #set threshold here
        while not reached:
            np.random.seed(int(time.time()))
            print("iteration number{}".format(count))
            lr = np.random.uniform(0.001, 0.003)
            print("testing lr: {}".format(lr))
            # BUG FIX: previously a single model instance was reused across
            # trials, so each candidate lr continued training already-trained
            # weights and later trials were biased. Re-initialise the network
            # for every candidate learning rate instead.
            model = _make_model()
            solver = Solver(model,
                            data,
                            update_rule='sgd_momentum',
                            optim_config={
                                'learning_rate': lr,
                                'momentum': 0.5
                            },
                            lr_decay=0.8,
                            num_epochs=100,
                            batch_size=100,
                            print_every=100)
            solver.train()
            if max(solver.val_acc_history) >= threshold:
                reached = True
            count += 1

        print("Final lr: {}".format(lr))
    else:
        model = _make_model()
        solver = Solver(model,
                        data,
                        update_rule='sgd_momentum',
                        optim_config={
                            'learning_rate': 0.0018742807840127864,
                            'momentum': 0.5
                        },
                        lr_decay=0.8,
                        num_epochs=100,
                        batch_size=100,
                        print_every=100)
        solver.train()
Example #3
0
def train_overfit_net(save_net = False):
    """Deliberately overfit a small net on 50 randomly drawn CIFAR-10 samples.

    Args:
        save_net: if True, compute train/val/test accuracies, write a
            LaTeX-formatted summary line to nets/overfit_net/info.tex and
            save the solver via save_net_info.
    """

    # get CIFAR10 data
    data = get_CIFAR10_data()

    # subsample the data. 50 draws (with replacement)
    indices_to_select = np.random.randint(0, len(data["X_train"]), 50)

    # extract the samples
    data["X_train"] = data["X_train"][indices_to_select]
    data["y_train"] = data["y_train"][indices_to_select]

    # intialize net
    model = FullyConnectedNet([50],
                              input_dim      = 32*32*3,
                              num_classes    = 10,
                              dropout        = 0,
                              reg            = 0.0,
                              weight_scale   = 1e-2)

    # initialize solver
    solver = Solver(model,data,
                    update_rule  = 'sgd',
                    optim_config = {'learning_rate': 5e-4},
                    lr_decay     = 0.85,
                    num_epochs   = 20,
                    batch_size   = 5,
                    print_every  = 100)

    # train the net
    solver.train()

    if save_net:

        # test the net and save its training, validation and testing
        # accuracies. FIX: the LaTeX fragments use raw strings (r"\%", r"\&")
        # — same runtime value as before, but no invalid-escape warning.

        # get training accuracy
        train_acc = str.format("{0:.2f}", solver.check_accuracy(data["X_train"], data["y_train"]) * 100) + r"\%"

        # get validation accuracy
        val_acc = str.format("{0:.2f}", solver.best_val_acc * 100) + r"\%"

        # get testing accuracy
        test_acc = str.format("{0:.2f}", solver.check_accuracy(data["X_test"], data["y_test"]) * 100) + r"\%"

        text = "Accuracies: " + train_acc + " training, " + val_acc + r" validation  \& " + test_acc + " testing."

        # write to file
        append_to_file("nets/overfit_net/info.tex", text, mode =  "w")

        # save net info
        save_net_info("nets/overfit_net", solver)
def train_cifar10_net(save_net=False):
    """
    Uses a Solver instance to train a TwoLayerNet that achieves at least 50% 
    accuracy on the validation set.

    Args:
        save_net: if True, compute train/val/test accuracies, write a
            LaTeX-formatted summary to nets/train_net/info.tex and save the
            solver via save_net_info.
    """

    # get CIFAR10 data
    data = get_CIFAR10_data()

    # intialize net
    model = FullyConnectedNet([100],
                              input_dim=32 * 32 * 3,
                              num_classes=10,
                              dropout=0,
                              reg=0.0)

    # initialize solver
    solver = Solver(model,
                    data,
                    update_rule='sgd',
                    optim_config={'learning_rate': 1e-3},
                    lr_decay=0.95,
                    num_epochs=20,
                    batch_size=100,
                    print_every=100)

    # train the net
    solver.train()

    if save_net:

        # test the net and save its training, validation and testing
        # accuracies. FIX: LaTeX fragments are raw strings (r"\%", r"\&") —
        # identical runtime value, no invalid-escape warning.

        # get training accuracy
        train_acc = str.format(
            "{0:.2f}",
            solver.check_accuracy(data["X_train"], data["y_train"]) *
            100) + r"\%"

        # get validation accuracy
        val_acc = str.format("{0:.2f}", solver.best_val_acc * 100) + r"\%"

        # get testing accuracy
        test_acc = str.format(
            "{0:.2f}",
            solver.check_accuracy(data["X_test"], data["y_test"]) * 100) + r"\%"

        text = "Accuracies: " + train_acc + " training, " + val_acc + r" validation  \& " + test_acc + " testing."

        # write to file
        append_to_file("nets/train_net/info.tex", text, mode="w")

        # save net info
        save_net_info("nets/train_net", solver)
Example #5
0
def train_net(data,
              hidden_dims,
              input_dim,
              num_classes,
              dropout         = 0,
              reg             = 0,
              learning_rate   = 5e-4,
              momentum        = 0,
              num_epochs      = 20,
              batch_size      = 100,
              lr_decay        = 0.95,
              update_rule     = "sgd",
              verbose = True):

    """ 
    Uses a solver instance to train a neural net on the given dataset.

    args:
    - data: dictionary with the training/validation arrays the Solver expects
    - hidden_dims, input_dim, num_classes: network architecture
    - dropout, reg: regularisation hyper-parameters for FullyConnectedNet
    - learning_rate, momentum: entries of the solver's optim_config
    - num_epochs, batch_size, lr_decay: optimisation schedule
    - update_rule: solver update rule. FIX: the previous default "sdg" was a
      typo for "sgd" and not a valid update rule.
    - verbose: forwarded to the Solver

    returns:
    - the trained Solver instance
    """

    # initialize net
    model = FullyConnectedNet(hidden_dims,
                              input_dim,
                              num_classes,
                              dropout,
                              reg)

    # initialize solver
    solver = Solver(model,
                    data,
                    update_rule  = update_rule,
                    optim_config = {'learning_rate': learning_rate,
                                    'momentum': momentum},
                    lr_decay     = lr_decay,
                    num_epochs   = num_epochs,
                    batch_size   = batch_size,
                    print_every  = 100,
                    verbose = verbose)

    # train the network
    solver.train()
    
    # return the solver
    return solver
Example #6
0
'lr_decay': 0.9,
'num_epochs': 20,
'batch_size': 64,
'num_training': 22000,
'num_validation': 2000,
'num_test': 2000,
'dropout': 0.3
}



# Build the FER2013 data, model and solver from the default_para
# configuration dict, train, and record the best validation accuracy.
data = get_FER2013_data(
    num_training=default_para['num_training'],
    num_validation=default_para['num_validation'],
    num_test=default_para['num_test'],
    subtract_mean=True)

model = FullyConnectedNet(
    default_para['hidden_layer'],
    input_dim=48 * 48,
    num_classes=7,
    reg=default_para['regularization'],
    dropout=default_para['dropout'])

solver = Solver(
    model,
    data,
    update_rule=default_para['update_rule'],
    optim_config={
        'learning_rate': default_para['learning_rate'],
        'momentum': default_para['momentum'],
    },
    lr_decay=default_para['lr_decay'],
    num_epochs=default_para['num_epochs'],
    batch_size=default_para['batch_size'],
    print_every=200,
    verbose=True)

solver.train()

best_val_acc = solver.best_val_acc
"""
TODO: Use a Solver instance to train a TwoLayerNet that achieves at least 50%
accuracy on the validation set.
"""
###########################################################################
#                           BEGIN OF YOUR CODE                            #
###########################################################################
# Load FER 2013 jpeg data from bitbucket (slow).
path_to_jpg_data = "/vol/bitbucket/395ML_NN_Data/datasets/FER2013"
data = load_FER_2013_jpg(path_to_jpg_data)

# OR use pickle data from bitbucket (faster). The path is specified within the function.
#data = get_FeR2013_data()

# Two hidden layers; dropout and reg left at FullyConnectedNet's defaults.
model = FullyConnectedNet(hidden_dims=[544, 801],
                          input_dim=48 * 48 * 1,
                          num_classes=7,
                          dtype=np.float64)
# Attach the dataset's mean image to the model for later use.
model.mean_image = data['mean_image']
# Learning rate found by an earlier search — TODO confirm provenance.
lr = 0.013614446824659357
solver = Solver(model,
                data,
                update_rule='sgd_momentum',
                optim_config={
                    'learning_rate': lr,
                },
                lr_decay=0.8,
                num_epochs=100,
                batch_size=70,
                print_every=100,
                checkpoint_name="intermediate")  # periodically checkpoint under this name
Example #8
0
from src.fcnet import FullyConnectedNet
from src.utils.solver import Solver
from src.utils.data_utils import get_CIFAR10_data
"""
TODO: Overfit the network with 50 samples of CIFAR-10
"""
###########################################################################
#                           BEGIN OF YOUR CODE                            #
###########################################################################

# Deep 4x500 fully-connected net on the default CIFAR-10 split.
t_data = get_CIFAR10_data()

# print(t_data["X_train"])
model = FullyConnectedNet(hidden_dims=[500, 500, 500, 500],
                          reg=0,
                          weight_scale=2e-2)
solver = Solver(model,
                data=t_data,
                update_rule='sgd',
                optim_config={'learning_rate': 4e-2},
                lr_decay=0.88,
                num_epochs=20,
                batch_size=50,
                print_every=100)
solver.train()

# plot the training-loss history
plt.subplot(2, 1, 1)
plt.title("Training loss")
plt.plot(solver.loss_history, "o")
# FER2013 training run. TRAIN_NUM / VALID_NUM / TEST_NUM, CLASS_NUM, DROPOUT,
# REGULAR, BATCH_SIZE and EPOCH_NUM are defined elsewhere in this file —
# verify against their definitions.
data = get_FER2013_data(TRAIN_NUM, VALID_NUM, TEST_NUM)
# data = get_FER2013_data_from_binary(VALID_NUM)

# Input size = product of the per-sample dimensions.
INPUT_DIMS = np.prod(data["X_train"].shape[1:])
HIDDEN_DIMS = np.asarray([1150, 1150])

LEARNING_RATE = 0.00344
MOMENTUM = 0.9
LEARNING_DECAY = 0.9

# When True, the solver writes checkpoints under checkpoints/test.
CHECKOUT = False

fcnn = FullyConnectedNet(HIDDEN_DIMS,
                         INPUT_DIMS,
                         CLASS_NUM,
                         DROPOUT,
                         REGULAR,
                         weight_scale=5e-3)

solver = Solver(fcnn,
                data,
                update_rule='sgd_momentum',
                optim_config={
                    "learning_rate": LEARNING_RATE,
                    "momentum": MOMENTUM
                },
                lr_decay=LEARNING_DECAY,
                print_every=100,
                batch_size=BATCH_SIZE,
                checkpoint_name="checkpoints/test" if CHECKOUT else None,
                num_epochs=EPOCH_NUM)
Example #10
0
import numpy as np

from src.fcnet import FullyConnectedNet
from src.utils.solver1 import Solver
from src.utils.data_utils import get_CIFAR10_data
"""
TODO: Use a Solver instance to train a TwoLayerNet that achieves at least 50% 
accuracy on the validation set.
"""
###########################################################################
#                           BEGIN OF YOUR CODE                            #
###########################################################################
# Train a single-hidden-layer (80 units) net on CIFAR-10.
# FIX: removed the redundant `data = dict()` that was immediately overwritten.
data = get_CIFAR10_data()
model = FullyConnectedNet([80], reg=0.1)
solver = Solver(model,
                data,
                update_rule='sgd',
                optim_config={
                    'learning_rate': 1e-3,
                },
                lr_decay=0.7,
                num_epochs=10,
                batch_size=100,
                print_every=100)
solver.train()

# Plot the training-loss curve.
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.title('Trainingloss')
plt.plot(solver.loss_history, 'o')
Example #11
0
###########################################################################
#                           BEGIN OF YOUR CODE                            #
###########################################################################
# Overfit sanity check on a tiny CIFAR-10 split (50 train / 1 val / 10 test).
TRAIN_NUM = 50
VALID_NUM = 1
TEST_NUM = 10

CLASS_NUM = 10

data = get_CIFAR10_data(TRAIN_NUM, VALID_NUM, TEST_NUM)

print (data["y_test"].shape)
print (data["y_test"])

# Input size = product of the per-sample dimensions.
INPUT_DIMS = np.prod(data["X_train"].shape[1:])
HIDDEN_DIMS = np.asarray([400, 400])

fcnn = FullyConnectedNet(HIDDEN_DIMS, INPUT_DIMS, CLASS_NUM)
solver = Solver(fcnn, data, update_rule='sgd', optim_config={"learning_rate":1e-3}, print_every=1, num_epochs=20)
solver.train()

# Predict on the test split and persist the trained net.
y = fcnn.predict(data["X_test"])
print (y)
fcnn.save()

# Plot loss and accuracy histories under the "overfit" tag.
draw_loss_acc(solver.loss_history, solver.train_acc_history, solver.val_acc_history, "overfit")

##############################################################################
#                             END OF YOUR CODE                               #
##############################################################################
from src.utils.data_utils import get_CIFAR10_data

"""
TODO: Overfit the network with 50 samples of CIFAR-10
"""
###########################################################################
#                           BEGIN OF YOUR CODE                            #
###########################################################################

# NOTE(review): datapath and datadir are bound to the same string; only
# datapath is used in the visible code.
datapath = datadir = ('/home/mat10/Documents/MSc Machine Learning/395-Machine Learning/'
           'CW2/assignment2_advanced/datasets/cifar-10-batches-py')
# Tiny split (50 train / 100 val / 100 test) for an overfitting check.
data = get_CIFAR10_data(datapath, num_training=50, num_validation=100, num_test=100,
                     subtract_mean=True)

hidden_dims = [1024, 512]
net = FullyConnectedNet(hidden_dims, num_classes=10, dropout=0., reg=0.0, seed=0)

solver = Solver(net,
                data,
                update_rule='sgd',
                optim_config={'learning_rate': 1e-3,
                              'momentum': 0.5},
                lr_decay=0.95,
                num_epochs=20,
                batch_size=10,
                print_every=100)
solver.train()

# Plotting is disabled by default; flip make_plots to enable it.
make_plots = False
if make_plots:
    import matplotlib.pyplot as plt
Example #13
0
import numpy as np

from src.fcnet import FullyConnectedNet
from src.utils.solver1 import Solver
from src.utils.data_utils import get_CIFAR10_data
"""
TODO: Overfit the network with 50 samples of CIFAR-10
"""
###########################################################################
#                           BEGIN OF YOUR CODE                            #
###########################################################################
# Overfit a large two-layer net on a tiny CIFAR-10 subset (50 train / 50 val).
# FIX: removed the redundant `data = dict()` that was immediately overwritten.
data = get_CIFAR10_data(50, 50)
model = FullyConnectedNet([2000, 2000], reg=1e-3)
solver = Solver(model,
                data,
                update_rule='sgd',
                optim_config={
                    'learning_rate': 1e-3,
                },
                lr_decay=0.95,
                num_epochs=10,
                batch_size=100,
                print_every=100)
solver.train()

# Plot the training-loss curve.
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.title('Trainingloss')
plt.plot(solver.loss_history, 'o')
plt.xlabel('Iteration')
from src.utils.data_utils import get_FER2013_data

# configs
hidden_dims = [20, 30]
input_dim = 48 * 48 * 3
num_classes = 7
dropout = 0
reg = 0.0
seed = 42  # NOTE(review): defined but unused — the model below is built with seed=None
weight_scale = 1e-2

# model
model = FullyConnectedNet(hidden_dims,
                          input_dim,
                          num_classes,
                          dropout,
                          reg,
                          weight_scale,
                          dtype=np.float32,
                          seed=None)

# dataset
data = get_FER2013_data(
    '/vol/bitbucket/jsh114/emotion-recognition-networks/datasets/FER2013')

# training
# Learning-rate sweep state; presumably best = (best_accuracy, best_lr) — TODO confirm
best = (0, 0)
initial_starting = 1e-2
minimal_learning = 1e-4
learning_rate = initial_starting
print("entering the loop")
while (learning_rate >= minimal_learning):
Example #15
0
"""
TODO: Overfit the network with 50 samples of CIFAR-10
"""
###########################################################################
#                           BEGIN OF YOUR CODE                            #
###########################################################################

# Overfit check: train a one-hidden-layer net on only 50 CIFAR-10 images.
out = get_CIFAR10_data(num_training=50)
data = {
      'X_train': out['X_train'], # training data
      'y_train': out['y_train'], # training labels
      'X_val':  out['X_val'], # validation data
      'y_val': out['y_val'] # validation labels
    }
model = FullyConnectedNet(hidden_dims=[100], num_classes=10)
solver = Solver(model, data,
                update_rule='sgd',
                optim_config={
                  'learning_rate': 1e-3,
                },
                lr_decay=0.95,
                num_epochs=20, batch_size=25,
                print_every=10)
solver.train()

#-----------------------Plotting--------------------------
plt.subplot(2,1,1)
plt.title("Training loss")
plt.plot(solver.loss_history,"o")
plt.xlabel('Iteration')
Example #16
0
import numpy as np
import matplotlib.pyplot as plt
from src.fcnet import FullyConnectedNet
from src.utils.solver import Solver
from src.utils.data_utils import get_CIFAR10_data

# Overfit the network with 50 samples of CIFAR-10

# Overfit a 50-unit single-hidden-layer net on 50 CIFAR-10 samples, then
# plot the loss and accuracy histories.
model = FullyConnectedNet([50], reg=0)
data = get_CIFAR10_data(num_training=50)
solver = Solver(model,
                data,
                update_rule='sgd',
                optim_config={
                    'learning_rate': 1e-3,
                },
                lr_decay=0.95,
                num_epochs=20,
                batch_size=100,
                print_every=100)
solver.train()

# Training-loss curve.
plt.subplot(2, 1, 1)
plt.title("Training loss")
plt.plot(solver.loss_history, "o")
plt.xlabel("Iteration")  # FIX: label typo, was "Iteratition"

# Train vs. validation accuracy curves.
plt.subplot(2, 1, 2)
plt.title('Overfit Network Accuracy')
plt.plot(solver.train_acc_history, "-o", label='train')
plt.plot(solver.val_acc_history, "-o", label='val')
Example #17
0
TODO: Use a Solver instance to train a TwoLayerNet that achieves at least 50% 
accuracy on the validation set.
"""
###########################################################################
#                           BEGIN OF YOUR CODE                            #
###########################################################################

# Train on 25k CIFAR-10 images with a single 100-unit hidden layer.
out = get_CIFAR10_data(num_training=25000)
data = {
    'X_train': out['X_train'],  # training data
    'y_train': out['y_train'],  # training labels
    'X_val': out['X_val'],  # validation data
    'y_val': out['y_val']  # validation labels
}
model = FullyConnectedNet(hidden_dims=[100],
                          num_classes=10,
                          dropout=0,
                          reg=0.5)
solver = Solver(model,
                data,
                update_rule='sgd',
                optim_config={
                    'learning_rate': 2e-3,
                },
                lr_decay=0.95,
                num_epochs=25,
                batch_size=250,
                print_every=100)
solver.train()

#-----------------------Plotting--------------------------
plt.subplot(2, 1, 1)
Example #18
0
    # Write one accuracy line per tested layer configuration, then append the
    # best configuration. `layers` and `accs` are parallel sequences —
    # presumably (size, size) pairs and floats; verify against the caller.
    with open(path + filename, 'w') as file:
        for line, layer in enumerate(layers):
            stat = "{}. layer: [{:4}, {:4}] - accuracy: {:.4f}".format(
                line + 1, layer[0], layer[1], accs[line])
            file.write(stat + "\n")

        # Index of the highest accuracy over all configurations.
        best_layer_idx = np.argmax(np.asarray(accs), axis=0)
        best_layer = layers[best_layer_idx]
        best_layer_acc = accs[best_layer_idx]

        file.write(
            "Best accur layer - {}: [{:4}, {:4}] with accuracy {:.4f}\n".
            format(best_layer_idx + 1, best_layer[0], best_layer[1],
                   best_layer_acc))


# Layer-size search on FER2013. TRAIN_NUM / VALID_NUM / TEST_NUM, CLASS_NUM,
# DROPOUT and REGULAR are defined elsewhere in this file.
data = get_FER2013_data(TRAIN_NUM, VALID_NUM, TEST_NUM)
# data = get_FER2013_data_from_binary(VALID_NUM)

# Input size = product of the per-sample dimensions.
INPUT_DIMS = np.prod(data["X_train"].shape[1:])
HIDDEN_DIMS = np.asarray([1155, 1155])

fcnn = FullyConnectedNet(HIDDEN_DIMS,
                         INPUT_DIMS,
                         CLASS_NUM,
                         DROPOUT,
                         REGULAR,
                         weight_scale=5e-3)
# Run the staged layer-size optimisation (defined elsewhere in this file).
stage_optim_layers(fcnn, data)
# stage_optim_drop(fcnn, data)
# stage_optim_drop(fcnn, data)
Example #19
0
import numpy as np

from src.fcnet import FullyConnectedNet
from src.utils.solver import Solver
from src.utils.data_utils import get_CIFAR10_data

"""
TODO: Overfit the network with 50 samples of CIFAR-10
"""
###########################################################################
#                           BEGIN OF YOUR CODE                            #
###########################################################################
# Overfit check: tiny CIFAR-10 split (49 train / 1 val / 0 test).
data = get_CIFAR10_data(49, 1, 0)

# Input size = product of the per-sample dimensions.
INPUT_DIMS = np.prod(data["X_train"].shape[1:])
HIDDEN_DIMS = np.asarray([400, 400])
NUM_CLASS = 10
net = FullyConnectedNet(HIDDEN_DIMS, INPUT_DIMS, NUM_CLASS)
# FIX: dropped the redundant backslash continuations — the argument list is
# parenthesised, so lines continue implicitly.
solver = Solver(net, data,
                update_rule='sgd',
                optim_config={'learning_rate': 1e-3},
                num_epochs=20,
                batch_size=10,
                print_every=1)
solver.train()


##############################################################################
#                             END OF YOUR CODE                               #
##############################################################################
Example #20
0
# Standardise all splits using statistics of the training set.
std = np.std(data['X_train'])
mean = np.mean(data['X_train'])

data['X_train'] = (data['X_train'] - mean) / std
data['X_val'] = (data['X_val'] - mean) / std
data['X_test'] = (data['X_test'] - mean) / std


targets = data['y_test']
# #data = get_FER2013_data(49,1 ,0)
#
# Input size = product of the per-sample dimensions; INPUT_NODE is defined
# elsewhere in this file.
INPUT_DIMS = np.prod(data["X_train"].shape[1:])
HIDDEN_DIMS = np.asarray(INPUT_NODE)
NUM_CLASS = 7
#net = FullyConnectedNet(HIDDEN_DIMS,INPUT_DIMS,num_classes=NUM_CLASS,dropout=0.6,seed =300)
net = FullyConnectedNet(HIDDEN_DIMS, INPUT_DIMS, num_classes=NUM_CLASS)

# FIX: dropped the redundant backslash continuations inside the
# parenthesised call.
solver = Solver(net, data,
                update_rule='sgd_momentum',
                optim_config={'learning_rate': 0.01, 'momentum': 0.2},
                num_epochs=100,
                batch_size=64,
                lr_decay=0.99,
                print_every=1000)
solver.train()
#plotGraphs(net, solver)

################## OUTPUT TO PKL FILE ###########################

# FIX: use a context manager so the file is closed even if pickling fails.
# NOTE: pickle files must only be loaded from trusted sources.
with open('net.pkl', 'wb') as output:
    pkl.dump(solver.model, output, -1)
Example #21
0
import pickle  # HACK

from src.fcnet import FullyConnectedNet
from src.utils.solver import Solver
from src.utils.data_utils import get_CIFAR10_data
"""
TODO: Use a Solver instance to train a TwoLayerNet that achieves at least 50%
accuracy on the validation set.
"""
###########################################################################
#                           BEGIN OF YOUR CODE                            #
###########################################################################
# Two 250-unit hidden layers with fairly strong L2 regularisation.
t_data = get_CIFAR10_data()

# print(t_data["X_train"])
model = FullyConnectedNet(hidden_dims=[250, 250], reg=0.5, weight_scale=1e-2)
solver = Solver(model,
                data=t_data,
                update_rule='sgd',
                optim_config={'learning_rate': 1e-3},
                lr_decay=0.95,
                num_epochs=20,
                batch_size=50,
                print_every=100)
solver.train()

# plot the training-loss curve
plt.subplot(2, 1, 1)
plt.title("Training loss")
plt.plot(solver.loss_history, "o")
Example #22
0
import numpy as np

from src.fcnet import FullyConnectedNet
from src.utils.solver import Solver
from src.utils.data_utils import get_CIFAR10_data
"""
TODO: Use a Solver instance to train a TwoLayerNet that achieves at least 50% 
accuracy on the validation set.
"""
###########################################################################
#                           BEGIN OF YOUR CODE                            #
###########################################################################
#define model and data
model = FullyConnectedNet(hidden_dims=[20, 30])
data = get_CIFAR10_data()

# define solver which helps us to train our model using the data
# (update rule and optim_config are left at the Solver's defaults)
solver = Solver(model, data, lr_decay=0.95, num_epochs=30, batch_size=120)

# train the model
solver.train()
##############################################################################
#                             END OF YOUR CODE                               #
##############################################################################
Example #23
0
import numpy as np

from src.fcnet import FullyConnectedNet
from src.utils.solver import Solver
from src.utils.data_utils import get_CIFAR10_data
"""
TODO: Use a Solver instance to train a TwoLayerNet that achieves at least 50%
accuracy on the validation set.
"""
###########################################################################
#                           BEGIN OF YOUR CODE                            #
###########################################################################
data = get_CIFAR10_data()
# Single 128-unit hidden layer, trained in float64.
model = FullyConnectedNet(hidden_dims=[128],
                          reg=1e-4,
                          num_classes=10,
                          dtype=np.float64)

solver = Solver(model,
                data,
                update_rule='sgd',
                optim_config={
                    'learning_rate': 1e-3,
                },
                lr_decay=0.85,
                num_epochs=30,
                batch_size=65,
                print_every=1000)
solver.train()

# Final accuracy over the full training split.
acc = solver.check_accuracy(data['X_train'], data['y_train'])
Example #24
0
import numpy as np
import matplotlib.pyplot as plt
from src.fcnet import FullyConnectedNet
from src.utils.solver import Solver
from src.utils.data_utils import get_FER2013_data
import pickle

# Three 512-unit hidden layers on FER2013 (48x48x1 inputs, 7 classes).
model = FullyConnectedNet([512, 512, 512],
                          input_dim=48 * 48 * 1,
                          num_classes=7,
                          dropout=0,
                          dtype=np.float32,
                          reg=0.1)
# Optionally resume from a previously pickled model instead:
#f = open('model.pickle', 'rb')
#model = pickle.load(f)
#f.close()

data = get_FER2013_data(num_test=3589)
solver = Solver(model,
                data,
                update_rule='sgd_momentum',
                optim_config={
                    'learning_rate': 5e-3,
                },
                lr_decay=0.95,
                num_epochs=35,
                batch_size=100,
                print_every=200)
solver.train()

# Interactively ask whether to persist the trained model.
save = input("Save model?  ")
Example #25
0
}

# Random search over weight scale (ws) and learning rate (lr) until a model
# reaches 30% validation accuracy.
reached = False
lr = 1e-2
ws = 1e-2

count = 1
while not reached:
    # Re-seed from the clock so each trial draws different hyper-parameters.
    np.random.seed(int(time.time()))
    print("iteration number{}".format(count))
    # Log-uniform draws: ws in [1e-5, 1e-1], lr in [1e-5, 1e-3].
    ws = 10**(np.random.uniform(-5, -1))
    lr = 10**(np.random.uniform(-5, -3))
    print("testing lr, ws: {},{}".format(lr, ws))
    # NOTE(review): ws is drawn and printed but not passed to the model
    # (weight_scale is commented out below).
    model = FullyConnectedNet(input_dim=48 * 48 * 3,
                              hidden_dims=[250, 50],
                              num_classes=7,
                              dropout=0,
                              reg=3)  #weight_scale=ws)
    solver = Solver(model,
                    data,
                    update_rule='sgd_momentum',
                    optim_config={'learning_rate': lr},
                    lr_decay=0.95,
                    num_epochs=30,
                    batch_size=100,
                    print_every=25)
    solver.train()
    if max(solver.val_acc_history) >= 0.3:
        reached = True
    count += 1