Example #1
def get_logs(self):
    """ Rebuilding model while loading the status dictionary """
    cfg = configs()
    # NOTE: L.R. has been set to the last L.R. in the latest training round
    self.arch, self.num_layers, self.layers, self.lr, self.decay_rate = (
        self.optimum['Arch'], self.optimum['Num layers'], self.optimum['Layer objs'],
        self.optimum['Learning rate'], self.optimum['L.R. decay'])
    self.model_type = cfg["MODEL"]["TYPE"]
    self.weights_decay = cfg["SOLVER"]["WEIGHT_DECAY"]
    if arguments().FIT:
        mode = "FIT"
    elif arguments().TRAIN:
        mode = "TRAIN"
        # If the loaded model has never been trained,
        # set L.R. to the base L.R.
        if not self.optimum['Trained']:
            self.lr = cfg[mode]["BASE_LR"]
            self.optimum['Loss'] = self.loss = float("inf")
    # Constant params
    if arguments().FIT or arguments().TRAIN:
        self.lr_policy = cfg[mode]["LR_POLICY"]
        self.decay_rate = cfg[mode]["DECAY_RATE"]
        self.epochs = cfg[mode]["EPOCHS"]
    # For all model working modes
    self.weights, self.biases = (self.optimum['Weights'],
                                 self.optimum['Biases'])
    # Set layer weights and biases (for fprop)
    i = 0
    for layer in self.layers:
        if layer.LayerName == 'Linear':
            layer.w = self.optimum['Weights'][i]
            layer.b = self.optimum['Biases'][i]
            i += 1
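
The keys read here, together with 'TestAcc' used in the testing and inference examples below, hint at the layout of the `optimum` status dictionary. A sketch for orientation only; the values are placeholders, not the project's defaults:

optimum = {
    'Arch': '3072-2048-512-128-10',   # hypothetical architecture string
    'Num layers': 4,
    'Layer objs': [],                 # list of layer objects
    'Learning rate': 0.01,
    'L.R. decay': 0.5,
    'Trained': False,
    'Loss': float('inf'),
    'Weights': [],                    # per-linear-layer weight tensors
    'Biases': [],
    'TestAcc': 0.0,
}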
Example #2
def setup_hardware():
    global dtype

    dtype = torch.FloatTensor
    args = arguments()
    use_gpu = using_gpu()
    if use_gpu:  # Want GPU
        if (-1 < args.GPU_ID < torch.cuda.device_count()
            ) and torch.cuda.is_available():  # GPU_ID exists & available
            # Subject to change
            torch.cuda.set_device(args.GPU_ID)
            print('\nUSING GPU: %d' % torch.cuda.current_device())
            print(
                colored(
                    'Check the GPU being used via "nvidia-smi" to avoid trouble mate!',
                    'red'))
            dtype = torch.cuda.FloatTensor
        else:  # GPU_ID doesn't exist or isn't available
            print("Selected GPU %d is NOT available." % args.GPU_ID)
            use_gpu = False
            print('\nUSING CPU.')
            dtype = torch.FloatTensor
    else:  # Want CPU
        print('\nUSING CPU.')
        dtype = torch.FloatTensor
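
A minimal usage sketch, assuming parse_arg() (shown in Example #7) has already populated the GPU_ID and CPU/GPU flags that arguments() and using_gpu() read:

parse_arg()        # assumed to define the CLI flags this project expects
setup_hardware()   # sets the module-level `dtype` to a CPU or CUDA tensor type
x = torch.randn(128, 3 * 32 * 32).type(dtype)   # tensor lands on the selected device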
Example #3
def set_hyper_parameters(config_file, model):
    global cfg
    args = arguments()
    with open(config_file, 'r') as f:
        # safe_load avoids PyYAML's unsafe default loader
        # (yaml.load without an explicit Loader is deprecated in PyYAML >= 5.1)
        cfg = yaml.safe_load(f)

    model.model_type += cfg["MODEL"]["TYPE"]

    model.weights_decay = cfg["SOLVER"]["WEIGHT_DECAY"]
    model.reg = cfg["SOLVER"]["REG"]
    if args.FIT:
        model.data_set = cfg["FIT"]["DATASET"]
        model.lr = cfg["FIT"]["BASE_LR"]
        model.lr_policy += cfg["FIT"]["LR_POLICY"]
        model.decay_rate = cfg["FIT"]["DECAY_RATE"]
        model.epochs = cfg["FIT"]["EPOCHS"]
    elif args.TRAIN:
        model.data_set = cfg["TRAIN"]["DATASET"]
        model.lr = cfg["TRAIN"]["BASE_LR"]
        model.lr_policy += cfg["TRAIN"]["LR_POLICY"]
        model.decay_rate = cfg["TRAIN"]["DECAY_RATE"]
        model.epochs = cfg["TRAIN"]["EPOCHS"]
    if args.TEST:
        model.data_set = cfg["TEST"]["DATASET"]
    if args.INFER:
        model.data_set = cfg["TEST"]["DATASET"]
    return
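
The lookups above imply the following shape for the parsed config; the keys mirror the code, while the concrete values here are illustrative placeholders, not the project's real defaults:

cfg = {
    "MODEL":  {"TYPE": "MLP"},
    "SOLVER": {"WEIGHT_DECAY": 5e-4, "REG": 1e-3},
    "FIT":    {"DATASET": "CIFAR10", "BASE_LR": 0.01, "LR_POLICY": "step",
               "DECAY_RATE": 0.5, "EPOCHS": 10},
    "TRAIN":  {"DATASET": "CIFAR10", "BASE_LR": 0.01, "LR_POLICY": "step",
               "DECAY_RATE": 0.5, "EPOCHS": 50},
    "TEST":   {"DATASET": "CIFAR10"},
}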
Example #4
def test(model, fitting_loader=None):
    """ Evaluate model results on test/train set """
    global images, ground_truths
    args = arguments()

    print("\n+++++++     TESTING     +++++++\n")
    model.show_log(test=True)

    # Get data
    test_dataset = dset.CIFAR10(directory='data', download=True, test=True)

    # If the model was fitted on a single batch, reuse that
    # batch; otherwise test on the full test set
    if fitting_loader is None:
        test_loader = dset.data_loader(data=test_dataset.data,
                                       batch_size=dset.CIFAR10.test_size,
                                       shuffled=False)
    else:
        test_loader = fitting_loader

    # In case test set is divided in batches
    for images, ground_truths in test_loader:
        if using_gpu():
            images = images.cuda()
        model.test(images, ground_truths)
        # Clear cache if using GPU (Unsure of effectiveness)
        if using_gpu():
            torch.cuda.empty_cache()

    # Collected ground truths (list) --> torch tensor
    ground_truths = torch.from_numpy(np.array(ground_truths))

    # Print testing loss & accuracy
    print(colored('\n# Testing Loss:', 'red'), end="")
    print('[%.4f]' % model.loss)
    model.test_acc = model.optimum['TestAcc'] = \
        (torch.mean((model.predictions == ground_truths).float()) * 100)  # Testing accuracy
    print(colored('\nTesting accuracy:', 'green'), end="")
    print(" = %.2f %%" % model.test_acc)

    # Tested model status
    if args.TRAIN:
        model.tested = True

    model.show_log(curr_status=True)
    model.set_logs()

    # Saving tested model
    if args.SAVE:
        save_model(args.SAVE, model)
    else:
        f = raw_input('Do you want to save the model? (y)es/(n)o: ').lower()
        if f in ('y', 'yes'):
            save_model('model.pkl', model)
        else:
            print('Not saving model.')
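
A tiny self-contained illustration of the accuracy line above (element-wise comparison, cast to float, mean, times 100):

preds  = torch.LongTensor([1, 2, 3, 3])
truths = torch.LongTensor([1, 2, 0, 3])
acc = torch.mean((preds == truths).float()) * 100   # 3 of 4 correct -> 75.0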
Example #5
def create_model():
    """ Build the net & model """
    args = arguments()
    # Define the network
    print('\n' + '+' * 20, '\nBuilding net & model\n' + '+' * 20)
    
    model = nnc.ModelNN()

    set_hyper_parameters(args.CFG, model)

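    # Fully-connected net: 3072 inputs (a flattened 32x32x3 CIFAR-10 image)
    # -> 2048 -> 512 -> 128 -> 10 class scores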
    model.add(nnc.LinearLayer(32 * 32 * 3, 2048))
    model.add(nnc.Activation('ReLU'))
    model.add(nnc.LinearLayer(2048, 512))
    model.add(nnc.Activation('ReLU'))
    model.add(nnc.LinearLayer(512, 128))
    model.add(nnc.Activation('ReLU'))
    model.add(nnc.LinearLayer(128, 10))
    model.add(nnc.CeCriterion('Softmax'))

    return model
Example #6
def fit(model=None):

    args = arguments()

    if model is None:
        model = create.create_model()

    print("\n+++++     FITTING     +++++\n")
    model.show_log(arch=True, fit=True)

    # Get data
    train_dataset = CIFAR10(directory='data', 
        download=True, 
        train=True)

    # Optimizer/Scheduler
    optimizer = Optimize(model)

    # SGD
    print("\n# Stochastic gradient descent #")
    print("Learning rate: %.4f\n" % model.lr)

    # Get one batch from the dataset
    fitting_loader = data_loader(data=train_dataset.data, 
        batch_size=CIFAR10.batch_size,
        model_testing=True)

    # Epochs
    for epoch in range(model.epochs):
        print('Epoch: [%d/%d]' % (epoch + 1, model.epochs), end=" ")
        for images, labels in fitting_loader:
            if using_gpu():
                images = images.cuda()
            model.train(images, labels)
            # Clear cache if using GPU (Unsure of effectiveness)
            if using_gpu():
                torch.cuda.empty_cache()
        # Print fitting loss
        print(colored('# Fitting test Loss:', 'red'), end="")
        print('[%.4f] @ L.R: %.9f' % (model.loss, model.lr))
        model.loss_history.append(model.loss)

        optimizer.time_decay(epoch, 0.0005)
        optimizer.set_optim_param(epoch)

    # model.plot_loss('Fitting loss')

    # Model status
    model.fitted = True

    model.show_log(curr_status=True)
    model.set_logs()

    # Saving fitted model
    if args.SAVE:
        save_model(args.SAVE, model)
    else:
        f = raw_input('Do you want to save the model? (y)es/(n)o: ').lower()
        if f in ('y', 'yes'):
            save_model('model.pkl', model)
        else:
            print('Not saving model.')

    return [model, fitting_loader]
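
For context, time-based L.R. decay is commonly implemented as lr = base_lr / (1 + decay_rate * epoch). Whether this project's Optimize.time_decay uses exactly that rule is an assumption, so the sketch below is illustrative only:

def time_decay(base_lr, decay_rate, epoch):
    # Common time-based schedule: the L.R. halves once decay_rate * epoch reaches 1.
    return base_lr / (1.0 + decay_rate * epoch)

# With base_lr=0.01 and the 0.0005 constant passed in fit():
# epoch 0 -> 0.0100, epoch 100 -> ~0.0095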
Example #7
def main():

    # Parse arguments provided
    parse_arg()
    args = arguments()
    # Setup GPU or CPU
    setup_hardware()

    global model
    # Load or create new ?
    if args.bms:
        best_model_selection(replace=True)
    elif args.LOAD:
        print('\nWorking with loaded model.')
        if args.FIT:
            model = load_model(args.LOAD)
            print('Fitting net for loaded model')
            model, fitting_loader = fit(model)
            if args.TEST:
                print('Testing model fitting:')
                test(model, fitting_loader)
            if args.INFER:
                print('Inference model fitting:')
                inferences(model)
            args.FIT = False
        elif args.TRAIN:
            model = load_model(args.LOAD)
            print('Training net for loaded model')
            model = train(model)
            if args.TEST:
                print('Testing trained model:')
                test(model)
            if args.INFER:
                print('Inference trained model:')
                inferences(model)
            args.TRAIN = False
        elif args.TEST:
            model = load_model(args.LOAD)
            print('Testing net for loaded model')
            test(model)
        elif args.INFER:
            model = load_model(args.LOAD)
            print('Running inference for loaded model')
            inferences(model)

    elif args.NEW:
        print('\nWorking with new model.')
        if args.FIT:
            print('Fitting net for new model')
            model, fitting_loader = fit()
            if args.TEST:
                print('Testing model fitting:')
                test(model, fitting_loader)
            if args.INFER:
                print('Inference model fitting:')
                inferences(model)
            args.FIT = False
        elif args.TRAIN:
            print('Training net for new model')
            model = train()
            if args.TEST:
                print('Testing trained model:')
                test(model)
            if args.INFER:
                print('Inference trained model:')
                inferences(model)
            args.TRAIN = False

    # Final goodbye
    print('\n' + '-' * 7 + '\nExiting\n' + '-' * 7)
    # Clear cache if using GPU (Unsure of effectiveness)
    if using_gpu():
        torch.cuda.empty_cache()
Example #8
def inferences(model, fitting_loader=None, all_exp=False):
    """ Display model results i.e. predictions on test/train set """
    args = arguments()

    global images, ground_truths

    print("\n+++++++     INFERENCE     +++++++\n")
    model.show_log(infer=True)

    # Get data
    test_dataset = dset.CIFAR10(directory='data', download=True, test=True)

    # If the model was fitted on a single batch, reuse that
    # batch; otherwise run inference on the full test set
    if fitting_loader is None:
        infer_loader = dset.data_loader(data=test_dataset.data,
                                        batch_size=dset.CIFAR10.test_size,
                                        shuffled=False)
    else:
        infer_loader = fitting_loader

    print("Test accuracy:", model.optimum['TestAcc'], '%')

    # In case test set is divided in batches
    for images, ground_truths in infer_loader:
        if using_gpu():
            images = images.cuda()
        model.test(images, ground_truths)
        # Clear cache if using GPU (Unsure of effectiveness)
        if using_gpu():
            torch.cuda.empty_cache()
        ground_truths = torch.from_numpy(np.array(ground_truths))

    # Print out (text) inferences
    if all_exp:
        for example in range(dset.CIFAR10.test_size):
            print(
                "Ground truth: (%d) %s || Prediction: (%d) %s || Confidence: %.2f %%"
                % (int(ground_truths[example]), dset.CIFAR10.classes[int(
                    ground_truths[example])], int(model.predictions[example]),
                   dset.CIFAR10.classes[int(model.predictions[example])],
                   model.output[-1][example] * 100))
    else:
        # Convert from tensor --> numpy to reshape
        images = images.cpu()
        images = \
            (images.numpy().reshape(dset.CIFAR10.test_size, 3, 32, 32).transpose(0, 2, 3, 1).astype("uint8"))
        while True:
            example = int(raw_input("Which test example? (0-%d): " % (dset.CIFAR10.test_size - 1)))
            if example < 0 or example >= dset.CIFAR10.test_size:
                print("Out of test set bounds.")
                break

            # Print ground truths & predictions
            print('Ground truth: (%d) %s' %
                  (int(ground_truths[example]), dset.CIFAR10.classes[int(
                      ground_truths[example])]))

            # Using matplotlib to display images
            imshow(images[example])
            xlabel(
                str(int(model.predictions[example])) + ' : ' +
                dset.CIFAR10.classes[int(model.predictions[example])])
            ylabel('Confidence: ' +
                   str(format(model.output[-1][example] * 100, '.2f')) + '%')
            show()

    # Model status
    model.infered = True

    model.show_log(curr_status=True)
    model.set_logs()

    # Saving inferred model
    if args.SAVE:
        save_model(args.SAVE, model)
    else:
        f = raw_input('Do you want to save the model? (y)es/(n)o: ').lower()
        if f in ('y', 'yes'):
            save_model('model.pkl', model)
        else:
            print('Not saving model.')
Example #9
def train(model=None):

    args = arguments()

    if model is None:
        model = create.create_model()
    
    print("\n+++++     TRAINING     +++++\n")

    model.show_log(arch=True, train=True)

    # Get data
    train_dataset = dset.CIFAR10(directory='data', 
        download=True, 
        train=True)

    # Data augmentation
    train_dataset = Transforms(
        dataset=train_dataset,
        lr_flip=True,
        rotate90=True, times=1)

    # Size after augmentation
    print("Training set size:", len(train_dataset.data), "images.")

    # Optimizer/Scheduler
    optimizer = nnc.Optimize(model)

    # SGD
    print("\n# Stochastic gradient descent #")
    print("Learning rate: %.4f\n" % model.lr)

    # Epochs
    for epoch in range(model.epochs):

        print('Epoch: [%d/%d]' % (epoch + 1, model.epochs), end=" ")
        # Prepare batches from whole dataset
        train_loader = dset.data_loader(data=train_dataset.data, 
            batch_size=dset.CIFAR10.batch_size, 
            shuffled=True)
        # Iterate over batches
        for images, labels in train_loader:
            if using_gpu():
                images = images.cuda()
            # Training round
            model.train(images, labels)
            # Clear cache if using GPU (Unsure of effectiveness)
            if using_gpu():
                torch.cuda.empty_cache()
        # Print training loss
        print(colored('# Training Loss:', 'red'), end=" ")
        print('[%.4f] @ L.R: %.4f' % (model.loss, model.lr))
        model.loss_history.append(model.loss)

        optimizer.time_decay(epoch, model.decay_rate)
        optimizer.set_optim_param(epoch)
    
    # model.plot_loss('Training loss')
    
    # Model status
    model.trained = True
    
    model.show_log(curr_status=True)
    model.set_logs()

    # Saving trained model
    if args.SAVE:
        save_model(args.SAVE, model)
    else:
        f = raw_input("Do you want to save the model? (y)es/(n)o: ").lower()
        if f in ('y', 'yes'):
            save_model('model.pkl', model)
        else:
            print('Not saving model.')

    return model