def train(train_files,
          test_files,
          train_batch_size,
          eval_batch_size,
          model_file,
          vocab_size,
          num_classes,
          n_epoch,
          print_every=50,
          eval_every=500):
    torch.multiprocessing.set_sharing_strategy('file_system')
    torch.backends.cudnn.benchmark = True
    print "Setting seed..."
    seed = 1234
    torch.manual_seed(seed)

    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    # setup CNN model
    CONFIG["vocab_size"] = vocab_size
    CONFIG["num_classes"] = num_classes
    model = Net()

    if torch.cuda.is_available():
        print "CUDA is available on this machine. Moving model to GPU..."
        model.cuda()
    print(model)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters())

    train_set = HDF5Dataset(train_files)
    test_set = HDF5Dataset(test_files)

    train_loader = DataLoader(dataset=train_set,
                              batch_size=train_batch_size,
                              shuffle=True,
                              num_workers=2)

    test_loader = DataLoader(dataset=test_set,
                             batch_size=eval_batch_size,
                             num_workers=2)

    _train_loop(train_loader=train_loader,
                test_loader=test_loader,
                model=model,
                criterion=criterion,
                optimizer=optimizer,
                n_epoch=n_epoch,
                print_every=print_every,
                eval_every=eval_every,
                model_file=model_file)
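

# _train_loop is called above but not defined in this example. A minimal
# sketch of such a helper, assuming it interleaves optimization steps with
# periodic logging and checkpointing (the structure and names below are
# assumptions, not the original implementation):
def _train_loop(train_loader, test_loader, model, criterion, optimizer,
                n_epoch, print_every, eval_every, model_file):
    step = 0
    for epoch in range(n_epoch):
        model.train()
        for inputs, labels in train_loader:
            if torch.cuda.is_available():
                inputs, labels = inputs.cuda(), labels.cuda()
            optimizer.zero_grad()
            loss = criterion(model(inputs), labels)
            loss.backward()
            optimizer.step()
            step += 1
            if step % print_every == 0:
                print("epoch {} step {} loss {:.4f}".format(epoch, step, loss.item()))
            if step % eval_every == 0:
                # evaluation on test_loader and checkpointing would go here
                torch.save(model.state_dict(), model_file)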

# What follows is the tail of make_iterations(net, lr_ini), the function called
# from the __main__ block below; the epoch loop that defines acc, nbs, bs,
# model_name and the *_values lists is not included in this example.
        # accuracy on the whole test set
        acc /= (nbs * bs)
        print('Test accuracy on this epoch: {}'.format(acc))
        test_acc_values.append(acc)

    # save the trained model
    torch.save(net.state_dict(), 'models/' + model_name)

    return lr_values, running_loss_values, train_acc_values, test_acc_values


if __name__ == '__main__':

    net = Net()
    net.cuda()

    print(net)

    lr_values, running_loss_values, train_acc_values, test_acc_values = make_iterations(
        net, lr_ini)

    plt.plot(running_loss_values)
    plt.ylim((0, 5))
    plt.title('Loss over iterations')
    plt.show()

    plt.plot(lr_values)
    plt.ylim((0, 20 * lr_ini))
    plt.title('Learning rate over iterations')
    plt.show()
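

# HDF5Dataset is used by train() above but not defined in this example. A
# minimal sketch of such a dataset, assuming each HDF5 file stores samples
# under a "data" key and labels under a "labels" key (those key names and the
# load-everything-into-memory approach are assumptions):
import h5py
from torch.utils.data import Dataset

class HDF5Dataset(Dataset):
    def __init__(self, files):
        # eagerly load every file into memory; fine for small corpora
        samples, labels = [], []
        for path in files:
            with h5py.File(path, 'r') as f:
                samples.append(torch.from_numpy(f['data'][:]))
                labels.append(torch.from_numpy(f['labels'][:]))
        self.samples = torch.cat(samples)
        self.labels = torch.cat(labels)

    def __len__(self):
        return len(self.samples)

    def __getitem__(self, idx):
        return self.samples[idx], self.labels[idx]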
Example #3
    nStride = int(param["nonreducing_stride"])
    rOut = int(param["reducing_out"])
    nOut = int(param["nonreducing_out"])

    # construct the model (it is converted to double precision below)
    model = Net(red_kernel=rKern,
                nonred_kernel=nKern,
                red_stride=rStride,
                nonred_stride=nStride,
                red_out=rOut,
                nonred_out=nOut,
                d="cuda:0" if enableCuda else "cpu")

    model.double()
    if enableCuda:
        model.cuda()

    # declare the optimizer and the loss function
    optimizer = optim.Adadelta(model.parameters(), lr=lr_rate)
    loss = torch.nn.MSELoss(reduction='mean')

    print("Loading model")
    model.load_state_dict(torch.load(modelFile))

    print("Starting Testing")
    out_nrgs, test_val = model.test(images, energies, loss)
    print("Testing Finished")

    out_nrgs = convertToNumpy(out_nrgs, enableCuda)
    # scaling energies to mHa
    energies = 1000 * energies
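

# convertToNumpy is not defined in this example; a minimal sketch of such a
# helper, assuming it only has to bring a (possibly GPU-resident) tensor back
# to a NumPy array:
def convertToNumpy(tensor, enableCuda):
    # copy to host memory if CUDA was used, detach from the autograd graph
    if enableCuda:
        tensor = tensor.cpu()
    return tensor.detach().numpy()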
Example #4
def main():
    try:
        args, check = argvcontrol()

        if check:
            if platform.system() == "Windows" or platform.system() == "win32":
                args.model = os.path.abspath(".") + "\\" + args.model
                args.path = os.path.abspath(".") + "\\" + args.path
            else:
                args.model = os.path.abspath(".") + "/" + args.model
                args.path = os.path.abspath(".") + "/" + args.path

            classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog',
                       'horse', 'ship', 'truck')

            net = Net()
            if args.cuda:
                net.cuda()

            if args.model and args.training:
                if not os.path.exists(args.model):
                    print("Model will be created")
                else:
                    loadModel(net, args.model)
            elif args.model and not args.training:
                if os.path.exists(args.model):
                    loadModel(net, args.model)
                else:
                    print(
                        "Invalid model checkpoint file. Please enter a valid path."
                    )

            test_transform, train_transform = getTransformtions()

            if args.training:
                trainloader, testloader, criterion = CIFAR10Init(
                    args.cuda, args.path, int(args.batch_size),
                    int(args.workers))
                if trainloader and testloader:
                    frequency = int(len(trainloader) / 4)
                    training(net, args.cuda, int(args.epochs),
                             trainloader, frequency, criterion,
                             float(args.learning_rate), int(args.batch_size),
                             int(args.workers), args.model)
                else:
                    exit()

            elif args.image:
                _class = recognition(net, args.image, test_transform, classes)
                if _class:
                    print("\nClassification: %s\n" % (_class))

            else:
                trainloader, testloader, criterion = CIFAR10Init(
                    args.cuda, args.path, int(args.batch_size),
                    int(args.workers))
                if trainloader and testloader:
                    validation(net, args.cuda, testloader, classes, args.model,
                               int(args.batch_size))
                else:
                    exit()
        else:
            print("\nTraining: python image-recognizer.py --train")
            print("Validation: python image-recognizer.py --no-train")
            print(
                "Recognition: python image-recognizer.py --image PATH_TO_IMAGE"
            )
            print(
                "You can set parameters for these operations. Add --help for more information.\n"
            )

    except (KeyboardInterrupt, SystemExit):
        pass
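

# loadModel and getTransformtions are used above but not shown in this
# example. Minimal sketches, assuming a plain state_dict checkpoint and the
# usual (0.5, 0.5, 0.5) CIFAR-10 normalization (both are assumptions):
from torchvision import transforms

def loadModel(net, path):
    # restore previously saved weights into the network
    net.load_state_dict(torch.load(path))

def getTransformtions():
    normalize = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    # test: tensor conversion + normalization only
    test_transform = transforms.Compose([transforms.ToTensor(), normalize])
    # train: light augmentation before the same normalization
    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        normalize,
    ])
    return test_transform, train_transform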
def main():
    # load images as a numpy array
    train_dataset = np.array(
        np.load('/content/drive/My Drive/McGill/comp551/data/train_max_x',
                allow_pickle=True))
    train_dataset = train_dataset / 255.0
    train_dataset = train_dataset.astype('float32')
    targets = pd.read_csv(
        '/content/drive/My Drive/McGill/comp551/data/train_max_y.csv',
        delimiter=',',
        skipinitialspace=True)
    targets = targets.to_numpy()
    # remove id column
    targets = targets[:, 1]
    targets = targets.astype(int)

    X_train, X_test, y_train, y_test = train_test_split(train_dataset,
                                                        targets,
                                                        test_size=0.2,
                                                        random_state=42)
    # Clean memory
    train_dataset = None

    # converting training images into torch format
    dim1, dim2, dim3 = X_train.shape
    X_train = X_train.reshape(dim1, 1, dim2, dim3)
    X_train = torch.from_numpy(X_train)
    y_train = torch.from_numpy(y_train)

    # converting validation images into torch format
    dim1, dim2, dim3 = X_test.shape
    X_test = X_test.reshape(dim1, 1, dim2, dim3)
    X_test = torch.from_numpy(X_test)
    y_test = torch.from_numpy(y_test)

    # defining the model
    model = Net()

    criterion = nn.NLLLoss()
    optimizer = optim.SGD(model.parameters(), lr=0.003, momentum=0.9)

    if torch.cuda.is_available():
        model = model.cuda()
        criterion = criterion.cuda()
    print(model)

    time0 = time()
    epochs = 1
    for e in range(epochs):
        model.train()
        running_loss = 0

        # move the data to the GPU only when one is available (Variable is no
        # longer needed; plain tensors work directly with autograd)
        x_train, y_train, x_val, y_val = X_train, y_train, X_test, y_test
        if torch.cuda.is_available():
            x_train = x_train.cuda()
            y_train = y_train.cuda()
            x_val = x_val.cuda()
            y_val = y_val.cuda()

        # clearing the gradients of the model parameters
        optimizer.zero_grad()

        # prediction for training and validation set
        output_train = model(x_train)
        output_val = model(x_val)

        # computing the training and validation loss
        loss_train = criterion(output_train, y_train)
        loss_val = criterion(output_val, y_val)

        # computing the gradients of the training loss w.r.t. the model parameters
        loss_train.backward()

        # updating the parameters with a single optimizer step
        optimizer.step()

        running_loss += loss_train.item()
        # one full-batch step is taken per epoch, so running_loss is already the
        # mean training loss for this epoch (train_dataset was freed above)
        print("Epoch {} - Training loss: {}".format(e, running_loss))

    print("\nTraining Time (in minutes) =", (time() - time0) / 60)

    # prediction for validation set
    with torch.no_grad():
        output = model(x_val)

    ps = torch.exp(output).cpu()
    probab = list(ps.numpy())
    predictions = np.argmax(probab, axis=1)

    # accuracy on the validation set (compare against the CPU labels)
    print("\nModel Accuracy =", accuracy_score(y_test.numpy(), predictions))