Example #1
def evaluate(config):

    # define model
    if config.model.name == 'mlp':
        model = MLP(config)
    elif config.model.name == 'cnn':
        model = CNN(config)
    else:
        raise ValueError(f"unknown model: {config.model.name}")

    # load model & statistics
    model = load_model(config, model)
    loss, accuracy = load_statistics(config)

    # print performance graphs
    display_model_performance(loss, accuracy)

    # load mnist dataset
    train_loader, test_loader = load_mnist(config)
    test_iter = iter(test_loader)
    images, labels = next(test_iter)  # iterator.next() was removed in Python 3

    # evaluate accuracy and loss on one test batch (eval mode, no gradients)
    model.eval()
    with torch.no_grad():
        logits = model(images)
        test_loss = nn.CrossEntropyLoss()(logits, labels).item()
        test_acc = calculate_accuracy(logits.numpy(), labels)

    print("test loss:      ", test_loss)
    print("test accuracy:  ", test_acc)
Example #2
def select_dataset(args):
    if args.dataset == "cifar10" or args.dataset == "cifar100":
        from utils.data import load_cifar
        trainloader, testloader, num_classes = load_cifar(
            args.dataset, args.train_batch, test_batch=args.test_batch)

    elif args.dataset == "imagenet":
        folder = "/data/imagenet"
        from utils.data import load_imagenet
        trainloader, testloader, num_classes = load_imagenet(
            folder, args.train_batch, test_batch=args.test_batch)

    elif args.dataset == "mnist" or args.dataset == "fmnist":
        from utils.data import load_mnist
        trainloader, testloader, num_classes = load_mnist(
            args.dataset, args.train_batch, test_batch=args.test_batch)

    else:
        # without this, the return statement below would raise a NameError
        raise ValueError(f"dataset {args.dataset} is not supported")

    return trainloader, testloader, num_classes
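For context, select_dataset expects an argparse namespace with dataset, train_batch, and test_batch attributes. A minimal sketch of how it might be wired up (the flag names are assumptions inferred from the attribute names):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dataset', default='mnist')
parser.add_argument('--train-batch', type=int, default=128)
parser.add_argument('--test-batch', type=int, default=100)
args = parser.parse_args()

trainloader, testloader, num_classes = select_dataset(args)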
Example #3
    parser.add_argument('--fold', help='select train fold (only for stl10, (0-9), default=0).', type=int, default=0)
    args = parser.parse_args()

    # select model
    if str(args.model) == "mnist_dlda":
        from models import mnist_dlda as model
    elif str(args.model) == "cifar10_dlda":
        from models import cifar10_dlda as model
    elif str(args.model) == "stl10_dlda":
        from models import stl10_dlda as model
    else:
        # without a valid model, model.EXP_NAME below would raise a NameError
        raise ValueError(f"unknown model: {args.model}")

    print("\nLoading data ...")
    if str(args.data) == 'mnist':
        data = load_mnist()
    elif str(args.data) == 'mnist_60k':
        data = load_mnist(k60=True)
    elif str(args.data) == 'cifar10':
        data = load_cifar10()
    elif str(args.data) == 'cifar10_50k':
        data = load_cifar10(k50=True)
    elif str(args.data) == 'stl10':
        data = load_stl10(fold=args.fold)
    else:
        raise ValueError(f"unknown dataset: {args.data}")

    # path to net dump
    exp_root = os.path.join(EXP_ROOT, model.EXP_NAME)
    dump_file = os.path.join(exp_root, 'params.pkl')
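The if/elif import chain above can also be collapsed with importlib, which scales better as models are added. A sketch, assuming each supported model is a module in the models package whose name matches the --model argument:

import importlib

SUPPORTED_MODELS = {'mnist_dlda', 'cifar10_dlda', 'stl10_dlda'}

if args.model not in SUPPORTED_MODELS:
    raise ValueError(f"unknown model: {args.model}")
model = importlib.import_module(f"models.{args.model}")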
Example #4
from utils.data import load_mnist, save_data_for_toolbox
from utils.saving import save_model_config
# WORKING DIRECTORY #
#####################

# Define path where model and output files will be stored.
# The user is responsible for cleaning up this temporary directory.
path_wd = os.path.abspath(
    os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', 'temp',
                 str(time.time())))
os.makedirs(path_wd)

# GET DATASET #
###############

(x_train, y_train), (x_test, y_test) = load_mnist()
# Store part of the training data plus the test data in path_wd so the toolbox can use them.
save_data_for_toolbox(x_train, x_test, y_test, path_wd)

# CREATE ANN #
##############

# Create the ANN in Keras and train


def build_model(hidden_units=1200,
                dropout_rate=0.5,
                activity_regularizer=None):

    input_shape = x_train.shape[1:]
    input_layer = Input(input_shape)
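The snippet is cut off right after the input layer. A plausible continuation for an MNIST MLP that actually uses the hidden_units, dropout_rate, and activity_regularizer parameters could look like the sketch below; the two-hidden-layer shape, the ReLU activations, and the tensorflow.keras imports are assumptions, not the original code:

from tensorflow.keras.layers import Input, Flatten, Dense, Dropout
from tensorflow.keras.models import Model

def build_model(hidden_units=1200,
                dropout_rate=0.5,
                activity_regularizer=None):
    input_layer = Input(x_train.shape[1:])
    x = Flatten()(input_layer)
    for _ in range(2):  # two hidden layers (assumed)
        x = Dense(hidden_units, activation='relu',
                  activity_regularizer=activity_regularizer)(x)
        x = Dropout(dropout_rate)(x)
    output_layer = Dense(10, activation='softmax')(x)  # 10 MNIST classes
    return Model(input_layer, output_layer)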
Example #5
def train(config):

    # load mnist dataset
    train_loader, test_loader = load_mnist(config)

    # define model
    if config.model.name == 'mlp':
        model = MLP(config)
    elif config.model.name == 'cnn':
        model = CNN(config)
    else:
        raise ValueError(f"unknown model: {config.model.name}")

    # define loss criteria & optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(),
                          lr=config.optimizer.params.lr,
                          momentum=config.optimizer.params.momentum,
                          weight_decay=config.optimizer.params.regularization)

    MAX_EPOCH = config.optimizer.epochs
    BATCHES_PER_EPOCH = len(train_loader)

    loss_batch = []
    acc_batch = []
    running_loss, running_acc = 0, 0

    for epoch in range(MAX_EPOCH):
        batch_loss, batch_acc = 0, 0

        for i, data in enumerate(train_loader, 0):
            inputs, labels = data

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward pass
            logits = model(inputs)  # call the module, not .forward(), so hooks run

            # calculate softmax cross-entropy loss
            loss = criterion(logits, labels)

            # backward pass & gradient descent
            loss.backward()
            optimizer.step()

            # fetch statistics for the current batch
            acc = calculate_accuracy(logits.detach().numpy(), labels)
            running_acc += acc
            running_loss += loss.item()
            batch_loss += loss.item()
            batch_acc += acc

            # print running statistics every 50 batches
            if i % 50 == 49:
                print(
                    'Epoch [{}]/[{}]\t Batch [{}]/[{}]\t loss: [{:.3f}]\t accuracy: [{:.4f}]'
                    .format(epoch + 1, MAX_EPOCH, i + 1, BATCHES_PER_EPOCH,
                            running_loss / 50, running_acc / 50))
                running_loss, running_acc = 0, 0

        # record the average loss & accuracy over the epoch's batches
        loss_batch.append(batch_loss / BATCHES_PER_EPOCH)
        acc_batch.append(batch_acc / BATCHES_PER_EPOCH)

    # save model and loss & accuracy curves
    save_model(model, config)
    save_statistics(loss_batch, acc_batch, config)
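The MLP and CNN classes referenced by config.model.name come from the surrounding repository and are not shown. A minimal sketch of what an MNIST MLP of this kind could look like (the 256-unit hidden layer and the unused config fields are assumptions):

import torch.nn as nn

class MLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.net = nn.Sequential(
            nn.Flatten(),          # 28x28 images -> 784-dim vectors
            nn.Linear(784, 256),
            nn.ReLU(),
            nn.Linear(256, 10),    # raw logits for nn.CrossEntropyLoss
        )

    def forward(self, x):
        return self.net(x)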
Example #6
# Validate dataset name
assert args.dataset in ('mnist', 'fmnist'), 'Dataset can only be mnist or fmnist'
# Use CUDA
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
use_cuda = torch.cuda.is_available()
# set parameters
best_acc = 0  # best test accuracy
do_save_checkpoint = True
start_epoch = args.start_epoch  # start from epoch 0 or last checkpoint epoch
if not os.path.isdir(args.checkpoint):
    mkdir_p(args.checkpoint)

# ............. Load Data ...................
print('==> Preparing dataset %s' % args.dataset)
trainloader, testloader, num_classes = load_mnist(args.dataset,
                                                  args.train_batch,
                                                  test_batch=args.test_batch)

# ............. Create Model ................
model = ResNet_MNIST(args.depth, norm_type=args.norm)
# move the model to the GPU and enable multi-GPU + cudnn autotuning when available
if use_cuda:
    model = model.cuda()
    model = torch.nn.DataParallel(model)
    cudnn.benchmark = True
print('    Total params: %.4fM' %
      (sum(p.numel() for p in model.parameters()) / 1000000.0))
criterion = nn.CrossEntropyLoss()
optimizer = set_optimizer(model, args)

title = '{}-{}'.format(args.dataset, args.norm)
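The snippet sets up best_acc, do_save_checkpoint, and start_epoch but is cut off before the training loop that uses them. A sketch of the usual checkpointing pattern, assuming hypothetical train() and test() helpers and an args.epochs flag from the full script:

for epoch in range(start_epoch, args.epochs):
    train(trainloader, model, criterion, optimizer)   # assumed helper
    test_acc = test(testloader, model, criterion)     # assumed helper
    if do_save_checkpoint and test_acc > best_acc:
        best_acc = test_acc
        torch.save({'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'best_acc': best_acc,
                    'optimizer': optimizer.state_dict()},
                   os.path.join(args.checkpoint, 'model_best.pth.tar'))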