Example No. 1
    def __init__(self, config):
        # Start from an empty dict so the per-key copy below cannot fail
        # on a missing attribute.
        self.config = {}
        for key, value in config.items():
            self.config[key] = value

        config = self.config
        # Fraction of the test split held out for validation (assumed
        # default; the original excerpt uses self.valid_ratio without
        # ever setting it).
        self.valid_ratio = config.get("valid_ratio", 0.2)
        self.i = 0

        mnist_transforms = tv_transforms.Compose([
            tv_transforms.ToTensor(),
            tv_transforms.Normalize((0.1307, ), (0.3081, ))
        ])

        # Train Set Initialization
        self.train_loader = DataLoader(tv_datasets.MNIST(
            "~/data", train=True, download=True, transform=mnist_transforms),
                                       batch_size=config.get("batch_size", 64),
                                       shuffle=True)

        # Validation and Test Set Initialization
        test_valid_dataset = tv_datasets.MNIST("~/data",
                                               train=False,
                                               transform=mnist_transforms)

        nb_test = int((1.0 - self.valid_ratio) * len(test_valid_dataset))
        # Derive nb_valid from nb_test so the two lengths always sum to
        # len(test_valid_dataset), as random_split requires.
        nb_valid = len(test_valid_dataset) - nb_test
        test_dataset, val_dataset = torch.utils.data.random_split(
            test_valid_dataset, [nb_test, nb_valid])

        self.test_loader = DataLoader(test_dataset,
                                      batch_size=64,
                                      shuffle=True)

        self.val_loader = DataLoader(val_dataset, batch_size=64, shuffle=True)

        # Initialization of the model. The original excerpt's branch
        # conditions were lost (if True / elif True); a model-name
        # selector is assumed here to restore the intended dispatch.
        model_name = config.get("model", "LeNet")
        if model_name == "LeNet":
            self.model = model.LeNet(192,
                                     int(round(config.get("hidden_dim",
                                                          64))), 10,
                                     int(round(config.get("n_layer", 1))),
                                     config.get("dropout_prob", 0.5),
                                     nn.Tanh())
        else:
            raise ValueError(
                "Model with name {} is not recognized.".format(model_name))

        # Method of Optimization
        self.optimizer = torch.optim.Adam(
            self.model.parameters(),
            lr=config.get("lr", 0.01),
            # Note: the usual Adam defaults are betas=(0.9, 0.999); the
            # fallback values below are kept from the original excerpt.
            betas=(config.get("b1", 0.999), config.get("b2", 0.9999)),
            eps=config.get("eps", 1e-08),
            weight_decay=config.get("weight_decay", 0),
            amsgrad=True)
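
A hypothetical way to drive this constructor; the class name Trainable and the config values shown are illustrative assumptions, not part of the original excerpt:

# Sketch only: assumes the __init__ above belongs to a class named Trainable.
trainable = Trainable({
    "batch_size": 128,
    "lr": 1e-3,
    "hidden_dim": 64,
    "n_layer": 1,
    "dropout_prob": 0.5,
})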
Example No. 2
def train(train_iter, test_iter, input_n, output_n, epochs, loss, lra):
    # net = model.MySoftModel(input_n, output_n)
    net = model.LeNet()
    # Re-initialize every parameter from N(0, 1). Note: a standard
    # deviation of 1 is unusually large for LeNet; smaller values such
    # as 0.01 are more common.
    for params in net.parameters():
        init.normal_(params, mean=0, std=1)

    optimizer = optim.SGD(net.parameters(), lr=lra)
    acc_rate = cal_acc(test_iter, net)
    print("accuracy before training: {}".format(acc_rate))
    for epoch in range(1, epochs + 1):
        print("epoch {} start".format(epoch))
        for features, labels in train_iter:
            outputs = net(features)
            batch_loss = loss(outputs, labels)
            optimizer.zero_grad()
            batch_loss.backward()
            optimizer.step()

        acc_rate_test = cal_acc(test_iter, net)
        acc_rate_train = cal_acc(train_iter, net)
        # batch_loss here is the loss of the final batch of the epoch.
        print("epoch {}, loss:{}, acc_rate_train:{}, acc_rate_test:{}".format(
            epoch, batch_loss.item(), acc_rate_train, acc_rate_test))
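
The helper cal_acc is referenced but never defined in this excerpt; a minimal sketch consistent with how it is called above (the real signature and behavior may differ):

import torch

def cal_acc(data_iter, net):
    # Fraction of correctly classified samples over the whole iterator.
    net.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for features, labels in data_iter:
            preds = net(features).argmax(dim=1)
            correct += (preds == labels).sum().item()
            total += labels.size(0)
    net.train()
    return correct / total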
Example No. 3
# (The opening of this call was truncated in the excerpt; it is
# reconstructed here to mirror the test_loader below.)
train_loader = torch.utils.data.DataLoader(datasets.MNIST(
    '../data',
    train=True,
    download=True,
    transform=transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])),
                                           batch_size=args.batch_size,
                                           shuffle=True,
                                           **kwargs)
test_loader = torch.utils.data.DataLoader(datasets.MNIST(
    '../data',
    train=False,
    transform=transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])),
                                          batch_size=args.test_batch_size,
                                          shuffle=True,
                                          **kwargs)

model = model.LeNet()
if args.cuda:
    model.cuda()

optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)


def train(epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        if args.cuda:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        # The excerpt was cut off here; the backward pass and parameter
        # update are added to complete the training step.
        loss.backward()
        optimizer.step()
Example No. 4
    def _make_model(self):
        if self.model_type == 'LeNet':
            self.model = model.LeNet(self.input_size,
                                     self.class_num).to(self.device)
            self.optimizer = optim.SGD(self.model.parameters(),
                                       lr=self.lr,
                                       momentum=0.9)
            # self.scheduler = optim.lr_scheduler.StepLR(
            #     self.optimizer,step_size = 25, gamma=0.5,last_epoch = -1)
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer, T_max=60, last_epoch=-1)
            self.criterion = torch.nn.CrossEntropyLoss().to(self.device)

        elif self.model_type == 'ResNet18':
            self.model = torchvision.models.ResNet(
                torchvision.models.resnet.BasicBlock, [2, 2, 2, 2],
                self.class_num).to(self.device)
            self.optimizer = optim.SGD(self.model.parameters(),
                                       lr=self.lr,
                                       momentum=0.9)
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer, T_max=60, last_epoch=-1)
            self.criterion = torch.nn.CrossEntropyLoss().to(self.device)

        elif self.model_type == 'ResNet34':
            self.model = torchvision.models.ResNet(
                torchvision.models.resnet.BasicBlock, [3, 4, 6, 3],
                self.class_num).to(self.device)
            self.optimizer = optim.SGD(self.model.parameters(),
                                       lr=self.lr,
                                       momentum=0.9)
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer, T_max=60, last_epoch=-1)
            self.criterion = torch.nn.CrossEntropyLoss().to(self.device)

        elif self.model_type == 'EfficientNetb0':
            self.model = EfficientNet.from_name('efficientnet-b0').to(
                self.device)
            self.optimizer = optim.SGD(self.model.parameters(),
                                       lr=self.lr,
                                       momentum=0.9)
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer, T_max=60, last_epoch=-1)
            self.criterion = torch.nn.CrossEntropyLoss().to(self.device)

        elif self.model_type == 'DenseNet':
            self.model = torchvision.models.DenseNet(
                num_classes=self.class_num).to(self.device)
            self.optimizer = optim.SGD(self.model.parameters(),
                                       lr=self.lr,
                                       momentum=0.9)
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer, T_max=60, last_epoch=-1)
            self.criterion = torch.nn.CrossEntropyLoss().to(self.device)

        elif self.model_type == 'VGG19bn':
            cfg = [
                64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512,
                512, 512, 'M', 512, 512, 512, 512, 'M'
            ]
            features = torchvision.models.vgg.make_layers(cfg, batch_norm=True)
            self.model = torchvision.models.VGG(features=features,
                                                num_classes=self.class_num).to(
                                                    self.device)
            self.optimizer = optim.SGD(self.model.parameters(),
                                       lr=self.lr,
                                       momentum=0.9)
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer, T_max=60, last_epoch=-1)
            self.criterion = torch.nn.CrossEntropyLoss().to(self.device)
        elif self.model_type == 'VGG11':
            cfg = [
                64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'
            ]
            features = torchvision.models.vgg.make_layers(cfg,
                                                          batch_norm=False)
            self.model = torchvision.models.VGG(features=features,
                                                num_classes=self.class_num).to(
                                                    self.device)
            self.optimizer = optim.SGD(self.model.parameters(),
                                       lr=self.lr,
                                       momentum=0.9)
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer, T_max=60, last_epoch=-1)
            self.criterion = torch.nn.CrossEntropyLoss().to(self.device)
        elif self.model_type == 'VGG11bn':
            cfg = [
                64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'
            ]
            features = torchvision.models.vgg.make_layers(cfg, batch_norm=True)
            self.model = torchvision.models.VGG(features=features,
                                                num_classes=self.class_num).to(
                                                    self.device)
            self.optimizer = optim.SGD(self.model.parameters(),
                                       lr=self.lr,
                                       momentum=0.9)
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer, T_max=60, last_epoch=-1)
            self.criterion = torch.nn.CrossEntropyLoss().to(self.device)
        elif self.model_type == 'AlexNet':
            self.model = torchvision.models.AlexNet(
                num_classes=self.class_num).to(self.device)
            self.optimizer = optim.SGD(self.model.parameters(),
                                       lr=self.lr,
                                       momentum=0.9)
            self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
                self.optimizer, T_max=60, last_epoch=-1)
            self.criterion = torch.nn.CrossEntropyLoss().to(self.device)
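
Every branch of _make_model above repeats the same optimizer, scheduler, and criterion boilerplate. A hedged refactor sketch (the helper name _attach_training_objects is an assumption, not part of the original code):

    def _attach_training_objects(self):
        # Shared setup used verbatim by every branch above: SGD with
        # momentum, cosine-annealing LR schedule, cross-entropy loss.
        self.optimizer = optim.SGD(self.model.parameters(),
                                   lr=self.lr,
                                   momentum=0.9)
        self.scheduler = optim.lr_scheduler.CosineAnnealingLR(
            self.optimizer, T_max=60, last_epoch=-1)
        self.criterion = torch.nn.CrossEntropyLoss().to(self.device)

Each elif branch then reduces to constructing self.model and calling self._attach_training_objects().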
Example No. 5
import data as dt
import model
import os
import visualisation as vis

toggleaug = 0  # set to 1 to enable data augmentation
toggle_large_data = 0  # set to 1 to load the larger dataset

architect = 0  # 0 selects architecture 2

if __name__ == '__main__':

    dat = dt.Data()

    # uncomment to see how faces are extracted
    # dat.face_extr()

    # comment out the next two lines to skip the data visualization
    data_full = dat.load_full_data()
    vis.multi(data_full)

    # load the train/test split and the image dimensions
    train_data, train_labels, test_data, test_labels, width, height = dat.load_data_test(
        toggle_large_data)

    # create model and run test
    mod = model.LeNet()
    mod.model_test(train_data, train_labels, test_data, test_labels, toggleaug,
                   width, height, architect)
Example No. 6
def main(args):
    
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    init_seeds(seed=int(time.time()))
    kwargs = {'num_workers': 2, 'pin_memory': True} if args.cuda else {}
    print(args.dataset)

    if args.dataset == 'MNIST':
        test_dataloader = data.DataLoader(
            MNIST(args.data_path, args.run_folder, transform=mnist_transformer()),
                batch_size=10000, shuffle=False, **kwargs)

        train_dataset = MNIST(args.data_path, args.run_folder, train=True, transform=mnist_transformer(), imbalance_ratio=args.imbalance_ratio)

        if args.imbalance_ratio == 100:
            args.num_images = 25711
        else:
            args.num_images = 50000

        args.budget = 125
        args.initial_budget = 125
        args.num_classes = 10
        args.num_channels = 1
        args.arch_scaler = 2
    elif args.dataset == 'SVHN':
        test_dataloader = data.DataLoader(
            SVHN(args.data_path, args.run_folder, transform=svhn_transformer()),
                batch_size=5000, shuffle=False, **kwargs)

        train_dataset = SVHN(args.data_path, args.run_folder, train=True, transform=svhn_transformer(), imbalance_ratio=args.imbalance_ratio)

        if args.imbalance_ratio == 100:
            args.num_images = 318556
        else:
            args.num_images = 500000

        args.budget = 1250
        args.initial_budget = 1250
        args.num_classes = 10
        args.num_channels = 3
        args.arch_scaler = 1
    elif args.dataset == 'cifar10':
        test_dataloader = data.DataLoader(
                datasets.CIFAR10(args.data_path, download=True, transform=cifar_transformer(), train=False),
            batch_size=args.batch_size, drop_last=False)

        train_dataset = CIFAR10(args.data_path)

        args.num_images = 50000
        args.budget = 2500
        args.initial_budget = 5000
        args.num_classes = 10
        args.num_channels = 3
        args.arch_scaler = 1  # assumed; the original branch never set it, but it is used later
    elif args.dataset == 'cifar100':
        test_dataloader = data.DataLoader(
                datasets.CIFAR100(args.data_path, download=True, transform=cifar_transformer(), train=False),
             batch_size=args.batch_size, drop_last=False)

        train_dataset = CIFAR100(args.data_path)

        args.num_images = 50000
        args.budget = 2500
        args.initial_budget = 5000
        args.num_classes = 100
        args.num_channels = 3
        args.arch_scaler = 1  # assumed; the original branch never set it, but it is used later
    elif args.dataset == 'ImageNet':
        test_dataloader = data.DataLoader(
            ImageNet(args.data_path + '/val', transform=imagenet_test_transformer()),
                batch_size=args.batch_size, shuffle=False, drop_last=False, **kwargs)

        if args.imbalance_ratio == 100:
            train_dataset = ImageNet(args.data_path + '/train_ir_100', transform=imagenet_train_transformer())
            args.num_images = 645770
        else:
            train_dataset = ImageNet(args.data_path + '/train', transform=imagenet_train_transformer())
            args.num_images = 1281167

        args.budget = 64000
        args.initial_budget = 64000
        args.num_classes = 1000
        args.num_channels = 3
        args.arch_scaler = 1
    else:
        raise NotImplementedError

    all_indices = set(np.arange(args.num_images))
    # random.sample() no longer accepts sets (removed in Python 3.11);
    # sample from a sorted list instead.
    initial_indices = random.sample(sorted(all_indices), args.initial_budget)
    sampler = data.sampler.SubsetRandomSampler(initial_indices)
    #print(args.batch_size, sampler)
    # dataset with labels available
    querry_dataloader = data.DataLoader(train_dataset, sampler=sampler,
            batch_size=args.batch_size, drop_last=False, **kwargs)
    # len() of a DataLoader is the number of batches, not samples.
    print('Number of labeled batches =', len(querry_dataloader))
    solver = Solver(args, test_dataloader)

    splits = range(1, 11)

    current_indices = list(initial_indices)

    accuracies = []
    
    for split in splits:
        print("Split =", split)
        # need to retrain all the models on the new images
        # re initialize and retrain the models
        #task_model = vgg.vgg16_bn(num_classes=args.num_classes)
        if args.dataset == 'MNIST':
            task_model = model.LeNet(num_classes=args.num_classes)
        elif args.dataset == 'SVHN':
            task_model = resnet.resnet10(num_classes=args.num_classes)
        elif args.dataset == 'ImageNet':
            task_model = resnet.resnet18(num_classes=args.num_classes)
        else:
            # Fail fast instead of printing and continuing with an
            # undefined task_model.
            raise ValueError('No task model defined for dataset: {}'.format(
                args.dataset))
        # loading pretrained
        if args.pretrained:
            print("Loading pretrained model", args.pretrained)
            checkpoint = torch.load(args.pretrained)
            task_model.load_state_dict({k: v for k, v in checkpoint['state_dict'].items() if 'fc' not in k}, strict=False) # copy all but last linear layers
        #
        vae = model.VAE(z_dim=args.latent_dim, nc=args.num_channels, s=args.arch_scaler)
        discriminator = model.Discriminator(z_dim=args.latent_dim, s=args.arch_scaler)
        #print("Sampling starts")
        unlabeled_indices = np.setdiff1d(list(all_indices), current_indices)
        unlabeled_sampler = data.sampler.SubsetRandomSampler(unlabeled_indices)
        unlabeled_dataloader = data.DataLoader(train_dataset, sampler=unlabeled_sampler,
                batch_size=args.batch_size, drop_last=False, **kwargs)
        #print("Train starts")
        # train the models on the current data
        acc, vae, discriminator = solver.train(querry_dataloader,
                                               task_model, 
                                               vae, 
                                               discriminator,
                                               unlabeled_dataloader)


        print('Final accuracy with {}% of data is: {:.2f}'.format(int(split*100.0*args.budget/args.num_images), acc))
        accuracies.append(acc)

        sampled_indices = solver.sample_for_labeling(vae, discriminator, unlabeled_dataloader)
        current_indices = list(current_indices) + list(sampled_indices)
        sampler = data.sampler.SubsetRandomSampler(current_indices)
        querry_dataloader = data.DataLoader(train_dataset, sampler=sampler,
                batch_size=args.batch_size, drop_last=False, **kwargs)

    torch.save(accuracies, os.path.join(args.out_path, args.log_name))
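
Helpers such as mnist_transformer() are imported from elsewhere and not shown; a plausible sketch using the standard MNIST normalization constants (an assumption, not the original implementation):

from torchvision import transforms

def mnist_transformer():
    # Tensor conversion plus normalization with the usual MNIST statistics.
    return transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])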
Example No. 7
epochsize = 500
learningRate = 0.001
print_step = 50
batchsize = 64  # assumed value; the original excerpt uses batchsize without defining it

trainPath = r'data/trainingDigits'

train_dataset = MyDataset(trainPath,
                          transform=transforms.Compose([ToTensor()]))
train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batchsize,
                          shuffle=True)

SEED = 0
torch.manual_seed(SEED)
torch.cuda.manual_seed(SEED)
net = model.LeNet().cuda()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(),
                             lr=learningRate,
                             weight_decay=0.0001)

scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                            step_size=100,
                                            gamma=0.1)


def train(epoch):
    net.train()
    epoch_loss = 0
    for iteration, batch in enumerate(train_loader):
        # The excerpt was cut off inside this loop; a minimal standard
        # body is assumed from here on.
        images, labels = batch[0].cuda(), batch[1].cuda()
        optimizer.zero_grad()
        outputs = net(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
        if iteration % print_step == 0:
            print("epoch {} iteration {} loss {:.4f}".format(
                epoch, iteration, loss.item()))
    # Step the LR schedule once per epoch, after the optimizer updates;
    # the original called scheduler.step() before them, which shifts the
    # schedule by one epoch.
    scheduler.step()
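
A hypothetical driver for the function above (not in the original excerpt):

if __name__ == '__main__':
    for epoch in range(1, epochsize + 1):
        train(epoch)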
Example No. 8
def main():
    # Reset graph
    tf.reset_default_graph()

    with open("config.json", "r") as f:
        config = json.load(f)

    data = DataLoader(config)

    # Create placeholders
    X = tf.placeholder(tf.float32, [None, 32, 32, 1])
    y = tf.placeholder(tf.float32, [None, 10])

    # Create model and logits
    LeNet = model.LeNet(config)
    logits = LeNet.forward(X)

    # Compute metrics
    cost = compute_loss_xent(logits, targets=y)
    accuracy = compute_accuracy(logits, targets=y)

    # Define optimizer
    optimizer = LeNet.train_optimizer(cost,
                                      learning_rate=config["learning_rate"],
                                      beta1=0.9,
                                      beta2=0.999,
                                      epsilon=1e-08)

    # Merging all summaries
    merged_summary = tf.summary.merge_all()

    # Create saver to save and restore model
    saver = tf.train.Saver(max_to_keep=config["max_to_keep"])

    ## Launching the execution graph for training
    with tf.Session() as sess:
        # Initializing all variables
        sess.run(tf.global_variables_initializer())
        # Visualizing the Graph
        writer = tf.summary.FileWriter("./tensorboard/" +
                                       config["experiment_name"])
        writer.add_graph(sess.graph)

        for i in range(config["num_epochs"]):
            for j in range(config["num_iter_per_epoch"]):
                # Yield batches of data
                batch_X, batch_y = next(data.next_batch(config["batch_size"]))
                # Run the optimizer
                sess.run(optimizer, feed_dict={X: batch_X, y: batch_y})
                # Compute train loss and accuracy
                loss, acc = sess.run([cost, accuracy],
                                     feed_dict={
                                         X: batch_X,
                                         y: batch_y
                                     })

            if (i % config["writer_step"] == 0):
                # Run the merged summary and write it to disk
                s = sess.run(merged_summary,
                             feed_dict={
                                 X: batch_X,
                                 y: batch_y
                             })
                writer.add_summary(s, (i + 1))

            if (i % config["save_step"] == 0):
                # Saving session
                saver.save(sess,
                           "./saver/" + config["experiment_name"] +
                           "/model_epoch",
                           global_step=(i + 1))

            # Evaluate the validation data
            loss_val, acc_val = sess.run([cost, accuracy],
                                         feed_dict={
                                             X: data.X_valid,
                                             y: data.y_valid
                                         })

            if (i % config["display_step"] == 0):
                print("Epoch:", "%03d," % (i + 1), \
                 "loss=", "%.5f," % (loss), \
                 "train acc=", "%.5f," % (acc), \
                 "val loss=", "%.5f," % (loss_val), \
                 "val acc=", "%.5f" % (acc_val)
                 )

        print("Training complete")

    ## Evaluate on test data by loading the saver
    with tf.Session() as sess:
        # Load the network from meta file created by saver
        new_saver = tf.train.import_meta_graph("./saver/" +
                                               config["experiment_name"] +
                                               "/model_epoch-" +
                                               str(config["num_epochs"]) +
                                               ".meta")
        # Restore the parameters
        new_saver.restore(
            sess,
            tf.train.latest_checkpoint("./saver/" + config["experiment_name"] +
                                       "/"))

        loss_test, acc_test = sess.run([cost, accuracy],
                                       feed_dict={
                                           X: data.X_test,
                                           y: data.y_test
                                       })

        print("test loss=", "%.5f," % (loss_test), "test accuracy=",
              "%.5f" % (acc_test))

        print("Testing complete")
Example No. 9
    testloader = loader.get_loader(dir2)

classes = ('yes', 'no')

# Model
if args.resume or args.validate:
    # Load checkpoint.
    print('-- Resuming From Checkpoint')
    assert os.path.isdir('Checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./Checkpoint/misaka.t7')
    net = checkpoint['net']
    best_acc = checkpoint['acc']
    start_epoch = checkpoint['epoch']
else:
    print('-- Building Model and Allocating Memory')
    net = model.LeNet()

if use_cuda:
    net.cuda()
    net = torch.nn.DataParallel(net,
                                device_ids=range(torch.cuda.device_count()))
    cudnn.benchmark = True

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),
                      lr=args.init_lr,
                      momentum=0.9,
                      weight_decay=5e-4)


# Training
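
The excerpt ends at the training banner. A minimal sketch of the epoch loop that typically follows this kind of setup; the names match the code above, but trainloader comes from the elided loader section and the whole body is an assumption:

def train(epoch):
    net.train()
    train_loss, correct, total = 0, 0, 0
    for inputs, targets in trainloader:
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
        correct += outputs.argmax(dim=1).eq(targets).sum().item()
        total += targets.size(0)
    print('Epoch %d | loss %.3f | acc %.2f%%' %
          (epoch, train_loss / len(trainloader), 100.0 * correct / total))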