Example #1
def main():
    model_dir = '../hhjung/20180914/Resnet14'
    utils.default_model_dir = model_dir
    lr = 0.1
    start_time = time.time()
    train_loader, test_loader = utils.cifar10_loader()
    model = ResNet()

    if torch.cuda.is_available():
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        print("USE", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model).cuda()
        cudnn.benchmark = True

    else:
        print("NO GPU -_-;")

    optimizer = optim.SGD(model.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=1e-4)
    criterion = nn.CrossEntropyLoss().cuda()

    start_epoch = 0
    checkpoint = utils.load_checkpoint(model_dir)

    if checkpoint:
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])

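    # Step learning-rate schedule: lr for epochs < 80, lr * 0.1 for epochs < 120, lr * 0.01 afterwards.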
    for epoch in range(start_epoch, 165):
        if epoch < 80:
            learning_rate = lr
        elif epoch < 120:
            learning_rate = lr * 0.1
        else:
            learning_rate = lr * 0.01
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate

        train(model, optimizer, criterion, train_loader, epoch)
        test(model, criterion, test_loader, epoch)

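        # Save a checkpoint every 5 epochs so training can be resumed.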
        if epoch % 5 == 0:
            model_filename = 'checkpoint_%03d.pth.tar' % epoch
            utils.save_checkpoint(
                {
                    'epoch': epoch,
                    'model': model,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }, model_filename, model_dir)

    utils.conv_weight_L1_printing(model.module)
    now = time.gmtime(time.time() - start_time)
    print('{} hours {} mins {} secs for training'.format(
        now.tm_hour, now.tm_min, now.tm_sec))
Example #2
def main(model_dir, model, dataset):
    utils.default_model_dir = model_dir
    utils.c = None
    utils.str_w = ''
    # model = model
    lr = 0.1
    start_time = time.time()

    if dataset == 'cifar10':
        train_loader, test_loader = utils.cifar10_loader()
    elif dataset == 'cifar100':
        train_loader, test_loader = utils.cifar100_loader()

    if torch.cuda.is_available():
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        print("USE", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model).cuda()
        cudnn.benchmark = True

    else:
        print("NO GPU -_-;")

    optimizer = optim.SGD(model.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=1e-4)
    criterion = nn.CrossEntropyLoss().cuda()

    start_epoch = 0
    checkpoint = utils.load_checkpoint(model_dir)

    if checkpoint:
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])

    utils.init_learning(model.module)

    weight_extract_test(model, criterion, test_loader)
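
A minimal sketch of how this entry point might be invoked; the checkpoint directory below is a placeholder and ResNet() simply stands in for whichever model the project actually passes:

if __name__ == '__main__':
    # Hypothetical driver; adjust the directory, model and dataset name to your setup.
    main('../checkpoints/resnet_gate', ResNet(), 'cifar10')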
Example #3
def train_quantizer(model, train_config, n_epochs=1000):
    try:
        if VERBOSE == 1:
            print(f"Training Config : {train_config}")
        model.cuda()
        trainloader, testloader = cifar10_loader(4096, "../data")
        n_fs = train_config.n_functions
        n_bits = train_config.n_bits
        n_iter = train_config.n_iter

        avoid = []
        bit_quantizer = BitQuantizer(model,
                                     n_fs,
                                     n_bits,
                                     verbose=VERBOSE,
                                     avoid=avoid)
        with mlflow.start_run(run_name=str(train_config)):

            for epoch in tqdm(range(1, n_epochs + 1)):

                bit_quantizer.train_hash_functions(n_iter=1)
                hashed_model = bit_quantizer.get_hashed_model()
                hashed_model.cuda()

                # Evaluating hashed model
                train_acc_hashed = evaluate(hashed_model,
                                            trainloader,
                                            cuda=True)
                test_acc_hashed = evaluate(hashed_model, testloader, cuda=True)

                mlflow.log_metric("train_acc", train_acc_hashed, step=epoch)
                mlflow.log_metric("test_acc", test_acc_hashed, step=epoch)

        return hashed_model, bit_quantizer
    except RuntimeError as e:
        if "out of memory" in str(e):
            print(f"| WARNING: ran out of memory,skipping {train_config}")
            torch.cuda.empty_cache()
        else:
            raise e
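
A hedged sketch of how train_quantizer might be driven; TrainConfig below is a hypothetical stand-in for the project's config object, which is not shown but evidently exposes n_functions, n_bits, n_iter and a readable str():

from collections import namedtuple

# Hypothetical config stand-in; the real project may use its own class.
TrainConfig = namedtuple("TrainConfig", ["n_functions", "n_bits", "n_iter"])
config = TrainConfig(n_functions=32, n_bits=6, n_iter=50)

# pretrained_model is any already-trained network, e.g. the resnet32 loaded in a later example.
hashed_model, quantizer = train_quantizer(pretrained_model, config, n_epochs=100)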
Example #4
def train_model_hash(model, train_config):
    try:
        if VERBOSE == 1:
            print(f"Training Config : {train_config}")
        model.cuda()
        trainloader, testloader = cifar10_loader(4096, "./data")
        n_fs = train_config.n_functions
        n_bits = train_config.n_bits
        n_iter = train_config.n_iter

        avoid = []
        bit_quantizer = BitQuantizer(model, n_fs, n_bits, verbose=VERBOSE, avoid=avoid)
        bit_quantizer.train_hash_functions(n_iter=n_iter)
        hashed_model = bit_quantizer.get_hashed_model()
        hashed_model.cuda()

        # Evaluating model before hashing
        train_acc = evaluate(model, trainloader, cuda=True)
        test_acc = evaluate(model, testloader, cuda=True)

        # Evaluating hashed model
        train_acc_hashed = evaluate(hashed_model, trainloader, cuda=True)
        test_acc_hashed = evaluate(hashed_model, testloader, cuda=True)

        with mlflow.start_run(run_name=str(train_config)):
            mlflow.log_param("train_acc_before_hashing", train_acc)
            mlflow.log_param("test_acc_before_hashing", test_acc)
            mlflow.log_param("train_acc_after_hashing", train_acc_hashed)
            mlflow.log_param("test_acc_after_hashing", test_acc_hashed)
            mlflow.log_param("model_name", model_name)
            mlflow.log_param("n_bits", n_bits)
            mlflow.log_param("n_fs", n_fs)

    except RuntimeError as e:
        if "out of memory" in str(e):
            print(f"| WARNING: ran out of memory, skipping {train_config}")
            torch.cuda.empty_cache()
        else:
            raise e
Example #5
File: main.py Project: share020/dl
def main():
    """Main pipeline for GAN Visualization."""
    args = parse_args()

    testloader = cifar10_loader(args.dataroot, args.batch_size_test)

    if args.option == "option1":
        # Perturb Real Images
        perturb_real_images(testloader, args.modelroot, args.batch_size_test,
                            args.cuda)
        print("==> Perturb Real Images done ...")
    elif args.option == "option2":
        # Synthetic Images Maximizing Classification Output
        syn_img(testloader, args.modelroot, args.cuda)
        print("==> Synthetic Images Maximizing Classification Output done ...")
    elif args.option == "option3":
        # Synthetic Features Maximizing Features at Various Layers
        syn_features(testloader, args.modelroot, args.batch_size_test,
                     args.cuda)
        print(
            "==> Synthetic Features Maximizing Features at Various Layers done ..."
        )
    else:
        print("==> No such option ...")
Example #6
def main(model_dir, model, dataset, iteration):
    utils.default_model_dir = model_dir
    utils.c = None
    utils.str_w = ''
    # model = model
    lr = 0.1
    start_time = time.time()

    if dataset == 'cifar10':
        train_loader, test_loader = utils.cifar10_loader()
    elif dataset == 'cifar100':
        train_loader, test_loader = utils.cifar100_loader()

    if torch.cuda.is_available():
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        print("USE", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model).cuda()
        cudnn.benchmark = True

    else:
        print("NO GPU -_-;")

    optimizer = optim.SGD(model.parameters(),
                          lr=lr,
                          momentum=0.9,
                          weight_decay=1e-4)
    criterion = nn.CrossEntropyLoss().cuda()

    start_epoch = 0
    checkpoint = utils.load_checkpoint(model_dir)

    if checkpoint:
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])

    utils.init_learning(model.module)

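    # Step learning-rate schedule: lr for epochs < 150, lr * 0.1 for epochs < 250, lr * 0.01 afterwards.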
    for epoch in range(start_epoch, 350):
        if epoch < 150:
            learning_rate = lr
        elif epoch < 250:
            learning_rate = lr * 0.1
        else:
            learning_rate = lr * 0.01
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate

        train(model, optimizer, criterion, train_loader, epoch, True)
        test(model, criterion, test_loader, epoch, True)

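        # Every 'iteration' epochs, run extra passes that toggle switching_learning (apparently shifting training onto the gate parameters and back).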
        if epoch % iteration == iteration - 1:

            for i in range(iteration):
                utils.switching_learning(model.module)
                print('switching_learning to Gate')

                train(model, optimizer, criterion, train_loader, i, False)
                test(model, criterion, test_loader, i, False)

                utils.switching_learning(model.module)
                print('switching_learning back from Gate')

        if epoch % 5 == 0:
            model_filename = 'checkpoint_%03d.pth.tar' % epoch
            utils.save_checkpoint(
                {
                    'epoch': epoch,
                    'model': model,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }, model_filename, model_dir)

    now = time.gmtime(time.time() - start_time)
    weight_extract(model, optimizer, criterion, train_loader, epoch)
    utils.conv_weight_L1_printing(model.module)

    print('{} hours {} mins {} secs for training'.format(
        now.tm_hour, now.tm_min, now.tm_sec))
Example #7
def main(model_dir, model, dataset, layer_name, layer_n):
    utils.default_model_dir = model_dir
    utils.c = None
    utils.str_w = ''
    # model = model
    lr = 0.1
    start_time = time.time()

    if dataset == 'cifar10':
        train_loader, test_loader = utils.cifar10_loader()
    elif dataset == 'cifar100':
        train_loader, test_loader = utils.cifar100_loader()

    if torch.cuda.is_available():
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        print("USE", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model).cuda()
        cudnn.benchmark = True

    else:
        print("NO GPU -_-;")

    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=1e-4)
    criterion = nn.CrossEntropyLoss().cuda()

    start_epoch = 0
    checkpoint = utils.load_checkpoint(model_dir)
    
    if checkpoint:
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])

    utils.init_learning(model.module)

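    # Step learning-rate schedule: lr for epochs < 80, lr * 0.1 for epochs < 120, lr * 0.01 afterwards.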
    for epoch in range(start_epoch, 165): # change 165
        if epoch < 80:
            learning_rate = lr
        elif epoch < 120:
            learning_rate = lr * 0.1
        else:
            learning_rate = lr * 0.01
        for param_group in optimizer.param_groups:
            param_group['lr'] = learning_rate

        train(model, optimizer, criterion, train_loader, epoch, True)
        test(model, criterion, test_loader, epoch, True)

        utils.switching_learning(model.module)
        print('switching_learning to Gate')
        
        train(model, optimizer, criterion, train_loader, epoch, False)
        test(model, criterion, test_loader, epoch, False)        

        utils.switching_learning(model.module)
        print('switching_learning back from Gate')

        model_filename = 'checkpoint_%03d.pth.tar' % epoch
        utils.save_checkpoint({
            'epoch': epoch,
            'model': model,
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        }, model_filename, model_dir)
            

    now = time.gmtime(time.time() - start_time)
    weight_extract(model, optimizer, criterion, train_loader, epoch)

    class_counter, class_weight_sum, class_average, total_average = utils.load_gate_csv()

    _, index = torch.sort(total_average)
    layer_name = utils.make_layer_name(layer_n)

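    # Prune layers one at a time, starting from the smallest average gate weight, and re-test after each removal.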
    for i in index:
        # weight delete layer_name[i]
        utils.weight_pruning_by_name(model.module, layer_name[i])
        test(model, criterion, test_loader, epoch, True)

    # TODO: change index[0] to 'layerN and layerN-M'
    # index runs from the smallest average gate weight (index[0]) to the biggest
    # layer names range from layer0 to layer'n-1'; find 'layer' + str(index[0]) in model.module
    # and set its self.z to 0

    # utils.conv_weight_L1_printing(model.module)
    
    print('{} hours {} mins {} secs for training'.format(now.tm_hour, now.tm_min, now.tm_sec))
Example #8
def main(model_dir, model, dataset, layer_n, reversed=False):
    utils.default_model_dir = model_dir
    utils.c = None
    utils.str_w = ''
    # model = model
    lr = 0.1
    start_time = time.time()

    if dataset == 'cifar10':
        train_loader, test_loader = utils.cifar10_loader()
    elif dataset == 'cifar100':
        train_loader, test_loader = utils.cifar100_loader()

    if torch.cuda.is_available():
        # os.environ["CUDA_VISIBLE_DEVICES"] = '0'
        print("USE", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model).cuda()
        cudnn.benchmark = True

    else:
        print("NO GPU -_-;")

    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=1e-4)
    criterion = nn.CrossEntropyLoss().cuda()

    start_epoch = 0
    checkpoint = utils.load_checkpoint(model_dir)
    
    if checkpoint:
        start_epoch = checkpoint['epoch'] + 1
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])

    utils.init_learning(model.module)

    # for epoch in range(start_epoch, 165): # change 165
    #     if epoch < 80:
    #         learning_rate = lr
    #     elif epoch < 120:
    #         learning_rate = lr * 0.1
    #     else:
    #         learning_rate = lr * 0.01
    #     for param_group in optimizer.param_groups:
    #         param_group['lr'] = learning_rate

    #     train(model, optimizer, criterion, train_loader, epoch, True)
    #     test(model, criterion, test_loader, epoch, True)

    #     utils.switching_learning(model.module)
    #     print('switching_learning to Gate')
        
    #     train(model, optimizer, criterion, train_loader, epoch, False)
    #     test(model, criterion, test_loader, epoch, False)        

    #     utils.switching_learning(model.module)
    #     print('switching_learning to Gate')

    #     model_filename = 'checkpoint_%03d.pth.tar' % epoch
    #     utils.save_checkpoint({
    #         'epoch': epoch,
    #         'model': model,
    #         'state_dict': model.state_dict(),
    #         'optimizer': optimizer.state_dict(),
    #     }, model_filename, model_dir)
    # now = time.gmtime(time.time() - start_time)

    utils.del_csv_weight_for_test()
            

    weight_extract(model, optimizer, criterion, train_loader, 160)

    class_counter, class_weight_sum, class_average, total_average = utils.load_gate_csv()

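    # Order layers by average gate weight; with reversed=True the largest-weight layers are pruned first.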
    if reversed is True:
        _, index = torch.sort(total_average, descending=True)
    else:
        _, index = torch.sort(total_average)
    layer_name = utils.make_layer_name(layer_n)


    for i in index:
        # weight delete layer_name[i]
        utils.weight_pruning_by_name(model.module, layer_name[i])
        test(model, criterion, test_loader, 160, True)
Example #9
import torch
from torch.nn.functional import softmax

from alexnet import AlexNet
from utils import cifar10_loader, device, cifar10_classes

torch.random.manual_seed(128)
batch_size = 1
testloader = cifar10_loader(train=False, batch_size=batch_size)

net = AlexNet()
net.load_state_dict(torch.load("model/model.h5"))
net.to(device)  # keep the network on the same device as the input batches
net.eval()

correct = 0
total = 0


def run():
    global correct, total
    with torch.no_grad():
        for data in testloader:
            images, labels = data
            inputs, labels = images.to(device), labels.to(device)
            outputs = net(inputs)
            _, predicted = torch.topk(outputs.data, 5)
            #print(predicted)
            indexes = predicted.cpu().numpy()[0].tolist()
            #print(indexes)
            #print(softmax(outputs).numpy()[0][indexes])
            #print([cifar10_classes[i] for i in indexes])
Example #10
import torch

from quantizer import BitQuantizer
from utils import cifar10_loader, evaluate
# NOTE: assumed import path; adjust to wherever resnet32 is defined in this project.
from pytorch_resnet.resnet import resnet32

# model = AlexNet()
# model.load_state_dict(torch.load("./alexnet_pretrained"))

model = resnet32()
checkpoint = torch.load(
    "./pytorch_resnet/pretrained_models/resnet32-d509ac18.th")
state = {
    k.replace("module.", ""): v
    for k, v in checkpoint["state_dict"].items()
}
model.load_state_dict(state)

trainloader, testloader = cifar10_loader(256, "../data")

model.cuda()

n_fs = 32
n_bits = 6
n_iter = 50
avoid = []
bit_quantizer = BitQuantizer(model, n_fs, n_bits, avoid=avoid)
bit_quantizer.train_hash_functions(n_iter=n_iter)
hashed_model = bit_quantizer.get_hashed_model()

hashed_model.cuda()

# Evaluating model before hashing
train_accuracy = evaluate(model, trainloader, cuda=True)
Example #11
File: main.py Project: share020/dl
def main():
    """Main pipleline implements Generative Adversarial Networks in Pytorch."""
    args = parse_args()

    # load cifar10 dataset
    trainloader, testloader = cifar10_loader(args.dataroot,
                                             args.batch_size_train,
                                             args.batch_size_test)

    # Train the Discriminator without the Generator
    if args.option == "option1":
        print("Train the Discriminator without the Generator ...")
        model = Discriminator()
        if args.cuda:
            model = nn.DataParallel(model).cuda()
            cudnn.benchmark = True
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.lr,
                                     weight_decay=args.weight_decay)

        # train
        trainer_d = Trainer_D(model, criterion, optimizer, trainloader,
                              testloader, args.start_epoch, args.epochs1,
                              args.cuda, args.batch_size_train, args.lr)
        trainer_d.train()

    # Train the Discriminator with the Generator
    else:
        # instantiate discriminator and generator
        aD, aG = Discriminator(), Generator()

        # resume training from the last time
        if args.resume:
            # Load checkpoint
            print('==> Resuming training from checkpoint ...')
            g_ckpt_pth = os.path.join(args.ckptroot, "tempG.model")
            d_ckpt_pth = os.path.join(args.ckptroot, "tempD.model")

            checkpoint_g = torch.load(g_ckpt_pth)
            checkpoint_d = torch.load(d_ckpt_pth)

            args.start_epoch = checkpoint_d['epoch']

            aG.load_state_dict(checkpoint_g['state_dict'])
            aD.load_state_dict(checkpoint_d['state_dict'])

        else:
            # start over
            print("Train the Discriminator with the Generator ...")

        if args.cuda:
            aD, aG = nn.DataParallel(aD).cuda(), nn.DataParallel(aG).cuda()
            cudnn.benchmark = True

        optimizer_g = torch.optim.Adam(aG.parameters(),
                                       lr=args.lr,
                                       betas=(args.beta1, args.beta2))
        optimizer_d = torch.optim.Adam(aD.parameters(),
                                       lr=args.lr,
                                       betas=(args.beta1, args.beta2))

        criterion = nn.CrossEntropyLoss()

        # train
        trainer_gd = Trainer_GD(aD, aG, criterion, optimizer_d, optimizer_g,
                                trainloader, testloader, args.batch_size_train,
                                args.gen_train, args.cuda, args.n_z,
                                args.start_epoch, args.epochs2)
        trainer_gd.train()
Example #12
import time

import torch
import torch.nn as nn
import torch.optim as optim
from tensorboardX import SummaryWriter

from alexnet import AlexNet
from utils import cifar10_loader, device

trainloader = cifar10_loader(train=True)
testloader = cifar10_loader(train=False)
writer = SummaryWriter("./logs")

epochs = 100
batch_size = 128
log_batch = 200
train_metrics = []
test_metrics = []

net = AlexNet()
net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(net.parameters(), lr=0.001)


def train():
    for epoch in range(epochs):
        running_loss = 0.0
        correct_classified = 0
        total = 0
Example #13
        x = self.ops5(x)
        x = x.reshape(-1, 256 * 2 * 2)
        x = self.classifier(x)
        x = self.softmax(x)
        return x


def get_loss(labels, preds):
    ce = nn.NLLLoss(reduction="mean")
    l1 = ce(torch.log(preds), labels)
    return l1
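
get_loss takes the model's softmax output, applies log(), and feeds it to NLLLoss, which together amount to ordinary cross-entropy. A hedged, numerically safer variant (assuming the model were changed to return raw logits instead of softmax probabilities) could look like this:

def get_loss_from_logits(labels, logits):
    # Hypothetical alternative: CrossEntropyLoss fuses log-softmax and NLLLoss in one stable step.
    return nn.CrossEntropyLoss(reduction="mean")(logits, labels)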


if __name__ == "__main__":

    trainloader, testloader = cifar10_loader(batch_size=4096,
                                             data_path="./data")
    CUDA = True
    model = AlexNet()

    if LOAD_CKPT is not None:
        LOAD_CKPT = f"./checkpoint/alexnet_{LOAD_CKPT}"
        model.load_state_dict(torch.load(LOAD_CKPT))

    if CUDA:
        model.cuda()
    optimizer = torch.optim.Adam(model.parameters())
    writer = SummaryWriter()

    global_step = 0

    for epoch in range(1, N_EPOCHS + 1):