Example #1
0
def model_config(net_type, num_classes, OOD_num_classes):
    """Build a pretrained backbone with an in-distribution and an OOD head.

    Args:
        net_type: architecture name; one of "resnet50", "resnet34",
            "vgg19", "vgg16", "vgg19_bn", "vgg16_bn".
        num_classes: number of in-distribution classes (passed as ``num_c``).
        OOD_num_classes: number of OOD classes (passed as ``num_cc``).

    Returns:
        The constructed, ImageNet-pretrained model.

    Raises:
        ValueError: if ``net_type`` is not supported.  (Previously an
        unknown name fell through every branch and the bare ``return
        model`` raised an opaque UnboundLocalError.)
    """
    # All constructors share the same keyword interface, so a dispatch
    # table replaces the repetitive if/elif chain.
    builders = {
        "resnet50": models.resnet50,
        "resnet34": models.resnet34,
        "vgg19": models.vgg19,
        "vgg16": models.vgg16,
        "vgg19_bn": models.vgg19_bn,
        "vgg16_bn": models.vgg16_bn,
    }
    if net_type not in builders:
        raise ValueError("unsupported net_type: %r" % (net_type,))
    return builders[net_type](num_c=num_classes,
                              num_cc=OOD_num_classes,
                              pretrained=True)
Example #2
0
def create_model(name, num_classes):
    """Create a (mostly ImageNet-pretrained) classifier with its final
    layer replaced to emit ``num_classes`` logits.

    Args:
        name: one of 'resnet34', 'resnet152', 'densenet121', 'vgg11_bn',
            'vgg19_bn', 'alexnet'; anything else falls back to the
            project-local ``Net``.
        num_classes: number of output classes.

    Returns:
        The constructed model.
    """
    def _init_head(layer):
        # The non-underscore ``nn.init.xavier_uniform``/``constant`` are
        # deprecated aliases (removed in recent PyTorch); use the
        # in-place ``_`` variants they forwarded to.
        nn.init.xavier_uniform_(layer.weight)
        nn.init.constant_(layer.bias, 0)

    if name == 'resnet34':
        model = models.resnet34(True)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        _init_head(model.fc)
    elif name == 'resnet152':
        model = models.resnet152(True)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        _init_head(model.fc)
    elif name == 'densenet121':
        model = models.densenet121(True)
        model.classifier = nn.Linear(model.classifier.in_features, num_classes)
        _init_head(model.classifier)
    elif name == 'vgg11_bn':
        # Not pretrained; built directly with the requested class count.
        model = models.vgg11_bn(False, num_classes)
    elif name == 'vgg19_bn':
        model = models.vgg19_bn(True)
        # The VGG/AlexNet head is the last entry of the classifier Sequential.
        model.classifier._modules['6'] = nn.Linear(
            model.classifier._modules['6'].in_features, num_classes)
        _init_head(model.classifier._modules['6'])
    elif name == 'alexnet':
        model = models.alexnet(True)
        model.classifier._modules['6'] = nn.Linear(
            model.classifier._modules['6'].in_features, num_classes)
        _init_head(model.classifier._modules['6'])
    else:
        # Fallback: project-local architecture.
        model = Net(num_classes)

    return model
Example #3
0
def create_model(name, num_classes):
    """Create a classifier with its final layer replaced to emit
    ``num_classes`` logits.

    Args:
        name: one of 'resnet34', 'resnet50', 'resnet152', 'seresnet50',
            'seresnet152', 'dpn131', 'densenet121', 'vgg11_bn',
            'vgg19_bn', 'alexnet'; anything else falls back to the
            project-local ``Net``.
        num_classes: number of output classes.

    Returns:
        The constructed model.
    """
    def _init_head(layer):
        # The non-underscore ``nn.init.xavier_uniform``/``constant`` are
        # deprecated aliases (removed in recent PyTorch); use the
        # in-place ``_`` variants they forwarded to.
        nn.init.xavier_uniform_(layer.weight)
        nn.init.constant_(layer.bias, 0)

    if name == 'resnet34':
        model = models.resnet34(True)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        _init_head(model.fc)
    elif name == 'resnet50':
        model = models.resnet50(True)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        _init_head(model.fc)
    elif name == 'resnet152':
        model = models.resnet152(True)
        model.fc = nn.Linear(model.fc.in_features, num_classes)
        _init_head(model.fc)
    elif name == 'seresnet50':
        # SENet-style models expose the head as ``last_linear``; the
        # original left it default-initialized, so keep that behavior.
        model = models.se_resnet50()
        model.last_linear = nn.Linear(model.last_linear.in_features,
                                      num_classes,
                                      bias=True)
    elif name == 'seresnet152':
        model = models.se_resnet152()
        model.last_linear = nn.Linear(model.last_linear.in_features,
                                      num_classes,
                                      bias=True)
    elif name == 'dpn131':
        model = models.dpn131()
        # NOTE(review): 2688 is the hard-coded DPN-131 feature width;
        # verify against the models package if it is ever upgraded.
        model.classifier = nn.Conv2d(2688,
                                     num_classes,
                                     kernel_size=1,
                                     bias=True)
    elif name == 'densenet121':
        model = models.densenet121(True)
        model.classifier = nn.Linear(model.classifier.in_features, num_classes)
        _init_head(model.classifier)
    elif name == 'vgg11_bn':
        # Not pretrained; built directly with the requested class count.
        model = models.vgg11_bn(False, num_classes)
    elif name == 'vgg19_bn':
        model = models.vgg19_bn(True)
        # The VGG/AlexNet head is the last entry of the classifier Sequential.
        model.classifier._modules['6'] = nn.Linear(
            model.classifier._modules['6'].in_features, num_classes)
        _init_head(model.classifier._modules['6'])
    elif name == 'alexnet':
        model = models.alexnet(True)
        model.classifier._modules['6'] = nn.Linear(
            model.classifier._modules['6'].in_features, num_classes)
        _init_head(model.classifier._modules['6'])
    else:
        # Fallback: project-local architecture.
        model = Net(num_classes)

    return model
Example #4
0
def get_network(args, depth=10, width=10):
    """Return the network selected by ``args``.

    Args:
        args: namespace with ``task`` ('cifar10'/'cifar100'), ``net``
            (architecture name), ``batch_norm`` (bool) and ``gpu`` (bool).
        depth: depth parameter forwarded to ``resnet``.
        width: width parameter forwarded to ``resnet``.

    Returns:
        The constructed network, moved to CUDA when ``args.gpu`` is set.
    """
    if args.task == 'cifar10':
        nclass = 10
    elif args.task == 'cifar100':
        nclass = 100
    else:
        # Previously an unknown task fell through and the later
        # ``num_classes=nclass`` crashed with UnboundLocalError; exit
        # with a message like the unknown-network branch below.
        print('the task you have entered is not supported yet')
        sys.exit()

    #Yang added none bn vggs
    if args.net == 'vgg11':
        net = vgg11_bn(num_classes=nclass) if args.batch_norm else vgg11(
            num_classes=nclass)
    elif args.net == 'vgg13':
        net = vgg13_bn(num_classes=nclass) if args.batch_norm else vgg13(
            num_classes=nclass)
    elif args.net == 'vgg16':
        net = vgg16_bn(num_classes=nclass) if args.batch_norm else vgg16(
            num_classes=nclass)
    elif args.net == 'vgg19':
        net = vgg19_bn(num_classes=nclass) if args.batch_norm else vgg19(
            num_classes=nclass)
    elif args.net == 'resnet':
        net = resnet(num_classes=nclass, depth=depth, width=width)
    else:
        print('the network name you have entered is not supported yet')
        sys.exit()

    if args.gpu:  #use_gpu
        net = net.cuda()

    return net
def main():
    """Train VGG19-BN on CIFAR-100 with the xRDA optimizer and an l1
    proximal operator, periodically decaying the step size, then save
    the (sparsified) network to disk.
    """
    # NOTE(review): 'ciafr100' is a typo in the output filename; left
    # unchanged since downstream tooling may already expect it.
    output_file = 'vgg19_sparse_model_ciafr100.dat'

    # Standard CIFAR augmentation + per-channel normalization.
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
    ])

    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
    ])

    trainset = torchvision.datasets.CIFAR100(root='./',
                                             train=True,
                                             download=True,
                                             transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=128,
                                              shuffle=True,
                                              num_workers=6)

    testset = torchvision.datasets.CIFAR100(root='./',
                                            train=False,
                                            download=True,
                                            transform=transform_val)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=32,
                                             shuffle=False,
                                             num_workers=2)

    conv_net = vgg19_bn(num_classes=100).cuda()
    conv_net.train()
    criterion = nn.CrossEntropyLoss()

    # xRDA hyper-parameters: l1 strength ``lam`` drives sparsity;
    # ``av_param`` is the iterate-averaging weight, increased later.
    init_lr = 1.0
    lam = 1e-6
    av_param = 0.0
    training_specs = IterationSpecs(step_size=init_lr,
                                    mom_ts=9.5,
                                    b_mom_ts=9.5,
                                    weight_decay=5e-4,
                                    av_param=av_param)
    optimizer = xRDA(conv_net.parameters(),
                     it_specs=training_specs,
                     prox=l1_prox(lam=lam, maximum_factor=500))

    lr = init_lr
    # NOTE(review): prev_train_acc / prev_sparsity are written but never
    # read; likely leftovers from an adaptive schedule.
    prev_train_acc = 0
    prev_sparsity = 0
    for epoch in range(500):
        total = 0
        correct = 0
        for data in trainloader:
            # get the inputs
            inputs, labels = data
            # NOTE(review): Variable is a no-op wrapper on modern PyTorch.
            inputs = Variable(inputs).cuda()
            labels = Variable(labels).cuda()

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = conv_net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Calculate train accuracy
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum()

        # NOTE(review): train_acc is assigned but unused below.
        train_acc = correct
        # Count of non-zero parameters — the sparsity measure reported.
        sparsity = sum(
            torch.nonzero(x).size()[0] for x in list(conv_net.parameters()))
        # Scaled by 100x100 so the print below can show XX.YY via
        # integer-style div/mod; ``correct`` is a tensor here — confirm
        # the formatting still holds on newer torch versions.
        accuracy = 10000 * correct / total
        t_accuracy = test_accuracy(testloader, conv_net, cuda=True)
        print(
            'Epoch:%d %% Training Accuracy: %d.%02d %% Test Accuracy: %d.%02d %% Sparsity: %d'
            % (epoch + 1, accuracy / 100, accuracy % 100, t_accuracy / 100,
               t_accuracy % 100, sparsity))

        # At about every 40 epochs, halve step size and double averaging.
        if epoch in [60, 100, 140, 180, 220, 260, 300, 340, 380, 420]:
            lr /= 2
            training_specs.set_step_size(lr)
            av_param = 1.0 - (1.0 - av_param) / 2.0
            training_specs.set_av_param(av_param)

    # Calculate accuracy and save output.
    final_accuracy = test_accuracy(testloader, conv_net, cuda=True)
    print('Accuracy of the network on the 10000 test images: %d.%02d %%' %
          (final_accuracy / 100, final_accuracy % 100))
    torch.save(conv_net, output_file)
Example #6
0
def main():
    """Pre-train a two-head OOD-detection model on the dog dataset.

    Trains with BCE-with-logits on both heads (plus optional membership
    and transfer losses), evaluates AUROC from the per-sample distance
    between the two heads' softmax outputs (ID labeled 0, OOD labeled 1),
    logs to TensorBoard, and checkpoints after every epoch.
    """
    start_epoch = 0

    pretrained_model = os.path.join("./pre_trained", args.dataset,
                                    args.net_type + ".pth.tar")
    save_model = "./save_model_dis/pre_training"
    tensorboard_dir = "./tensorboard/OOD_dis/pre_training" + args.dataset
    # Device configuration
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Hyper-parameters
    # NOTE(review): eps is never used in this function.
    eps = 1e-8

    ### data config
    train_dataset = load_data.Dog_metric_dataloader(image_dir=image_dir,
                                                    num_class=args.num_classes,
                                                    mode="train",
                                                    soft_label=args.soft_label)
    if args.custom_sampler:
        # Batch sampler that groups ``num_instances`` examples per class.
        MySampler = load_data.customSampler(train_dataset, args.batch_size,
                                            args.num_instances)
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_sampler=MySampler,
                                                   num_workers=2)
    else:
        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=2)

    test_dataset = load_data.Dog_dataloader(image_dir=image_dir,
                                            num_class=args.num_classes,
                                            mode="test")
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=8,
                                              shuffle=False,
                                              num_workers=2)

    out_test_dataset = load_data.Dog_dataloader(image_dir=image_dir,
                                                num_class=args.num_classes,
                                                mode="OOD")
    out_test_loader = torch.utils.data.DataLoader(out_test_dataset,
                                                  batch_size=args.batch_size,
                                                  shuffle=False,
                                                  num_workers=2)

    if args.transfer:
        ### perfectly OOD data
        OOD_dataset = load_data.Dog_dataloader(image_dir=OOD_dir,
                                               num_class=args.OOD_num_classes,
                                               mode="OOD")
        OOD_loader = torch.utils.data.DataLoader(OOD_dataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=True,
                                                 num_workers=2)

    ##### model, optimizer config
    # NOTE(review): unknown net_type leaves ``model`` unbound and crashes
    # later with UnboundLocalError.
    if args.net_type == "resnet50":
        model = models.resnet50(num_c=args.num_classes, pretrained=True)
    elif args.net_type == "resnet34":
        model = models.resnet34(num_c=args.num_classes, pretrained=True)
    elif args.net_type == "vgg19":
        model = models.vgg19(num_c=args.num_classes, pretrained=True)
    elif args.net_type == "vgg16":
        model = models.vgg16(num_c=args.num_classes, pretrained=True)
    elif args.net_type == "vgg19_bn":
        model = models.vgg19_bn(num_c=args.num_classes, pretrained=True)
    elif args.net_type == "vgg16_bn":
        model = models.vgg16_bn(num_c=args.num_classes, pretrained=True)

    if args.transfer:
        # NOTE(review): 2048 is presumably the ResNet50 feature width —
        # this head will not match the VGG backbones; confirm.
        extra_fc = nn.Linear(2048, args.num_classes + args.OOD_num_classes)

    if args.load == True:
        print("loading model")
        checkpoint = torch.load(pretrained_model)

        ##### load model
        model.load_state_dict(checkpoint["model"])

    # With the custom batch sampler len(train_loader) counts samples, so
    # divide by batch_size; otherwise it already counts batches.
    batch_num = len(
        train_loader) / args.batch_size if args.custom_sampler else len(
            train_loader)

    optimizer = optim.SGD(model.parameters(),
                          lr=args.init_lr,
                          momentum=0.9,
                          nesterov=args.nesterov)
    # Cosine decay over the full run (scheduler.step() is per batch).
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, args.num_epochs * batch_num)

    #### loss config
    criterion = nn.BCEWithLogitsLoss()

    #### create folder
    Path(os.path.join(save_model, env, args.net_type)).mkdir(exist_ok=True,
                                                             parents=True)

    if args.board_clear == True:
        files = glob.glob(tensorboard_dir + "/*")
        for f in files:
            shutil.rmtree(f)
    # Pick the first unused numeric run directory for TensorBoard.
    i = 0
    while True:
        if Path(os.path.join(tensorboard_dir, str(i))).exists() == True:
            i += 1
        else:
            Path(os.path.join(tensorboard_dir, str(i))).mkdir(exist_ok=True,
                                                              parents=True)
            break
    summary = SummaryWriter(os.path.join(tensorboard_dir, str(i)))

    # Start training
    j = 0
    best_score = 0
    score = 0
    # Zero tensors so the loss terms are defined even when the optional
    # membership/transfer branches are disabled.
    membership_loss = torch.tensor(0)
    transfer_loss = torch.tensor(0)
    for epoch in range(start_epoch, args.num_epochs):
        running_loss = 0
        running_membership_loss = 0
        running_transfer_loss = 0
        running_class_loss = 0
        train_acc = 0
        test_acc = 0
        stime = time.time()

        # for i, (train_data, OOD_data) in enumerate(zip(train_loader, OOD_loader)):
        for i, train_data in enumerate(train_loader):
            #### initialized
            # Small Gaussian input noise as augmentation.
            org_image = train_data['input'] + 0.01 * torch.randn_like(
                train_data['input'])
            org_image = org_image.to(device)
            gt = train_data['label'].type(torch.FloatTensor).to(device)

            model = model.to(device).train()
            optimizer.zero_grad()

            #### forward path
            # Two-head forward: out1/out2 are the penultimate-head logits.
            out1, out2 = model.pendis_forward(org_image)

            if args.membership:
                membership_loss = (
                    Membership_loss(out2, gt, args.num_classes) +
                    Membership_loss(out1, gt, args.num_classes))
                running_membership_loss += membership_loss.item()

            if args.transfer:
                extra_fc = extra_fc.to(device).train()

                # BUG(review): ``OOD_data`` is undefined in this loop —
                # the zip() loop that provided it is commented out above,
                # so args.transfer=True raises NameError here.
                OOD_image = (
                    OOD_data['input'] +
                    0.01 * torch.randn_like(OOD_data['input'])).to(device)
                # OOD targets: zeros for the ID classes, one-hot-ish
                # labels for the OOD classes, concatenated.
                OOD_gt = torch.cat(
                    (torch.zeros(args.batch_size, args.num_classes),
                     OOD_data['label'].type(torch.FloatTensor)),
                    dim=1).to(device)

                #### forward path
                _, feature = model.gen_forward(OOD_image)
                OOD_output = extra_fc(feature)
                transfer_loss = criterion(OOD_output, OOD_gt)
                running_transfer_loss += transfer_loss.item()

            #### calc loss
            class1_loss = criterion(out1, gt)
            class2_loss = criterion(out2, gt)
            class_loss = (class1_loss + class2_loss)

            # 0.3 weights the membership term — tuning constant.
            total_loss = class_loss + membership_loss * 0.3 + transfer_loss

            #### calc accuracy
            # Each head contributes a count; totals are later divided by
            # 2 * dataset size.
            train_acc += sum(
                torch.argmax(out1, dim=1) == torch.argmax(
                    gt, dim=1)).cpu().detach().item()
            train_acc += sum(
                torch.argmax(out2, dim=1) == torch.argmax(
                    gt, dim=1)).cpu().detach().item()

            total_loss.backward()
            optimizer.step()
            scheduler.step()

            running_class_loss += class_loss.item()
            running_loss += total_loss.item()

        with torch.no_grad():
            for i, test_data in enumerate(test_loader):
                org_image = test_data['input'].to(device)
                model = model.to(device).eval()
                gt = test_data['label'].type(torch.FloatTensor).to(device)

                #### forward path
                out1, out2 = model.pendis_forward(org_image)
                # OOD score: L1 distance between the two heads' softmax
                # distributions (small for ID, large for OOD).
                score_1 = nn.functional.softmax(out1, dim=1)
                score_2 = nn.functional.softmax(out2, dim=1)
                dist = torch.sum(torch.abs(score_1 - score_2), dim=1).reshape(
                    (org_image.shape[0], -1))
                if i == 0:
                    dists = dist
                    # ID samples are labeled 0 for the AUROC computation.
                    labels = torch.zeros((org_image.shape[0], ))
                else:
                    dists = torch.cat((dists, dist), dim=0)
                    labels = torch.cat(
                        (labels, torch.zeros((org_image.shape[0]))), dim=0)

                test_acc += sum(
                    torch.argmax(torch.sigmoid(out1), dim=1) == torch.argmax(
                        gt, dim=1)).cpu().detach().item()
                test_acc += sum(
                    torch.argmax(torch.sigmoid(out2), dim=1) == torch.argmax(
                        gt, dim=1)).cpu().detach().item()

            for i, out_org_data in enumerate(out_test_loader):
                out_org_image = out_org_data['input'].to(device)

                out1, out2 = model.pendis_forward(out_org_image)
                score_1 = nn.functional.softmax(out1, dim=1)
                score_2 = nn.functional.softmax(out2, dim=1)
                dist = torch.sum(torch.abs(score_1 - score_2), dim=1).reshape(
                    (out_org_image.shape[0], -1))

                dists = torch.cat((dists, dist), dim=0)
                # OOD samples are labeled 1.
                labels = torch.cat((labels, torch.ones(
                    (out_org_image.shape[0]))),
                                   dim=0)

        roc = evaluate(labels.cpu(), dists.cpu(), metric='roc')
        print('Epoch{} AUROC: {:.3f}, test accuracy : {:.4f}'.format(
            epoch, roc, test_acc / test_dataset.num_image / 2))

        print(
            'Epoch [{}/{}], Step {}, total_loss = {:.4f}, class = {:.4f}, membership = {:.4f}, transfer = {:.4f}, exe time: {:.2f}, lr: {:.4f}*e-4'
            .format(epoch, args.num_epochs, i + 1, running_loss / batch_num,
                    running_class_loss / batch_num,
                    running_membership_loss / batch_num,
                    running_transfer_loss / batch_num,
                    time.time() - stime,
                    scheduler.get_last_lr()[0] * 10**4))
        print('exe time: {:.2f}, lr: {:.4f}*e-4'.format(
            time.time() - stime,
            scheduler.get_last_lr()[0] * 10**4))

        print("train accuracy total : {:.4f}".format(
            train_acc / train_dataset.num_image / 2))
        print("test accuracy total : {:.4f}".format(
            test_acc / test_dataset.num_image / 2))

        summary.add_scalar('loss/total_loss', running_loss / batch_num, epoch)
        summary.add_scalar('loss/class_loss', running_class_loss / batch_num,
                           epoch)
        summary.add_scalar('loss/membership_loss',
                           running_membership_loss / batch_num, epoch)
        summary.add_scalar('acc/train_acc',
                           train_acc / train_dataset.num_image / 2, epoch)
        summary.add_scalar('acc/test_acc',
                           test_acc / test_dataset.num_image / 2, epoch)
        summary.add_scalar("learning_rate/lr",
                           scheduler.get_last_lr()[0], epoch)
        time.sleep(0.001)
        # Checkpoint every epoch (overwrites the same file).
        torch.save(
            {
                'model': model.state_dict(),
                'epoch': epoch,
                'init_lr': scheduler.get_last_lr()[0]
            },
            os.path.join(save_model, env, args.net_type,
                         'checkpoint_last_pre.pth.tar'))
Example #7
0
def main():
    """Train VGG19-BN on CIFAR-10 with the xRDA optimizer, an l1 prox,
    and a cosine step-size schedule, then save the sparse network.
    """
    output_file = 'vgg19_sparse_model.dat'
    batch_size = 128
    epoch_count = 600

    # Standard CIFAR augmentation + per-channel normalization.
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
    ])

    transform_val = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2023, 0.1994, 0.2010))
    ])

    trainset = torchvision.datasets.CIFAR10(root='./',
                                            train=True,
                                            download=True,
                                            transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=batch_size,
                                              shuffle=True,
                                              num_workers=4)

    testset = torchvision.datasets.CIFAR10(root='./',
                                           train=False,
                                           download=True,
                                           transform=transform_val)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=32,
                                             shuffle=False,
                                             num_workers=2)

    conv_net = vgg19_bn(num_classes=10).cuda()
    conv_net.train()
    criterion = nn.CrossEntropyLoss()

    # Cosine schedule over the full run: max_iter = batches/epoch * epochs
    # (50000 = CIFAR-10 training-set size).
    init_lr = 1.0
    lam = 1e-6
    av_param = 0.0
    training_specs = CosineSpecs(max_iter=math.ceil(50000 / batch_size) *
                                 epoch_count,
                                 init_step_size=init_lr,
                                 mom_ts=10.0,
                                 b_mom_ts=10.0,
                                 weight_decay=5e-4)
    optimizer = xRDA(conv_net.parameters(),
                     it_specs=training_specs,
                     prox=l1_prox(lam=lam, maximum_factor=500))

    # NOTE(review): lr / prev_train_acc / prev_sparsity are never read —
    # leftovers from the manual-schedule variant of this script.
    lr = init_lr
    prev_train_acc = 0
    prev_sparsity = 0
    for epoch in range(epoch_count):
        total = 0
        correct = 0
        for data in trainloader:
            # get the inputs
            inputs, labels = data
            # NOTE(review): Variable is a no-op wrapper on modern PyTorch.
            inputs = Variable(inputs).cuda()
            labels = Variable(labels).cuda()

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = conv_net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Calculate train accuracy
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum()

        # NOTE(review): train_acc is assigned but unused below.
        train_acc = correct
        # Count of non-zero parameters — the sparsity measure reported.
        sparsity = sum(
            torch.nonzero(x).size()[0] for x in list(conv_net.parameters()))
        # Scaled by 100x100 so the print below can show XX.YY via
        # integer-style div/mod.
        accuracy = 10000 * correct / total
        t_accuracy = test_accuracy(testloader, conv_net, cuda=True)
        print(
            'Training Accuracy: %d.%02d %% Test Accuracy: %d.%02d %% Sparsity: %d'
            % (accuracy / 100, accuracy % 100, t_accuracy / 100,
               t_accuracy % 100, sparsity))

    # Calculate accuracy and save output.
    final_accuracy = test_accuracy(testloader, conv_net, cuda=True)
    print('Accuracy of the network on the 10000 test images: %d.%02d %%' %
          (final_accuracy / 100, final_accuracy % 100))
    torch.save(conv_net, output_file)
def main():
    """Load a trained OOD model from a checkpoint and save grad-CAM
    visualizations for every test image to ``./save_fig``.
    """
    output_dir = "./save_fig"

    # Device configuration
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Hyper-parameters
    # NOTE(review): eps is never used in this function.
    eps = 1e-8

    ### data config
    test_dataset = load_data.Dog_dataloader(image_dir=image_dir,
                                            num_class=args.num_classes,
                                            mode="test")
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=2)

    ### novelty data
    # NOTE(review): out_test_loader is built but never consumed below.
    out_test_dataset = load_data.Dog_dataloader(image_dir=image_dir,
                                                num_class=args.num_classes,
                                                mode="OOD")
    out_test_loader = torch.utils.data.DataLoader(out_test_dataset,
                                                  batch_size=1,
                                                  shuffle=False,
                                                  num_workers=2)

    ##### model, optimizer config
    # NOTE(review): unlike the other branches, the resnet50 branch omits
    # num_cc — confirm this asymmetry is intentional; an unknown
    # net_type leaves ``model`` unbound.
    if args.net_type == "resnet50":
        model = models.resnet50(num_c=args.num_classes, pretrained=True)
    elif args.net_type == "resnet34":
        model = models.resnet34(num_c=args.num_classes,
                                num_cc=args.OOD_num_classes,
                                pretrained=True)
    elif args.net_type == "vgg19":
        model = models.vgg19(num_c=args.num_classes,
                             num_cc=args.OOD_num_classes,
                             pretrained=True)
    elif args.net_type == "vgg16":
        model = models.vgg16(num_c=args.num_classes,
                             num_cc=args.OOD_num_classes,
                             pretrained=True)
    elif args.net_type == "vgg19_bn":
        model = models.vgg19_bn(num_c=args.num_classes,
                                num_cc=args.OOD_num_classes,
                                pretrained=True)
    elif args.net_type == "vgg16_bn":
        model = models.vgg16_bn(num_c=args.num_classes,
                                num_cc=args.OOD_num_classes,
                                pretrained=True)

    print("load checkpoint_last")
    checkpoint = torch.load(args.model_path)

    ##### load model
    model.load_state_dict(checkpoint["model"])
    # NOTE(review): start_epoch and optimizer are restored but unused —
    # this script only runs inference.
    start_epoch = checkpoint["epoch"]
    optimizer = optim.SGD(model.parameters(), lr=checkpoint["init_lr"])

    #### create folder
    Path(output_dir).mkdir(exist_ok=True, parents=True)

    model = model.to(device).eval()
    # Start grad-CAM
    # NOTE(review): bp, inv_normalize and gcam are created but never
    # used; only grad_cam below produces output.
    bp = BackPropagation(model=model)
    inv_normalize = transforms.Normalize(
        mean=[-0.485 / 0.229, -0.456 / 0.224, -0.406 / 0.255],
        std=[1 / 0.229, 1 / 0.224, 1 / 0.255])
    # Layer whose activations/gradients are visualized.
    target_layer = "layer4"

    stime = time.time()

    gcam = GradCAM(model=model)

    grad_cam = GradCAMmodule(target_layer, output_dir)
    grad_cam.model_config(model)
    for j, test_data in enumerate(test_loader):
        #### initialized
        org_image = test_data['input'].to(device)
        target_class = test_data['label'].to(device)

        # Visualize the predicted class, not the ground-truth class.
        target_class = int(target_class.argmax().cpu().detach())
        result = model(org_image).argmax()
        print("number: {} pred: {} target: {}".format(j, result, target_class))
        result = int(result.cpu().detach())
        grad_cam.saveGradCAM(org_image, result, j)
Example #9
0
 #%% ########## Load Data ##########
 # Script fragment: wrap pre-split ECG tensors in datasets/loaders and
 # set up a VGG19-BN classifier (training loop continues below).
 # change to dataset
 train_dataset = Data.TensorDataset(train_x, train_y)
 test_dataset = Data.TensorDataset(test_x, test_y)
 # load data
 print('load data')
 loader_train = Data.DataLoader(
     dataset=train_dataset,  # torch TensorDataset format
     batch_size=Args.batch_size,  # mini batch size
     shuffle=True,  # shuffle or not
     num_workers=0,  # multi-worker for load data
 )
 loader_test = Data.DataLoader(test_dataset, Args.batch_size)
 # Free the raw arrays — the datasets hold their own references.
 del ECG_data, ECG_label, train_x, test_x, train_y, test_y
 #%% ########## Create model ##########
 vgg19 = models.vgg19_bn().cuda() if Args.cuda else models.vgg19_bn()
 # optimizer
 optimizer = torch.optim.Adam(vgg19.parameters(), lr=Args.learn_rate)
 # loss function
 loss_func = nn.CrossEntropyLoss()
 # evaluate
 # Per-epoch metric histories.
 Accuracy = []
 F1 = []
 # save
 # Best_result = [best accuracy, predictions, labels] — presumably;
 # confirm against the training loop below.
 Best_result = [0, [], []]
 #%% ########## Training ##########
 if Args.show_plot:
     fig = plt.figure(1)
     plt.ion()
 print('Start Training')
 for epoch in range(Args.epoch):