def __init__(self, pathModel, nnArchitecture, nnClassCount, transCrop):
        """Load a trained DenseNet checkpoint and prepare it for CAM extraction.

        pathModel      -- checkpoint path; must contain a 'best_model_wts' entry
        nnArchitecture -- 'DENSE-NET-121' / 'DENSE-NET-169' / 'DENSE-NET-201'
        nnClassCount   -- number of output classes (unused here; kept for API parity)
        transCrop      -- target side length passed to transforms.Resize
        """

        #---- Initialize the network
        if nnArchitecture == 'DENSE-NET-121':
            model = densenet121(False).cuda()
        elif nnArchitecture == 'DENSE-NET-169':
            model = densenet169(False).cuda()
        elif nnArchitecture == 'DENSE-NET-201':
            model = densenet201(False).cuda()
        else:
            # Previously an unknown architecture fell through and crashed later
            # with UnboundLocalError on `model`; fail early with a clear message.
            raise ValueError('Unsupported architecture: %s' % nnArchitecture)

        model = torch.nn.DataParallel(model).cuda()

        modelCheckpoint = torch.load(pathModel)
        model.load_state_dict(modelCheckpoint['best_model_wts'], strict=False)

        # Keep only the convolutional feature extractor; CAM needs feature maps,
        # not the classifier head.
        self.model = model.module.features
        self.model.eval()

        #---- Initialize the weights
        # NOTE(review): index -2 presumably selects the penultimate parameter
        # tensor for CAM weighting -- confirm against the model definition.
        self.weights = list(self.model.parameters())[-2]

        #---- Initialize the image transform - resize + normalize (ImageNet stats)
        normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                         [0.229, 0.224, 0.225])
        transformList = []
        transformList.append(transforms.Resize(transCrop))
        transformList.append(transforms.ToTensor())
        transformList.append(normalize)

        self.transformSequence = transforms.Compose(transformList)
def get_net(name):
    """Instantiate the model named *name* and move it to the global ``device``.

    Exits the process with a non-zero status when *name* is not recognised.
    """
    # Table-driven dispatch instead of a 14-branch elif chain.
    factories = {
        'densenet121': densenet121,
        'densenet161': densenet161,
        'densenet169': densenet169,
        'googlenet': googlenet,
        'inception_v3': inception_v3,
        'mobilenet_v2': mobilenet_v2,
        'resnet18': resnet18,
        'resnet34': resnet34,
        'resnet50': resnet50,
        'resnet_orig': resnet_orig,
        'vgg11_bn': vgg11_bn,
        'vgg13_bn': vgg13_bn,
        'vgg16_bn': vgg16_bn,
        'vgg19_bn': vgg19_bn,
    }
    if name not in factories:
        print(f'{name} not a valid model name')
        # Bug fix: exit status 0 signalled success on this error path; use 1.
        sys.exit(1)

    return factories[name]().to(device)
# Exemple #3
# 0
def main():
    """Evaluate a feature extractor + classifier pair on the test set and print accuracy."""
    test_dir = opt.test_dir
    feature_param_file = opt.feat
    class_param_file = opt.cls
    bsize = opt.b

    # models
    if 'vgg' == opt.i:
        feature = Vgg16()
    elif 'resnet' == opt.i:
        feature = resnet50()
    elif 'densenet' == opt.i:
        feature = densenet121()
    else:
        # Previously an unknown backbone left `feature` undefined and crashed
        # below with UnboundLocalError; fail with a clear message instead.
        raise ValueError('unknown feature extractor: {}'.format(opt.i))
    feature.cuda()
    # feature.load_state_dict(torch.load(feature_param_file))
    feature.eval()

    classifier = Classifier(opt.i)
    classifier.cuda()
    # classifier.load_state_dict(torch.load(class_param_file))
    classifier.eval()

    loader = torch.utils.data.DataLoader(MyClsTestData(test_dir,
                                                       transform=True),
                                         batch_size=bsize,
                                         shuffle=True,
                                         num_workers=4,
                                         pin_memory=True)
    acc = eval_acc(feature, classifier, loader)
    # Bug fix: `print acc` was Python 2 syntax -- a SyntaxError under Python 3.
    print(acc)
# Exemple #4
# 0
def make_model():
    """Build a pretrained DenseNet-121 followed by a class-subset selector.

    Also records each parameter's original ``requires_grad`` flag in the
    module-level ``og_req_grads`` list so it can be restored later.
    """
    global og_req_grads

    backbone = densenet121(pretrained=True)
    keep_classes = classSelector(classesToKeep)
    model = nn.Sequential(backbone, keep_classes)

    og_req_grads = [param.requires_grad for param in model.parameters()]

    return model
# Exemple #5
# 0
 def __init__(self, out_size):
     """Pretrained DenseNet-121 whose classifier is replaced by a
     ``Linear -> Sigmoid`` head producing ``out_size`` outputs."""
     super(DenseNet121, self).__init__()
     self.densenet121 = densenet.densenet121(pretrained=True)
     in_features = self.densenet121.classifier.in_features
     self.densenet121.classifier = nn.Sequential(
         nn.Linear(in_features, out_size),
         nn.Sigmoid(),
     )
# Exemple #6
# 0
def get_densenet_model(x, args, is_training):
    """Run *x* through a headless TF-slim DenseNet-121 and hand the features
    to the downstream classifier/reconstruction model.

    x           -- input tensor (NHWC, per the arg scope below)
    args        -- config object; only ``weight_decay`` is read here
    is_training -- bool/placeholder toggling batch-norm train behaviour
    """
    with slim.arg_scope(densenet.densenet_arg_scope(weight_decay=args.weight_decay,
                                                    batch_norm_decay=0.99,
                                                    batch_norm_epsilon=1.1e-5,
                                                    data_format='NHWC')):
        # with_top=False drops the classification head; end_points carries
        # the intermediate activations.
        net, end_points = densenet.densenet121(x,
                                               None,
                                               is_training=is_training,
                                               with_top=False)
        # Final BN + ReLU that the headless backbone does not apply itself.
        net = slim.batch_norm(net, is_training=is_training, activation_fn=tf.nn.relu)
        return get_classifier_and_reconstruct_model(net, args, is_training, end_points)
 def __init__(self, out_size):
     """DenseNet-121 backbone with a 1x1-conv classification head.

     out_size -- number of output channels of the classifier.
     NOTE(review): reads the module-level/global ``args.small`` -- confirm
     that ``args`` is in scope wherever this class is instantiated.
     """
     super(DenseNet121, self).__init__()
     self.inplanes = 1024
     self.densenet121 = densenet.densenet121(pretrained=True, small=args.small)
     num_ftrs = self.densenet121.classifier.in_features
     self.classifier_font = nn.Sequential(
             # classification could be done with a fully-connected layer:
             # nn.Linear(num_ftrs, out_size)
             # here a 1x1 convolution is used instead, keeping spatial dims:
             nn.Conv2d(num_ftrs, out_size, kernel_size=1, bias=False)
     )
     # Parameters selected for training are collected here by other code.
     self.train_params = []
     self.unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)
# Exemple #8
# 0
def main(args):
    """Train a Siamese ranking DenseNet-121 on image pairs listed in ``args.anno_txt``.

    The annotation file is ';'-separated with columns: _, path0, path1, rating.
    Ratings are mapped to {-1, 0, +1} via sign(rating - 3).
    """
    transform_train = transforms.Compose([
        ReScale((args.image_size, args.image_size)),
        transforms.ToTensor()
        ])

    anno = pd.read_csv(args.anno_txt, sep=';', header=None)
    # Bug fix: DataFrame.ix was deprecated and removed (pandas >= 1.0);
    # positional indexing is .iloc.
    image_paths0 = list(anno.iloc[:, 1])
    image_paths1 = list(anno.iloc[:, 2])
    targets = np.array(anno.iloc[:, 3])
    targets = np.sign(targets - 3)
    global plotter
    plotter = VisdomLinePlotter(env_name=args.name)

    train_set = SiameseData(args.data_dir, image_paths0,
            image_paths1, targets, transform=transform_train)
    train_loader = DataLoader(train_set, batch_size=
            args.batch_size, #num_workers=1, # pin_memory=True,
            collate_fn = collate_cat,
            shuffle=True)
    model = densenet121(drop_rate=args.drop_rate)
    print('Number of model parameters: {}'.format(
        sum(p.data.nelement() for p in model.parameters())))
    model = model.cuda()
    if args.ckpt:
        if os.path.isfile(args.ckpt):
            print('loading ckpt {}'.format(args.ckpt))
            checkpoint = torch.load(args.ckpt)
            # Keep only keys the current model actually has; hoist the key set
            # out of the loop (it was rebuilt for every checkpoint entry).
            model_keys = set(model.state_dict())
            state_dict = OrderedDict(
                (k, v) for k, v in checkpoint.items() if k in model_keys)
            model.load_state_dict(state_dict)
            print('checkpoint loaded')

    cudnn.benchmark = True
    optimizer = torch.optim.Adam(model.parameters(), args.lr,
            weight_decay=args.weight_decay)
    criterion = RankLoss()
    for epoch in range(args.epoch):
        # adjust learning rate
        train(train_loader, model, criterion, optimizer, epoch)
        save_ckpt(model, args.training_dir, epoch)
def set_model():
    """Construct the backbone network selected by ``Config.backbone``.

    Raises ValueError for an unknown backbone name.
    """
    backbone = Config.backbone
    if backbone == 'resnet18':
        model = resnet.resnet18(num_class=Config.out_class)
    elif backbone == 'resnet34':
        model = resnet.resnet34(num_class=Config.out_class, pretrained=Config.pretrain)
    elif backbone == 'resnet50':
        model = resnet.resnet50(num_class=Config.out_class, pretrained=Config.pretrain)
    elif backbone == 'ncrf18':
        model = ncrf.resnet18(num_class=Config.out_class)
    elif backbone == 'ncrf34':
        model = ncrf.resnet34(num_class=Config.out_class)
    elif backbone == 'ncrf50':
        model = ncrf.resnet50(num_class=Config.out_class)
    elif backbone == 'densenet121':
        model = densenet.densenet121(Config.out_class, pretrained=Config.pretrain, drop_rate=Config.drop_rate)
    elif backbone == 'msdn18':
        model = hardcore_msdn.msdn18(Config.out_class, ss=Config.ss, drop_rate=Config.drop_rate)
    elif backbone == 'alexnet':
        model = alexnet.alexnet(2)
    else:
        # Bug fix: the original chain of independent `if`s fell through for an
        # unknown backbone and raised UnboundLocalError at `return model`.
        raise ValueError('unknown backbone: {}'.format(backbone))
    return model
# Exemple #10
# 0
def CreatNet(name):
    """Build the network named *name*, adapted to single-channel input and 5 classes.

    Raises ValueError for an unrecognised name (previously the function fell
    off the end and returned None, crashing the caller later).
    """
    if name == 'LSTM':
        return LSTM(100, 27 * 5, 5)
    elif name == 'CNN':
        return CNN()
    elif name == 'resnet18_1d':
        return resnet18_1d()
    elif name == 'dfcnn':
        return dfcnn()
    elif name in ['resnet101', 'resnet50', 'resnet18']:
        if name == 'resnet101':
            net = torchvision.models.resnet101(pretrained=False)
            net.fc = nn.Linear(2048, 5)
        elif name == 'resnet50':
            net = torchvision.models.resnet50(pretrained=False)
            net.fc = nn.Linear(2048, 5)
        else:  # resnet18 -- smaller feature dim
            net = torchvision.models.resnet18(pretrained=False)
            net.fc = nn.Linear(512, 5)
        # Replace the stem so the net accepts 1-channel (e.g. signal/grayscale) input.
        net.conv1 = nn.Conv2d(1, 64, 7, 2, 3, bias=False)

        return net

    elif 'densenet' in name:
        if name == 'densenet121':
            net = densenet.densenet121(pretrained=False, num_classes=5)
        elif name == 'densenet201':
            net = densenet.densenet201(pretrained=False, num_classes=5)
        else:
            # Bug fix: e.g. 'densenet169' used to leave `net` undefined.
            raise ValueError('unsupported densenet variant: {}'.format(name))
        return net

    raise ValueError('unknown network name: {}'.format(name))
# Exemple #11
# 0
                                          pin_memory=True,
                                          num_workers=4)

if args.model == 'resnet18':
    cnn = ResNet18(num_classes=num_classes)
elif args.model == 'resnet50':
    from resnet import resnet50
    cnn = resnet50(num_classes)
    print(cnn)
elif args.model == 'resnext50':
    from resnext import resnext50
    cnn = resnext50(num_classes)
    print(cnn)
elif args.model == 'densenet121':
    from densenet import densenet121
    cnn = densenet121(num_classes)
    print(cnn)
elif args.model == 'wideresnet':
    if args.dataset == 'svhn':
        cnn = WideResNet(depth=16,
                         num_classes=num_classes,
                         widen_factor=8,
                         dropRate=0.4)
    else:
        cnn = WideResNet(depth=28,
                         num_classes=num_classes,
                         widen_factor=10,
                         dropRate=0.3)
elif args.model == 'wideresnet101':
    cnn = torch.hub.load('pytorch/vision:v0.5.0',
                         'wide_resnet101_2',
# Exemple #12
# 0
                        help='Input image path')

    args = parser.parse_args()
    args.use_cuda = args.use_cuda and torch.cuda.is_available()
    if args.use_cuda:
        print("Using GPU for acceleration")
    else:
        print("Using CPU for computation")
    return args


if __name__ == '__main__':
    args = get_args()

    # loading models
    # NOTE(review): checkpoint and image paths below are hard-coded to one
    # developer's machine -- parameterize before reuse.
    model = densenet121()
    # DataParallel is needed so the checkpoint's 'module.'-prefixed keys match.
    model = torch.nn.DataParallel(model)
    checkpoint = torch.load(
        "/Users/fredrickang/Desktop/github/ChestXray/logs/densenet121_model_best.pth"
    )
    model.load_state_dict(checkpoint['state_dict'])

    # Grad-CAM over the third dense block's activations.
    grad_cam = GradCam(model=model,
                       target_layer_names=["denseblock3"],
                       use_cuda=True)

    # Read as 3-channel BGR, resize, and scale to [0, 1] before normalization.
    img = cv2.imread(
        "/Users/fredrickang/Desktop/github/ChestXray/00009709_010.png", 1)
    img = np.float32(cv2.resize(img, (256, 256))) / 255
    inputs = preprocess_image(img)
def get_model_dics(device, model_list= None):
    """Build and load a dictionary of pretrained 110-class models.

    device     -- torch device the 'old_*' full-model checkpoints are moved to
                  (the other entries are loaded via ``load_model`` with it).
    model_list -- names to load; defaults to the ensemble below.
    Returns a dict mapping model name -> instantiated (and weight-loaded) model.
    NOTE(review): weight paths are relative to the working directory -- the
    function assumes it runs from the project's script folder.
    """
    if model_list is None:
        model_list = ['densenet121', 'densenet161', 'resnet50', 'resnet152',
                      'incept_v1', 'incept_v3', 'inception_v4', 'incept_resnet_v2',
                      'incept_v4_adv2', 'incept_resnet_v2_adv2',
                      'black_densenet161','black_resnet50','black_incept_v3',
                      'old_vgg','old_res','old_incept']
    models = {}
    for model in model_list:
        # --- torchvision-style classifiers with locally trained weights ---
        if model=='densenet121':
            models['densenet121'] = densenet121(num_classes=110)
            load_model(models['densenet121'],"../pre_weights/ep_38_densenet121_val_acc_0.6527.pth",device)
        if model=='densenet161':
            models['densenet161'] = densenet161(num_classes=110)
            load_model(models['densenet161'],"../pre_weights/ep_30_densenet161_val_acc_0.6990.pth",device)
        if model=='resnet50':
            models['resnet50'] = resnet50(num_classes=110)
            load_model(models['resnet50'],"../pre_weights/ep_41_resnet50_val_acc_0.6900.pth",device)
        if model=='incept_v3':
            models['incept_v3'] = inception_v3(num_classes=110)
            load_model(models['incept_v3'],"../pre_weights/ep_36_inception_v3_val_acc_0.6668.pth",device)
        if model=='incept_v1':
            models['incept_v1'] = googlenet(num_classes=110)
            load_model(models['incept_v1'],"../pre_weights/ep_33_googlenet_val_acc_0.7091.pth",device)
    #vgg16 = vgg16_bn(num_classes=110)
    #load_model(vgg16, "./pre_weights/ep_30_vgg16_bn_val_acc_0.7282.pth",device)
        # --- Inception variants (plain and adversarially trained) ---
        if model=='incept_resnet_v2':
            models['incept_resnet_v2'] = InceptionResNetV2(num_classes=110)
            load_model(models['incept_resnet_v2'], "../pre_weights/ep_17_InceptionResNetV2_ori_0.8320.pth",device)

        if model=='incept_v4':
            models['incept_v4'] = InceptionV4(num_classes=110)
            load_model(models['incept_v4'],"../pre_weights/ep_17_InceptionV4_ori_0.8171.pth",device)
        if model=='incept_resnet_v2_adv':
            models['incept_resnet_v2_adv'] = InceptionResNetV2(num_classes=110)
            load_model(models['incept_resnet_v2_adv'], "../pre_weights/ep_22_InceptionResNetV2_val_acc_0.8214.pth",device)

        if model=='incept_v4_adv':
            models['incept_v4_adv'] = InceptionV4(num_classes=110)
            load_model(models['incept_v4_adv'],"../pre_weights/ep_24_InceptionV4_val_acc_0.6765.pth",device)
        if model=='incept_resnet_v2_adv2':
            models['incept_resnet_v2_adv2'] = InceptionResNetV2(num_classes=110)
            #load_model(models['incept_resnet_v2_adv2'], "../test_weights/ep_29_InceptionResNetV2_adv2_0.8115.pth",device)
            load_model(models['incept_resnet_v2_adv2'], "../test_weights/ep_13_InceptionResNetV2_val_acc_0.8889.pth",device)

        if model=='incept_v4_adv2':
            models['incept_v4_adv2'] = InceptionV4(num_classes=110)
#            load_model(models['incept_v4_adv2'],"../test_weights/ep_32_InceptionV4_adv2_0.7579.pth",device)
            load_model(models['incept_v4_adv2'],"../test_weights/ep_50_InceptionV4_val_acc_0.8295.pth",device)

        # --- ResNet-152 variants ---
        if model=='resnet152':
            models['resnet152'] = resnet152(num_classes=110)
            load_model(models['resnet152'],"../pre_weights/ep_14_resnet152_ori_0.6956.pth",device)
        if model=='resnet152_adv':
            models['resnet152_adv'] = resnet152(num_classes=110)
            load_model(models['resnet152_adv'],"../pre_weights/ep_29_resnet152_adv_0.6939.pth",device)
        if model=='resnet152_adv2':
            models['resnet152_adv2'] = resnet152(num_classes=110)
            load_model(models['resnet152_adv2'],"../pre_weights/ep_31_resnet152_adv2_0.6931.pth",device)



        # --- black-box substitute models ---
        if model=='black_resnet50':
            models['black_resnet50'] = resnet50(num_classes=110)
            load_model(models['black_resnet50'],"../test_weights/ep_0_resnet50_val_acc_0.7063.pth",device)
        if model=='black_densenet161':
            models['black_densenet161'] = densenet161(num_classes=110)
            load_model(models['black_densenet161'],"../test_weights/ep_4_densenet161_val_acc_0.6892.pth",device)
        if model=='black_incept_v3':
            models['black_incept_v3']=inception_v3(num_classes=110)
            load_model(models['black_incept_v3'],"../test_weights/ep_28_inception_v3_val_acc_0.6680.pth",device)
        # --- legacy TF-converted models: whole pickled modules, loaded via imp ---
        # NOTE(review): `imp` is deprecated (removed in Python 3.12) and
        # torch.load of a full model unpickles arbitrary code -- only use with
        # trusted files; consider importlib + state_dict checkpoints.
        if model=='old_res':
            MainModel = imp.load_source('MainModel', "./models_old/tf_to_pytorch_resnet_v1_50.py")
            models['old_res'] = torch.load('./models_old/tf_to_pytorch_resnet_v1_50.pth').to(device)
        if model=='old_vgg':
            MainModel = imp.load_source('MainModel', "./models_old/tf_to_pytorch_vgg16.py")
            models[model] = torch.load('./models_old/tf_to_pytorch_vgg16.pth').to(device)
        if model=='old_incept':
            MainModel = imp.load_source('MainModel', "./models_old/tf_to_pytorch_inception_v1.py")
            models[model]  = torch.load('./models_old/tf_to_pytorch_inception_v1.pth').to(device)

    return models
# Exemple #14
# 0
# Wipe previous TensorBoard logs and open a fresh run directory.
# NOTE(review): the directory is cleared *before* the existence check below,
# and via a shell call -- consider shutil.rmtree and reordering; verify intent.
os.system('rm -rf ./runs2/*')
writer = SummaryWriter('./runs2/' + datetime.now().strftime('%B%d  %H:%M:%S'))

if not os.path.exists('./runs2'):
    os.mkdir('./runs2')

if not os.path.exists(check_dir):
    os.mkdir(check_dir)

# models
# Pick the feature backbone from the --i command-line flag.
if 'vgg' == opt.i:
    feature = Vgg16(pretrained=True)
elif 'resnet' == opt.i:
    feature = resnet50(pretrained=True)
elif 'densenet' == opt.i:
    feature = densenet121(pretrained=True)
feature.cuda()

classifier = Classifier(opt.i)
classifier.cuda()

# Optionally resume both networks from the checkpoint of epoch `resume_ep`.
if resume_ep >= 0:
    feature_param_file = glob.glob('%s/feature-epoch-%d*.pth' %
                                   (check_dir, resume_ep))
    classifier_param_file = glob.glob('%s/classifier-epoch-%d*.pth' %
                                      (check_dir, resume_ep))
    feature.load_state_dict(torch.load(feature_param_file[0]))
    classifier.load_state_dict(torch.load(classifier_param_file[0]))

train_loader = torch.utils.data.DataLoader(MyClsData(train_dir,
                                                     transform=True,
# `x` is loaded earlier (outside this excerpt); `y` holds the matching labels.
print(x.shape)
y = np.load("lab_train_normal_1234_random.npy")
print(y.shape)

x = torch.Tensor(x)
y = torch.Tensor(y)
#y = torch.topk(y, 1)[1].squeeze(1)
#print(y.size())
torch_dataset = Data.TensorDataset(x, y)
loader = Data.DataLoader(
    dataset=torch_dataset,
    batch_size=BS,
    shuffle=True,
)

# 6-channel DenseNet-121 backbone (custom densenet module, not torchvision).
net = densenet.densenet121(in_channel=6)
net = net.to(device)
if device == 'cuda':
    net = torch.nn.DataParallel(net)
    cudnn.benchmark = True

# MSE loss: the targets are treated as regression values, not class indices.
criterion = nn.MSELoss()
optimizer = optim.SGD(net.parameters(), lr=LR, momentum=0.9, weight_decay=5e-4)


def train(epoch):
    print('\nEpoch: %d' % epoch)
    net.train()
    train_loss = 0
    correct = 0
    total = 0
# Exemple #16
# 0
parser.add_argument('--b', type=int, default=16)  # batch size
opt = parser.parse_args()
print(opt)

test_dir = opt.test_dir
feature_param_file = opt.feat
class_param_file = opt.cls
bsize = opt.b

# models
# Pick the feature backbone from the --i flag and restore trained weights.
if 'vgg' == opt.i:
    feature = Vgg16()
elif 'resnet' == opt.i:
    feature = resnet50()
elif 'densenet' == opt.i:
    feature = densenet121()
feature.cuda()
feature.load_state_dict(torch.load(feature_param_file))

classifier = Classifier(opt.i)
classifier.cuda()
classifier.load_state_dict(torch.load(class_param_file))

loader = torch.utils.data.DataLoader(
    MyClsTestData(test_dir, transform=True),
    batch_size=bsize, shuffle=True, num_workers=4, pin_memory=True)

# Running counters for accuracy computed in the evaluation loop below.
it = 0.0
num_correct = 0
for ib, (data, lbl) in enumerate(loader):
    inputs = Variable(data.float()).cuda()
# Exemple #17
# 0
        print('Best val Loss: {:4f}'.format(lowest_val_loss))
        print(f'Best epoch: {best_epoch}')

    # save best model weights
    torch.save(model, f'./weights/full_{model_name}_epoch{best_epoch}.pth')
    return model


if __name__ == "__main__":
    now = datetime.now()
    # NOTE(review): the run name says 'wide_resnet101' but the model built
    # below is densenet121 -- likely a stale label; confirm and rename.
    model_name = f'wide_resnet101_AutoWtdCE_{now.date()}_{now.hour}-{now.minute}'

    # default `log_dir` is "runs" - we'll be more specific here
    writer = SummaryWriter(f'runs/{model_name}')

    # Pretrained DenseNet-121 fine-tuned to 5 classes.
    model_ft = densenet121(pretrained=True)
    num_ftrs = model_ft.classifier.in_features
    model_ft.classifier = nn.Linear(num_ftrs, 5)
    model_ft = model_ft.to(device)

    # Class-weighted cross entropy to counter class imbalance.
    criterion = nn.CrossEntropyLoss(weight=training.weights.to(device))

    # Observe that all parameters are being optimized
    optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.01, momentum=0.9)

    # Decay LR by a factor of 0.1 every 7 epochs
    exp_lr_scheduler = lr_scheduler.StepLR(
        optimizer_ft, step_size=10, gamma=0.1)

    model_ft = train_model(model_ft, criterion, optimizer_ft,
                           exp_lr_scheduler, writer, model_name, batch_size=16, num_epochs=50)
def get_model(class_weights=None):
    """Build a transfer-learning DenseNet-121 for 257-way classification.

    Freezes the backbone, replaces the classifier (1024 -> 257), and unfreezes
    only 'norm5' and 'denseblock4' parameters. Returns (model, criterion,
    optimizer) with the model on GPU.

    class_weights -- optional per-class weight tensor for CrossEntropyLoss.
    """
    model = densenet121(pretrained=True, drop_rate=0.25)
    # model_size: penultimate_layer_output_dim,
    # 201: 1920, 169: 1664, 121: 1024

    # make all params untrainable
    for p in model.parameters():
        p.requires_grad = False

    # reset the last fc layer
    model.classifier = nn.Linear(1024, 257)
    normal(model.classifier.weight, 0.0, 0.01)
    constant(model.classifier.bias, 0.0)

    # make some other params trainable
    # (one pass + a set instead of two comprehensions and O(n) list lookups;
    # the collection is only ever used for membership tests)
    trainable_params = {
        n for n, p in model.named_parameters()
        if 'norm5' in n or 'denseblock4' in n
    }
    for n, p in model.named_parameters():
        if n in trainable_params:
            p.requires_grad = True

    # in-place ReLU would break gradient hooks/feature extraction here
    for m in model.features.denseblock4.modules():
        if isinstance(m, nn.ReLU):
            m.inplace = False

    # create different parameter groups
    classifier_weights = [model.classifier.weight]
    classifier_biases = [model.classifier.bias]
    features_weights = [
        p for n, p in model.named_parameters()
        if n in trainable_params and 'conv' in n
    ]
    features_bn_weights = [
        p for n, p in model.named_parameters()
        if n in trainable_params and 'norm' in n and 'weight' in n
    ]
    features_bn_biases = [
        p for n, p in model.named_parameters()
        if n in trainable_params and 'bias' in n
    ]

    # you can set different learning rates
    classifier_lr = 1e-2
    features_lr = 1e-2
    # but they are not actually used (because lr_scheduler is used)

    # weight decay only on weights, never on biases / BN scale-shift
    params = [{
        'params': classifier_weights,
        'lr': classifier_lr,
        'weight_decay': 1e-4
    }, {
        'params': classifier_biases,
        'lr': classifier_lr
    }, {
        'params': features_weights,
        'lr': features_lr,
        'weight_decay': 1e-4
    }, {
        'params': features_bn_weights,
        'lr': features_lr
    }, {
        'params': features_bn_biases,
        'lr': features_lr
    }]
    optimizer = optim.SGD(params, momentum=0.9, nesterov=True)

    # loss function
    criterion = nn.CrossEntropyLoss(weight=class_weights).cuda()
    # move the model to gpu
    model = model.cuda()
    return model, criterion, optimizer
def get_model(args):
    """Build the classification network named by ``args.network``.

    VGG and DenseNet variants are built as-is; ResNet variants get their first
    conv replaced so the network accepts single-channel (grayscale) input.
    Raises ValueError for an unknown name (previously this fell through and
    raised UnboundLocalError at ``return model``).
    """
    network = args.network

    def _single_channel_stem(model):
        # Swap conv1 for a 1-input-channel copy with identical hyperparameters.
        # NOTE(review): the original passed `bias=model.conv1.bias` where
        # Conv2d expects a bool; behaviour preserved -- confirm intent.
        model.conv1 = torch.nn.Conv2d(in_channels=1,
                                      out_channels=model.conv1.out_channels,
                                      kernel_size=model.conv1.kernel_size,
                                      stride=model.conv1.stride,
                                      padding=model.conv1.padding,
                                      bias=model.conv1.bias)
        return model

    vgg_factories = {
        'vgg11': vgg.vgg11,
        'vgg13': vgg.vgg13,
        'vgg16': vgg.vgg16,
        'vgg19': vgg.vgg19,
        'vgg11_bn': vgg.vgg11_bn,
        'vgg13_bn': vgg.vgg13_bn,
        'vgg16_bn': vgg.vgg16_bn,
        'vgg19_bn': vgg.vgg19_bn,
    }
    resnet_factories = {
        'resnet18': models.resnet18,
        'resnet34': models.resnet34,
        'resnet50': models.resnet50,
        'resnet101': models.resnet101,
        'resnet152': models.resnet152,
    }
    densenet_factories = {
        'densenet121': densenet.densenet121,
        'densenet169': densenet.densenet169,
        'densenet161': densenet.densenet161,
        'densenet201': densenet.densenet201,
    }

    if network in vgg_factories:
        model = vgg_factories[network](num_classes=args.class_num)
    elif network in resnet_factories:
        model = _single_channel_stem(
            resnet_factories[network](num_classes=args.class_num))
    elif network in densenet_factories:
        model = densenet_factories[network](num_classes=args.class_num)
    else:
        raise ValueError('unknown network: {}'.format(network))

    return model
# Exemple #20
# 0
def generate_model(opt):
    """Build a 3D video-classification network from parsed options.

    Dispatches on ``opt.model`` ('resnet', 'preresnet', 'wideresnet',
    'resnext', 'densenet') and ``opt.model_depth`` to construct the network,
    optionally wraps it in ``nn.DataParallel`` on GPU, and — when
    ``opt.pretrain_path`` is set — loads a checkpoint and swaps in a fresh
    classification head with ``opt.n_finetune_classes`` outputs.

    Returns:
        tuple: ``(model, parameters)`` where ``parameters`` is the
        fine-tuning parameter groups when a pretrained checkpoint is loaded,
        or ``model.parameters()`` otherwise.
    """
    assert opt.model in [
        'resnet', 'preresnet', 'wideresnet', 'resnext', 'densenet'
    ]

    if opt.model == 'resnet':
        assert opt.model_depth in [10, 18, 34, 50, 101, 152, 200]

        # NOTE(review): was `from resnet import get_fine_tuning_parameters`;
        # aligned with the `models.*` package path used by every other branch
        # below — confirm the project has no top-level resnet.py shim.
        from models.resnet import get_fine_tuning_parameters

        if opt.model_depth == 10:
            model = resnet.resnet10(num_classes=opt.n_classes,
                                    shortcut_type=opt.resnet_shortcut,
                                    sample_size=opt.sample_size,
                                    sample_duration=opt.sample_duration)
        elif opt.model_depth == 18:
            model = resnet.resnet18(num_classes=opt.n_classes,
                                    shortcut_type=opt.resnet_shortcut,
                                    sample_size=opt.sample_size,
                                    sample_duration=opt.sample_duration)
        elif opt.model_depth == 34:
            model = resnet.resnet34(num_classes=opt.n_classes,
                                    shortcut_type=opt.resnet_shortcut,
                                    sample_size=opt.sample_size,
                                    sample_duration=opt.sample_duration)
        elif opt.model_depth == 50:
            model = resnet.resnet50(num_classes=opt.n_classes,
                                    shortcut_type=opt.resnet_shortcut,
                                    sample_size=opt.sample_size,
                                    sample_duration=opt.sample_duration)
        elif opt.model_depth == 101:
            model = resnet.resnet101(num_classes=opt.n_classes,
                                     shortcut_type=opt.resnet_shortcut,
                                     sample_size=opt.sample_size,
                                     sample_duration=opt.sample_duration)
        elif opt.model_depth == 152:
            model = resnet.resnet152(num_classes=opt.n_classes,
                                     shortcut_type=opt.resnet_shortcut,
                                     sample_size=opt.sample_size,
                                     sample_duration=opt.sample_duration)
        elif opt.model_depth == 200:
            model = resnet.resnet200(num_classes=opt.n_classes,
                                     shortcut_type=opt.resnet_shortcut,
                                     sample_size=opt.sample_size,
                                     sample_duration=opt.sample_duration)
    elif opt.model == 'wideresnet':
        assert opt.model_depth in [50]

        from models.wide_resnet import get_fine_tuning_parameters

        if opt.model_depth == 50:
            model = wide_resnet.resnet50(num_classes=opt.n_classes,
                                         shortcut_type=opt.resnet_shortcut,
                                         k=opt.wide_resnet_k,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
    elif opt.model == 'resnext':
        assert opt.model_depth in [50, 101, 152]

        from models.resnext import get_fine_tuning_parameters

        if opt.model_depth == 50:
            model = resnext.resnet50(num_classes=opt.n_classes,
                                     shortcut_type=opt.resnet_shortcut,
                                     cardinality=opt.resnext_cardinality,
                                     sample_size=opt.sample_size,
                                     sample_duration=opt.sample_duration)
        elif opt.model_depth == 101:
            model = resnext.resnet101(num_classes=opt.n_classes,
                                      shortcut_type=opt.resnet_shortcut,
                                      cardinality=opt.resnext_cardinality,
                                      sample_size=opt.sample_size,
                                      sample_duration=opt.sample_duration)
        elif opt.model_depth == 152:
            model = resnext.resnet152(num_classes=opt.n_classes,
                                      shortcut_type=opt.resnet_shortcut,
                                      cardinality=opt.resnext_cardinality,
                                      sample_size=opt.sample_size,
                                      sample_duration=opt.sample_duration)
    elif opt.model == 'preresnet':
        assert opt.model_depth in [18, 34, 50, 101, 152, 200]

        from models.pre_act_resnet import get_fine_tuning_parameters

        if opt.model_depth == 18:
            model = pre_act_resnet.resnet18(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 34:
            model = pre_act_resnet.resnet34(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 50:
            model = pre_act_resnet.resnet50(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 101:
            model = pre_act_resnet.resnet101(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 152:
            model = pre_act_resnet.resnet152(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
        elif opt.model_depth == 200:
            model = pre_act_resnet.resnet200(
                num_classes=opt.n_classes,
                shortcut_type=opt.resnet_shortcut,
                sample_size=opt.sample_size,
                sample_duration=opt.sample_duration)
    elif opt.model == 'densenet':
        assert opt.model_depth in [121, 169, 201, 264]

        from models.densenet import get_fine_tuning_parameters

        if opt.model_depth == 121:
            model = densenet.densenet121(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
        elif opt.model_depth == 169:
            model = densenet.densenet169(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
        elif opt.model_depth == 201:
            model = densenet.densenet201(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)
        elif opt.model_depth == 264:
            model = densenet.densenet264(num_classes=opt.n_classes,
                                         sample_size=opt.sample_size,
                                         sample_duration=opt.sample_duration)

    if not opt.no_cuda:
        model = model.cuda()
        model = nn.DataParallel(model, device_ids=None)

        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            assert opt.arch == pretrain['arch']

            model.load_state_dict(pretrain['state_dict'])

            # Swap in a fresh head for fine-tuning; DenseNet calls it
            # `classifier`, the ResNet family calls it `fc`.  The model is
            # DataParallel-wrapped here, hence `model.module`.
            if opt.model == 'densenet':
                model.module.classifier = nn.Linear(
                    model.module.classifier.in_features,
                    opt.n_finetune_classes)
                model.module.classifier = model.module.classifier.cuda()
            else:
                model.module.fc = nn.Linear(model.module.fc.in_features,
                                            opt.n_finetune_classes)
                model.module.fc = model.module.fc.cuda()

            parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
            return model, parameters
    else:
        if opt.pretrain_path:
            print('loading pretrained model {}'.format(opt.pretrain_path))
            pretrain = torch.load(opt.pretrain_path)
            assert opt.arch == pretrain['arch']

            model.load_state_dict(pretrain['state_dict'])

            # CPU path: no DataParallel wrapper, so access the head directly.
            if opt.model == 'densenet':
                model.classifier = nn.Linear(model.classifier.in_features,
                                             opt.n_finetune_classes)
            else:
                model.fc = nn.Linear(model.fc.in_features,
                                     opt.n_finetune_classes)

            parameters = get_fine_tuning_parameters(model, opt.ft_begin_index)
            return model, parameters

    return model, model.parameters()
Exemple #21
0
    # Evaluate a CheXpert DenseNet-121 checkpoint on the test split and
    # collect per-sample labels/outputs for downstream metric computation.
    RootPath = "D:/project/shallowNN/dataset/"
    df = pd.read_csv(RootPath + "CheXpert-v1.0-small/TEST.csv")
    # presumably resolves image paths and cleans label columns — TODO confirm
    # against the preprocess() definition.
    df = preprocess(df, RootPath)

    # df = df.sample(100)

    test_dl = DataLoader(CheXpert(df), batch_size=20, shuffle=True)

    # model_f = torch.load("D:/project/shallowNN/ShallowDeepNN/scripts/models/Chexpert/checkpoints/m_0120_012145.pth.tar",
    # 	map_location=torch.device("cpu"))
    # model_f = model_f['state_dict']
    # Checkpoint is a raw state_dict saved from a DataParallel-wrapped model,
    # hence the DataParallel wrap below before load_state_dict.
    model_f = torch.load(
        "D:/project/shallowNN/ShallowDeepNN/scripts/models/Chexpert/checkpoints/epoch_31.pth"
    )

    # One output per label column (first column assumed non-label).
    model = densenet121(num_classes=df.shape[1] - 1)
    model = torch.nn.DataParallel(model)
    model.load_state_dict(model_f)

    LABELS = []
    OUTPUTS = []
    model.eval()
    # NOTE(review): no torch.no_grad() here, so gradients are tracked during
    # inference — works, but wastes memory; verify before large runs.
    for d, l in tqdm(test_dl):
        print(d.shape)
        o = model(d)
        LABELS.extend(l.detach().cpu().numpy())
        OUTPUTS.extend(o.detach().cpu().numpy())

    LABELS = np.asarray(LABELS)
    OUTPUTS = np.asarray(OUTPUTS)
Exemple #22
0
    def __init__(self,
                 base_model,
                 in_channels=3,
                 out_channels=1,
                 dropout_rate=0.2,
                 pretrained=False):
        """Wrap a CNN backbone with mean / log-variance regression heads.

        Args:
            base_model: one of 'resnet50', 'resnet101', 'densenet121',
                'densenet201', 'efficientnetb0', 'efficientnetb4'.
            in_channels: input image channels (must be 3 when pretrained).
            out_channels: dimensionality of the predicted mean.
            dropout_rate: drop rate passed to resnet/densenet constructors.
            pretrained: load ImageNet weights for the backbone.
        """
        super().__init__()

        assert base_model in [
            'resnet50', 'resnet101', 'densenet121', 'densenet201',
            'efficientnetb0', 'efficientnetb4'
        ]

        self._in_channels = in_channels
        self._out_channels = out_channels

        # Construct the requested backbone and record its feature width.
        if base_model == 'resnet50':
            feat_dim = 2048
            if pretrained:
                assert in_channels == 3
                self._base_model = resnet50(pretrained=True,
                                            drop_rate=dropout_rate)
            else:
                self._base_model = resnet50(pretrained=False,
                                            in_channels=in_channels,
                                            drop_rate=dropout_rate)
        elif base_model == 'resnet101':
            feat_dim = 2048
            if pretrained:
                assert in_channels == 3
                self._base_model = resnet101(pretrained=True,
                                             drop_rate=dropout_rate)
            else:
                self._base_model = resnet101(pretrained=False,
                                             in_channels=in_channels,
                                             drop_rate=dropout_rate)
        elif base_model == 'densenet121':
            feat_dim = 1024
            if pretrained:
                assert in_channels == 3
                self._base_model = densenet121(pretrained=True,
                                               drop_rate=dropout_rate)
            else:
                self._base_model = densenet121(pretrained=False,
                                               drop_rate=dropout_rate,
                                               in_channels=in_channels)
        elif base_model == 'densenet201':
            feat_dim = 1920
            if pretrained:
                assert in_channels == 3
                self._base_model = densenet201(pretrained=True,
                                               drop_rate=dropout_rate)
            else:
                self._base_model = densenet201(pretrained=False,
                                               drop_rate=dropout_rate,
                                               in_channels=in_channels)
        elif base_model == 'efficientnetb0':
            feat_dim = 1280
            if pretrained:
                assert in_channels == 3
                self._base_model = EfficientNet.from_pretrained(
                    'efficientnet-b0')
            else:
                self._base_model = EfficientNet.from_name(
                    'efficientnet-b0', {'in_channels': in_channels})
        elif base_model == 'efficientnetb4':
            feat_dim = 1792
            if pretrained:
                assert in_channels == 3
                self._base_model = EfficientNet.from_pretrained(
                    'efficientnet-b4')
            else:
                self._base_model = EfficientNet.from_name(
                    'efficientnet-b4', {'in_channels': in_channels})

        # Two-layer heads: one for the predictive mean, one for a scalar
        # log-variance (heteroscedastic-style uncertainty output).
        self._fc_mu1 = torch.nn.Linear(feat_dim, feat_dim)
        self._fc_mu2 = torch.nn.Linear(feat_dim, out_channels)
        self._fc_logvar1 = torch.nn.Linear(feat_dim, feat_dim)
        self._fc_logvar2 = torch.nn.Linear(feat_dim, 1)

        # Strip the backbone's own classifier so it emits raw features.
        if 'resnet' in base_model:
            self._base_model.fc = torch.nn.Identity()
        elif 'densenet' in base_model:
            self._base_model.classifier = torch.nn.Identity()
        elif 'efficientnet' in base_model:
            self._base_model._fc = torch.nn.Identity()

        # MC-dropout settings: number of stochastic passes and drop prob.
        self._dropout_T = 25
        self._dropout_p = 0.5
    # Seed NumPy for reproducible shuffling/augmentation.
    np.random.seed(seed)

    ##################
    # Initialization #
    ##################
    # TODO: Put into command line args
    num_classes = 10
    batch_size = 16
    num_epochs = 10
    base_lr = 0.1
    use_cuda = False
    device = torch.device("cuda" if use_cuda else "cpu")
    # Ensure the checkpoint directory exists before training starts.
    pathlib.Path('./snapshots').mkdir(parents=True, exist_ok=True)

    # Load a model pretrained on ImageNet
    model = densenet.densenet121(pretrained=True)
    # Replace classification layer with a fresh head for num_classes outputs
    # (ImageNet head has 1000 outputs).
    model.classifier = torch.nn.Linear(model.classifier.in_features,
                                       num_classes)
    model.to(device)

    criterion = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=base_lr, momentum=0.9)

    ############
    # Training #
    ############
    train_loader, val_loader = get_train_val_split(batch_size)

    # Use model that performs best on validation for testing
    best_val_accuracy = 0
Exemple #24
0
        n=5).reset_index()  # split train - test set.
    # Validation-time transform: fixed center crop, tensor conversion, and a
    # cast to float32 (no augmentation at eval time).
    tensor_transform_test = transforms.Compose([
        CenterCrop(896),
        transforms.ToTensor(),
        lambda x: x.float(),
    ])
    dataset_test = KneeGradingDataset(val,
                                      HOME_PATH,
                                      tensor_transform_test,
                                      stage='val')

    test_loader = data.DataLoader(dataset_test, batch_size=2)
    print('Validation data:', len(dataset_test))
    # Network
    #net = DenseNet(121,True,pool)
    # DenseNet-121 backbone with a 5-way grading head (KL grades 0-4,
    # presumably — TODO confirm) behind dropout.
    net = dn.densenet121(pretrained=True)
    net.classifier = nn.Sequential(nn.Dropout(0.4), nn.Linear(1024, 5))
    print(net)
    print('Number of model parameters: {}'.format(
        sum([p.data.nelement() for p in net.parameters()])))
    print('############### Model Finished ####################')
    criterion = nn.CrossEntropyLoss()
    # Load the trained weights; remap to CPU when CUDA is unavailable.
    if USE_CUDA:
        state_dict = torch.load(model_file_path)
        net.load_state_dict(state_dict)
        criterion.cuda()
        net.cuda()
    else:
        state_dict = torch.load(model_file_path, map_location='cpu')
        net.load_state_dict(state_dict)
Exemple #25
0
def testRecord():
    """Run test-time-augmented attribute classification over the test CSV.

    For every attribute type in ``_ATTRIBUTE_INFO``, loads the corresponding
    trained network, averages softmax outputs over ``_TEST_AUGTIMES``
    augmented passes, and appends the per-class probabilities to each test
    row, finally writing ``answer_full_label.csv``.
    """
    rootpath = 'data/Test/'
    csv_path = 'Tests/question.csv'

    # Read the question CSV and remember each image's row index so augmented
    # predictions can be written back to the right output row.
    csv_file = pd.read_csv(rootpath + csv_path, header=None).values
    count = 0
    anno = {}
    outlist = []
    print('Data Preprocessing!')
    for row in tqdm(csv_file):
        singlelist = []
        for unit in row:
            singlelist.append(unit)
        anno[row[0]] = count
        count += 1
        outlist.append(singlelist)
    print('Start Testing!')

    for attribute_type in _ATTRIBUTE_INFO.keys():
        test_dataset = attribute.Attribute(csv_path,
                                           True,
                                           attribute_type,
                                           train=False,
                                           is_full_mode=True,
                                           csv_prefix=rootpath)
        test_loader = DataLoader(dataset=test_dataset,
                                 batch_size=_BATCH_SIZE,
                                 shuffle=False)

        # Build the per-attribute classifier head on top of the backbone.
        if _NETWORK_TYPE == 'resnet-v2':
            Network = resnet.resnet50(pretrained=True, v2=True,
                                      dp_ratio=0.5).cuda()
            Network.fc = torch.nn.Linear(
                2048, _ATTRIBUTE_INFO[attribute_type]).cuda()
        elif _NETWORK_TYPE == 'resnet':
            Network = resnet.resnet50(pretrained=True, v2=False,
                                      dp_ratio=0.5).cuda()
            Network.fc = torch.nn.Linear(
                2048, _ATTRIBUTE_INFO[attribute_type]).cuda()
        elif _NETWORK_TYPE == 'densenet':
            Network = densenet.densenet121(
                pretrained=True,
                num_classes=_ATTRIBUTE_INFO[attribute_type],
                dp_ratio=0.5).cuda()
        else:
            # Fixed: raising a string is a TypeError in Python 3.
            raise ValueError('Unknown network type error!')
        Network.eval()
        # print(resnet.state_dict().keys())
        para_dict = torch.load(_ATTRIBUTE_MODELS[attribute_type] +
                               attribute_type + '/' +
                               str(_ATTRIBUTE_EPOCH[attribute_type]) +
                               '-params.pk1')
        # print('para_dict')
        # print(para_dict.keys())
        Network.load_state_dict(para_dict)

        # Accumulate softmax outputs over _TEST_AUGTIMES augmented passes;
        # attribute_mark[i] records the image path for output row lookup.
        attribute_prob = []
        attribute_mark = []
        for test_num in range(_TEST_AUGTIMES):
            test_count = 0
            print('===>' + attribute_type + ' aug-testing ' +
                  str(test_num + 1))
            for batch_x, batch_y, meta in tqdm(test_loader):
                # NOTE(review): Variable(volatile=True) is a deprecated
                # pre-0.4 API; modern torch ignores it — consider
                # torch.no_grad() when upgrading.
                batch_x = Variable(batch_x, volatile=True).cuda()
                out = Network(batch_x)
                out = torch.nn.functional.softmax(out, dim=1)

                out = out.cpu().data.numpy()

                for i in range(len(out)):
                    if test_count >= len(attribute_prob):
                        attribute_prob.append([])
                        attribute_mark.append(meta['img_path'][i])
                    attribute_prob[test_count].append(out[i])
                    test_count += 1

        # Average the augmented passes and emit 'p1;p2;...;pk' strings.
        for i in range(len(attribute_prob)):
            mean_prob = np.sum(attribute_prob[i], axis=0) / _TEST_AUGTIMES
            resultstr = ''
            for j in range(len(mean_prob)):
                resultstr += '{:.4f};'.format(mean_prob[j])
            resultstr = resultstr[0:len(resultstr) - 1]
            outlist[anno[attribute_mark[i][
                len(rootpath):len(attribute_mark[i])]]].append(resultstr)
            # print(meta)
        print(attribute_type + ' done!')
    out_csv = pd.DataFrame(np.array(outlist))
    out_csv.to_csv('answer_full_label.csv', index=False, header=None)
Exemple #26
0
def main():
    """Run test-time-augmented brand classification and write result.txt.

    Loads the configured backbone, averages softmax outputs over
    ``_TEST_AUGTIMES`` augmented passes of the test set, and writes one
    '<image> <predicted_class>' line per image.
    """
    rootpath = 'data/test/'
    txtpath = 'data/test.txt'
    inp_pre = 556
    inp_res = 512

    print('Start Testing!')
    test_dataset = brand.Brand(txtpath,
                               rootpath,
                               num_classes=_NUM_CLASSES,
                               inp_pre=inp_pre,
                               inp_res=inp_res,
                               train=False,
                               val=False)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=_BATCH_SIZE,
                             shuffle=False)

    if _NETWORK_TYPE == 'resnet-v2':
        Network = resnet.resnet50(pretrained=True, v2=True,
                                  dp_ratio=0.5).cuda()
        # Fixed: original referenced `attribute_type`, which is undefined in
        # this function (guaranteed NameError); use _NUM_CLASSES like the
        # senet branches below.
        Network.fc = torch.nn.Linear(2048, _NUM_CLASSES).cuda()
    elif _NETWORK_TYPE == 'resnet':
        Network = resnet.resnet50(pretrained=True, v2=False,
                                  dp_ratio=0.5).cuda()
        Network.fc = torch.nn.Linear(2048, _NUM_CLASSES).cuda()
    elif _NETWORK_TYPE == 'senet':
        Network = pretrainedmodels.__dict__['se_resnet50'](
            pretrained=False).cuda()
        Network.avg_pool = torch.nn.AvgPool2d(16, stride=1)
        Network.last_linear = torch.nn.Linear(2048, _NUM_CLASSES).cuda()
    elif _NETWORK_TYPE == 'senet154':
        Network = pretrainedmodels.__dict__['senet154']().cuda()
        Network.avg_pool = torch.nn.AvgPool2d(16, stride=1)
        Network.last_linear = torch.nn.Linear(2048, _NUM_CLASSES).cuda()
    elif _NETWORK_TYPE == 'densenet':
        Network = densenet.densenet121(pretrained=False,
                                       num_classes=_NUM_CLASSES).cuda()
    else:
        # Fixed: raising a string is a TypeError in Python 3.
        raise ValueError('Unknown network type error!')
    Network.eval()
    # print(resnet.state_dict().keys())
    para_dict = torch.load(_MODELS_PATH + '/' + str(_EPOCH) + '-params.pk1')
    # print('para_dict')
    # print(para_dict.keys())
    Network.load_state_dict(para_dict)

    # Accumulate softmax outputs across augmented passes; attribute_mark[i]
    # keeps the image path used to label the output row.
    attribute_prob = []
    attribute_mark = []
    for test_num in range(_TEST_AUGTIMES):
        test_count = 0
        print('===> aug-testing ' + str(test_num + 1))
        for batch_x, batch_y, meta in tqdm(test_loader):
            # NOTE(review): Variable(volatile=True) is a deprecated pre-0.4
            # API; consider torch.no_grad() when upgrading.
            batch_x = Variable(batch_x, volatile=True).cuda()
            out = Network(batch_x)
            out = torch.nn.functional.softmax(out, dim=1)

            out = out.cpu().data.numpy()

            for i in range(len(out)):
                if test_count >= len(attribute_prob):
                    attribute_prob.append([])
                    attribute_mark.append(meta['img_path'][i])
                attribute_prob[test_count].append(out[i])
                test_count += 1

    # Average the passes; classes are 1-indexed in the output file.
    results = []
    for i in range(len(attribute_prob)):
        mean_prob = np.sum(attribute_prob[i], axis=0) / _TEST_AUGTIMES
        resultstr = attribute_mark[i][len(rootpath):len(
            attribute_mark[i])] + ' ' + str(np.argmax(mean_prob) + 1)
        results.append(resultstr)
    print('done!')
    # print(meta)
    with open('results/result.txt', 'w') as f:
        for i in range(len(results)):
            f.write(results[i] + '\n')
Exemple #27
0
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils,datasets, models
from PIL import Image
import time
import torch.nn as nn
import torch.optim as optim 
from torch.optim import lr_scheduler
from torch.autograd import Variable
from densenet import densenet121
import torch.nn.functional as F
from sklearn.metrics import roc_curve 
from get_data import train_model

# Script-level setup: DenseNet-121 with a single-output head, trained from
# scratch with Adam and an LR-on-plateau schedule.
use_gpu = torch.cuda.is_available()
#model_ft = models.densenet121(pretrained = False)
model_ft = densenet121(pretrained = False)
#torch.nn.Linear(in_features,out_features,bias = True)
# Replace the classifier with a single-logit head for binary prediction.
num_ftrs = model_ft.classifier.in_features

model_ft.classifier = nn.Linear(num_ftrs,1)

if use_gpu:
    model_ft = model_ft.cuda()



# NOTE(review): nn.BCELoss expects probabilities in [0, 1], but the Linear
# head above has no sigmoid — presumably the sigmoid is applied inside
# train_model; confirm, otherwise BCEWithLogitsLoss would be needed.
criterion = nn.BCELoss()

optimizer_ft = optim.Adam(model_ft.parameters(), lr = 0.001,  betas=(0.9, 0.999))

# Halve-by-10 the LR after 2 epochs without validation-loss improvement.
exp_lr_scheduler = lr_scheduler.ReduceLROnPlateau(optimizer_ft, mode='min', factor=0.1, patience=2,verbose=True)
Exemple #28
0
def main():
    """Train or evaluate a resnet18/densenet121 classifier on an ImageFolder
    dataset, logging progress to a text file and checkpointing the best model.
    """
    global args, best_acc1
    args = parser.parse_args()

    # Open the log file.  When logging is disabled we still need a writable
    # handle because f.write(...) is called unconditionally below — the
    # original code raised NameError in that case; fall back to os.devnull.
    if args.logging:
        filename = 'resnet' if args.resnet else 'densenet'
        filename += 'pretrained' if args.pretrained else ''
        f = open(filename + '.txt', 'w')
    else:
        f = open(os.devnull, 'w')

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    # create model
    if args.resnet:
        print("=> creating resnet model")
        model = resnet18(pretrained=args.pretrained)
        f.write("Model: resnet \n")
    else:
        print("=> creating densenet model")
        model = densenet121(pretrained=args.pretrained)
        f.write("Model: densenet \n")

    if args.gpu:
        model = model.cuda()
        model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.Adam(model.parameters(),
                                 args.lr,
                                 betas=(0.9, 0.999))

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    f.write("File_directory: " + args.data + "\n")
    args.data = os.path.expanduser(args.data)
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    val_dataset = datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    if args.evaluate:
        validate(val_loader, model, criterion, args.gpu, args.print_freq, f)
        return

    # We only reach here in training mode (evaluate returns above).
    mode = 'evaluate' if args.evaluate else 'training'
    f.write("Mode :" + mode + "\n")

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, args.lr, epoch, args.epoch_decay)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args.gpu,
              args.print_freq, f)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args.gpu,
                        args.print_freq, f)

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
            },
            is_best,
            model='resnet18' if args.resnet else 'densenet121')

    f.write("Best accuracy :" + str(best_acc1))
    f.close()
Exemple #29
0
    def __init__(self, base):
        """Combine a CNN feature extractor with auxiliary inputs.

        Args:
            base: backbone module producing a 1024-dim feature vector.
        """
        # Idiom: Python-3 zero-argument super() instead of the dated
        # super(Model, self) form.
        super().__init__()
        self.base = base
        # Normalizes the single-channel auxiliary input (x2).
        self.bn1 = nn.BatchNorm1d(1)
        # 1024 backbone features + 112 extra features -> 34 classes.
        self.classifier = nn.Linear(1024 + 112, 34)

    def forward(self, x1, x2, x3):
        """Fuse backbone image features with two auxiliary feature inputs.

        x1 goes through the backbone, x2 through batch-norm and flattening,
        x3 is concatenated as-is; the classifier maps the fused vector to
        class scores.
        """
        img_feats = self.base(x1)
        aux = self.bn1(x2)
        aux = aux.view(aux.size(0), -1)  # flatten to (batch, -1)
        fused = torch.cat([img_feats, aux, x3], 1)
        return self.classifier(fused)


model = Model(densenet121()).cuda()
testres = []
for foldNum in range(5):
    testData = TestData(subA,
                        'testA',
                        Feat_RR,
                        transform=transforms.Compose([ToTensor()]))
    testloader = DataLoader(testData, batch_size=64, shuffle=False)
    model.load_state_dict(
        torch.load('dense121/weight_best_%s.pt' % str(foldNum)))
    model.cuda()
    model.eval()
    test_outputs = []
    test_fnames = []
    for images, fea, fea2, fnames in testloader:
        preds = torch.sigmoid(
Exemple #30
0
def main():
    resume_ep = opt.r
    train_dir = opt.train_dir
    check_dir = opt.check_dir
    val_dir = opt.val_dir

    bsize = opt.b
    iter_num = opt.e

    label_weight = [4.858, 17.57]
    std = [.229, .224, .225]
    mean = [.485, .456, .406]

    os.system('rm -rf ./runs2/*')
    writer = SummaryWriter('./runs2/' +
                           datetime.now().strftime('%B%d  %H:%M:%S'))

    if not os.path.exists('./runs2'):
        os.mkdir('./runs2')

    if not os.path.exists(check_dir):
        os.mkdir(check_dir)

    # models
    if 'vgg' == opt.i:
        feature = Vgg16(pretrained=True)
    elif 'resnet' == opt.i:
        feature = resnet50(pretrained=True)
    elif 'densenet' == opt.i:
        feature = densenet121(pretrained=True)
    feature.cuda()

    classifier = Classifier(opt.i)
    classifier.cuda()

    if resume_ep >= 0:
        feature_param_file = glob.glob('%s/feature-epoch-%d*.pth' %
                                       (check_dir, resume_ep))
        classifier_param_file = glob.glob('%s/classifier-epoch-%d*.pth' %
                                          (check_dir, resume_ep))
        feature.load_state_dict(torch.load(feature_param_file[0]))
        classifier.load_state_dict(torch.load(classifier_param_file[0]))

    train_loader = torch.utils.data.DataLoader(MyClsData(train_dir,
                                                         transform=True,
                                                         crop=True,
                                                         hflip=True,
                                                         vflip=False),
                                               batch_size=bsize,
                                               shuffle=True,
                                               num_workers=4,
                                               pin_memory=True)

    val_loader = torch.utils.data.DataLoader(MyClsTestData(val_dir,
                                                           transform=True),
                                             batch_size=1,
                                             shuffle=True,
                                             num_workers=4,
                                             pin_memory=True)

    criterion = nn.CrossEntropyLoss(weight=torch.FloatTensor(label_weight))
    criterion.cuda()

    optimizer_classifier = torch.optim.Adam(classifier.parameters(), lr=1e-3)
    optimizer_feature = torch.optim.Adam(feature.parameters(), lr=1e-4)

    acc = 0.0
    for it in range(resume_ep + 1, iter_num):
        for ib, (data, lbl) in enumerate(train_loader):
            inputs = Variable(data.float()).cuda()
            lbl = Variable(lbl.long()).cuda()
            feats = feature(inputs)

            output = classifier(feats)
            loss = criterion(output, lbl)

            classifier.zero_grad()
            feature.zero_grad()

            loss.backward()

            optimizer_feature.step()
            optimizer_classifier.step()
            if ib % 20 == 0:
                # image = make_image_grid(inputs.data[:4, :3], mean, std)
                # writer.add_image('Image', torchvision.utils.make_grid(image), ib)
                writer.add_scalar('M_global', loss.data[0], ib)
            print('loss: %.4f (epoch: %d, step: %d), acc: %.4f' %
                  (loss.data[0], it, ib, acc))
            del inputs, lbl, loss, feats
            gc.collect()
        new_acc = eval_acc(feature, classifier, val_loader)
        if new_acc > acc:
            filename = ('%s/classifier-epoch-%d-step-%d.pth' %
                        (check_dir, it, ib))
            torch.save(classifier.state_dict(), filename)
            filename = ('%s/feature-epoch-%d-step-%d.pth' %
                        (check_dir, it, ib))
            torch.save(feature.state_dict(), filename)
            print('save: (epoch: %d, step: %d)' % (it, ib))
            acc = new_acc