Example #1
def main():

    args = parser.parse_args()
    if args.test:
        test_dataloader = cifar10_dataloader(train=False,
                                             batch_size=4,
                                             shuffle=True,
                                             num_workers=2)
        # model = cfn._cifarnet(pretrained=args.test, path=MODEL_PATH).to(device)
        # model = densnet(growth_rate=32, num_classes=10).to(device)
        model = resnet50(num_classes=10).to(device)
        state_dict = torch.load(MODEL_PATH)
        model.load_state_dict(state_dict)
        test(test_dataloader, model, args.show_data)
        return

    if args.show_data:
        dataloader = cifar10_dataloader(train=False,
                                        batch_size=4,
                                        shuffle=True,
                                        num_workers=0)
        show_data(dataloader)
        return

    train_dataloader = cifar10_dataloader(train=True,
                                          batch_size=4,
                                          shuffle=True,
                                          num_workers=2)
    val_dataloader = cifar10_dataloader(train=False,
                                        batch_size=4,
                                        shuffle=True,
                                        num_workers=0)

    # model = cfn._cifarnet().to(device)
    # model = densnet(growth_rate=32, num_classes=10).to(device)
    model = resnet50(num_classes=10).to(device)
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = optim.SGD(model.parameters(),
                          lr=0.1,
                          momentum=0.9,
                          weight_decay=0.0001)

    best_loss = 10.0
    for epoch in range(10):
        epoch_loss = train(train_dataloader, model, criterion, optimizer,
                           epoch)
        print('[train][epoch %d] loss: %.5f' % (epoch, epoch_loss))
        val(val_dataloader, model, criterion, epoch)
        if best_loss > epoch_loss:
            best_loss = epoch_loss
            torch.save(model.state_dict(), MODEL_PATH)
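`cifar10_dataloader` is a project helper not shown above; a minimal sketch of what such a helper typically wraps (the `root` path and normalization constants are assumptions):

import torch
import torchvision
import torchvision.transforms as transforms

def cifar10_dataloader(train, batch_size, shuffle, num_workers):
    # assumed transform; the project's actual augmentation may differ
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465),
                             (0.2470, 0.2435, 0.2616)),
    ])
    dataset = torchvision.datasets.CIFAR10(root='./data', train=train,
                                           download=True, transform=transform)
    return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
                                       shuffle=shuffle, num_workers=num_workers)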
Example #2
def getblock_max(cnt, name):
    if cnt == 0:
        return 0

    model = ""
    block_max = 0
    blockname = "BasicBlock"
    sys.path.append("input")

    if "resnet18" in name:
        from ResNet import resnet18
        model = resnet18()
    elif "resnet34" in name:
        from ResNet import resnet34
        model = resnet34()
    elif "resnet50" in name:
        from ResNet import resnet50
        model = resnet50()
        blockname = "Bottleneck"
    modeltxt = f'.debug/{name}.txt'
    os.makedirs('.debug', exist_ok=True)  # make sure the debug directory exists
    with open(modeltxt, 'w') as fmd:
        fmd.write(str(model))
    with open(modeltxt, 'r') as frd:
        for line in frd:
            line = line.strip()
            if line.startswith(f'(layer{cnt}): Sequential('):
                for line in frd:
                    line = line.strip()
                    if f"): {blockname}(" in line:
                        block_max = line.split(')')[0].split('(')[1]
                    elif line.startswith(f'(layer{cnt+1}): Sequential(') or "quant_fc" in line:
                        return int(block_max)+1
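
Parsing the printed module tree is brittle. Assuming the ResNet implementations expose `layer1`..`layer4` as `nn.Sequential` (as torchvision-style models do), the block count can be read directly; a sketch:

def getblock_max_direct(cnt, model):
    if cnt == 0:
        return 0
    # each layerN is an nn.Sequential of BasicBlock/Bottleneck modules
    return len(getattr(model, f'layer{cnt}'))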
Example #3
    def __init__(self, num_classes):
        super(ResNet50Baseline, self).__init__()
        # Use pretrained ResNet50 as baseline
        baseline = resnet50(pretrained=True)
        # Modify the average pooling layer
        baseline.avgpool = nn.AdaptiveAvgPool2d((1, 1))

        self.model = baseline
        self.classifier = NewBlock(2048, num_classes)
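
`NewBlock` is defined elsewhere in the project; as a hypothetical stand-in, a minimal classification head over the 2048-dim ResNet-50 features could look like this:

class NewBlock(nn.Module):
    # hypothetical stand-in; the project's real NewBlock may add BN/dropout
    def __init__(self, in_features, num_classes):
        super(NewBlock, self).__init__()
        self.fc = nn.Linear(in_features, num_classes)

    def forward(self, x):
        return self.fc(x)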
Example #4
def build_model(model_name, num_classes, pretrained=True):
    if model_name == 'ResNet-50':
        net = resnet50(num_classes=num_classes, pretrained=pretrained)
    elif model_name == 'ResNet-152':
        net = resnet152(num_classes=num_classes, pretrained=pretrained)
    elif model_name == 'ResNet-101':
        net = resnet101(num_classes=num_classes, pretrained=pretrained)
    else:
        # falling through would leave `net` unbound; fail explicitly
        raise ValueError('unknown model name: {}'.format(model_name))
    return net
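
A table-driven variant avoids growing the if/elif chain; a sketch under the same names:

_BUILDERS = {
    'ResNet-50': resnet50,
    'ResNet-101': resnet101,
    'ResNet-152': resnet152,
}

def build_model(model_name, num_classes, pretrained=True):
    try:
        builder = _BUILDERS[model_name]
    except KeyError:
        raise ValueError('unknown model name: {}'.format(model_name))
    return builder(num_classes=num_classes, pretrained=pretrained)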
Example #5
    def __init__(self):
        super(Mymodel, self).__init__()
        use_layers = {'used_layers': [2, 3, 4]}
        self.backbone = resnet50(**use_layers)

        pretrained_resnet50 = torch.load('pretrained_resnet50.pt')
        assert len(self.backbone.state_dict()) == len(pretrained_resnet50)
        # Map the pretrained tensors onto the backbone's keys by position.
        # Assigning into the dict returned by state_dict() is a no-op (each
        # call returns a fresh copy), so build a new dict and load it instead.
        pretrained_weights = list(pretrained_resnet50.values())
        new_state = {key: pretrained_weights[i]
                     for i, key in enumerate(self.backbone.state_dict())}
        self.backbone.load_state_dict(new_state)
        for param in self.backbone.parameters():
            param.requires_grad = False
        # Adjust all layers to 256
        self.adj = AdjustAllLayer([512, 1024, 2048], [256, 256, 256])
        self.head = CARHead(in_channels=256)
Example #6
				nn.BatchNorm2d(dim_out, momentum=bn_mom),
				nn.ReLU(inplace=True),		
		)

	def forward(self, x):
		[b,c,row,col] = x.size()
		conv1x1 = self.branch1(x)
		conv3x3_1 = self.branch2(x)
		conv3x3_2 = self.branch3(x)
		conv3x3_3 = self.branch4(x)
		global_feature = torch.mean(x,2,True)
		global_feature = torch.mean(global_feature,3,True)
		global_feature = self.branch5_conv(global_feature)
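		# BatchNorm cannot normalize a single 1x1 sample in training mode,
		# so the BN branch is skipped when the batch size is 1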
		if b != 1:
			global_feature = self.branch5_bn(global_feature)
		global_feature = self.branch5_relu(global_feature)
		global_feature = F.interpolate(global_feature, (row,col), None, 'bilinear', True)
		feature_cat = torch.cat([conv1x1, conv3x3_1, conv3x3_2, conv3x3_3, global_feature], dim=1)
		result = self.conv_cat(feature_cat)
		return result

if __name__ == '__main__':
	from ResNet import resnet50
	model1 = nn.Sequential(*[
		resnet50(),
		ASPP(2048, 1, bn_mom=None)
	])
	x = torch.randn([1,3,320,320])
	y1 = model1(x)
	print(y1.shape)
Example #7
def main():
    global args, best_prec1
    args = parser.parse_args()

    print('img_dir:', args.img_dir)
    print('end2end?:', args.end2end)

    # load data and prepare dataset
    train_list_file = '../../data/msceleb/train_list.txt'
    train_label_file = '../../data/msceleb/train_label.txt'
    caffe_crop = CaffeCrop('train')
    train_dataset = MsCelebDataset(
        args.img_dir, train_list_file, train_label_file,
        transforms.Compose([caffe_crop, transforms.ToTensor()]))
    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               num_workers=args.workers,
                                               pin_memory=True)

    caffe_crop = CaffeCrop('test')
    val_list_file = '../../data/msceleb/test_list.txt'
    val_label_file = '../../data/msceleb/test_label.txt'
    val_dataset = MsCelebDataset(
        args.img_dir, val_list_file, val_label_file,
        transforms.Compose([caffe_crop, transforms.ToTensor()]))
    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=args.batch_size,
                                             shuffle=False,
                                             num_workers=args.workers,
                                             pin_memory=True)

    assert (train_dataset.max_label == val_dataset.max_label)
    class_num = train_dataset.max_label + 1

    print('class_num: ', class_num)

    # prepare model
    model = None
    assert (args.arch in ['resnet18', 'resnet50', 'resnet101'])
    if args.arch == 'resnet18':
        model = resnet18(pretrained=False,
                         num_classes=class_num,
                         end2end=args.end2end)
    if args.arch == 'resnet50':
        model = resnet50(pretrained=False,
                         num_classes=class_num,
                         end2end=args.end2end)
    if args.arch == 'resnet101':
        model = resnet101(pretrained=False,
                          num_classes=class_num,
                          end2end=args.end2end)
    model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.pretrained:
        checkpoint = torch.load(args.pretrained)
        pretrained_state_dict = checkpoint['state_dict']
        model_state_dict = model.state_dict()

        for key in pretrained_state_dict:
            model_state_dict[key] = pretrained_state_dict[key]
        model.load_state_dict(model_state_dict)

    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    if args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(args.start_epoch, args.epochs):
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
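
`save_checkpoint` and `adjust_learning_rate` are defined elsewhere; the conventional `save_checkpoint` from the PyTorch ImageNet example, which this script appears to follow, is roughly (filenames are assumptions):

import shutil

def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    torch.save(state, filename)  # always persist the latest epoch
    if is_best:
        # keep a separate copy of the best model so far
        shutil.copyfile(filename, 'model_best.pth.tar')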
Example #8
img_path = sys.argv[2]
pt_path = sys.argv[3]
output_dir = sys.argv[4]

model = vgg()
if sys.argv[1] == "vggnet":
    model = vgg(num_classes=1000)
elif sys.argv[1] == "resnet18":
    from ResNet import resnet18
    model = resnet18()
elif sys.argv[1] == "resnet34":
    from ResNet import resnet34
    model = resnet34()
elif sys.argv[1] == "resnet50":
    from ResNet import resnet50
    model = resnet50()

# TODO: merge this into vgg.py
# add a quantization scale buffer to every conv/linear module so it
# shows up in the state dict
for m in model.modules():
    if isinstance(m, (torch.nn.Conv2d, torch.nn.Linear)):
        m.register_buffer('scale', torch.tensor(0.0))
    # if isinstance(m, torch.nn.BatchNorm2d):
        # m.register_buffer(f'bn_k', torch.zeros_like(m.weight.data))
        # m.register_buffer(f'bn_b', torch.zeros_like(m.weight.data))

# Load checkpoint
prefix = 'model.'
state_dict = torch.load(pt_path, map_location=torch.device('cpu'))['state_dict']
if any(k.startswith(prefix) for k in state_dict.keys()):
    state_dict = {
        k[len(prefix):]: v  # strip the 'model.' prefix
        for k, v in state_dict.items()
    }
Example #9
])

# Download the CIFAR-10 training set
train_dataset = datasets.CIFAR10(root='~/data/resnet_data',
                                 train=True,
                                 transform=transforms.ToTensor(),
                                 download=True)

test_dataset = datasets.CIFAR10(root='~/data/resnet_data',
                                train=False,
                                transform=transform_test)

train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)

model = resnet50(pretrained=False)

# model = lenet.Cnn(1, 10)  # images are 28x28
# model.load_state_dict(torch.load('lenet.pth'))

# use_gpu = torch.cuda.is_available()  # check whether GPU acceleration is available
# if use_gpu:
#     model = model.cuda()

# Use the GPU if one is available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)

with torch.no_grad():
    correct = 0
    total = 0
    for data in test_loader:
        images, labels = data
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()

print('Accuracy on the test set: %.2f %%' % (100.0 * correct / total))
Example #10
def extract_feat(arch, model_path, yaw_type):
    global args, best_prec1
    args = parser.parse_args()

    if arch.find('end2end') >= 0:
        end2end = True
    else:
        end2end = False

    arch = arch.split('_')[0]

    class_num = 87020
    #class_num = 13386

    model = None
    assert (arch in ['resnet18', 'resnet50', 'resnet101'])
    if arch == 'resnet18':
        model = resnet18(pretrained=False, num_classes=class_num, \
                extract_feature=True, end2end=end2end)
    if arch == 'resnet50':
        model = resnet50(pretrained=False, num_classes=class_num, \
                extract_feature=True, end2end=end2end)
    if arch == 'resnet101':
        model = resnet101(pretrained=False, num_classes=class_num, \
                extract_feature=True, end2end=end2end)

    model = torch.nn.DataParallel(model).cuda()
    model.eval()

    assert (os.path.isfile(model_path))
    checkpoint = torch.load(model_path)
    pretrained_state_dict = checkpoint['state_dict']
    model_state_dict = model.state_dict()
    for key in pretrained_state_dict:
        if key in model_state_dict:
            model_state_dict[key] = pretrained_state_dict[key]
    model.load_state_dict(model_state_dict)

    print('finished loading the trained model')

    caffe_crop = CaffeCrop('test')

    infos = [('../../data/IJBA/align_image_11', 'ijb_a_11_align_split',
              'frame'),
             ('../../data/IJBA/align_image_11', 'ijb_a_11_align_split', 'img'),
             ('../../data/IJBA/align_image_1N', 'split', 'gallery'),
             ('../../data/IJBA/align_image_1N', 'split', 'probe')]

    for root_dir, sub_dir, img_type in infos:

        for split in range(1, 11):
            split_dir = os.path.join(root_dir, sub_dir + str(split))
            img_dir = os.path.join(split_dir, img_type)
            img_list_file = os.path.join(
                split_dir, '{}_list_{}.txt'.format(img_type, yaw_type))

            img_dataset = CFPDataset(
                args.img_dir, img_list_file,
                transforms.Compose([caffe_crop,
                                    transforms.ToTensor()]))
            img_loader = torch.utils.data.DataLoader(
                img_dataset,
                batch_size=args.batch_size,
                shuffle=False,
                num_workers=args.workers,
                pin_memory=True)

            data_num = len(img_dataset)
            img_feat_file = os.path.join(
                split_dir, '{}_{}_feat.bin'.format(arch, img_type))
            feat_dim = 256
            with open(img_feat_file, 'wb') as bin_f:
                bin_f.write(st.pack('ii', data_num, feat_dim))
                for i, (input, yaw) in enumerate(img_loader):
                    yaw = yaw.float().cuda(non_blocking=True)  # `async=` is a SyntaxError in modern Python
                    # Variable(..., volatile=True) is deprecated; disable autograd instead
                    with torch.no_grad():
                        output = model(input, yaw)
                    output_data = output.cpu().numpy()
                    feat_num = output.size(0)

                    for j in range(feat_num):
                        bin_f.write(
                            st.pack('f' * feat_dim, *tuple(output_data[j, :])))

            print('finished {} split {}'.format(img_type, split))
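
For reference, the binary feature files written above (an 'ii' header of count and dimension, followed by float32 rows) can be read back with a matching sketch:

import struct as st
import numpy as np

def read_feat_bin(path):
    with open(path, 'rb') as f:
        data_num, feat_dim = st.unpack('ii', f.read(8))  # 2 x 4-byte ints
        feats = np.frombuffer(f.read(4 * data_num * feat_dim),
                              dtype=np.float32)
    return feats.reshape(data_num, feat_dim)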
Example #11
def extract_feat(arch, resume):
    global args, best_prec1
    args = parser.parse_args()

    if arch.find('end2end') >= 0:
        end2end = True
    else:
        end2end = False

    arch = arch.split('_')[0]
    dataset = '/home/u0060/Datasets/cfp-align/'

    # load data and prepare dataset
    frontal_list_file = 'cfp_protocol/protocol/frontal_list_nonli.txt'
    caffe_crop = CaffeCrop('test')
    frontal_dataset = CFPDataset(
        dataset, frontal_list_file,
        transforms.Compose([caffe_crop, transforms.ToTensor()]))
    frontal_loader = torch.utils.data.DataLoader(frontal_dataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=args.workers,
                                                 pin_memory=True)

    caffe_crop = CaffeCrop('test')
    profile_list_file = 'cfp_protocol/profile_list_nonli.txt'
    profile_dataset = CFPDataset(
        dataset, profile_list_file,
        transforms.Compose([caffe_crop, transforms.ToTensor()]))
    profile_loader = torch.utils.data.DataLoader(profile_dataset,
                                                 batch_size=args.batch_size,
                                                 shuffle=False,
                                                 num_workers=args.workers,
                                                 pin_memory=True)

    class_num = 13386

    model = None
    assert (arch in ['resnet18', 'resnet50', 'resnet101'])
    if arch == 'resnet18':
        model = resnet18(pretrained=False,
                         num_classes=class_num,
                         extract_feature=True,
                         end2end=end2end)
    if arch == 'resnet50':
        model = resnet50(pretrained=False,
                         num_classes=class_num,
                         extract_feature=True,
                         end2end=end2end)
    if arch == 'resnet101':
        model = resnet101(pretrained=False,
                          num_classes=class_num,
                          extract_feature=True,
                          end2end=end2end)

    model = torch.nn.DataParallel(model).cuda()
    model.eval()

    assert (os.path.isfile(resume))
    checkpoint = torch.load(resume)
    model.load_state_dict(checkpoint['state_dict'])

    cudnn.benchmark = True

    data_num = len(frontal_dataset)
    frontal_feat_file = './frontal_feat.bin'
    feat_dim = 256
    with open(frontal_feat_file, 'wb') as bin_f:
        bin_f.write(st.pack('ii', data_num, feat_dim))
        for i, (input, yaw) in enumerate(frontal_loader):
            yaw = yaw.float().cuda(non_blocking=True)  # `async=` is a SyntaxError in modern Python
            # Variable(..., volatile=True) is deprecated; disable autograd instead
            with torch.no_grad():
                output = model(input, yaw)
            output_data = output.cpu().numpy()
            feat_num = output.size(0)

            for j in range(feat_num):
                bin_f.write(st.pack('f' * feat_dim, *tuple(output_data[j, :])))

    data_num = len(profile_dataset.imgs)
    profile_feat_file = './profile_feat.bin'
    with open(profile_feat_file, 'wb') as bin_f:
        bin_f.write(st.pack('ii', data_num, feat_dim))
        for i, (input, yaw) in enumerate(profile_loader):
            yaw = yaw.float().cuda(non_blocking=True)  # `async=` is a SyntaxError in modern Python
            # Variable(..., volatile=True) is deprecated; disable autograd instead
            with torch.no_grad():
                output = model(input, yaw)
            output_data = output.cpu().numpy()
            feat_num = output.size(0)

            for j in range(feat_num):
                bin_f.write(st.pack('f' * feat_dim, *tuple(output_data[j, :])))
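
The frontal and profile loops above are identical except for the loader and output path; a small helper (a sketch, using the same `st` alias and model-call convention) removes the duplication:

def dump_feats(model, loader, data_num, out_path, feat_dim=256):
    with open(out_path, 'wb') as bin_f:
        bin_f.write(st.pack('ii', data_num, feat_dim))
        for input, yaw in loader:
            yaw = yaw.float().cuda(non_blocking=True)
            with torch.no_grad():
                output = model(input, yaw)
            for row in output.cpu().numpy():
                bin_f.write(st.pack('f' * feat_dim, *row))

# dump_feats(model, frontal_loader, len(frontal_dataset), './frontal_feat.bin')
# dump_feats(model, profile_loader, len(profile_dataset.imgs), './profile_feat.bin')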