import os

import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torchvision.transforms as transforms
from PIL import Image

# SSD300 and SSDBoxCoder are assumed to come from the torchcv-style SSD
# package this project is built on; adjust the import to the local layout.
from torchcv.models.ssd import SSD300, SSDBoxCoder

print('Loading model..')
net = SSD300(num_classes=2)

# Load the checkpoint on CPU first; the model is moved to the right device below.
checkpoint = torch.load('./example/ssd+/checkpoint/ckpt.pth', map_location='cpu')
net.load_state_dict(checkpoint['net'])

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)
# net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))
cudnn.benchmark = True

net.eval()
box_coder = SSDBoxCoder(net)

# Test-time preprocessing: ToTensor plus per-channel normalization
# (dataset-specific mean/std, presumably from the Caltech training images).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4140, 0.4265, 0.4172), (0.2646, 0.2683, 0.2751))
])

file_path = '/home/zwj/project/data/caltech/ImageSets/Main/test.txt'
data_dir = '/home/zwj/project/data/caltech/JPEGImages/'
result_dir = './results/'
with open(file_path) as f:
    lines = f.readlines()

al = len(lines)    # total number of test images
file_content = {}  # per-image results accumulated during inference
for num, line in enumerate(lines):
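    # Sketch of the per-image inference step (assumed, not from the original
    # script); the decode() call follows the torchcv-style SSDBoxCoder API and
    # the result handling below is illustrative only.
    img_id = line.strip()
    img = Image.open(os.path.join(data_dir, img_id + '.jpg')).convert('RGB')
    w, h = img.size
    x = transform(img.resize((300, 300))).unsqueeze(0).to(device)
    with torch.no_grad():
        loc_preds, cls_preds = net(x)
    boxes, labels, scores = box_coder.decode(
        loc_preds.squeeze().cpu(),
        F.softmax(cls_preds.squeeze(), dim=1).cpu())
    # Scale boxes from the 300x300 network input back to the original image size
    # and collect them per image; writing them to result_dir in the project's
    # expected format is left to the original script.
    boxes = boxes * torch.tensor([w / 300., h / 300., w / 300., h / 300.])
    file_content[img_id] = (boxes, labels, scores)
    print('%d/%d' % (num + 1, al))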
Example #2
}
# Load the pre-processed pretrained weights (d_proc) into the backbone feature
# layers; strict=False so missing/unexpected keys are ignored.
net.extractor.features.layers.load_state_dict(d_proc, strict=False)

# net.load_state_dict(torch.load(args.model))

best_loss = float('inf')
start_epoch = 0
if args.resume:
    print('==> Resuming from checkpoint..')
    checkpoint = torch.load(args.checkpoint)
    net.load_state_dict(checkpoint['net'])
    best_loss = checkpoint['loss']
    start_epoch = checkpoint['epoch']

print('==> Preparing dataset..')
box_coder = SSDBoxCoder(net)
img_size = 300


def transform_train(img, boxes, labels):
    img = random_distort(img)
    # if random.random() < 0.5:
    #     img, boxes = random_paste(img, boxes, max_ratio=4, fill=(123, 116, 103))
    img, boxes, labels = random_crop(img, boxes, labels)
    img, boxes = resize(img,
                        boxes,
                        size=(img_size, img_size),
                        random_interpolation=True)
    img, boxes = random_flip(img, boxes)
    img = transforms.Compose([
        transforms.ToTensor(),
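        # Assumed completion: normalization statistics reused from the test
        # script above, and box_coder.encode() as in the torchcv-style API.
        transforms.Normalize((0.4140, 0.4265, 0.4172),
                             (0.2646, 0.2683, 0.2751))
    ])(img)
    boxes, labels = box_coder.encode(boxes, labels)
    return img, boxes, labels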