class TrainDataset(chainer.dataset.DatasetMixin):
    """Concatenation of several detection datasets with SSD target encoding.

    Each example is preprocessed/augmented for training and its ground-truth
    boxes are encoded into multibox (loc, conf) targets.

    Args:
        datasets: Sequence of datasets, each providing ``__len__``,
            ``image(i)`` and ``annotations(i)`` accessors.
        model: SSD model; only ``insize`` and ``mean`` are read here, and the
            model itself is handed to :class:`MultiBoxEncoder`.
    """

    def __init__(self, datasets, model):
        self.datasets = datasets
        self.insize = model.insize
        self.mean = model.mean
        self.encoder = MultiBoxEncoder(model)

    def __len__(self):
        # Total size is the sum of the concatenated datasets' sizes.
        return sum(map(len, self.datasets))

    def get_example(self, i):
        """Return ``(image, loc, conf)`` training targets for global index *i*.

        Raises:
            IndexError: If *i* is outside the concatenated range.
        """
        for dataset in self.datasets:
            if i >= len(dataset):
                # Index belongs to a later dataset; shift it and move on.
                i -= len(dataset)
                continue
            image = dataset.image(i)
            boxes, labels = dataset.annotations(i)
            image, boxes, labels = preproc_for_train(
                image, boxes, labels, self.insize, self.mean)
            loc, conf = self.encoder.encode(boxes, labels)
            return image, loc, conf
        # BUG FIX: the original fell off the loop and implicitly returned
        # None for an out-of-range index; raise like a sequence instead.
        raise IndexError(
            'index {} is out of range for {} examples'.format(i, len(self)))
def __init__(self, datasets, model):
    """Keep the source datasets and cache the model's preprocessing
    parameters (input size and pixel mean) plus a multibox encoder."""
    self.insize = model.insize
    self.mean = model.mean
    self.datasets = datasets
    self.encoder = MultiBoxEncoder(model)
# --- Command-line setup for the SSD evaluation script ---
# NOTE(review): 'parser' and the 'root', 'batchsize', 'output' arguments are
# defined outside this chunk — confirm they exist earlier in the file.
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--arch', choices=('300', '512'), default='300')
parser.add_argument('model')
parser.add_argument('test')
args = parser.parse_args()

# Build the SSD variant chosen on the command line (20 = VOC class count).
# argparse 'choices' guarantees one of the two branches is taken.
if args.arch == '300':
    model = SSD300(20)
elif args.arch == '512':
    model = SSD512(20)
# Load the pretrained weights before any device transfer.
serializers.load_npz(args.model, model)

if args.gpu >= 0:
    # Select the GPU device and move the model onto it.
    chainer.cuda.get_device(args.gpu).use()
    model.to_gpu()

multibox_encoder = MultiBoxEncoder(model)

# 'test' is expected as '<year>-<subset>', e.g. '2007-test'.
year, subset = args.test.split('-')
dataset = TestDataset(VOCDataset(args.root, year, subset), model)
# Sequential, single-pass iteration for evaluation.
iterator = iterators.SerialIterator(
    dataset, args.batchsize, repeat=False, shuffle=False)

os.makedirs(args.output, exist_ok=True)
# One VOC-format detection result file per class label; presumably closed
# later in the script — TODO confirm.
files = [
    open(
        os.path.join(
            args.output, 'comp4_det_test_{:s}.txt'.format(label)),
        mode='w')
    for label in VOCDataset.labels]