Example #1
    def __init__(self, args, **kwargs):
        super(DecTrainer, self).__init__(args, **kwargs)

        # dataloader
        self.trainloader = get_dataloader(args, cfg, 'train')
        # self.trainloader_val = get_dataloader(args, cfg, 'train_voc')
        self.valloader = get_dataloader(args, cfg, 'val')
        self.denorm = self.trainloader.dataset.denorm
        self.use_triplet = args.use_triplet
        self.loss_3d = args.loss_3d
        self.normalize_feature = args.normalize_feature

        self.nclass = get_num_classes(args)
        self.classNames = get_class_names(args)
        assert self.nclass == len(self.classNames) - 1

        self.classIndex = {}
        for i, cname in enumerate(self.classNames):
            self.classIndex[cname] = i

        # model
        self.enc = get_model(cfg.NET, num_classes=self.nclass)
        self.criterion_cls = get_criterion(cfg.NET.LOSS)

        # optimizer using different LR
        enc_params = self.enc.parameter_groups(cfg.NET.LR, cfg.NET.WEIGHT_DECAY)
        self.optim_enc = self.get_optim(enc_params, cfg.NET)

        # checkpoint management
        self._define_checkpoint('enc', self.enc, self.optim_enc)
        self._load_checkpoint(args.resume)

        self.fixed_batch = None
        self.fixed_batch_path = args.fixed_batch_path
        if os.path.isfile(self.fixed_batch_path):
            print("Loading fixed batch from {}".format(self.fixed_batch_path))
            self.fixed_batch = torch.load(self.fixed_batch_path)

        # using cuda
        if cfg.NUM_GPUS != 0:
            self.enc = nn.DataParallel(self.enc)
            self.criterion_cls = nn.DataParallel(self.criterion_cls)
            self.enc = self.enc.cuda()
            self.criterion_cls = self.criterion_cls.cuda()

        # CHANGE: visual
        self.visual_times = 0
        self.dataset = args.dataset.lower()
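The trainer above only loads a previously cached fixed batch from args.fixed_batch_path. A minimal sketch of how such a batch could be cached in the first place follows; the helper name _cache_fixed_batch and the use of next(iter(...)) are assumptions and not part of the original trainer, only self.trainloader, self.fixed_batch and self.fixed_batch_path come from the code above:

    def _cache_fixed_batch(self):
        # Grab one batch from the training loader and persist it, so that
        # later runs reload exactly the same samples for visualization.
        if self.fixed_batch is None:
            self.fixed_batch = next(iter(self.trainloader))
            torch.save(self.fixed_batch, self.fixed_batch_path)
            print("Saved fixed batch to {}".format(self.fixed_batch_path))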
Example #2
    def __init__(self, prior_boxes, dataset_name='VOC2007',
                 box_scale_factors=[.1, .1, .2, .2],
                 background_index=0, lower_probability_threshold=.1,
                 iou_threshold=.2, class_names=None):

        self.prior_boxes = prior_boxes
        self.box_scale_factors = box_scale_factors
        self.background_index = background_index
        self.iou_threshold = iou_threshold
        self.lower_probability_threshold = lower_probability_threshold
        self.class_names = class_names
        if self.class_names is None:
            self.class_names = get_class_names(dataset_name)
        self.num_classes = len(self.class_names)
        # one distinct color per class for drawing boxes (scaled to 0-255)
        self.colors = plt.cm.hsv(np.linspace(0, 1, self.num_classes)).tolist()
        self.colors = np.asarray(self.colors) * 255
        # map class indices back to class names
        self.arg_to_class = dict(zip(list(range(self.num_classes)),
                                     self.class_names))
        self.font = cv2.FONT_HERSHEY_SIMPLEX
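Example #2 only shows the constructor; a minimal usage sketch is given below. The class name BoxVisualizer is an assumption (the original snippet does not show it), and create_prior_boxes / get_class_names are used as in Example #4:

prior_boxes = create_prior_boxes()
visualizer = BoxVisualizer(prior_boxes, dataset_name='VOC2007',
                           lower_probability_threshold=.1,
                           iou_threshold=.2)
# index-to-name mapping built in the constructor, e.g. 0 -> 'background'
print(visualizer.num_classes)
print(visualizer.arg_to_class)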
Example #3
    def __init__(self, args, **kwargs):
        super(DecTrainer, self).__init__(args, **kwargs)

        # dataloader
        self.trainloader = get_dataloader(args, cfg, 'train')
        self.trainloader_val = get_dataloader(args, cfg, 'train_voc')
        self.valloader = get_dataloader(args, cfg, 'val')
        self.denorm = self.trainloader.dataset.denorm

        self.nclass = get_num_classes(args)
        self.classNames = get_class_names(args)[:-1]
        assert self.nclass == len(self.classNames)

        self.classIndex = {}
        for i, cname in enumerate(self.classNames):
            self.classIndex[cname] = i

        # model
        self.enc = get_model(cfg.GENERATOR, num_classes=self.nclass)
        self.criterion_cls = get_criterion(cfg.GENERATOR.LOSS)
        print(self.enc)

        # optimizer using different LR
        enc_params = self.enc.parameter_groups(cfg.GENERATOR.LR, cfg.GENERATOR.WEIGHT_DECAY)
        self.optim_enc = self.get_optim(enc_params, cfg.GENERATOR)

        # checkpoint management
        self._define_checkpoint('enc', self.enc, self.optim_enc)
        self._load_checkpoint(args.resume)

        self.fixed_batch = None
        self.fixed_batch_path = args.fixed_batch_path
        if os.path.isfile(self.fixed_batch_path):
            print("Loading fixed batch from {}".format(self.fixed_batch_path))
            self.fixed_batch = torch.load(self.fixed_batch_path)

        # using cuda
        self.enc = nn.DataParallel(self.enc).cuda()
        self.criterion_cls = nn.DataParallel(self.criterion_cls).cuda()
Example #4
image_prefix = '../datasets/VOCdevkit/VOC2007/JPEGImages/'
weights_path = '../trained_models/SSD300_weights.hdf5'
model = SSD300(weights_path=weights_path)

prior_boxes = create_prior_boxes()
input_shape = model.input_shape[1:3]
class_threshold = .1
iou_nms_threshold = .45
iou_threshold = .5
num_classes = 21

image_prefix = '../datasets/VOCdevkit/VOC2007/JPEGImages/'
with_difficult_objects = False
split = 'test'
dataset_name = 'VOC2007'  # needed by get_class_names() below

class_names = get_class_names(dataset_name)
class_names = class_names[1:]  # drop the 'background' entry
average_precisions = []
for class_name in class_names:
    selected_classes = ['background'] + [class_name]
    data_manager = DataManager(dataset_name, split, selected_classes,
                               with_difficult_objects)
    ground_truth_data = data_manager.load_data()
    difficult_data_flags = data_manager.parser.difficult_objects
    scores = []
    labels = []
    num_gt_boxes = 0
    for image_name, gt_sample in tqdm(ground_truth_data.items()):
        image_path = image_prefix + image_name
        reference_size = get_image_size(image_path)
        detections = infer_from_path(image_path, model, prior_boxes)


# parameters
trained_weights_path = '../trained_models/ssd_300_VOC0712.pth'
input_size = 300
num_classes = 21
iou_threshold = .5
lower_probability_threshold = .01
background_index = 0
dataset_name = 'VOC2007'
data_prefix = '../datasets/VOCdevkit/VOC2007/Annotations/'
image_prefix = '../datasets/VOCdevkit/VOC2007/JPEGImages/'
selected_classes = get_class_names(dataset_name)
prior_boxes = create_prior_boxes()

# loading pytorch model
pytorch_ssd = build_ssd('test', input_size, num_classes)
pytorch_ssd.load_weights(trained_weights_path)

# loading keras model
weights_path = '../trained_models/SSD300_weights.hdf5'
with tf.device('/cpu:0'):
    model = SSD300(weights_path=weights_path)

split = 'train'
data_manager = DataManager(dataset_name, split, selected_classes)

ground_truth_data = data_manager.load_data()
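The evaluation loop above accumulates per-class scores, labels and num_gt_boxes, but the AP computation itself is not shown in the snippet. A self-contained sketch of a VOC-style average precision over such arrays (plain NumPy, written here as an illustration rather than taken from the original code) is:

import numpy as np

def average_precision(scores, labels, num_gt_boxes):
    # scores: detection confidences; labels: 1 for a true positive match,
    # 0 for a false positive; num_gt_boxes: ground-truth boxes of the class.
    order = np.argsort(scores)[::-1]
    true_positives = np.asarray(labels, dtype=np.float64)[order]
    false_positives = 1.0 - true_positives
    cumulative_tp = np.cumsum(true_positives)
    cumulative_fp = np.cumsum(false_positives)
    recall = cumulative_tp / max(num_gt_boxes, 1)
    precision = cumulative_tp / np.maximum(
        cumulative_tp + cumulative_fp, np.finfo(np.float64).eps)
    # area under the monotonically interpolated precision-recall curve
    recall = np.concatenate(([0.0], recall, [1.0]))
    precision = np.concatenate(([0.0], precision, [0.0]))
    for i in range(len(precision) - 2, -1, -1):
        precision[i] = max(precision[i], precision[i + 1])
    changes = np.where(recall[1:] != recall[:-1])[0] + 1
    return np.sum((recall[changes] - recall[changes - 1]) * precision[changes])

# e.g. average_precisions.append(average_precision(scores, labels, num_gt_boxes))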