Example #1
def ipu_options(opt: argparse.Namespace, cfg: yacs.config.CfgNode,
                model: Detector):
    """Configure the IPU options from the cfg and opt settings.
    Parameters:
        opt: object holding the options parsed from the command line
        cfg: yacs object containing the config
        model (Detector): a torch Detector model
    Returns:
        ipu_opts: options for the IPU configuration
    """
    batches_per_step = cfg.ipuopts.batches_per_step
    half = cfg.model.half

    ipu_opts = poptorch.Options()
    ipu_opts.deviceIterations(batches_per_step)
    ipu_opts.autoRoundNumIPUs(True)

    if opt.benchmark:
        ipu_opts.Distributed.disable()

    if half:
        ipu_opts.Precision.setPartialsType(torch.float16)
        model.half()

    return ipu_opts
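
A minimal usage sketch (an assumption, not part of the original example): the
returned options are passed to poptorch when wrapping the model for the IPU.

ipu_opts = ipu_options(opt, cfg, model)
optimizer = poptorch.optim.SGD(model.parameters(), lr=0.01)  # illustrative optimizer
# poptorch compiles the wrapped model for the IPU on its first call
training_model = poptorch.trainingModel(model, options=ipu_opts, optimizer=optimizer)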
Example #2
    def get_detector(self, kks, startTime=None, finishTime=None) -> Detector:
        '''Returns the values with the given kks over the given time interval.
        If startTime is not given, the interval runs from the beginning to finishTime;
        if finishTime is not given, it runs from startTime to the end.'''
        res = Detector(kks)
        try:
            for detect in self:
                if detect.kks == kks:
                    res = Detector(kks, detect.get_description())
                    # validate the requested time range; fall back to the
                    # full range on invalid input
                    minDate = detect.get_start_date()
                    maxDate = detect.get_finish_date()
                    isValidStartTime = startTime and minDate < startTime < maxDate
                    startTime = startTime if isValidStartTime else minDate
                    isValidFinishTime = finishTime and finishTime >= startTime
                    finishTime = finishTime if isValidFinishTime else maxDate
                    # slice the data by time
                    values = [
                        val for val in detect.copy_indication_list()
                        if startTime <= val.dt <= finishTime
                    ]
                    # one reading per second (no gaps)
                    if (finishTime - startTime).total_seconds() == len(values) - 1:
                        res.add_indication_list(values)
                    # data with gaps (not one reading per second)
                    else:
                        # seed with the first value
                        val = detect.get_value_by_time(startTime)
                        res.add_indication(
                            Indication(startTime, val.value, val.status))
                        delta = timedelta(seconds=1)
                        startTime += delta

                        dates = set(val.dt for val in values)

                        while startTime <= finishTime:
                            if startTime in dates:
                                val = detect.get_value_by_time(startTime)
                            else:
                                val = Indication(startTime, val.value,
                                                 val.status)
                            res.add_indication(val)
                            startTime += delta
                    break
        except Exception:
            print('Invalid date entered!')
        return res
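
A self-contained sketch of the gap-filling idea used above: missing seconds
are forward-filled with the last known value (the `readings` data here is
illustrative).

from datetime import datetime, timedelta

readings = {
    datetime(2021, 1, 1, 0, 0, 0): 10.0,
    datetime(2021, 1, 1, 0, 0, 3): 12.5,  # seconds 1-2 are missing
}
start, finish = min(readings), max(readings)

filled, t = [], start
last = readings[start]
while t <= finish:
    last = readings.get(t, last)  # carry the previous value forward on a gap
    filled.append((t, last))
    t += timedelta(seconds=1)

assert len(filled) == int((finish - start).total_seconds()) + 1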
Example #3
def detect():
    detector = Detector(anchor_scales=anchor_scales,
                        anchor_strides=anchor_strides,
                        eval_params=eval_params)
    classifier = tf.estimator.Estimator(model_fn=detector.model_fn,
                                        model_dir=ckpt_dir)
    results = classifier.predict(get_dataset, yield_single_examples=False)
    fnames = get_imgs()
    save_result(fnames, results)
Example #4
def main():

    # data
    data_root = Path(args.data_root)
    train_dataset = COCO(annFile=str(data_root /
                                     'annotations/instances_train2014.json'),
                         root=str(data_root / 'train2014/'),
                         image_size=args.img_size)
    val_dataset = COCO(annFile=str(data_root /
                                   'annotations/instances_val2014.json'),
                       root=str(data_root / 'val2014/'),
                       image_size=args.img_size)
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.nworkers,
                              pin_memory=args.cuda,
                              collate_fn=collate_fn)
    val_loader = DataLoader(dataset=val_dataset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            num_workers=args.nworkers,
                            pin_memory=args.cuda,
                            collate_fn=collate_fn)

    # model
    model = Detector(args.nclasses + 1)
    optimizer = optim.Adam(params=model.parameters(), lr=args.lr)

    if args.cuda:
        model = torch.nn.DataParallel(model)
        model = model.cuda()

    prev_loss = np.inf
    for epoch in range(args.epoch):
        print('{:3d}/{:3d} epoch'.format(epoch + 1, args.epoch))

        train(model, train_loader, optimizer)
        loss = validate(model, val_loader)

        if loss < prev_loss:
            torch.save(model, 'model.save')
            prev_loss = loss
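
A more portable checkpointing variant (an assumption layered on the loop
above): saving only the state_dict avoids pickling the whole module, and
unwrapping DataParallel keeps the parameter keys free of the 'module.' prefix.

to_save = model.module if isinstance(model, torch.nn.DataParallel) else model
torch.save(to_save.state_dict(), 'model.pt')
# Restoring later:
#   model = Detector(args.nclasses + 1)
#   model.load_state_dict(torch.load('model.pt'))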
Example #5
def eval_on_wider():
    output_dir = os.path.join(ckpt_dir, '%s_result' % wider_type)
    os.makedirs(output_dir, exist_ok=True)
    tf.logging.info('output_dir: %s' % output_dir)

    dataset = WiderEval(imgs_dir=imgs_dir, anno_mat=anno_mat)

    detector = Detector(anchor_scales=anchor_scales,
                        anchor_strides=anchor_strides,
                        eval_params=eval_params)

    classifier = tf.estimator.Estimator(model_fn=detector.model_fn,
                                        model_dir=ckpt_dir)

    results = classifier.predict(dataset.get_dataset,
                                 yield_single_examples=False)
    save_wider_result(output_dir, dataset.imgs, results)
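
A note on the Estimator pattern shared by both eval examples (this addition is
not from the source): predict returns a lazy generator, so the checkpoint is
only restored and the graph only executed once the results are iterated, e.g.:

for batch in results:
    # With yield_single_examples=False, each `batch` is a dict mapping
    # prediction names to batched numpy arrays.
    handle(batch)  # `handle` is a placeholder for what save_wider_result does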
Example #6
import os
from pycocotools import coco
from tqdm import tqdm
from config import cfg as opt
from models.detector import Detector
os.environ['CUDA_VISIBLE_DEVICES'] = '3'
valid_ids = [
    1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
    23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44,
    46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
    65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88,
    89, 90
]

## config: recover the trained weights
opt.weights = 'exp/coco_person/model_last.pth'
opt.vis_trehs = 0.01
split = 'val'

detector = Detector(opt)
data = coco.COCO(
    os.path.join(opt.data_dir, 'annotations',
                 'instances_{}2017.json').format(split))

if opt.class_name != '*':  ## for one class
    catIds = data.getCatIds(opt.class_name)
    imgIds = data.getImgIds(catIds=catIds)
    valid_ids = catIds

detections = []
for img_id in tqdm(data.getImgIds()):
    img_name = os.path.join(
        os.path.join(opt.data_dir, '{}2017'.format(split)),
        data.loadImgs(ids=[img_id])[0]['file_name']).strip()
    boxs, masks = detector.run(img_name, vis=False)
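
The loop body is truncated here; assuming `detections` is filled with entries
in the COCO detection format ({'image_id', 'category_id', 'bbox' (xywh),
'score'}), a sketch of scoring them with pycocotools would be:

from pycocotools.cocoeval import COCOeval

coco_dets = data.loadRes(detections)           # loadRes also accepts a list of dicts
coco_eval = COCOeval(data, coco_dets, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()                          # prints the standard AP table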
Example #7
        args.size,
        args.base_anchor_size,
        ('_MG' if args.mutual_guide else ''),
        ))
    print('Saving to {}'.format(save_path))
    torch.save(model.state_dict(), save_path)


if __name__ == '__main__':

    print('Loading Dataset...')
    (show_classes, num_classes, dataset, epoch_size, max_iter, testset) = load_dataset()

    print('Loading Network...')
    from models.detector import Detector
    model = Detector(args.size, num_classes, args.backbone, args.neck)
    model.train()
    model.cuda()
    num_param = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('Total params: {:e}'.format(num_param))

    print('Preparing Optimizer & AnchorBoxes...')
    optimizer = optim.SGD(tencent_trick(model), lr=args.lr, momentum=0.9, weight_decay=0.0005)
    criterion = MultiBoxLoss(num_classes, mutual_guide=args.mutual_guide)
    priorbox = PriorBox(args.base_anchor_size, args.size)
    with torch.no_grad():
        priors = priorbox.forward()
        priors = priors.cuda()

    if args.trained_model is not None:
        print('loading weights from', args.trained_model)
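
The example is cut off above; a hedged sketch of how such a checkpoint is
typically restored in PyTorch (an assumption, not the original code):

state_dict = torch.load(args.trained_model, map_location='cuda')  # hypothetical continuation
model.load_state_dict(state_dict)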
Example #8
    def add_detector(self, name, parent_id=None):
        if not self.__validate_str(name):
            raise TypeError(
                "Please pass the correct type of input: name should be String")
        # Detector names cannot be empty string
        if name == "":
            raise ValueError(
                "Please pass the correct value of input: name should not "
                "be an empty string")
        # If a '/' is in the name parameter we raise a ValueError
        if '/' in name:
            raise ValueError("The name parameter cannot contain a '/'")

        # Remove any unwanted symbols from the name
        name = self.__sanitize_str(name)

        # This executes when trying to add a root-level detector and wrapper
        if parent_id is None or parent_id == "":
            wrapper = self.__add_wrapper(name)

            if wrapper is not None:
                detector = Detector()
                detector.name = name
                wrapper.detector = detector
                wrapper.save()
            # If the wrapper already exists, throw an error
            else:
                raise ValueError("The detector '" + name + "' already exists")

        # If we add a subdetector
        else:
            if not self.__validate_path(parent_id):
                raise TypeError(
                    "Please pass the correct type of input: parent_id  "
                    "should be String")

            parent_id = self.__sanitize_path(parent_id)
            detector_names = self.__split_name(parent_id)

            try:
                detector_wrapper = self.__get_wrapper(detector_names[0])
            except Exception:
                raise ValueError("The detector '", detector_names[0],
                                 "' does not exist in the database")

            try:
                detector = self.__get_detector(detector_wrapper, parent_id)
                added_detector = Detector()
                added_detector.name = name
            except Exception:
                raise ValueError("The detector with id '" + parent_id +
                                 "' does not exist")

            try:
                detector.subdetectors.get(name=name)
            except Exception:
                detector.subdetectors.append(added_detector)
                detector_wrapper.save()
                return
            raise ValueError("Detector '" + parent_id + "/" + name +
                             "' already exist")
Example #9
class Baseline:
    def __init__(self, opts):
        self.opts = opts

        self.n_classes = opts['n_classes']
        self.pretrained_model = opts['pretrained_model']
        self.detector_name = opts['detector']
        self.dataset_name = opts['dataset']
        self.threshold = opts['threshold']
        self.data_dir = os.path.join(opts['project_root'], '..',
                                     opts['data_dir'])
        self.split = opts['split']
        self.super_type = opts['super_type']
        self.logs_root = opts['logs_root']
        self.verbosity = opts['verbosity']
        self.project_root = opts['project_root']
        self.super_root = os.path.join(opts['project_root'], '..',
                                       opts['data_dir'], 'superpixels',
                                       opts['super_type'])

        # please change me later
        self.year = '2007'

        self.detector = Detector(self.detector_name, self.n_classes,
                                 self.pretrained_model, self.opts['gpu_id'])
        self.visualizer = Visualize(opts['label_names'])
        if self.dataset_name == 'voc':
            self.loader = voc_loader(data_dir=self.data_dir,
                                     split=self.split,
                                     super_root=self.super_root)
        elif self.dataset_name == 'yto':
            self.loader = yto_loader(data_dir=self.data_dir,
                                     split=self.split,
                                     super_root=self.super_root)
        else:
            raise ValueError('No such dataset: {}'.format(self.dataset_name))

    def is_valid_box(self, bbox_mask):
        """ Checks if the box is valid
            Just a sanity check, some bboxes were invalid
        """
        return np.any(bbox_mask)

    def box_to_mask(self, box, size):
        """Convert box co-ordinates into a mask"""
        H, W = size
        mask_img = np.zeros((H, W), dtype=bool)
        y_min, x_min, y_max, x_max = box.astype(np.int32)
        # FIXME: should this be `y_max + 1` (and `x_max + 1`) to include the far edge?
        mask_img[y_min:y_max, x_min:x_max] = True
        return mask_img

    def SD_metric(self, img, bbox, masks, stype=0):
        _s_in, _s_st = [], []
        for mask in masks:
            intersect = np.bitwise_and(bbox, mask).sum()
            ratio = intersect / np.count_nonzero(mask)
            if ratio == 1:
                _s_in.append(mask)
            elif ratio < 1:
                _s_st.append(mask.astype(bool))
        return np.array(_s_in), np.array(_s_st)

    def rebase_sst(self, s_in, s_st, bboxes):
        _sst = []
        for sin, sst, bbox in zip(s_in, s_st, bboxes):
            n, h, w = sst.shape
            union_masks = np.empty((n, h, w), dtype=np.float32)
            for idx, s_mask in enumerate(sst):
                union_masks[idx] = np.bitwise_or(sin, s_mask)
            union_bboxes = mask_to_bbox(union_masks)
            iou = np.squeeze(bbox_iou(union_bboxes, np.array([bbox])))
            order = np.argsort(iou, axis=0)[::-1]
            _sst.append(sst[order])
        return _sst

    def get_initial_sets(self, img, bboxes, masks, boxes):
        c, h, w = img.shape
        s_in, s_st = [], []
        for box in bboxes:
            box_mask = self.box_to_mask(box, (h, w))
            if not self.is_valid_box(box_mask):
                continue
            _s_in, _s_st = self.SD_metric(img, box_mask, masks, stype=-1)
            if len(_s_in) == 0:
                continue
            _s_in = np.sum(_s_in, axis=0).astype(bool)
            s_in.append(_s_in)
            s_st.append(_s_st)
        return s_in, s_st

    def box_alignment(self, img, bboxes, masks, boxes):
        s_in, s_st = self.get_initial_sets(img, bboxes, masks, boxes)

        if len(s_in) == 0 or len(s_st) == 0:
            return [], [], []

        s_st = self.rebase_sst(s_in, s_st, bboxes)
        final_boxes = []
        final_masks = []
        added_superpixel_masks = []
        for bbox, sin, sst in zip(bboxes, s_in, s_st):
            s = sin
            if s.ndim == 0:
                continue
            assert len(sst) >= 1, "No straddling boxes found"

            proc = 0
            new_superpixels = np.zeros_like(s)
            new_s = np.bitwise_or(s, sst[0])
            iou_old = bbox_iou(mask_to_bbox(np.array([s])),
                               np.array([bbox]))[0][0]
            iou_new = bbox_iou(mask_to_bbox(np.array([new_s])),
                               np.array([bbox]))[0][0]
            for sk in sst[1:]:
                if iou_old > iou_new:
                    break
                iou_old = iou_new
                s = new_s
                new_s = np.bitwise_or(s, sk)
                iou_new = bbox_iou(mask_to_bbox(np.array([new_s])),
                                   np.array([bbox]))[0][0]
                proc += 1
                new_superpixels = np.bitwise_or(new_superpixels, sk)
            final_masks.append(s)
            final_boxes.append(mask_to_bbox(np.array([s]))[-1])
            added_superpixel_masks.append(new_superpixels.astype(np.int32))
            if self.verbosity:
                print('No. of superpixels added: {:2d}'.format(proc))
        final_masks, final_boxes = np.array(final_masks), np.array(final_boxes)
        return final_boxes, final_masks, added_superpixel_masks

    def multi_thresholding_superpixel_merging(self,
                                              img,
                                              initial_boxes,
                                              aligned_boxes,
                                              aligned_masks,
                                              s_masks,
                                              s_boxes,
                                              threshold=None):
        """ 1. performs multi-thresholding step for different thresholds
            2. incorporate some randomness by scoring these randomly
            3. remove redundant boxes using non-maximum suppression

        args:
            initial_boxes: bboxes predicted from detector
            aligned_boxes: bboxes after bbox-alignment
            aligned_masks: masks  after bbox-alignment
                           `aligned_boxes` are generated by enclosing these masks
            s_masks      : masks corresponding to superpixels
            s_boxes      : bounding boxes for the corresponding superpixels
            threshold    : straddling expansion threshold
        """
        _, h, w = img.shape

        def get_thresholded_spixels(threshold, s_masks, a_bbox):
            """ generates the set of superpixels which have greater than `threshold`
                overlap with the `a_bbox`
            """
            req_masks = []
            box_mask = self.box_to_mask(a_bbox, (h, w))
            for mask in s_masks:
                intersect = np.bitwise_and(box_mask, mask).sum()
                ratio = intersect / np.count_nonzero(mask)
                if ratio >= threshold:
                    req_masks.append(mask)
            return np.array(req_masks).astype(bool)

        # generate sets for different thresholds
        thresholds = [0.1, 0.2, 0.3, 0.4, 0.5]
        final_set_ = {}
        for idx, (a_mask,
                  a_bbox) in enumerate(zip(aligned_masks, aligned_boxes)):
            box_set = {}
            for threshold in thresholds:
                req_superpixels = get_thresholded_spixels(
                    threshold, s_masks, a_bbox)
                super_segment = np.sum(req_superpixels, axis=0).astype(bool)
                final_segment = np.bitwise_or(super_segment, a_mask)
                final_bbox = mask_to_bbox(np.array([final_segment]))[0]
                box_set.update({threshold: [final_segment, final_bbox]})
            final_set_.update({idx: box_set})

        # score the boxes
        for box_set in final_set_.values():
            for idx, (thresh, seg_box) in enumerate(box_set.items()):
                r = np.random.rand()
                score = r * (idx + 1)
                box_set.update({thresh: seg_box + [score]})

        # nms
        for key, box_set in final_set_.items():
            segments, bboxes, scores = zip(*box_set.values())
            segments, bboxes, scores = np.array(segments), np.array(
                bboxes), np.array(scores)
            idxs = nms(bboxes, thresh=0.9, score=scores)
            final_picks = [segments[idxs][0], bboxes[idxs][0], scores[idxs][0]]
            final_set_.update({key: final_picks})
        return final_set_

    def predict_single(self, img_path, sup_path):

        img = read_image(img_path)
        contours, masks, boxes = get_superpixels(sup_path)

        # use the detector and predict bounding boxes
        p_bboxes, p_labels, p_scores = self.detector.predict([img])
        p_bboxes = np.rint(p_bboxes[0])

        # box-alignment
        final_bboxes, final_masks, added_superpixel_masks = self.box_alignment(
            img, p_bboxes, masks, boxes)

        if len(final_bboxes) == 0 or len(final_masks) == 0:
            print('No bboxes predicted')
            return

        final_set = self.multi_thresholding_superpixel_merging(
            img, p_bboxes, final_bboxes, final_masks, masks, boxes)
        f_segments, f_bboxes, f_scores = zip(*final_set.values())
        f_segments, f_bboxes, f_scores = np.array(f_segments), np.array(
            f_bboxes), np.array(f_scores)

        if self.verbosity:
            print('Predicted bboxes:')
            print(p_bboxes)
            print('After bbox-alignment:')
            print(final_bboxes)
            print('After straddling expansion:')
            print(f_bboxes)

        img_file = sup_path.replace('.csv', '')
        self.visualizer.mtsm(img,
                             p_bboxes,
                             final_bboxes,
                             final_masks,
                             contours,
                             f_bboxes,
                             f_segments,
                             save=True,
                             path=img_file)

    def predict(self, inputs=None):
        if inputs is None:
            img, bboxes, labels, contours, masks, boxes = self.loader.load_single(
                0)
        else:
            img, bboxes, labels, contours, masks, boxes = inputs

        p_bboxes, p_labels, p_scores = self.detector.predict([img])
        p_bboxes = p_bboxes[0]

        final_bboxes, final_masks, _ = self.box_alignment(img, bboxes, masks,
                                                          boxes)
        final_set = self.multi_thresholding_superpixel_merging(
            img, p_bboxes, final_bboxes, final_masks, masks, boxes)
        mtsm_masks, mtsm_bboxes, _ = zip(*final_set.values())
        self.visualizer.mtsm(img,
                             bboxes,
                             final_bboxes,
                             final_masks,
                             contours,
                             mtsm_bboxes,
                             mtsm_masks,
                             save=False)

    def predict_all(self, inputs=None):
        metrics = {}
        print('evaluating a total of {} images'.format(self.loader.len()))
        time_taken = []
        box_align_time = []
        straddling_time = []
        begin_time = time.time()
        total_size = self.loader.len()
        invalid_count = 0
        for idx in range(self.loader.len()):
            # progress bar
            done_l = (idx + 1.0) / total_size
            per_done = int(done_l * 30)
            args = ['=' * per_done, ' ' * (30 - per_done - 1), done_l * 100]
            sys.stdout.write('\r')
            sys.stdout.write('[{}>{}]{:.0f}%'.format(*args))
            sys.stdout.flush()

            # load images and ground truth stuff
            img, bboxes, labels, contours, masks, boxes = self.loader.load_single(
                idx)

            # use the detector and predict bounding boxes
            start_time = time.time()
            p_bboxes, p_labels, p_scores = self.detector.predict([img])
            p_bboxes = np.rint(p_bboxes[0])
            time_taken.append(time.time() - start_time)

            # box-alignment
            start_time = time.time()
            final_bboxes, final_masks, _ = self.box_alignment(
                img, p_bboxes, masks, boxes)
            box_align_time.append(time.time() - start_time)

            # Just a precaution
            if len(final_bboxes) == 0 or len(final_masks) == 0:
                invalid_count += 1
                continue

            # Straddling expansion
            start_time = time.time()
            final_set = self.multi_thresholding_superpixel_merging(
                img, p_bboxes, final_bboxes, final_masks, masks, boxes)
            straddling_time.append(time.time() - start_time)
            mtsm_masks, mtsm_bboxes, _ = zip(*final_set.values())

            # store the results in a file
            metrics.update({
                '{}'.format(self.loader.ids[idx]): [
                    p_bboxes, p_labels, p_scores, final_bboxes, bboxes, labels,
                    mtsm_bboxes
                ]
            })
            img_file = os.path.join(self.opts['project_root'], 'logs',
                                    self.logs_root, 'qualitative',
                                    str(self.loader.ids[idx]))

            self.visualizer.mtsm(img,
                                 p_bboxes,
                                 final_bboxes,
                                 final_masks,
                                 contours,
                                 mtsm_bboxes,
                                 mtsm_masks,
                                 save=True,
                                 path=img_file)

        print('\nTotal time taken for detection per image: {:.3f}'.format(
            np.mean(time_taken)))
        print('Total time taken for box alignment per image: {:.3f}'.format(
            np.mean(box_align_time)))
        print('Total time taken for straddling expansion per image: {:.3f}'.
              format(np.mean(straddling_time)))
        print('Total time elapsed: {:.3f}'.format(time.time() - begin_time))
        print('Total invalid images encountered {:4d}/{:4d}'.format(
            invalid_count, total_size))
        with open(os.path.join(self.logs_root, 'metrics.list'), 'wb') as f:
            pickle.dump(metrics, f)

    def predict_from_file(self, detections_file, annotations_file):
        metrics = {}

        with open(detections_file, 'rb') as f:
            dets = np.load(f)

        with open(annotations_file, 'rb') as f:
            ress = np.load(f)

        print('evaluating a total of {} images'.format(len(ress)))
        time_taken = []
        box_align_time = []
        begin_time = time.time()
        total_size = len(ress)
        invalid_count = 0
        for idx in range(len(ress)):
            # progress bar
            done_l = (idx + 1.0) / total_size
            per_done = int(done_l * 30)
            args = ['=' * per_done, ' ' * (30 - per_done - 1), done_l * 100]
            sys.stdout.write('\r')
            sys.stdout.write('[{}>{}]{:.0f}%'.format(*args))
            sys.stdout.flush()

            # load images and ground truth stuff
            img, bboxes, labels, contours, masks, boxes = self.loader.load_single(
                idx)

            # use the detector and predict bounding boxes
            start_time = time.time()
            p_bboxes, p_labels, p_scores = self.detector.predict([img])
            p_bboxes = np.rint(p_bboxes[0])
            time_taken.append(time.time() - start_time)

            # box-alignment
            start_time = time.time()
            final_bboxes, final_masks, _ = self.box_alignment(
                img, p_bboxes, masks, boxes)
            box_align_time.append(time.time() - start_time)

            if len(final_bboxes) == 0 or len(final_masks) == 0:
                invalid_count += 1
                continue

            # store the results in a file
            metrics.update({
                '{}'.format(self.loader.ids[idx]):
                [p_bboxes, p_labels, p_scores, final_bboxes, bboxes, labels]
            })
            img_file = os.path.join(self.opts['project_root'], 'logs',
                                    self.logs_root, 'qualitative',
                                    str(self.loader.ids[idx]))
            self.visualizer.box_alignment(img,
                                          p_bboxes,
                                          final_bboxes,
                                          final_masks,
                                          contours,
                                          save=True,
                                          path=img_file)

        print('\nTotal time taken for detection per image: {:.3f}'.format(
            np.mean(time_taken)))
        print('Total time taken for box alignment per image: {:.3f}'.format(
            np.mean(box_align_time)))
        print('Total time elapsed: {:.3f}'.format(time.time() - begin_time))
        print('Total invalid images encountered {:4d}/{:4d}'.format(
            invalid_count, total_size))
        with open(os.path.join(self.logs_root, 'metrics.list'), 'wb') as f:
            pickle.dump(metrics, f)
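
A hypothetical driver for the Baseline class; every value below is an
assumption inferred from how __init__ reads `opts`, not taken from the source.

opts = {
    'n_classes': 20,                     # e.g. Pascal VOC classes
    'pretrained_model': 'voc07',         # placeholder checkpoint name
    'detector': 'faster_rcnn',           # placeholder detector name
    'dataset': 'voc',
    'threshold': 0.5,
    'project_root': '/path/to/project',  # placeholder paths
    'data_dir': 'data',
    'split': 'trainval',
    'super_type': 'slic',
    'logs_root': 'baseline_logs',
    'verbosity': 1,
    'gpu_id': 0,
    'label_names': ['aeroplane', 'bicycle'],  # truncated placeholder list
}
baseline = Baseline(opts)
baseline.predict_all()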
Example #10
import math
import numpy as np
import pandas as pd
from game import Game
from pathlib import Path
import matplotlib.pyplot as plt
from models.detector import Detector
from utils import (evaluate_game, compute_standard_points,
                   compute_advanced_points, preds_to_ranks, print_results,
                   save_img_files)

idx = np.random.randint(1, 7)
path = "data/train_games/game%d" % idx
game = Game(path)
images = game.load_images()
model = Detector()
pred_dealer = []
pred_rank = []
pred_pts_stand = np.zeros((1, 4), dtype=int)
pred_pts_advan = np.zeros((1, 4), dtype=int)
while not game.is_done:
    ROI, dst = game.next_step(show_step=False, save_fig=True)
    preds = model.prediction_step(ROI)
    dealer_idx = game.dealer_idx
    pred_dealer.append(dealer_idx)
    pred_rank.append(preds.tolist())
    ranks = preds_to_ranks(preds)
    pred_pts_stand += compute_standard_points(preds)
    pred_pts_advan += compute_advanced_points(preds, dealer_idx)

pred_rank = np.asarray(pred_rank)
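
An illustrative scoring sketch (the `true_rank` array is a placeholder; real
labels would come from the game annotations):

true_rank = np.zeros_like(pred_rank)        # placeholder ground truth
accuracy = (pred_rank == true_rank).mean()  # fraction of correctly predicted ranks
print('rank accuracy: {:.2%}'.format(accuracy))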
Example #11
import os
import cv2
from config import cfg as opt
from models.detector import Detector

os.environ['CUDA_VISIBLE_DEVICES'] = '3'
image_ext = ['jpg', 'jpeg', 'png', 'webp']
video_ext = ['mp4', 'mov', 'avi', 'mkv', 'h264']
time_stats = ['tot', 'load', 'pre', 'net', 'dec', 'post', 'merge']

opt.demo = '/data/yoloCao/DataSet/coco/val2017'
opt.weights = 'exp/coco_person/model_last.pth'
opt.vis_trehs = 0.4
detector = Detector(opt)
cv2.namedWindow('result', cv2.WINDOW_NORMAL)
cv2.resizeWindow('result', 1024, 768)
if opt.demo == 'webcam' or \
    opt.demo[opt.demo.rfind('.') + 1:].lower() in video_ext:
    cam = cv2.VideoCapture(0 if opt.demo == 'webcam' else opt.demo)

    while True:
        _, img = cam.read()
        ret = detector.run(img)
        if cv2.waitKey(1) == 27:
            break
else:
    if os.path.isdir(opt.demo):
        image_names = []
        ls = os.listdir(opt.demo)
        for file_name in sorted(ls):
            ext = file_name[file_name.rfind('.') + 1:].lower()