Ejemplo n.º 1
0
    def __init__(self,
                 path='',
                 title='',
                 params=None,
                 resume=False,
                 data_format='csv',
                 epochs=0,
                 index='epoch'):
        """Set up result logging to ``<path>.csv``/``<path>.json`` and ``<path>.html``.

        Parameters
        ----------
        path: string
            path prefix for the data, plot and parameter files; the
            proper extension ('.csv', '.json', '.html') is appended
        title: string
            title of HTML file
        params: Namespace
            optionally save parameters for results (dumped to '<path>.json')
        resume: bool
            resume previous logging
        data_format: str('csv'|'json')
            which file format to use to save the data
        epochs: int
            total number of epochs; stored on the instance for later use
        index: string
            name of the index column (default 'epoch'); stored on the
            instance for later use

        Raises
        ------
        ValueError
            if ``data_format`` is not one of the supported formats
        """
        if data_format not in ResultsLog.supported_data_formats:
            raise ValueError('data_format must of the following: ' + '|'.join(
                ['{}'.format(k) for k in ResultsLog.supported_data_formats]))

        # The data file extension follows the chosen format.
        if data_format == 'json':
            self.data_path = '{}.json'.format(path)
        else:
            self.data_path = '{}.csv'.format(path)
        self.data_format = data_format
        if params is not None:
            # Persist the run's hyper-parameters next to the results.
            export_args_namespace(params, '{}.json'.format(path))
        self.plot_path = '{}.html'.format(path)
        self.results = None
        self.clear()  # NOTE(review): presumably resets plot/figure state -- see clear()
        self.first_save = True
        if os.path.isfile(self.data_path):
            if resume:
                # Continue an interrupted run: reload the previous results.
                self.load(self.data_path)
                self.first_save = False
            else:
                # Fresh run: discard the stale data file from a previous run.
                os.remove(self.data_path)
                self.results = pd.DataFrame()
        else:
            self.results = pd.DataFrame()

        self.title = title
        self.epochs = epochs
        self.index = index

        # Optionally mirror the run on hyperdash when the library is installed.
        if HYPERDASH_AVAILABLE:
            name = self.title if title != '' else path
            self.hd_experiment = hyperdash.Experiment(name)
            if params is not None:
                for k, v in params._get_kwargs():
                    self.hd_experiment.param(k, v, log=False)
Ejemplo n.º 2
0
def hyper_dash_manager(exp_name, debug=False):
    """Yield a hyperdash experiment and guarantee it is closed afterwards.

    When ``debug`` is True, a dummy instance that sends no logs to
    hyperdash is yielded instead of a real experiment.

    NOTE(review): this generator looks like it is meant to be wrapped
    with ``@contextlib.contextmanager`` -- confirm at the call sites.
    """
    exp = debugHyperDash() if debug else hyperdash.Experiment(exp_name)
    try:
        yield exp
    finally:
        # Always finish the experiment, even if the caller raises.
        exp.end()
Ejemplo n.º 3
0
def test(arguments):
    """Evaluate a YOLOv2 network on the test set.

    Builds the network and data loader from the module-level settings,
    runs one gradient-free forward pass per mini-batch, accumulates the
    detection losses and computes PR curves / mAP with brambox.  Results
    are optionally plotted to visdom and/or hyperdash, and raw
    detections can be pickled to disk.

    Parameters
    ----------
    arguments : Namespace
        parsed command-line arguments; the fields used here are
        ``weight``, ``cuda``, ``visdom``, ``hyperdash`` and ``save_det``
    """
    log.debug('Creating network')
    net = ln.models.Yolo(CLASSES, arguments.weight, CONF_THRESH, NMS_THRESH)
    net.postprocess.append(
        ln.data.transform.TensorToBrambox(NETWORK_SIZE, LABELS))

    net.eval()
    if arguments.cuda:
        net.cuda(non_blocking=PIN_MEM)

    log.debug('Creating dataset')
    loader = torch.utils.data.DataLoader(
        CustomDataset(TESTFILE, net),
        batch_size=MINI_BATCH,
        shuffle=False,
        drop_last=False,
        num_workers=WORKERS if arguments.cuda else 0,
        pin_memory=PIN_MEM if arguments.cuda else False,
        collate_fn=ln.data.list_collate,
    )

    if arguments.visdom:
        log.debug('Creating visdom visualisation wrappers')
        vis = visdom.Visdom(port=VISDOM_PORT)
        plot_pr = ln.engine.VisdomLinePlotter(vis,
                                              'pr',
                                              opts=dict(
                                                  xlabel='Recall',
                                                  ylabel='Precision',
                                                  title='Precision Recall',
                                                  xtickmin=0,
                                                  xtickmax=1,
                                                  ytickmin=0,
                                                  ytickmax=1,
                                                  showlegend=True))

    if arguments.hyperdash:
        log.debug('Creating hyperdash visualisation wrappers')
        hd = hyperdash.Experiment('YOLOv2 Test')
        hyperdash_plot_pr = ln.engine.HyperdashLinePlotter(hd)

    log.debug('Running network')
    tot_loss = []
    coord_loss = []
    conf_loss = []
    cls_loss = []
    anno, det = {}, {}

    for data, box in tqdm(loader, total=len(loader)):
        if arguments.cuda:
            data = data.cuda(non_blocking=PIN_MEM)

        # Run inference without building the autograd graph.  The API
        # differs between PyTorch 0.3 (volatile Variables) and >=0.4
        # (torch.no_grad()).  BUGFIX: the original ran an additional
        # unconditional forward pass preceded by
        # ``Variable(data).no_grad()``, which duplicated the work and
        # would raise AttributeError (``no_grad`` is not a Variable
        # method); that has been removed and the 0.3 branch now uses the
        # correct ``volatile=True`` idiom.
        if torch.__version__.startswith('0.3'):
            data = torch.autograd.Variable(data, volatile=True)
            output, loss = net(data, box)
        else:
            with torch.no_grad():
                output, loss = net(data, box)

        # Accumulate per-batch losses, weighted by the batch size so the
        # final figure is an average per image.  ``.data[0]`` is the 0.3
        # scalar accessor; ``.item()`` replaced it in later versions.
        if torch.__version__.startswith('0.3'):
            tot_loss.append(net.loss.loss_tot.data[0] * len(box))
            coord_loss.append(net.loss.loss_coord.data[0] * len(box))
            conf_loss.append(net.loss.loss_conf.data[0] * len(box))
            if net.loss.loss_cls is not None:
                cls_loss.append(net.loss.loss_cls.data[0] * len(box))
        else:
            tot_loss.append(net.loss.loss_tot.item() * len(box))
            coord_loss.append(net.loss.loss_coord.item() * len(box))
            conf_loss.append(net.loss.loss_conf.item() * len(box))
            if net.loss.loss_cls is not None:
                cls_loss.append(net.loss.loss_cls.item() * len(box))

        # Map this batch's annotations and detections back to their
        # dataset keys (keys are consumed in loader order).
        key_val = len(anno)
        anno.update(
            {loader.dataset.keys[key_val + k]: v
             for k, v in enumerate(box)})
        det.update({
            loader.dataset.keys[key_val + k]: v
            for k, v in enumerate(output)
        })

    pr_dict, ap_dict, m_ap, all_key = bbb.pr_ap_dicts(det, anno, LABELS,
                                                      IGNORE)
    pr = pr_dict[all_key]

    log.info('Computed statistics')
    for label in sorted(ap_dict.keys()):
        log.info('\tLabel %r: m_ap = %0.04f' % (
            label,
            ap_dict[label],
        ))

    # Average the accumulated losses over the number of processed images.
    tot = round(sum(tot_loss) / len(anno), 5)
    coord = round(sum(coord_loss) / len(anno), 2)
    conf = round(sum(conf_loss) / len(anno), 2)
    if len(cls_loss) > 0:
        cls = round(sum(cls_loss) / len(anno), 2)
        log.test(
            '\n{seen} mAP:{m_ap}% Loss:{tot} (Coord:{coord} Conf:{conf} Cls:{cls})'
            .format(seen=net.seen // BATCH,
                    m_ap=m_ap,
                    tot=tot,
                    coord=coord,
                    conf=conf,
                    cls=cls))
    else:
        log.test('\n{seen} mAP:{m_ap}% Loss:{tot} (Coord:{coord} Conf:{conf})'.
                 format(seen=net.seen // BATCH,
                        m_ap=m_ap,
                        tot=tot,
                        coord=coord,
                        conf=conf))

    name = 'mAP: {m_ap}%'.format(m_ap=m_ap)
    if arguments.visdom:
        plot_pr(np.array(pr[0]), np.array(pr[1]), name=name)

    if arguments.hyperdash:
        # Hyperdash only plots time series, so fake the x-axis: offset the
        # current timestamp by the recall percentage, emitting one point
        # per distinct (rounded) recall value.
        now = time.time()
        re_seen = None
        for re_, pr_ in sorted(zip(pr[1], pr[0])):
            re_ = round(re_, 2)
            if re_ != re_seen:
                re_seen = re_
                re_ = int(re_ * 100.0)
                hyperdash_plot_pr(name, pr_, now + re_, log=False)

    if arguments.save_det is not None:
        # Note: These detection boxes are the coordinates for the letterboxed images,
        #       you need ln.data.ReverseLetterbox to have the right ones.
        #       Alternatively, you can save the letterboxed annotations, and use those for statistics later on!
        bbb.generate('det_pickle', det,
                     Path(arguments.save_det).with_suffix('.pkl'))
        #bbb.generate('anno_pickle', det, Path('anno-letterboxed_'+arguments.save_det).with_suffix('.pkl'))

    if arguments.hyperdash:
        hyperdash_plot_pr.close()
Ejemplo n.º 4
0
    # Parse arguments
    # Fall back to CPU when CUDA was requested but is not available.
    if args.cuda:
        if not torch.cuda.is_available():
            log.debug('CUDA not available')
            args.cuda = False
        else:
            log.debug('CUDA enabled')

    # Replace the boolean CLI flags with ready-to-use client objects
    # (or None), so downstream code can test and use them directly.
    if args.visdom:
        args.visdom = visdom.Visdom(port=VISDOM_PORT)
    else:
        args.visdom = None

    if args.hyperdash:
        args.hyperdash = hyperdash.Experiment('YOLOv2 Pascal VOC Train')
    else:
        args.hyperdash = None

    # Ensure the backup directory exists; refuse to run when the path
    # exists but is not a directory.
    if not os.path.isdir(args.backup):
        if not os.path.exists(args.backup):
            log.warn('Backup folder does not exist, creating...')
            os.makedirs(args.backup)
        else:
            raise ValueError('Backup path is not a folder')

    # Train
    eng = VOCTrainingEngine(args)
    b1 = eng.batch  # batch counter before training -- presumably for timing stats
    t1 = time.time()
    eng()
Ejemplo n.º 5
0
    # Parse arguments
    # Fall back to CPU when CUDA was requested but is not available.
    if args.cuda:
        if not torch.cuda.is_available():
            log.error('CUDA not available')
            args.cuda = False
        else:
            log.debug('CUDA enabled')

    # Replace the boolean CLI flags with ready-to-use client objects
    # (or None), so downstream code can test and use them directly.
    if args.visdom:
        args.visdom = visdom.Visdom(port=VISDOM_PORT)
    else:
        args.visdom = None

    if args.hyperdash:
        args.hyperdash = hyperdash.Experiment('YOLOv2 Train')
    else:
        args.hyperdash = None

    # Ensure the backup directory exists; refuse to run when the path
    # exists but is not a directory.
    if not os.path.isdir(args.backup):
        if not os.path.exists(args.backup):
            log.warn('Backup folder does not exist, creating...')
            os.makedirs(args.backup)
        else:
            raise ValueError('Backup path is not a folder')

    # Train
    eng = TrainingEngine(args)
    b1 = eng.batch  # batch counter before training -- presumably for timing stats
    t1 = time.time()
    eng()