Example #1
    def _eval(self):
        from tensorpack.utils.utils import get_tqdm_kwargs
        score = 0.0
        ind = 0.0

        x_valid, y_valid = self.data
        valid_predictions = []
        with tqdm.tqdm(total=len(x_valid) // config.INFERENCE_BATCH + 1,
                       **get_tqdm_kwargs()) as pbar:
            start = 0
            end = 0
            for i in range(len(x_valid) // config.INFERENCE_BATCH + 1):
                start = i * config.INFERENCE_BATCH
                end = min(start + config.INFERENCE_BATCH, len(x_valid))
                x = x_valid[start:end]
                final_probs, final_labels = self.pred(x)
                valid_predictions.extend(final_probs)
                #score += mapk(la, final_labels)
                pbar.update()
        valid_predictions = np.array(valid_predictions)
        map3 = mapk(self.valid_df[['y']].values,
                    preds2catids(valid_predictions).values)
        print('Map3: {:.5f}'.format(map3))
        self.trainer.monitors.put_scalar("Map3", map3)
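All of these examples share one idiom: tensorpack's get_tqdm_kwargs() returns a dict of progress-bar defaults that is splatted into tqdm (keyword overrides such as leave=True can be passed through it, as Examples #8 and #20 do). A minimal sketch of the pattern, where run_with_progress, n_batches and process_batch are hypothetical stand-ins:

import tqdm
from tensorpack.utils.utils import get_tqdm_kwargs

def run_with_progress(n_batches, process_batch):
    # get_tqdm_kwargs() supplies tensorpack's tqdm defaults (refresh
    # behavior, bar format); the caller only sets the total.
    with tqdm.tqdm(total=n_batches, **get_tqdm_kwargs()) as pbar:
        for i in range(n_batches):
            process_batch(i)  # hypothetical per-batch work
            pbar.update()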
Example #2
def query_eval_output(df, pred_func, tqdm_bar=None):
    """
    Args:
        df: a DataFlow which produces (image, gt_boxes, gt_ids)
        pred_func: a callable, takes (image, gt_boxes) and returns one
            feature vector per ground-truth box
        tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
            will create a new one.

    Returns:
        list of [id_list, fv_list] pairs, one per image
    """
    df.reset_state()
    all_results = []
    # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323
    with ExitStack() as stack:
        if tqdm_bar is None:
            tqdm_bar = stack.enter_context(
                tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()))
        for img, gt_boxes, gt_ids in df.get_data():
            fvs = pred_func(img, gt_boxes)
            # print(fvs.shape)

            result_list = []
            fv_list = []
            id_list = []
            for fv, gt_id in zip(fvs, gt_ids):
                fv_list.append(fv.tolist())
                id_list.append(int(gt_id))
            result_list.append(id_list)
            result_list.append(fv_list)

            all_results.append(result_list)
            tqdm_bar.update(1)
    return all_results
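The ExitStack dance above is how several of these functions share one progress bar across concurrent evaluations: the bar is created, and therefore closed, locally only when the caller did not supply one. A stripped-down sketch of just that idiom, with items and the per-item work as hypothetical placeholders:

from contextlib import ExitStack
import tqdm

def evaluate(items, tqdm_bar=None):
    with ExitStack() as stack:
        if tqdm_bar is None:
            # We own this bar, so register it with the stack for cleanup;
            # a caller-provided bar is left open for the caller to close.
            tqdm_bar = stack.enter_context(tqdm.tqdm(total=len(items)))
        for item in items:
            _ = item  # hypothetical per-item work
            tqdm_bar.update(1)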
Example #3
def pred_brats(df, detect_func):
    """
    prediction
    """
    df.reset_state()
    gts = []
    results = []
    with tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()) as pbar:
        for filename, image_id, data in df.get_data():
            final_label, probs = detect_func(data)
            if config.TEST_FLIP:
                pred_flip, probs_flip = detect_func(flip_lr(data))
                final_prob = (probs + probs_flip) / 2.0
                pred = np.argmax(final_prob, axis=-1)
                pred[pred == 3] = 4
                if config.ADVANCE_POSTPROCESSING:
                    pred = crop_ND_volume_with_bounding_box(pred, data['bbox'][0], data['bbox'][1])
                    pred = post_processing(pred, data['weights'][:,:,:,0])
                    pred = np.asarray(pred, np.int16)
                    final_label = np.zeros(data['original_shape'], np.int16)
                    final_label = set_ND_volume_roi_with_bounding_box_range(final_label, data['bbox'][0], data['bbox'][1], pred)
                else:
                    final_label = pred

            save_to_nii(final_label, image_id, outdir="eval_out18", mode="label")
            # save prob to ensemble
            # save_to_pkl(probs, image_id, outdir="eval_out18_prob_{}".format(config.CROSS_VALIDATION))
            pbar.update()
    return None
Example #4
def eval_on_dataflow(df, detect_func):
    """
    Args:
        df: a DataFlow which produces (image, image_id)
        detect_func: a callable, takes [image] and returns a dict

    Returns:
        list of dict, to be dumped to COCO json format
    """
    df.reset_state()
    all_results = []
    with tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()) as pbar:
        for img, img_id in df.get_data():
            results = detect_func(img)
            for classid, boxes, scores in results:
                cat_id = COCOMeta.class_id_to_category_id[classid]
                boxes[:, 2] -= boxes[:, 0]
                boxes[:, 3] -= boxes[:, 1]
                for box, score in zip(boxes, scores):
                    all_results.append({
                        'image_id': img_id,
                        'category_id': cat_id,
                        'bbox': list(map(lambda x: float(round(x, 1)), box)),
                        'score': float(round(score, 2)),
                    })
            pbar.update(1)
    return all_results
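The two in-place subtractions above convert boxes from (x1, y1, x2, y2) corners to COCO's (x, y, width, height) convention; a tiny numpy illustration:

import numpy as np

boxes = np.array([[10., 20., 50., 80.]])  # x1, y1, x2, y2
boxes[:, 2] -= boxes[:, 0]                # width  = x2 - x1
boxes[:, 3] -= boxes[:, 1]                # height = y2 - y1
# boxes is now [[10., 20., 40., 60.]] in COCO xywh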
Example #5
    def _eval(self):
        from tensorpack.utils.utils import get_tqdm_kwargs

        valid_predictions = []
        valid_y = []
        th = 0.15
        with tqdm.tqdm(total=len(self.valid_ds) // config.INFERENCE_BATCH + 1,
                       **get_tqdm_kwargs()) as pbar:
            for i in range(len(self.valid_ds) // config.INFERENCE_BATCH + 1):
                start = i * config.INFERENCE_BATCH
                end = min(start + config.INFERENCE_BATCH, len(self.valid_ds))
                data = self.valid_ds[start:end]
                data = [preprocess(d, is_training=False) for d in data]
                x = np.array([_[0] for _ in data])
                y = np.array([_[1] for _ in data])
                final_probs = self.pred(x)
                valid_predictions.extend(np.argmax(final_probs[0], axis=-1))
                valid_y.extend(y)
                pbar.update()
        valid_predictions = np.array(valid_predictions)
        valid_y = np.array(valid_y)
        score = (valid_y == valid_predictions).mean()
        self.trainer.monitors.put_scalar("score", score)
        return score
Example #6
def eval_with_funcs(predictors, nr_eval, get_player_fn, verbose=False):
    """
    Args:
        predictors ([PredictorBase])
    """

    class Worker(StoppableThread, ShareSessionThread):
        def __init__(self, func, func_refine, queue):
            super(Worker, self).__init__()
            self._func = func
            self._func_refine = func_refine
            self.q = queue

        def func(self, *args, **kwargs):
            if self.stopped():
                raise RuntimeError("stopped!")
            return self._func(*args, **kwargs)

        def func_refine(self, *args, **kwargs):
            if self.stopped():
                raise RuntimeError("stopped!")
            return self._func_refine(*args, **kwargs)

        def run(self):
            with self.default_sess():
                player = get_player_fn()
                while not self.stopped():
                    try:
                        r, tp = play_one_episode(player, self.func, self.func_refine, verbose)
                    except RuntimeError:
                        return
                    self.queue_put_stoppable(self.q, (r, tp))

    q = queue.Queue()
    threads = [Worker(f[0], f[1], q) for f in predictors]

    for k in threads:
        k.start()
        time.sleep(0.1)  # avoid simulator bugs
    stat_r = StatCounter()
    stat_tp = StatCounter()

    def fetch():
        r, tp = q.get()
        stat_r.feed(r)
        stat_tp.feed(tp)
        if verbose:
            logger.info("reward %f" % r)
            logger.info("true positive %f" % tp)

    for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
        fetch()
    logger.info("Waiting for all the workers to finish the last run...")
    for k in threads:
        k.stop()
    for k in threads:
        k.join()
    while q.qsize():
        fetch()
    return stat_r.average, stat_tp.average
Example #7
def eval_on_dataflow(df, detect_func, tqdm_bar=None):
    """
    Args:
        df: a DataFlow which produces (image, image_id)
        detect_func: a callable, takes [image] and returns [DetectionResult]
        tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
            will create a new one.
    Returns:
        a tuple of (list of results, mean local score)
    """
    csv_path = os.path.join(config.BASEDIR, 'train_ship_segmentations_v2.csv')
    seg_df = pd.read_csv(csv_path, engine="python")
    seg_df = seg_df.dropna(axis=0)
    seg_df = seg_df.set_index('ImageId')

    df.reset_state()
    all_results = []
    # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323
    with ExitStack() as stack:
        if tqdm_bar is None:
            tqdm_bar = stack.enter_context(
                tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()))
        score = 0.0
        all_score = []
        count = 0.0
        eval_names = []
        eval_imgs = []
        all_det = []
        all_im = []
        for img, img_id in df.get_data():
            results = detect_func(img)
            #mask_whole = detect_func(img)
            #all_det.append(mask_whole)
            all_im.append(img)
            eval_names.append(img_id)
            final = draw_final_outputs(img, results)
            cv2.imwrite('./eval_out_bb/{}'.format(img_id), final)
            mask_instances = [r.mask for r in results]
            score_instances = [r.score for r in results]

            masks = clean_overlap_instance(mask_instances, score_instances,
                                           img_id)
            if len(masks) == 0:
                print("no mask!!", img_id)
                v = 0
            else:
                v = local_eval(masks, img_id, seg_df)  #pred, imgId
            score += v
            all_score.append(v)
            count += 1
            tqdm_bar.update(1)
        for k in np.array(all_score).argsort()[:20]:
            print(all_score[k], eval_names[k])
        #    cv2.imwrite("./eval_out/{}".format(eval_names[k]), all_im[k])
        #    cv2.imwrite("./eval_out/{}_mask.jpg".format(eval_names[k].split(".")[0]), all_det[k]*255)
        print("Local Eval: ", score / count)
    return all_results, score / count
Example #8
    def main_loop(self):
        # some final operations that might modify the graph
        logger.info("[{}] Initializing graph variables ...".format(os.environ['PBS_ARRAY_INDEX']))

        #self.sess.run(tf.initialize_all_variables())

        self.config.session_init.init(self.sess)
#        tf.get_default_graph().finalize()
        callbacks = self.config.callbacks
        logger.info("[{}] Starting concurrency...".format(os.environ['PBS_ARRAY_INDEX']))
        self._start_concurrency()
        #with self.sess.as_default():
        logger.info("[{}] Setting default session".format(os.environ['PBS_ARRAY_INDEX']))
        with ops.default_session(self.sess):
            try:
                logger.info("[{}] Getting global step".format(os.environ['PBS_ARRAY_INDEX']))
                self.global_step = get_global_step()
                logger.info("[{}] Start training with global_step={}".format(os.environ['PBS_ARRAY_INDEX'], self.global_step))

                if self.config.extra_arg['is_chief']:
                    server = neptune_mp_server.Server(
                            self.config.extra_arg['n_workers'],
                            port=self.config.extra_arg['port'],
                            debug_charts=self.config.extra_arg['debug_charts'],
                            adam_debug=self.config.extra_arg['adam_debug'],
                            schedule_hyper=self.config.extra_arg['schedule_hyper'],
                            experiment_dir=self.config.extra_arg['experiment_dir'])
                    server.main_loop()

                callbacks.before_train()
                for epoch in range(self.config.starting_epoch, self.config.max_epoch+1):
                    with timed_operation(
                        'Epoch {}, global_step={}'.format(
                            epoch, self.global_step + self.config.step_per_epoch)):
                        for step in tqdm.trange(
                                self.config.step_per_epoch,
                                **get_tqdm_kwargs(leave=True)):
                            if self.coord.should_stop():
                                return
                            self.run_step()
                            callbacks.trigger_step()
                            try:
                                self.global_step += 1
                            except Exception:
                                self.global_step = -1
                        self.trigger_epoch()
                        print('EPOCH ENDS HERE')
            except (KeyboardInterrupt, Exception):
                raise
            finally:
                # Do I need to run queue.close?
                print('Handling finally block')
                callbacks.after_train()
                self.coord.request_stop()
                self.summary_writer.close()
                self.sess.close()
Example #9
def eval_with_funcs(predictors, nr_eval, get_player_fn, verbose=False):
    """
    Args:
        predictors ([PredictorBase])
    """

    class Worker(StoppableThread, ShareSessionThread):
        def __init__(self, func, queue):
            super(Worker, self).__init__()
            self._func = func
            self.q = queue

        def func(self, *args, **kwargs):
            if self.stopped():
                raise RuntimeError("stopped!")
            return self._func(*args, **kwargs)

        def run(self):
            with self.default_sess():
                player = get_player_fn()
                while not self.stopped():
                    try:
                        val = play_one_episode(player, self.func)
                    except RuntimeError:
                        return
                    self.queue_put_stoppable(self.q, val)

    q = queue.Queue()
    threads = [Worker(f, q) for f in predictors]

    for k in threads:
        k.start()
        time.sleep(0.1)  # avoid simulator bugs
    stat = StatCounter()

    def fetch():
        val = q.get()
        stat.feed(val)
        if verbose:
            if val > 0:
                logger.info("farmer wins")
            else:
                logger.info("lord wins")

    for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
        fetch()
    logger.info("Waiting for all the workers to finish the last run...")
    for k in threads:
        k.stop()
    for k in threads:
        k.join()
    while q.qsize():
        fetch()
    farmer_win_rate = stat.average
    return farmer_win_rate
Example #10
def eval_with_funcs(predictors, nr_eval, get_player_fn, verbose=False):
    """
    Args:
        predictors ([PredictorBase])
    """
    class Worker(StoppableThread, ShareSessionThread):
        def __init__(self, func, queue):
            super(Worker, self).__init__()
            self._func = func
            self.q = queue

        def func(self, *args, **kwargs):
            if self.stopped():
                raise RuntimeError("stopped!")
            return self._func(*args, **kwargs)

        def run(self):
            with self.default_sess():
                player = get_player_fn(train=False)
                while not self.stopped():
                    try:
                        score = play_one_episode(player, self.func)
                    except RuntimeError:
                        return
                    self.queue_put_stoppable(self.q, score)

    q = queue.Queue()
    threads = [Worker(f, q) for f in predictors]

    for k in threads:
        k.start()
        time.sleep(0.1)  # avoid simulator bugs
    stat = StatCounter()

    def fetch():
        r = q.get()
        stat.feed(r)
        if verbose:
            logger.info("Score: {}".format(r))

    for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
        fetch()
    # waiting is necessary, otherwise the estimated mean score is biased
    logger.info("Waiting for all the workers to finish the last run...")
    for k in threads:
        k.stop()
    for k in threads:
        k.join()
    while q.qsize():
        fetch()

    if stat.count > 0:
        return (stat.average, stat.max)
    return (0, 0)
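The comment above explains the drain step: without it, episodes still in flight when the target count is reached would be dropped, biasing the mean toward fast (often short) episodes. A simplified sketch of the same produce/consume/drain pattern using only the standard library, with plain threads standing in for tensorpack's StoppableThread and a running list standing in for StatCounter:

import queue
import threading

def eval_parallel(run_episode_fns, nr_eval):
    q = queue.Queue()
    stop = threading.Event()

    def worker(run_episode):
        while not stop.is_set():
            q.put(run_episode())  # unbounded queue: put never blocks

    threads = [threading.Thread(target=worker, args=(f,), daemon=True)
               for f in run_episode_fns]
    for t in threads:
        t.start()

    scores = [q.get() for _ in range(nr_eval)]
    stop.set()
    for t in threads:          # let each worker finish its last episode
        t.join()
    while not q.empty():       # drain episodes that finished after stop
        scores.append(q.get_nowait())
    return sum(scores) / len(scores)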
Example #12
 def _eval(self):
     from tensorpack.utils.utils import get_tqdm_kwargs
     score = 0.0
     ind = 0.0
     self.df.reset_state()
     with tqdm.tqdm(total=self.df.size(), **get_tqdm_kwargs()) as pbar:
         for img, la in self.df.get_data():
             ind += 1
             final_probs, final_labels = self.pred(img)
             score += (np.array(final_labels) == np.array(la)).mean()
             pbar.update()
     print("Val Acc: ", score / ind)
     self.trainer.monitors.put_scalar("Acc", score / ind)
Example #13
def re_id_eval(args):
    with open(args.gallery_file, 'r') as gallery_file:
        gallery_list = json.load(gallery_file)
        gallery_bb = []
        for frame in gallery_list:
            """
                frame[0]: abs fname
                frame[1]: bb list
                frame[2]: label list
                frame[3]: score list
                frame[4]: feature vectors list
            """
            viz_detection(args, frame[0], frame[1])

            gt_bb_array, gt_cls_array = read_annotations(
                os.path.join(args.anno_dir,
                             os.path.basename(frame[0]).split('.')[0] +
                             '.txt'))
            if not frame[1]:
                # print('No detection')
                continue
            det_gt_cls_array, pos_ind = bb_cls_matching(
                np.array(frame[1]), gt_bb_array, gt_cls_array)
            for bb, fv, det_cls in zip(frame[1], frame[4], det_gt_cls_array):
                gallery_bb.append(fv + bb + [det_cls])
    gallery_bb = np.array(gallery_bb)
    print(gallery_bb.shape)

    with open(args.query_file, 'r') as query_file:
        query_list = json.load(query_file)
        tp_top20 = 0.0
        with tqdm.tqdm(total=len(query_list), **get_tqdm_kwargs()) as tqdm_bar:
            for query in query_list:
                """
                    query[0]: query label list
                    query[1]: feature vector list
                """
                fv = np.array(query[1][0][0])
                distance = []
                for gfv in gallery_bb:
                    distance.append(np.linalg.norm(fv - gfv[:256]))
                distance_array = np.array(distance)
                index_sort = np.argsort(distance_array)
                cls_top20 = gallery_bb[index_sort[:20], 260]
                if query[0][0] in cls_top20.astype(int).tolist():
                    # print(query[0][0], cls_top20.astype(int).tolist())
                    print('yay')
                    tp_top20 += 1
                tqdm_bar.update(1)

    print('Top 20 accuracy: ' + str(tp_top20 / len(query_list)))
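The per-gallery-item Python loop above can be vectorized; a sketch of the same top-k check with numpy broadcasting, where gallery_fvs (N, 256), gallery_ids (N,) and query_fv (256,) are hypothetical arrays corresponding to the slices of gallery_bb:

import numpy as np

def top_k_ids(query_fv, gallery_fvs, gallery_ids, k=20):
    # One L2 distance per gallery entry, in a single vectorized call.
    dists = np.linalg.norm(gallery_fvs - query_fv, axis=1)
    return gallery_ids[np.argsort(dists)[:k]]

# hit = query_label in top_k_ids(query_fv, gallery_fvs, gallery_ids).tolist()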
Example #14
def classifier_eval_output(df, pred_func, tqdm_bar=None):
    """
    Args:
        df: a DataFlow which produces (fname, image, orig_shape)
        pred_func: a callable, takes (image, orig_shape) and returns
            (bounding boxes, class probabilities)
        tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
            will create a new one.

    Returns:
        list of [fname, bb_list, prob_list] entries, one per image
    """
    df.reset_state()
    all_results = []
    jsonable = False
    # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323
    with ExitStack() as stack:
        if tqdm_bar is None:
            tqdm_bar = stack.enter_context(
                tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()))
        for fname, img, orig_shape in df.get_data():
            bbs, probs = pred_func(img, orig_shape)

            if VIZ:
                input_file = os.path.join(
                    '/media/yingges/TOSHIBA EXT/datasets/re-ID/PRW-v16.04.20/frames',
                    os.path.basename(fname).split('.')[0] + '.jpg')
                img = cv2.imread(input_file, cv2.IMREAD_COLOR)
                final = draw_final_outputs(img,
                                           bbs,
                                           tags_on=False,
                                           bb_list_input=True)
                viz = np.concatenate((img, final), axis=1)
                cv2.imwrite(os.path.basename(input_file), viz)

            result_list = []
            result_list.append(fname)
            bb_list = []
            prob_list = []
            for bb, prob in zip(bbs, probs):
                bb_list.append(list(map(lambda x: round(float(x), 2), bb)))
                prob_list.append(list(map(lambda x: round(float(x), 4), prob)))
            result_list.append(bb_list)
            result_list.append(prob_list)

            if not jsonable:
                jsonable = jsonable_test(result_list)

            all_results.append(result_list)
            tqdm_bar.update(1)
    return all_results
Example #15
 def start(self):
     """
     Start testing with a progress bar.
     """
     itr = self.ds.__iter__()
     if self.warmup:
         for _ in tqdm.trange(self.warmup, **get_tqdm_kwargs()):
             next(itr)
     # add smoothing for speed benchmark
     with get_tqdm(total=self.test_size, leave=True, smoothing=0.2) as pbar:
         for idx, dp in enumerate(itr):
             pbar.update()
             if idx == self.test_size - 1:
                 break
Example #16
def eval_with_funcs(predictors, nr_eval, get_player_fn):
    class Worker(StoppableThread, ShareSessionThread):
        def __init__(self, func, queue):
            super(Worker, self).__init__()
            self._func = func
            self.q = queue

        def func(self, *args, **kwargs):
            if self.stopped():
                raise RuntimeError("stopped!")
            return self._func(*args, **kwargs)

        def run(self):
            with self.default_sess():
                player = get_player_fn(train=False)
                while not self.stopped():
                    try:
                        score = play_one_episode(player, self.func)
                        # print("Score, ", score)
                    except RuntimeError:
                        return
                    self.queue_put_stoppable(self.q, score)

    q = queue.Queue()
    threads = [Worker(f, q) for f in predictors]

    for k in threads:
        k.start()
        time.sleep(0.1)  # avoid simulator bugs
    stat = StatCounter()
    try:
        for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
            r = q.get()
            stat.feed(r)
        logger.info("Waiting for all the workers to finish the last run...")
        for k in threads:
            k.stop()
        for k in threads:
            k.join()
        while q.qsize():
            r = q.get()
            stat.feed(r)
    except Exception:
        logger.exception("Eval")
    finally:
        if stat.count > 0:
            return (stat.average, stat.max)
        return (0, 0)
Example #17
def eval_brats(df, detect_func, with_gt=True):
    """
    Args:
        df: a DataFlow which produces (image, image_id)
        detect_func: a callable, takes [image] and returns [DetectionResult]

    Returns:
        list of dict, to be dumped to COCO json format
    """
    df.reset_state()
    gts = []
    results = []
    with tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()) as pbar:
        for filename, image_id, data in df.get_data():
            final_label, probs = detect_func(data)
            if config.TEST_FLIP:
                pred_flip, probs_flip = detect_func(flip_lr(data))
                final_prob = (probs + probs_flip) / 2.0
                pred = np.argmax(final_prob, axis=-1)
                pred[pred == 3] = 4
                if config.ADVANCE_POSTPROCESSING:
                    pred = crop_ND_volume_with_bounding_box(
                        pred, data['bbox'][0], data['bbox'][1])
                    pred = post_processing(pred, data['weights'][:, :, :, 0])
                    pred = np.asarray(pred, np.int16)
                    final_label = np.zeros(data['original_shape'], np.int16)
                    final_label = set_ND_volume_roi_with_bounding_box_range(
                        final_label, data['bbox'][0], data['bbox'][1], pred)
                else:
                    final_label = pred
            gt = load_nifty_volume_as_array("{}/{}_seg.nii.gz".format(
                filename, image_id))
            gts.append(gt)
            results.append(final_label)
            pbar.update()
    test_types = ['whole', 'core', 'enhancing']
    ret = {}
    for type_idx in range(3):
        dice = dice_of_brats_data_set(gts, results, type_idx)
        dice = np.asarray(dice)
        dice_mean = dice.mean(axis=0)
        dice_std = dice.std(axis=0)
        test_type = test_types[type_idx]
        ret[test_type] = dice_mean[0]
        print('tissue type', test_type)
        print('dice mean', dice_mean)
    return ret
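For reference, the Dice score driving these per-tissue numbers is 2|A∩B| / (|A| + |B|) on binary masks; a minimal hypothetical helper (not the project's dice_of_brats_data_set, which additionally groups labels into whole/core/enhancing regions):

import numpy as np

def dice(pred, gt):
    # Dice coefficient of two binary masks; defined as 1.0 when both are empty.
    pred, gt = pred.astype(bool), gt.astype(bool)
    denom = pred.sum() + gt.sum()
    return 1.0 if denom == 0 else 2.0 * np.logical_and(pred, gt).sum() / denom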
Example #18
File: eval.py Project: fsk119/RFCN
def eval_coco(df, detect_func, tqdm_bar=None):
    """
    Args:
        df: a DataFlow which produces (image, image_id)
        detect_func: a callable, takes [image] and returns [DetectionResult]
        tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
            will create a new one.

    Returns:
        list of dict, to be dumped to COCO json format
    """
    df.reset_state()
    all_results = []
    # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323
    with ExitStack() as stack:
        if tqdm_bar is None:
            tqdm_bar = stack.enter_context(
                tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()))
        for img, img_id in df.get_data():
            results = detect_func(img)
            for r in results:
                box = r.box
                # was: COCOMeta.class_id_to_category_id[r.class_id]
                cat_id = mapper.get(r.class_id, 'background')
                # these two lines would convert to COCO format (xywh); skipped here:
                # box[2] -= box[0]
                # box[3] -= box[1]

                res = {
                    'image_id': img_id,
                    'category_id': cat_id,
                    'bbox': list(map(lambda x: round(float(x), 2), box)),
                    'score': round(float(r.score), 3),
                }

                # also append segmentation to results
                if r.mask is not None:
                    rle = cocomask.encode(
                        np.array(r.mask[:, :, None], order='F'))[0]
                    rle['counts'] = rle['counts'].decode('ascii')
                    res['segmentation'] = rle
                all_results.append(res)
            tqdm_bar.update(1)
    return all_results
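The mask branch above relies on pycocotools' RLE encoding; a self-contained sketch of that step. cocomask.encode expects a Fortran-ordered uint8 array of shape (H, W, N) and returns one RLE dict per instance, whose 'counts' field is bytes and must be decoded before JSON serialization:

import numpy as np
import pycocotools.mask as cocomask

mask = np.zeros((4, 4), dtype=np.uint8)
mask[1:3, 1:3] = 1                    # a dummy 2x2 instance
rle = cocomask.encode(np.array(mask[:, :, None], order='F'))[0]
rle['counts'] = rle['counts'].decode('ascii')  # make it JSON-serializable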
Example #19
def eval_output(df, detect_func, tqdm_bar=None):
    """
    Args:
        df: a DataFlow which produces (image, image_id)
        detect_func: a callable, takes [image] and returns [DetectionResult]
        tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
            will create a new one.

    Returns:
        list of dict, to be dumped to COCO json format
    """
    df.reset_state()
    all_results = []
    # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323
    with ExitStack() as stack:
        if tqdm_bar is None:
            tqdm_bar = stack.enter_context(
                tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()))
        for img, img_fname in df.get_data():
            results = detect_func(img)

            result_list = []
            result_list.append(img_fname)

            bb_list = []
            label_list = []
            score_list = []
            fv_list = []
            for r in results:
                box = r.box
                bb_list.append(list(map(lambda x: round(float(x), 2), box)))
                label_list.append(int(r.class_id))
                score_list.append(round(float(r.score), 3))
                # print(len(r.fv.tolist()))
                fv_list.append(r.fv.tolist())
            result_list.append(bb_list)
            result_list.append(label_list)
            result_list.append(score_list)
            result_list.append(fv_list)

            # dump a dummy json here to check for validity
            all_results.append(result_list)
            tqdm_bar.update(1)
    return all_results
Example #20
    def main_loop(self):
        # some final operations that might modify the graph
        self._init_summary()
        get_global_step_var()  # ensure there is such a var before finalizing the graph
        logger.info("Setup callbacks ...")
        callbacks = self.config.callbacks
        callbacks.setup_graph(self)  # TODO use weakref instead?
        logger.info("Initializing graph variables ...")
        self.sess.run(tf.initialize_all_variables())
        self.config.session_init.init(self.sess)
        tf.get_default_graph().finalize()
        self._start_concurrency()

        with self.sess.as_default():
            try:
                self.global_step = get_global_step()
                logger.info("Start training with global_step={}".format(
                    self.global_step))

                callbacks.before_train()
                for epoch in range(self.config.starting_epoch,
                                   self.config.max_epoch + 1):
                    with timed_operation('Epoch {}, global_step={}'.format(
                            epoch,
                            self.global_step + self.config.step_per_epoch)):
                        for step in tqdm.trange(self.config.step_per_epoch,
                                                **get_tqdm_kwargs(leave=True)):
                            if self.coord.should_stop():
                                return
                            self.run_step()
                            #callbacks.trigger_step()   # not useful?
                            self.global_step += 1
                        self.trigger_epoch()
            except (KeyboardInterrupt, Exception):
                raise
            finally:
                # Do I need to run queue.close?
                callbacks.after_train()
                self.coord.request_stop()
                self.summary_writer.close()
                self.sess.close()
Example #21
def eval_coco(df, detect_func, tqdm_bar=None):
    """
    Args:
        df: a DataFlow which produces (image, image_id)
        detect_func: a callable, takes [image] and returns [DetectionResult]
        tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
            will create a new one.

    Returns:
        list of dict, to be dumped to COCO json format
    """
    df.reset_state()
    all_results = []
    # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323
    with ExitStack() as stack:
        if tqdm_bar is None:
            tqdm_bar = stack.enter_context(
                tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()))
        for img, img_id in df:
            results = detect_func(img)
            for r in results:
                box = r.box
                cat_id = COCOMeta.class_id_to_category_id[r.class_id]
                box[2] -= box[0]
                box[3] -= box[1]

                res = {
                    'image_id': img_id,
                    'category_id': cat_id,
                    'bbox': list(map(lambda x: round(float(x), 3), box)),
                    'score': round(float(r.score), 4),
                }

                # also append segmentation to results
                if r.mask is not None:
                    rle = cocomask.encode(
                        np.array(r.mask[:, :, None], order='F'))[0]
                    rle['counts'] = rle['counts'].decode('ascii')
                    res['segmentation'] = rle
                all_results.append(res)
            tqdm_bar.update(1)
    return all_results
Example #22
def predict_dataflow(df, model_func, tqdm_bar=None):
    """
    Args:
        df: a DataFlow which produces (image, image_id)
        model_func: a callable from the TF model.
            It takes image and returns (boxes, probs, labels, [masks])
        tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
            will create a new one.

    Returns:
        list of dict, in the format used by
        `DetectionDataset.eval_or_save_inference_results`
    """
    df.reset_state()
    all_results = []
    # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323
    with ExitStack() as stack:
        if tqdm_bar is None:
            tqdm_bar = stack.enter_context(
                tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()))
        for img, img_id in df:
            results = predict_image(img, model_func)
            for r in results:
                res = {
                    'image_id': img_id,
                    'category_id': r.class_id,
                    'bbox': list(r.box),
                    'score': round(float(r.score), 4),
                }

                # also append segmentation to results
                if r.mask is not None:
                    rle = cocomask.encode(
                        np.array(r.mask[:, :, None], order='F'))[0]
                    rle['counts'] = rle['counts'].decode('ascii')
                    res['segmentation'] = rle
                all_results.append(res)
            tqdm_bar.update(1)
    return all_results
Example #23
 def _eval(self):
     from tensorpack.utils.utils import get_tqdm_kwargs
     valid_predictions = []
     valid_y = []
     valid_logits = []
     th = 0.15
     total_run = len(self.valid_ds) // config.INFERENCE_BATCH
     if len(self.valid_ds) % config.INFERENCE_BATCH != 0:
         total_run += 1
     with tqdm.tqdm(total=total_run, **get_tqdm_kwargs()) as pbar:
         for i in range(total_run):
             start = i * config.INFERENCE_BATCH
             end = min(start + config.INFERENCE_BATCH, len(self.valid_ds))
             data = self.valid_ds[start:end]
             data = [preprocess(d, is_training=False) for d in data]
             x = np.array([_[0] for _ in data])
             y = np.array([_[1] for _ in data])
             if len(x) == 0:
                 break
             final_probs = self.pred(x)
             valid_predictions.extend(final_probs[0])
             valid_logits.extend(final_probs[0])
             valid_y.extend(y)
             #score += mapk(la, final_labels)
             pbar.update()
     valid_predictions = np.array(valid_predictions)
     valid_y = np.array(valid_y)
     valid_logits = np.array(valid_logits)
     val_loss = tf_ce(valid_logits, valid_y)
     F1_score_05 = calc_macro_f1(valid_predictions, valid_y, 0.5)
     F1_score_015 = calc_macro_f1(valid_predictions, valid_y, 0.15)
     F1_score_02 = calc_macro_f1(valid_predictions, valid_y, 0.2)
     print('F1_score: {:.5f} {:.5f} {:.5f}'.format(F1_score_05,
                                                   F1_score_015,
                                                   F1_score_02))
     self.trainer.monitors.put_scalar("F1_score", F1_score_015)
     return F1_score_015
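calc_macro_f1 is project code; as a point of reference, a hypothetical reimplementation of macro F1 at a probability threshold (per-class F1, then an unweighted mean) might look like:

import numpy as np

def macro_f1(probs, y_true, th):
    # probs, y_true: (n_samples, n_classes); y_true is multi-hot {0, 1}.
    y_pred = (probs > th).astype(int)
    tp = (y_pred * y_true).sum(axis=0)
    precision = tp / np.maximum(y_pred.sum(axis=0), 1)
    recall = tp / np.maximum(y_true.sum(axis=0), 1)
    f1 = 2 * precision * recall / np.maximum(precision + recall, 1e-8)
    return f1.mean()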
Example #24
def eval_on_dataflow(df, detect_func):
    """
    Args:
        df: a DataFlow which produces (image, image_id)
        detect_func: a callable, takes [image] and returns [DetectionResult]

    Returns:
        list of dict, to be dumped to COCO json format
    """
    df.reset_state()
    all_results = []
    with tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()) as pbar:
        for img, img_id in df.get_data():
            results = detect_func(img)
            for r in results:
                box = r.box
                #cat_id = COCOMeta.class_id_to_category_id[r.class_id]
                cat_id = r.class_id
                box[2] -= box[0]
                box[3] -= box[1]

                res = {
                    'image_id': img_id,
                    'category_id': cat_id,
                    'bbox': list(map(lambda x: float(round(x, 1)), box)),
                    'score': float(round(r.score, 2)),
                }

                # also append segmentation to results
                if r.mask is not None:
                    #                    rle = cocomask.encode(
                    #                        np.array(r.mask[:, :, None], order='F'))[0]
                    #                    rle['counts'] = rle['counts'].decode('ascii')
                    res['segmentation'] = r.mask
                all_results.append(res)
            pbar.update(1)
    return all_results
Example #25
def eval_coco(df, detect_func):
    """
    Args:
        df: a DataFlow which produces (image, image_id)
        detect_func: a callable, takes [image] and returns [DetectionResult]

    Returns:
        list of dict, to be dumped to COCO json format
    """
    df.reset_state()
    all_results = []
    with tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()) as pbar:
        for img, img_id in df.get_data():
            results = detect_func(img)
            for r in results:
                box = r.box
                cat_id = COCOMeta.class_id_to_category_id[r.class_id]
                box[2] -= box[0]
                box[3] -= box[1]

                res = {
                    'image_id': img_id,
                    'category_id': cat_id,
                    'bbox': list(map(lambda x: float(round(x, 1)), box)),
                    'score': float(round(r.score, 2)),
                }

                # also append segmentation to results
                if r.mask is not None:
                    rle = cocomask.encode(
                        np.array(r.mask[:, :, None], order='F'))[0]
                    rle['counts'] = rle['counts'].decode('ascii')
                    res['segmentation'] = rle
                all_results.append(res)
            pbar.update(1)
    return all_results
Example #26
def eval_W(df, eval_one_image, tqdm_bar=None):
    df.reset_state()
    all_results = []
    with ExitStack() as stack:
        if tqdm_bar is None:
            tqdm_bar = stack.enter_context(
                tqdm.tqdm(total=df.size(), **get_tqdm_kwargs()))
        for img, boxes, img_id, male, longhair, sunglass, hat, tshirt, longsleeve, \
            formal, shorts, jeans, skirt, facemask, logo, stripe, longpants in df:
            results = eval_one_image(img, boxes, male, longhair, sunglass, hat,
                                     tshirt, longsleeve, formal, shorts, jeans,
                                     skirt, facemask, logo, stripe, longpants)

            male, longhair, sunglass, hat, tshirt, longsleeve, \
            formal, shorts, jeans, skirt, facemask, logo, stripe, longpants, \
            male_predict, longhair_predict, sunglass_predict, hat_predict, tshirt_predict, \
            longsleeve_predict, formal_predict, shorts_predict, jeans_predict, skirt_predict, \
            facemask_predict, logo_predict, stripe_predict, longpants_predict = list(
                zip(*[[int(r.male), int(r.longhair), int(r.sunglass), int(r.hat), int(r.tshirt), int(r.longsleeve),
                       int(r.formal), int(r.shorts), int(r.jeans), int(r.skirt), int(r.facemask), int(r.logo),
                       int(r.stripe), int(r.longpants), float(r.male_predict), float(r.longhair_predict),
                       float(r.sunglass_predict), float(r.hat_predict), float(r.tshirt_predict),
                       float(r.longsleeve_predict),
                       float(r.formal_predict), float(r.shorts_predict), float(r.jeans_predict), float(r.skirt_predict),
                       float(r.facemask_predict), float(r.logo_predict),
                       float(r.stripe_predict), float(r.longpants_predict)]
                      for r in results]))

            res = {
                'image_id': int(img_id),
                'male': male,
                'longhair': longhair,
                'sunglass': sunglass,
                'hat': hat,
                'tshirt': tshirt,
                'longsleeve': longsleeve,
                'formal': formal,
                'shorts': shorts,
                'jeans': jeans,
                'skirt': skirt,
                'facemask': facemask,
                'logo': logo,
                'stripe': stripe,
                'longpants': longpants,
                'male_predict': male_predict,
                'longhair_predict': longhair_predict,
                'sunglass_predict': sunglass_predict,
                'hat_predict': hat_predict,
                'tshirt_predict': tshirt_predict,
                'longsleeve_predict': longsleeve_predict,
                'formal_predict': formal_predict,
                'shorts_predict': shorts_predict,
                'jeans_predict': jeans_predict,
                'skirt_predict': skirt_predict,
                'facemask_predict': facemask_predict,
                'logo_predict': logo_predict,
                'stripe_predict': stripe_predict,
                'longpants_predict': longpants_predict
            }

            all_results.append(res)
            tqdm_bar.update(1)
    return all_results
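The large list(zip(*[...])) expression above is a row-to-column transpose: one inner list per detection becomes one tuple per attribute. In miniature:

rows = [[1, 0, 0.9],          # detection 1: male, longhair, male_predict
        [0, 1, 0.2]]          # detection 2
male, longhair, male_predict = list(zip(*rows))
# male == (1, 0), longhair == (0, 1), male_predict == (0.9, 0.2)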
Example #27
def test_rpcio():
    from . import dataio_recv, dataio_send
    import sys, os
    # if len(sys.argv) > 2:
    #     # client mode
    #     pass
    # else:
    from .server import init_rpcio_server
    host = '127.0.0.1'
    port = 50000
    dataio = init_rpcio_server(host, port)
    batchSize = 4096
    subBatchSize = 256
    clients = []

    with tf.Session() as sess:  #, tf.device(':/cpu:0'):
        dataios = []
        import multiprocessing as mp
        dim0 = 256
        dim1 = 512
        for qidx in range(4):
            v0, v1 = dataio_recv("main%d" % qidx,
                                 batchSize, [tf.float32, tf.int32],
                                 [[dim0], [dim1]],
                                 sub_processor_batch_size=subBatchSize)
            op_send = dataio_send([v0 * 2, v1 * 2],
                                  'main%d' % qidx,
                                  batchSize,
                                  sub_processor_batch_size=subBatchSize)

            for idx in range(batchSize // subBatchSize):
                proc = mp.Process(target=_client_loop,
                                  args=('main%d' % qidx, idx, subBatchSize,
                                        dim0, dim1))
                clients.append(proc)
                proc.start()
            dataios.append(op_send)

        sess.run(tf.global_variables_initializer())

        def thread_run_op(op, qidx):
            print("thread {} start ".format(qidx))
            for _ in range(100000):
                sess.run(op)

        for qidx, op in enumerate(dataios):
            if qidx == 0: continue
            import threading
            threading._start_new_thread(thread_run_op, (op, qidx))

        from tensorpack.utils.utils import get_tqdm_kwargs
        import tqdm
        for _ in tqdm.tqdm(range(100000), **get_tqdm_kwargs()):
            try:
                sess.run(dataios[0])
            except KeyboardInterrupt:
                sess.close()
                break
        from time import sleep
        # sleep(1)
        dataio.close()
        pass
Example #28
def eval_with_funcs(predictors,
                    nr_eval,
                    get_player_fn,
                    directory=None,
                    files_list=None):
    """
    Args:
        predictors ([PredictorBase])

    Runs episodes in parallel, returning statistics about the model performance.
    """
    class Worker(StoppableThread, ShareSessionThread):
        def __init__(self, func, queue, distErrorQueue):
            super(Worker, self).__init__()
            self._func = func
            self.q = queue
            self.q_dist = distErrorQueue

        def func(self, *args, **kwargs):
            if self.stopped():
                raise RuntimeError("stopped!")
            return self._func(*args, **kwargs)

        def run(self):
            with self.default_sess():
                player = get_player_fn(directory=directory,
                                       task=False,
                                       files_list=files_list)
                while not self.stopped():
                    try:
                        score, filename, distance_error, q_values = play_one_episode(
                            player, self.func)
                        # print("Score, ", score)
                    except RuntimeError:
                        return
                    self.queue_put_stoppable(self.q, score)
                    self.queue_put_stoppable(self.q_dist, distance_error)

    q = queue.Queue()
    q_dist = queue.Queue()

    threads = [Worker(f, q, q_dist) for f in predictors]

    # start all workers
    for k in threads:
        k.start()
        time.sleep(0.1)  # avoid simulator bugs
    stat = StatCounter()
    dist_stat = StatCounter()

    # show progress bar w/ tqdm
    for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
        r = q.get()
        stat.feed(r)
        dist = q_dist.get()
        dist_stat.feed(dist)

    logger.info("Waiting for all the workers to finish the last run...")
    for k in threads:
        k.stop()
    for k in threads:
        k.join()
    while q.qsize():
        r = q.get()
        stat.feed(r)

    while q_dist.qsize():
        dist = q_dist.get()
        dist_stat.feed(dist)

    if stat.count > 0:
        return (stat.average, stat.max, dist_stat.average, dist_stat.max)
    return (0, 0, 0, 0)
Example #29
def eval_with_funcs(predictors, nr_eval, get_player_fn, verbose=False):
    """
    Args:
        predictors ([PredictorBase])
    """
    class Worker(StoppableThread, ShareSessionThread):
        def __init__(self, func, queue):
            super(Worker, self).__init__()
            self._func = func
            self.q = queue

        def func(self, *args, **kwargs):
            if self.stopped():
                raise RuntimeError("stopped!")
            return self._func(*args, **kwargs)

        def run(self):
            with self.default_sess():
                player = get_player_fn()
                while not self.stopped():
                    try:
                        stats = play_one_episode(player, self.func)
                    except RuntimeError:
                        return
                    scores = [
                        stat.average if stat.count > 0 else -1
                        for stat in stats
                    ]
                    self.queue_put_stoppable(self.q, scores)

    q = queue.Queue()
    threads = [Worker(f, q) for f in predictors]

    for k in threads:
        k.start()
        time.sleep(0.1)  # avoid simulator bugs
    stats = [StatCounter() for _ in range(7)]

    def fetch():
        scores = q.get()
        for i, score in enumerate(scores):
            if score >= 0:
                stats[i].feed(score)
        accs = [stat.average if stat.count > 0 else 0 for stat in stats]
        if verbose:
            logger.info("passive decision accuracy: {}\n"
                        "passive bomb accuracy: {}\n"
                        "passive response accuracy: {}\n"
                        "active decision accuracy: {}\n"
                        "active response accuracy: {}\n"
                        "active sequence accuracy: {}\n"
                        "minor response accuracy: {}\n".format(
                            accs[0], accs[1], accs[2], accs[3], accs[4],
                            accs[5], accs[6]))

    for _ in tqdm(range(nr_eval), **get_tqdm_kwargs()):
        fetch()
    logger.info("Waiting for all the workers to finish the last run...")
    for k in threads:
        k.stop()
    for k in threads:
        k.join()
    while q.qsize():
        fetch()
    accs = [stat.average if stat.count > 0 else 0 for stat in stats]
    return accs
Example #30
                    PredictConfig(
                        model=ResnetModel(),
                        session_init=get_model_loader(args.load),
                        input_names=['image'],
                        output_names=get_resnet_model_output_names()))

                from tensorpack.utils.utils import get_tqdm_kwargs
                valid_ds = get_dataflow(is_train=False)
                valid_predictions = []
                valid_y = []
                valid_logits = []
                th = 0.5
                total_run = len(valid_ds) // config.INFERENCE_BATCH
                if len(valid_ds) % config.INFERENCE_BATCH != 0:
                    total_run += 1
                with tqdm.tqdm(total=total_run, **get_tqdm_kwargs()) as pbar:
                    for i in range(total_run):
                        start = i * config.INFERENCE_BATCH
                        end = min(start + config.INFERENCE_BATCH, len(valid_ds))
                        data = valid_ds[start:end]
                        data = [preprocess(d, is_training=False) for d in data]
                        x = np.array([_[0] for _ in data])
                        y = np.array([_[1] for _ in data])

                        final_probs = pred(x)
                        valid_predictions.extend(final_probs[0])
                        valid_logits.extend(final_probs[0])
                        valid_y.extend(y)
                        pbar.update()
                valid_predictions = np.array(valid_predictions)
Example #31
         model=ResnetModel(),
         session_init=get_model_loader(args.load),
         input_names=['image'],
         output_names=get_resnet_model_output_names()))
 images = ResnetDetection.load_many(config.BASEDIR,
                                    config.VAL_DATASET)
 images = list(images)
 imgs = [img['image_data'] for img in images]
 labels = [img['with_ship'] for img in images]
 from tensorpack.utils.utils import get_tqdm_kwargs
 score = 0.0
 ind = 0.0
 b_size = config.RESNET_BATCH
 final_pred_mask = []
 with tqdm.tqdm(total=len(imgs) // b_size + 1,
                **get_tqdm_kwargs()) as pbar:
     for i in range(len(imgs) // b_size + 1):
         ind += 1
         start = i * b_size
          end = min(start + b_size, len(imgs))
         batch_image = imgs[start:end]
         batch_image = np.array([
             cv2.resize(
                 cv2.imread(im),
                 (config.RESNET_SIZE, config.RESNET_SIZE))
             for im in batch_image
         ])
         batch_label = np.array(labels[start:end])
         final_probs, final_labels = pred(batch_image)
         final_pred_mask += list(final_labels)