Example #1
    def _init_memory(self):
        logger.info("Populating replay memory with epsilon={} ...".format(self.exploration))

        with get_tqdm(total=self.init_memory_size) as pbar:
            while len(self.mem) < self.init_memory_size:
                self._populate_exp()
                pbar.update()
        self._init_memory_flag.set()
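Note: get_tqdm here is tensorpack's helper, which these examples suggest is a thin wrapper around tqdm.tqdm with shared default kwargs. A minimal self-contained sketch of the same populate-until-full pattern using plain tqdm (fill_memory and populate are made-up names for illustration):

from tqdm import tqdm

def fill_memory(mem, populate, target_size):
    # Grow `mem` until it holds `target_size` items, showing progress.
    # Assumes each populate() call appends exactly one item, matching
    # the single pbar.update() per loop iteration above.
    with tqdm(total=target_size) as pbar:
        while len(mem) < target_size:
            populate(mem)
            pbar.update()

mem = []
fill_memory(mem, lambda m: m.append(object()), target_size=1000)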
Example #3
 def _fake_init_memory(self):
     from copy import deepcopy
     with get_tqdm(total=self.init_memory_size) as pbar:
         while len(self.mem) < 5:
             self._populate_exp()
             pbar.update()
         while len(self.mem) < self.init_memory_size:
             self.mem.append(deepcopy(self.mem._hist[0]))
             pbar.update()
     self._init_memory_flag.set()
Example #4
 def debug(self, cnt=100000):
     with get_tqdm(total=cnt) as pbar:
         for i in range(cnt):
             self.mem.append(
                 Experience(
                     np.zeros(
                         [self.num_actions[0], self.num_actions[1], 256]),
                     0, 0))
             # self._current_ob, self._action_space = self.get_state_and_action_spaces(None)
             pbar.update()
Example #6
def predict_dataflow_batch(df, model_func, tqdm_bar=None):
    """
    Args:
        df: a DataFlow which produces (imgs, img_ids, resized_sizes, scales, orig_sizes) batches
        model_func: a callable from the TF model.
            It takes image and returns (boxes, probs, labels, [masks])
        tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
            will create a new one.

    Returns:
        list of dict, in the format used by
        `DetectionDataset.eval_or_save_inference_results`
    """
    df.reset_state()
    all_results = []
    with ExitStack() as stack:
        # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323
        if tqdm_bar is None:
            tqdm_bar = stack.enter_context(get_tqdm(total=df.size()))
        for imgs, img_ids, resized_sizes, scales, orig_sizes in df:
            results = predict_image_batch(imgs, model_func, resized_sizes,
                                          scales, orig_sizes)
            batch_id = 0
            for img_results in results:
                for r in img_results:
                    img_id = int(img_ids[batch_id])
                    class_id = int(r.class_id)
                    bbox = [float(b) for b in r.box]
                    score = round(float(r.score), 4)

                    res = {
                        'image_id': img_id,
                        'category_id': class_id,
                        'bbox': bbox,
                        'score': score,
                    }

                    # also append segmentation to results
                    if r.mask is not None:
                        rle = cocomask.encode(
                            np.array(r.mask[:, :, None], order='F'))[0]
                        rle['counts'] = rle['counts'].decode('ascii')
                        res['segmentation'] = rle
                    all_results.append(res)
                batch_id += 1
            tqdm_bar.update(1)
    return all_results
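The ExitStack dance above exists so the function closes the progress bar only when it created it; a bar passed in by the caller stays open and can be shared across evaluation shards. A stripped-down sketch of just that pattern (process and its arguments are illustrative, not part of the original API):

from contextlib import ExitStack
from tqdm import tqdm

def process(items, bar=None):
    with ExitStack() as stack:
        if bar is None:
            # We own the bar: register it so ExitStack closes it on exit.
            bar = stack.enter_context(tqdm(total=len(items)))
        for _ in items:
            bar.update(1)

# One caller-owned bar shared across two shards:
with tqdm(total=8) as shared:
    process(range(5), bar=shared)
    process(range(3), bar=shared)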
Example #7
def compute_mean_std(db, fname):
    ds = LMDBSerializer.load(db, shuffle=False)
    ds.reset_state()
    o = OnlineMoments()
    for dp in get_tqdm(ds):
        feat = dp[0]  # len x dim
        for f in feat:
            o.feed(f)
    logger.info("Writing to {} ...".format(fname))
    with open(fname, 'wb') as f:
        f.write(serialize.dumps([o.mean, o.std]))
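OnlineMoments is presumably a one-pass running mean/variance accumulator; a minimal Welford-style stand-in with the same feed()/mean/std surface (RunningMoments is a hypothetical name, not tensorpack's class):

import numpy as np

class RunningMoments:
    # Welford's algorithm: numerically stable single-pass mean/std.
    def __init__(self):
        self.n = 0
        self.mean = None
        self._m2 = None  # running sum of squared deviations

    def feed(self, x):
        x = np.asarray(x, dtype=np.float64)
        if self.mean is None:
            self.mean = np.zeros_like(x)
            self._m2 = np.zeros_like(x)
        self.n += 1
        delta = x - self.mean
        self.mean = self.mean + delta / self.n
        self._m2 = self._m2 + delta * (x - self.mean)

    @property
    def std(self):
        return np.sqrt(self._m2 / self.n)  # population std

o = RunningMoments()
for f in np.random.randn(1000, 13):  # e.g. 13-dim feature frames
    o.feed(f)
print(o.mean, o.std)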
Example #9
def compute_mean_std(db, fname):
    ds = LMDBDataPoint(db, shuffle=False)
    ds.reset_state()
    o = OnlineMoments()
    with get_tqdm(total=ds.size()) as bar:
        for dp in ds.get_data():
            feat = dp[0]  # len x dim
            for f in feat:
                o.feed(f)
            bar.update()
    logger.info("Writing to {} ...".format(fname))
    with open(fname, 'wb') as f:
        f.write(serialize.dumps([o.mean, o.std]))
Example #11
def compute_mean_std(ds, fname):
    """
    Compute mean and std in datasets.
    Usage: compute_mean_std(ds, 'mean_std.txt')
    """
    o = stats.OnlineMoments()
    for dp in get_tqdm(ds):
        feat = dp[0]  # len x dim
        for f in feat:
            o.feed(f)
    logger.info("Writing to {} ...".format(fname))
    with open(fname, 'wb') as f:
        f.write(serialize.dumps([o.mean, o.std]))
Example #12
 def start(self):
     """
     Start testing with a progress bar.
     """
     itr = self.ds.__iter__()
     if self.warmup:
         for _ in tqdm.trange(self.warmup, **get_tqdm_kwargs()):
             next(itr)
     # add smoothing for speed benchmark
     with get_tqdm(total=self.test_size, leave=True, smoothing=0.2) as pbar:
         for idx, dp in enumerate(itr):
             pbar.update()
             if idx == self.test_size - 1:
                 break
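For context on the smoothing=0.2 argument: tqdm's smoothing is the weight of the exponential moving average behind the displayed rate, where 0 averages over the whole run and 1 shows instantaneous speed, so a small value suits throughput benchmarks. A self-contained version of the same warmup-then-measure loop (benchmark and the toy stream are illustrative):

import time
from tqdm import tqdm, trange

def benchmark(stream, test_size, warmup=0):
    it = iter(stream)
    for _ in trange(warmup, desc='warmup'):
        next(it)  # discard warmup iterations
    with tqdm(total=test_size, leave=True, smoothing=0.2) as pbar:
        for idx, _ in enumerate(it):
            pbar.update()
            if idx == test_size - 1:
                break

benchmark((time.sleep(0.001) or i for i in range(300)), test_size=200, warmup=50)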
Example #13
def eval_proc(file_name):
    print(file_name)
    f = open(os.path.join('./log', file_name), 'w+')
    for te in types:
        for ta in types:
            for role_id in [2, 3, 1]:
                agent = make_agent(ta, role_id)
                for i in range(1):
                    env = make_env(te)
                    st = StatCounter()
                    with get_tqdm(total=100) as pbar:
                        for j in range(100):
                            winning_rate = eval_episode(env, agent)
                            st.feed(winning_rate)
                            pbar.update()
                    f.write(
                        '%s with role id %d against %s, winning rate: %f\n' %
                        (ta, role_id, te, st.average))
    f.close()
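StatCounter (from tensorpack's stats utilities, as far as these examples show) accumulates values and exposes their average; a minimal stand-in plus the inner evaluation loop, with eval_episode stubbed out as a coin flip:

import random
from tqdm import tqdm

class StatCounter:
    def __init__(self):
        self._values = []
    def feed(self, v):
        self._values.append(v)
    @property
    def average(self):
        return sum(self._values) / len(self._values)

st = StatCounter()
with tqdm(total=100) as pbar:
    for _ in range(100):
        st.feed(random.random() < 0.5)  # stand-in for eval_episode(env, agent)
        pbar.update()
print('winning rate: %f' % st.average)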
Example #14
def predict_dataflow(df, model_func, tqdm_bar=None):
    """
    Args:
        df: a DataFlow which produces (image, image_id)
        model_func: a callable from the TF model.
            It takes image and returns (boxes, probs, labels, [masks])
        tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
            will create a new one.

    Returns:
        list of dict, in the format used by
        `DetectionDataset.eval_or_save_inference_results`
    """
    df.reset_state()
    all_results = []
    with ExitStack() as stack:
        # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323
        if tqdm_bar is None:
            tqdm_bar = stack.enter_context(get_tqdm(total=df.size()))
        for img, img_id in df:
            if len(img.shape) == 2:
                # grayscale input: replicate to 3 channels
                img = np.expand_dims(img, axis=2)
                img = np.repeat(img, 3, axis=2)
            results = predict_image(img, model_func)
            for r in results:
                # int()/float() to make it json-serializable
                res = {
                    'image_id': img_id,
                    'category_id': int(r.class_id),
                    'bbox': [round(float(x), 4) for x in r.box],
                    'score': round(float(r.score), 4),
                }

                # also append segmentation to results
                if r.mask is not None:
                    rle = cocomask.encode(
                        np.array(r.mask[:, :, None], order='F'))[0]
                    rle['counts'] = rle['counts'].decode('ascii')
                    res['segmentation'] = rle
                all_results.append(res)
            tqdm_bar.update(1)
    return all_results
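The mask branch above relies on pycocotools: cocomask.encode expects a Fortran-ordered uint8 array of shape (H, W, N) and returns one RLE dict per mask, whose 'counts' bytes must be decoded to str before JSON serialization. The step in isolation, on a toy mask:

import numpy as np
import pycocotools.mask as cocomask

mask = np.zeros((32, 32), dtype=np.uint8)
mask[8:24, 8:24] = 1

rles = cocomask.encode(np.asfortranarray(mask[:, :, None]))
restored = cocomask.decode(rles)[:, :, 0]
assert (restored == mask).all()  # encode/decode round-trips

rle = rles[0]
rle['counts'] = rle['counts'].decode('ascii')  # now JSON-serializable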
Example #15
def pred_cityscapes(df, model_func, tqdm_bar=None):
    """
    Args:
        df: a DataFlow which produces (image, image_id)
        model_func: a callable, takes [image] and returns [prediction]
        tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
            will create a new one.

    Returns:
        dict: image_id -> predicted label map, as an np.uint8 array
    """
    df.reset_state()
    all_results = {}
    with ExitStack() as stack:
        if tqdm_bar is None:
            tqdm_bar = stack.enter_context(get_tqdm(total=df.size()))
        for img_batch, img_id_batch in df:
            preds_batch = pred_batch(img_batch, model_func)
            for preds, img_id in zip(preds_batch, img_id_batch):
                all_results[img_id] = preds.astype(np.uint8)
            tqdm_bar.update(1)
    return all_results
Example #16
def predict_dataflow(df, model_func, tqdm_bar=None):
    """
    Args:
        df: a DataFlow which produces (image, image_id)
        model_func: a callable from the TF model.
            It takes image and returns (boxes, probs, labels, [masks])
        tqdm_bar: a tqdm object to be shared among multiple evaluation instances. If None,
            will create a new one.

    Returns:
        list of dict, in the format used by
        `DatasetSplit.eval_inference_results`
    """
    df.reset_state()
    all_results = []
    with ExitStack() as stack:
        # tqdm is not quite thread-safe: https://github.com/tqdm/tqdm/issues/323
        if tqdm_bar is None:
            tqdm_bar = stack.enter_context(get_tqdm(total=df.size()))
        for img, img_id in df:
            results = predict_image(img, model_func)
            for r in results:
                # int()/float() to make it json-serializable
                res = {
                    "image_id": img_id,
                    "category_id": int(r.class_id),
                    "bbox": [round(float(x), 4) for x in r.box],
                    "score": round(float(r.score), 4),
                }

                # also append segmentation to results
                if r.mask is not None:
                    rle = cocomask.encode(
                        np.array(r.mask[:, :, None], order="F"))[0]
                    rle["counts"] = rle["counts"].decode("ascii")
                    res["segmentation"] = rle
                all_results.append(res)
            tqdm_bar.update(1)
    return all_results