Example 1
def demo_batch(batch_size=1, inp_size=(100, 100), n_classes=20, rng=None,
               n=None):
    import numpy as np
    import torch
    from clab import util  # yolo_utils is assumed to be imported at module level
    rng = util.ensure_rng(rng)
    # number of groundtruth boxes per image in the batch
    if n is None:
        ntrues = [rng.randint(0, 10) for _ in range(batch_size)]
    else:
        ntrues = [n for _ in range(batch_size)]
    scale = min(inp_size) / 2.0
    gt_boxes = [yolo_utils.random_boxes(n, 'tlbr', scale=scale).reshape(-1, 4)
                for n in ntrues]
    gt_classes = [rng.randint(0, n_classes, n) for n in ntrues]
    gt_weights = [np.ones(n) for n in ntrues]
    orig_size = [inp_size for _ in range(batch_size)]
    indices = np.arange(batch_size)

    im_data = torch.randn(batch_size, 3, *inp_size)

    # redundant but harmless: torch.randn already returns a FloatTensor
    im_data = torch.FloatTensor(im_data)

    indices = torch.LongTensor([indices])
    orig_size = torch.LongTensor(orig_size)
    gt_weights = [torch.FloatTensor(item) for item in gt_weights]
    gt_classes = [torch.LongTensor(item) for item in gt_classes]

    inputs = [im_data]
    labels = [gt_boxes, gt_classes, orig_size, indices, gt_weights]
    return inputs, labels
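A quick way to exercise demo_batch, assuming the function above has been pasted into a session where numpy, torch, and the clab yolo_utils module are available; the shape checks follow directly from the function body:

# Sketch only; shapes are implied by the code above.
inputs, labels = demo_batch(batch_size=2, inp_size=(96, 96), n_classes=20)
(im_data,) = inputs
gt_boxes, gt_classes, orig_size, indices, gt_weights = labels
assert im_data.shape == (2, 3, 96, 96)
assert orig_size.shape == (2, 2)
assert len(gt_boxes) == len(gt_classes) == len(gt_weights) == 2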
Example 2
    def __init__(self, rng=None, backend='skimage', **kw):
        self.rng = util.ensure_rng(rng)
        self.augkw = augment_common.PERTERB_AUG_KW.copy()
        self.augkw.update(kw)
        # self.interp = 'nearest'
        # self.border_mode = 'reflect'
        # self.backend = 'skimage'
        # self.backend = 'cv2'
        # self.backend = 'pil'
        self.border_mode = 'constant'
        self.interp = 'nearest'
        self.backend = backend
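For illustration only: if the class that owns this __init__ were called PerterbAugmenter (a made-up name, since the listing omits the class statement), construction would look like this, with keyword arguments layered on top of the copied PERTERB_AUG_KW defaults:

# Hypothetical class name; only the __init__ shown above is real.
aug = PerterbAugmenter(rng=42, backend='cv2')
assert aug.backend == 'cv2'
assert aug.border_mode == 'constant' and aug.interp == 'nearest'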
Example 3
def demo_predictions(B, W, H, A, rng=None):
    """ dummy predictions in the same format as the output layer """
    import torch
    import torch.nn.functional as F
    from clab import util
    rng = util.ensure_rng(rng)
    # Simulate the final layers; use the seeded rng so results are reproducible
    final0123 = torch.FloatTensor(rng.rand(B, H * W, A, 4))
    final4 = torch.FloatTensor(rng.rand(B, H * W, A, 1))
    # raw_aoff_pred_ = final0123
    xy_sig_pred = F.sigmoid(final0123[..., 0:2])
    wh_exp_pred = torch.exp(final0123[..., 2:4])
    aoff_pred = torch.cat([xy_sig_pred, wh_exp_pred], dim=3)
    iou_pred = F.sigmoid(final4)
    return aoff_pred, iou_pred
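With demo_predictions defined as above (and torch plus torch.nn.functional as F in scope), the output shapes and value ranges can be sanity-checked; the bounds follow from the sigmoid/exp decoding:

aoff_pred, iou_pred = demo_predictions(B=2, W=3, H=3, A=5)
assert aoff_pred.shape == (2, 9, 5, 4)   # (B, H*W, A, 4)
assert iou_pred.shape == (2, 9, 5, 1)
# xy offsets pass through a sigmoid, so they lie in (0, 1);
# wh offsets pass through exp, so they are strictly positive
assert float(aoff_pred[..., 0:2].max()) < 1.0
assert float(aoff_pred[..., 2:4].min()) > 0.0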
Example 4
File: lsuv.py, Project: afcarl/clab
    def __init__(self,
                 needed_std=1.0,
                 std_tol=0.1,
                 max_attempts=10,
                 do_orthonorm=True,
                 rng=None):

        self.rng = util.ensure_rng(rng)

        self.do_orthonorm = do_orthonorm
        self.needed_std = needed_std
        self.std_tol = std_tol
        self.max_attempts = max_attempts
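For context, these parameters map onto the standard LSUV (layer-sequential unit-variance) procedure of Mishkin and Matas: optionally orthonormal-initialize each layer, then rescale its weights until the standard deviation of its output is within std_tol of needed_std, giving up after max_attempts. A generic sketch of that inner loop against an arbitrary torch layer, not clab's actual implementation:

import torch

def lsuv_rescale(layer, batch, needed_std=1.0, std_tol=0.1, max_attempts=10):
    # Generic illustration of the LSUV inner loop (not clab's code).
    with torch.no_grad():
        for _ in range(max_attempts):
            std = layer(batch).std().item()
            if abs(std - needed_std) < std_tol:
                break
            # shrink or grow the weights so the next pass is closer to needed_std
            layer.weight.data *= needed_std / (std + 1e-8)
    return layer

lsuv_rescale(torch.nn.Linear(128, 64), torch.randn(32, 128))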
Example 5
File: lsuv.py, Project: afcarl/clab
def svd_orthonormal(shape, rng=None, cache_key=None):
    """
    If cache_key is specified, then the result will be cached, and subsequent
    calls with the same key and shape will return the same result.

    References:
        Orthonorm init code is taken from Lasagne
        https://github.com/Lasagne/Lasagne/blob/master/lasagne/init.py
    """
    rng = util.ensure_rng(rng)

    if len(shape) < 2:
        raise RuntimeError("Only shapes of length 2 or more are supported.")
    flat_shape = (shape[0], np.prod(shape[1:]))

    # NOTE: the leading False hard-disables caching regardless of cache_key
    enabled = False and cache_key is not None
    if enabled:
        rand_sequence = rng.randint(0, 2**16)
        depends = [shape, cache_key, rand_sequence]
        cfgstr = ub.hash_data(depends)
    else:
        cfgstr = ''

    # this process can be expensive, cache it

    # TODO: only cache very large matrices (4096x4096)
    # TODO: only cache very large matrices, not (256,256,3,3)
    cacher = ub.Cacher('svd_orthonormal',
                       appname='clab',
                       enabled=enabled,
                       cfgstr=cfgstr)
    q = cacher.tryload()
    if q is None:
        # print('Compute orthonormal matrix with shape ' + str(shape))
        a = rng.normal(0.0, 1.0, flat_shape)
        u, _, v = np.linalg.svd(a, full_matrices=False)
        q = u if u.shape == flat_shape else v
        # print(shape, flat_shape)
        q = q.reshape(shape)
        q = q.astype(np.float32)
        cacher.save(q)
    return q
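Two properties worth verifying, both implied by the SVD construction above: the result is deterministic for a fixed RandomState (caching is disabled), and for square 2-D shapes the returned matrix is orthonormal. This assumes util.ensure_rng passes an existing RandomState straight through, which is its usual contract:

import numpy as np

q1 = svd_orthonormal((64, 64), rng=np.random.RandomState(0))
q2 = svd_orthonormal((64, 64), rng=np.random.RandomState(0))
assert np.allclose(q1, q2)                             # same rng, same matrix
assert np.allclose(q1 @ q1.T, np.eye(64), atol=1e-4)   # rows are orthonormal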
Example 6
def demo_npdata(A=5,
                H=3,
                W=3,
                inp_size=(96, 96),
                C=20,
                factor=32,
                n=None,
                rng=None):
    import numpy as np
    from clab import util  # the `darknet` module (demo_batch / demo_predictions) is assumed in scope
    rng = util.ensure_rng(rng)
    B = 1
    out_size = np.array([W, H])
    n_classes = C

    inputs, labels = darknet.demo_batch(B,
                                        inp_size,
                                        n_classes=n_classes,
                                        rng=rng,
                                        n=n)
    aoff_pred, iou_pred = darknet.demo_predictions(1, H, W, A, rng=rng)

    gt_boxes, gt_classes, orig_size, indices, gt_weights = labels
    aoff_pred_np = aoff_pred.numpy()[0].astype(float)
    iou_pred_np = iou_pred.numpy()[0].astype(float)
    gt_boxes_np = [item.cpu().numpy().astype(float) for item in gt_boxes][0]
    gt_classes_np = [item.cpu().numpy() for item in gt_classes][0]
    gt_weights_np = [item.cpu().numpy() for item in gt_weights][0]

    gt_boxes_np = gt_boxes_np.reshape(-1, 4)

    # Random anchors specified w.r.t output shape
    anchors = np.abs(rng.randn(A, 2) * out_size).astype(float)

    data = (aoff_pred_np, iou_pred_np, gt_boxes_np, gt_classes_np,
            gt_weights_np)
    return data, anchors
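Assuming the darknet module referenced above is importable (and that util.ensure_rng coerces an integer seed into a RandomState, which rng.randn requires), the returned shapes follow directly from the code:

data, anchors = demo_npdata(A=5, H=3, W=3, inp_size=(96, 96), C=20, rng=0)
aoff_pred_np, iou_pred_np, gt_boxes_np, gt_classes_np, gt_weights_np = data
assert aoff_pred_np.shape == (9, 5, 4)   # (H * W, A, 4)
assert iou_pred_np.shape == (9, 5, 1)
assert anchors.shape == (5, 2)           # one (w, h) pair per anchor
assert gt_boxes_np.shape[1] == 4         # tlbr boxes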
Example 7
File: lsuv.py, Project: afcarl/clab
    def __init__(self, rng=None):
        self.rng = util.ensure_rng(rng)
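Every snippet above funnels its rng argument through util.ensure_rng. The clab implementation is not shown on this page, but helpers with this name conventionally coerce None, an integer seed, or an existing RandomState into a numpy RandomState. A minimal sketch of that convention (an assumption about the behavior, not clab's source):

import numbers
import numpy as np

def ensure_rng(rng=None):
    # Illustrative sketch of the usual ensure_rng contract, not clab's code.
    if rng is None:
        return np.random.RandomState()          # fresh, unseeded state
    if isinstance(rng, numbers.Integral):
        return np.random.RandomState(int(rng))  # treat integers as seeds
    if isinstance(rng, np.random.RandomState):
        return rng                              # pass existing states through
    raise TypeError('cannot coerce {!r} into a RandomState'.format(rng))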