Пример #1
0
def dboxes300():
    """Build the default (prior) boxes for a 300x300 SSD model.

    Six feature maps, VOC-style scales; returns a DefaultBoxes object.
    """
    return DefaultBoxes(
        300,                                     # input image size
        [38, 19, 10, 5, 3, 1],                   # feature-map sizes
        [8, 16, 32, 64, 100, 300],               # stride of each map
        [30, 60, 111, 162, 213, 264, 315],       # box scales (n_maps + 1)
        [[2], [2, 3], [2, 3], [2, 3], [2], [2]], # aspect ratios per map
    )
Пример #2
0
def dboxes512():
    """Build the default (prior) boxes for a 512x512 SSD model.

    Returns:
        DefaultBoxes: priors over 7 feature maps for a 512px input.
    """
    figsize = 512
    feat_size = [64, 32, 16, 8, 4, 2, 1]
    # Each step is the input-pixel stride of its feature map, i.e.
    # figsize / feat_size[i].  512 / 8 = 64, so the original value 84
    # (a widely copied typo) is corrected to 64 here.
    steps = [8, 16, 32, 64, 128, 256, 512]
    # 8 scales for 7 maps: the extra entry is presumably the "next"
    # scale used for the intermediate box — confirm in DefaultBoxes.
    scales = [35.84, 76.8, 153.6, 230.4, 307.2, 384.0, 460.8, 537.6]
    aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]]
    dboxes = DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)
    return dboxes
Пример #3
0
def dboxes300_coco():
    """Build SSD300 default boxes with COCO-style scales."""
    image_size = 300
    fmap_sizes = [38, 19, 10, 5, 3, 1]
    strides = [8, 16, 32, 64, 100, 300]
    # Scales per https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py
    # NOTE(review): 9 entries for 6 feature maps — the trailing 369, 512
    # appear unused; confirm DefaultBoxes ignores them before trimming.
    scales = [21, 45, 99, 153, 207, 261, 315, 369, 512]
    ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
    return DefaultBoxes(image_size, fmap_sizes, strides, scales, ratios)
Пример #4
0
def dboxes512_coco():
    """Build SSD512 default boxes with COCO-style scales.

    Returns:
        DefaultBoxes: priors over 7 feature maps for a 512px input.
    """
    figsize = 512
    feat_size = [64, 32, 16, 8, 4, 2, 1]
    # Each step is figsize / feat_size[i]; 512 / 8 = 64, so the original
    # value 84 (a widely copied typo) is corrected to 64 here.
    steps = [8, 16, 32, 64, 128, 256, 512]
    # According to https://github.com/weiliu89/caffe/blob/ssd/examples/ssd/ssd_coco.py
    scales = [35.84, 76.8, 153.6, 230.4, 307.2, 384.0, 460.8, 537.6]
    aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]]
    dboxes = DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)
    return dboxes
Пример #5
0
def dboxes_coco(figsize, strides):
    """Derive DefaultBoxes for an SSD-R34 backbone at an arbitrary input size.

    Runs one synthetic forward pass on the GPU to discover the feature-map
    shapes, then rescales the canonical 300px scales to ``figsize``.
    """
    backbone = SSD_R34(81, strides=strides).to('cuda')
    probe = torch.rand([1, 3] + figsize).to('cuda')
    _, _, feat_size = backbone(probe, extract_shapes=True)
    # Per-map stride = input extent / feature-map extent, per axis.
    steps = [(int(figsize[0] / fs[0]), int(figsize[1] / fs[1])) for fs in feat_size]
    base_scales = [21, 45, 99, 153, 207, 261, 315]
    scales = [(int(s * figsize[0] / 300), int(s * figsize[1] / 300)) for s in base_scales]
    aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
    dboxes = DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)
    print('Total number of anchors is: ', dboxes.dboxes.shape[0])
    return dboxes
Пример #6
0
def dboxes_R34_coco(figsize, strides):
    """Build DefaultBoxes for an SSD-R34 backbone (CPU probe pass).

    A dummy forward pass with ``extract_shapes=True`` yields the
    feature-map sizes, from which per-axis steps are computed.
    """
    net = SSD_R34(81, strides=strides)
    dummy = torch.rand([1, 3] + figsize)
    _, _, feat_size = net(dummy, extract_shapes=True)
    steps = []
    for fs in feat_size:
        steps.append((int(figsize[0] / fs[0]), int(figsize[1] / fs[1])))
    # Scales from https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py,
    # stretched from the 300px reference to the requested figsize.
    scales = []
    for s in (21, 45, 99, 153, 207, 261, 315):
        scales.append((int(s * figsize[0] / 300), int(s * figsize[1] / 300)))
    aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
    return DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)
Пример #7
0
def test512():
    """Smoke-test SSD512: print the anchor tensor shape and head output shapes."""
    figsize = 512
    feat_size = [64, 32, 16, 8, 4, 2, 1]
    # Fixed: stride of the 8x8 map is 512 / 8 = 64, not 84 (copy-paste typo).
    steps = [8, 16, 32, 64, 128, 256, 512]
    scales = [35.84, 76.8, 153.6, 230.4, 307.2, 384.0, 460.8, 537.6]
    aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]]
    dboxes = DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)
    print(dboxes().shape)

    img = torch.randn(1, 3, 512, 512)
    model = SSD512(21)
    loc, conf = model(img)
    print(loc.shape, conf.shape)
Пример #8
0
def test300():
    """Smoke-test SSD300: print the anchor tensor shape and head output shapes."""
    boxes = DefaultBoxes(
        300,
        [38, 19, 10, 5, 3, 1],
        [8, 16, 32, 64, 100, 300],
        [30, 60, 111, 162, 213, 264, 315],
        [[2], [2, 3], [2, 3], [2, 3], [2], [2]],
    )
    print(boxes().shape)

    sample = torch.randn(1, 3, 300, 300)
    net = SSD300(21)
    loc, conf = net(sample)
    print(loc.shape, conf.shape)
Пример #9
0
def dboxes300_coco():
    """Build SSD300 COCO default boxes, logging each hyper-parameter to MLPerf."""
    image_size = 300
    fmap_sizes = [38, 19, 10, 5, 3, 1]
    ssd_print(key=mlperf_log.FEATURE_SIZES, value=fmap_sizes)
    map_strides = [8, 16, 32, 64, 100, 300]
    ssd_print(key=mlperf_log.STEPS, value=map_strides)
    # Scales per https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py
    box_scales = [21, 45, 99, 153, 207, 261, 315]
    ssd_print(key=mlperf_log.SCALES, value=box_scales)
    ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
    ssd_print(key=mlperf_log.ASPECT_RATIOS, value=ratios)
    boxes = DefaultBoxes(image_size, fmap_sizes, map_strides, box_scales, ratios)
    ssd_print(key=mlperf_log.NUM_DEFAULTS, value=len(boxes.default_boxes))
    return boxes
Пример #10
0
def dboxes_R34_coco(figsize, strides):
    """Build DefaultBoxes for an SSD-R34 backbone, printing diagnostics.

    A synthetic CPU forward pass with ``extract_shapes=True`` discovers
    the feature-map sizes; steps and scales are then derived per axis.
    """
    net = SSD_R34(81, strides=strides)
    synthetic = torch.rand([1, 3] + figsize)
    _, _, feat_size = net(synthetic, extract_shapes=True)
    print('Features size: ', feat_size)
    # Per-map stride = input extent / feature-map extent, per axis.
    steps = [(int(figsize[0] / fs[0]), int(figsize[1] / fs[1]))
             for fs in feat_size]
    # Scales from https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py,
    # stretched from the 300px reference to the requested figsize.
    reference = (21, 45, 99, 153, 207, 261, 315)
    scales = [(int(s * figsize[0] / 300), int(s * figsize[1] / 300))
              for s in reference]
    aspect_ratios = [[2], [2, 3], [2, 3], [2, 3], [2], [2]]
    dboxes = DefaultBoxes(figsize, feat_size, steps, scales, aspect_ratios)
    print('dboxes from dboxes_R34_coco', dboxes)
    return dboxes