Example #1
import numpy as np
import torch
import torch.nn.functional as F
# SpecialCrop and Pad are external tensor transforms (e.g. from
# torchsample.transforms or a local copy bundled with the project).

def cropBox(img, ul, br, resH, resW):
    ul = ul.int()  # upper-left corner (xmin, ymin)
    br = br.int()  # bottom-right corner (xmax, ymax)
    # Grow the box so its aspect ratio matches the output size resH x resW.
    lenH = max(br[1] - ul[1], (br[0] - ul[0]) * resH / resW)
    lenW = lenH * resW / resH
    if img.dim() == 2:
        img = img[np.newaxis, :]

    newDim = torch.IntTensor((img.size(0), int(lenH), int(lenW)))
    newImg = img[:, ul[1]:, ul[0]:].clone()
    # Crop and Padding
    size = torch.IntTensor((int(br[1] - ul[1]), int(br[0] - ul[0])))
    newImg = SpecialCrop(size, 1)(newImg)
    newImg = Pad(newDim)(newImg)
    # Resize to output
    v_Img = torch.autograd.Variable(newImg)
    v_Img = torch.unsqueeze(v_Img, 0)
    # newImg = F.upsample_bilinear(v_Img, size=(int(resH), int(resW))).data[0]
    # F.upsample was renamed to F.interpolate after PyTorch 0.4.0, so use
    # whichever this PyTorch build provides.
    if torch.__version__ == '0.4.0a0+32f3bf7' or torch.__version__ == '0.4.0':
        newImg = F.upsample(v_Img,
                            size=(int(resH), int(resW)),
                            mode='bilinear',
                            align_corners=True).data[0]
    else:
        newImg = F.interpolate(v_Img,
                               size=(int(resH), int(resW)),
                               mode='bilinear',
                               align_corners=True).data[0]
    return newImg
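For reference, here is a minimal, self-contained sketch of the same crop-then-pad-then-resize idea using only torch.nn.functional, so it runs without the SpecialCrop/Pad helpers; the function name crop_box_plain, the math.ceil rounding, and the example sizes are illustrative assumptions rather than part of the snippet above.

import math

import torch
import torch.nn.functional as F


def crop_box_plain(img, ul, br, resH, resW):
    # img: C x H x W float tensor; ul/br: (x, y) corners of the box.
    x0, y0 = int(ul[0]), int(ul[1])
    x1, y1 = int(br[0]), int(br[1])
    box = img[:, y0:y1, x0:x1]
    boxH, boxW = box.shape[1], box.shape[2]
    # Canvas whose aspect ratio matches the requested output resH : resW.
    lenH = max(boxH, math.ceil(boxW * resH / resW))
    lenW = max(boxW, math.ceil(lenH * resW / resH))
    # Centre the crop on the canvas with zero padding (left, right, top, bottom).
    padW, padH = lenW - boxW, lenH - boxH
    box = F.pad(box, (padW // 2, padW - padW // 2, padH // 2, padH - padH // 2))
    # Bilinear resize expects an N x C x H x W batch.
    out = F.interpolate(box.unsqueeze(0), size=(resH, resW),
                        mode='bilinear', align_corners=True)
    return out.squeeze(0)


# Crop a 100 x 80 box out of a 3 x 480 x 640 image and resize it to 256 x 192.
img = torch.rand(3, 480, 640)
patch = crop_box_plain(img, (50, 40), (130, 140), 256, 192)
print(patch.shape)  # torch.Size([3, 256, 192])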
Example #2
def cropBox(img, up_left, bottom_right, target_height, target_width):
    up_left = up_left.int()
    bottom_right = bottom_right.int()

    # Grow the crop so its aspect ratio matches target_height : target_width.
    curr_height = max(bottom_right[1] - up_left[1],
                      (bottom_right[0] - up_left[0]) * target_height /
                      target_width)
    curr_width = curr_height * target_width / target_height
    if img.dim() == 2:
        img = img[np.newaxis, :]

    new_dim = torch.IntTensor((img.size(0), int(curr_height), int(curr_width)))
    new_img = img[:, up_left[1]:, up_left[0]:]

    # crop and padding
    size = torch.IntTensor(
        (bottom_right[1] - up_left[1], bottom_right[0] - up_left[0]))
    new_img = SpecialCrop(size, 1)(new_img)
    new_img = Pad(new_dim)(new_img)

    # resize to output
    v_img = torch.unsqueeze(new_img, 0)
    # newImg = F.upsample_bilinear(v_Img, size=(int(resH), int(resW))).data[0]
    new_img = F.upsample(v_img,
                         size=(int(target_height), int(target_width)),
                         mode='bilinear',
                         align_corners=True).data[0]

    return new_img
Example #3
def data_resize(img, cfg, points=None):
    # img is an H x W x C array; convert it to C x H x W for PyTorch.
    img = np.transpose(img, (2, 0, 1))
    img_H = img.shape[1]
    img_W = img.shape[2]
    # Padded canvas whose aspect ratio matches cfg.input_H : cfg.input_W.
    input_H = int(max(img_H, img_H * cfg.input_H / cfg.input_W))
    input_W = int(input_H * cfg.input_W / cfg.input_H)
    newDim = torch.IntTensor((img.shape[0], input_H, input_W))

    newimg = torch.FloatTensor(img)
    newimg = Pad(newDim)(newimg)

    v_img = torch.unsqueeze(newimg, 0)
    newimg = F.upsample(v_img,
                        size=(cfg.input_H, cfg.input_W),
                        mode='bilinear',
                        align_corners=True).data[0]

    # Pad() is expected to centre the image on the new canvas, so shift the
    # keypoints by half of the added margin, then rescale them to the
    # network input resolution.
    if input_H > img_H:
        top_pad_len = (input_H - img_H) / 2
        if points is not None:
            points[:, 1] += top_pad_len
    else:
        left_pad_len = (input_W - img_W) / 2
        if points is not None:
            points[:, 0] += left_pad_len
    h_scale = cfg.input_H / input_H
    w_scale = cfg.input_W / input_W
    if points is not None:
        points[:, 0] *= w_scale
        points[:, 1] *= h_scale
        return newimg, points
    return newimg
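The keypoint bookkeeping in data_resize (shift for the centred padding, then scale to the network input) can be checked in isolation. The sketch below is a hedged re-statement of that transform: unlike data_resize it shifts both axes, which reduces to the same thing because the offset is zero on the axis that was not padded; the helper name and the numbers in the example are made up for illustration.

import numpy as np


def shift_and_scale_points(points, img_H, img_W, input_H, input_W, out_H, out_W):
    # Centred padding from (img_H, img_W) up to (input_H, input_W),
    # followed by a resize from (input_H, input_W) to (out_H, out_W).
    points = points.astype(np.float64)
    points[:, 0] += (input_W - img_W) / 2   # x shift from the left padding
    points[:, 1] += (input_H - img_H) / 2   # y shift from the top padding
    points[:, 0] *= out_W / input_W         # rescale x to the resized width
    points[:, 1] *= out_H / input_H         # rescale y to the resized height
    return points


# A 480 x 640 image padded to 512 x 640 (centred), then resized to 256 x 320.
pts = np.array([[0.0, 0.0], [640.0, 480.0]])
print(shift_and_scale_points(pts, 480, 640, 512, 640, 256, 320))
# [[  0.   8.]
#  [320. 248.]]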
Example #4
def Pad_setup():
    # Collect Pad transforms for a range of target sizes, keyed by name.
    tforms = {}

    tforms['pad_4040'] = Pad((40, 40))
    tforms['pad_3040'] = Pad((30, 40))
    tforms['pad_4030'] = Pad((40, 30))
    tforms['pad_3939'] = Pad((39, 39))
    tforms['pad_3941'] = Pad((39, 41))
    tforms['pad_4139'] = Pad((41, 39))
    tforms['pad_4138'] = Pad((41, 38))
    tforms['pad_3841'] = Pad((38, 41))

    return tforms


def cropBox(img, ul, br, resH, resW):
    ul = ul.int() # xmin, ymin
    br = br.int() # xmax, ymax
    lenH = max(br[1] - ul[1], (br[0] - ul[0]) * resH / resW)
    lenW = lenH * resW / resH
    if img.dim() == 2:
        img = img[np.newaxis, :]

    newDim = torch.IntTensor((img.size(0), int(lenH), int(lenW)))
    newImg = img[:, ul[1]:, ul[0]:]
    # Crop and Padding
    size = torch.IntTensor((br[1] - ul[1], br[0] - ul[0]))
    newImg = SpecialCrop(size, 1)(newImg)
    newImg = Pad(newDim)(newImg)
    # Resize to output
    # batch = 0, channel = img.size(0), h = int(lenH), w = int(lenW)
    v_Img = torch.unsqueeze(newImg, 0)
    # newImg = F.upsample_bilinear(v_Img, size=(int(resH), int(resW))).data[0]
    newImg = F.upsample(v_Img, size=(int(resH), int(resW)),
                        mode='bilinear', align_corners=True).data[0]
    return newImg
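Pad_setup above only builds a dictionary of Pad transforms keyed by their target (H, W); a typical consumer loops over that dictionary and applies each transform to the same tensor. To keep the loop below runnable without the real Pad dependency, it uses ZeroPadTo, a hypothetical stand-in that centre-pads a 2-D tensor with zeros up to the requested size; substitute the actual Pad transform in real use.

import torch
import torch.nn.functional as F


class ZeroPadTo(object):
    # Hypothetical stand-in for Pad: centre-pads a 2-D tensor with zeros
    # up to (target_H, target_W); it never crops.
    def __init__(self, size):
        self.size = size

    def __call__(self, x):
        padH = max(self.size[0] - x.shape[0], 0)
        padW = max(self.size[1] - x.shape[1], 0)
        return F.pad(x, (padW // 2, padW - padW // 2,
                         padH // 2, padH - padH // 2))


tforms = {name: ZeroPadTo(size) for name, size in
          [('pad_4040', (40, 40)), ('pad_3040', (30, 40)), ('pad_4030', (40, 30))]}
x = torch.rand(32, 32)
for name, tform in tforms.items():
    print(name, tuple(tform(x).shape))
# pad_4040 (40, 40)
# pad_3040 (32, 40)
# pad_4030 (40, 32)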