import PIL.Image
import torch
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from dall_e import map_pixels  # map_pixels is assumed to come from OpenAI's dall_e package


def preprocess_img(img, target_image_size):
    # Scale the shorter side to target_image_size, preserving the aspect ratio.
    s = min(img.size)
    r = target_image_size / s
    s = (round(r * img.size[1]), round(r * img.size[0]))
    img = TF.resize(img, s, interpolation=PIL.Image.LANCZOS)
    # img = TF.center_crop(img, output_size=2 * [target_image_size])
    img = torch.unsqueeze(T.ToTensor()(img), 0)  # add a batch dimension
    return map_pixels(img)
def preprocessing(img):
    # Reject images whose shorter side is smaller than the global target_size.
    s = min(img.size)
    if s < target_size:
        raise ValueError(f'min dim for image {s} < {target_size}')
    r = target_size / s
    s = (round(r * img.size[1]), round(r * img.size[0]))
    img = TF.resize(img, s, interpolation=PIL.Image.LANCZOS)
    img = TF.center_crop(img, output_size=2 * [target_size])
    img = torch.unsqueeze(T.ToTensor()(img), 0)
    return map_pixels(img)
def preprocess(img):
    min_img_dim = min(img.size)
    if min_img_dim < target_img_size:
        raise ValueError(f'min dim for img {min_img_dim} < {target_img_size}')
    img_ratio = target_img_size / min_img_dim
    min_img_dim = (round(img_ratio * img.size[1]), round(img_ratio * img.size[0]))
    img = TF.resize(img, min_img_dim, interpolation=PIL.Image.LANCZOS)
    img = TF.center_crop(img, output_size=2 * [target_img_size])
    img = torch.unsqueeze(T.ToTensor()(img), 0)
    return map_pixels(img)
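A minimal usage sketch for the last variant, assuming a module-level target_img_size, a local image file (the path and size value below are placeholders), and that map_pixels comes from OpenAI's dall_e package:

# Usage sketch (assumptions: Pillow installed, dall_e installed, global target_img_size).
target_img_size = 256                  # assumed value; should match the dVAE's expected input size
img = PIL.Image.open('example.jpg')    # hypothetical path to an RGB image
x = preprocess(img)                    # tensor of shape (1, 3, target_img_size, target_img_size)
print(x.shape)                         # pixel values are remapped by map_pixels for the dVAE encoder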