def load_image(image_path, _device, include_tensor=False):
    """Load an image from disk, normalize it, and optionally build a torch tensor.

    The file on disk may be rewritten in place: oversized images are
    downscaled (BICUBIC) and dimensions are cropped down to multiples of 16,
    after which the adjusted image is saved back to ``image_path``.

    Args:
        image_path: Path to the image file. If it does not exist,
            ``(None, None)`` is returned.
        _device: Device the tensor is moved to (only used when
            ``include_tensor`` is True).
        include_tensor: When True, additionally return a batched torch tensor
            produced via ``MotifDS.flip`` / ``MotifDS.trans``.

    Returns:
        Tuple ``(numpy_image, tensor_image)``: ``numpy_image`` is a
        ``1 x H x W x 3`` float array scaled to [0, 1] (or ``None`` when the
        file is missing); ``tensor_image`` is the optional torch tensor
        (or ``None``).
    """
    numpy_image = None
    tensor_image = None
    if not os.path.isfile(image_path):
        return numpy_image, tensor_image

    raw = Image.open(image_path)
    dirty = False
    width, height = raw.size
    # NOTE(review): the trigger tests *height* > 512 but the resize pins the
    # *width* to 512 and scales height proportionally — this looks like a
    # w/h mix-up; confirm intended behavior before changing.
    if height > 512:
        dirty = True
        raw = raw.resize((512, int((512. * height) / width)), Image.BICUBIC)

    width, height = raw.size
    # Downstream processing appears to require dimensions divisible by 16;
    # crop from the top-left corner down to the nearest multiples.
    if width % 16 != 0 or height % 16 != 0:
        dirty = True
        raw = raw.crop((0, 0, (width // 16) * 16, (height // 16) * 16))

    if dirty:
        # Persist the adjusted image so subsequent loads skip this work.
        raw.save(image_path)

    numpy_image = np.array(raw)
    if numpy_image.ndim != 3:
        # Grayscale (2-D) image: replicate the single channel into RGB.
        numpy_image = np.repeat(np.expand_dims(numpy_image, 2), 3, axis=2)
    if numpy_image.shape[2] != 3:
        # e.g. RGBA: keep only the first three (RGB) channels.
        numpy_image = numpy_image[:, :, :3]

    if include_tensor:
        tensor_image = MotifDS.trans(MotifDS.flip(numpy_image)[0])[0]
        tensor_image = torch.unsqueeze(torch.from_numpy(tensor_image), 0).to(_device)

    # Scale to [0, 1] and add a leading batch dimension.
    numpy_image = np.expand_dims(numpy_image / 255, 0)
    return numpy_image, tensor_image
def init_loaders(opt):
    """Build the train/test ``DataLoader`` pair over the MotifDS dataset.

    Args:
        opt: Options namespace carrying dataset roots, augmentation flags and
            sizing parameters (``images_root``, ``vm_root``, ``image_size``,
            ``vm_size``, etc.).

    Returns:
        Tuple ``(train_data_loader, test_data_loader)``.
    """
    # Keyword arguments shared verbatim by both splits.
    common = dict(
        image_size=opt.image_size,
        motif_size=opt.vm_size,
        weight=opt.weight,
        perturbate=opt.perturbate,
        opacity_var=opt.opacity_var,
        rgb=opt.use_rgb,
        rotate_vm=opt.rotate_vm,
        batch_vm=opt.batch_vm,
        font=opt.font,
        border=opt.text_border,
        # NOTE(review): ``dataset_tag`` is referenced bare in both calls in the
        # original — presumably a module-level global; confirm it is defined.
        split_tag=dataset_tag,
        blur=opt.blur,
        fontsize=opt.fontsize,
        bound_offset=opt.bound_offset,
        bounding_style=opt.bounding_style,
        bound_weight=opt.bound_weight,
        broken=opt.broken,
    )
    train_dataset = MotifDS(opt.images_root, opt.vm_root, train=True,
                            scale_vm=opt.scale_vm, crop_vm=opt.crop_vm, **common)
    # Fix: the original referenced bare ``images_root``/``vm_root`` here
    # (NameError) — use the values from ``opt`` as the train split does.
    # The test split disables scale/crop augmentation.
    test_dataset = MotifDS(opt.images_root, opt.vm_root, train=False,
                           scale_vm=False, crop_vm=False, **common)
    # Keep the total pixel count per batch roughly constant (~512K pixels)
    # regardless of image size.
    # Fix: the original used bare ``image_size`` (NameError) instead of
    # ``opt.image_size``.
    batch_size = 524288 // (opt.image_size ** 2)
    _train_data_loader = DataLoader(train_dataset, batch_size=batch_size,
                                    shuffle=True, num_workers=2)
    _test_data_loader = DataLoader(test_dataset, batch_size=batch_size,
                                   shuffle=True, num_workers=2)
    return _train_data_loader, _test_data_loader