Code example #1 (score: 0)
File: config.py — Project: zjj-2015/DCL
def load_data_transformers(resize_reso=512, crop_reso=448, swap_num=(7, 7)):
    """Build the dict of torchvision transform pipelines used by DCL.

    Args:
        resize_reso: Side length images are resized to before cropping.
        crop_reso: Side length of the crop fed to the network.
        swap_num: (rows, cols) grid used by the patch-shuffling
            ``Randomswap`` augmentation. A tuple default avoids the
            shared-mutable-default pitfall of the original ``[7, 7]``.

    Returns:
        Dict mapping stage name ('swap', 'common_aug', 'train_totensor',
        'val_totensor', 'test_totensor', 'None') to a
        ``transforms.Compose`` pipeline, or ``None`` for the 'None' key.
    """
    # ImageNet channel statistics, shared by every *_totensor pipeline
    # (Normalize is stateless, so one instance can be reused safely).
    imagenet_normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                              [0.229, 0.224, 0.225])
    data_transforms = {
        # Patch-shuffling only (the DCL "destruction" step).
        'swap': transforms.Compose([
            transforms.Randomswap((swap_num[0], swap_num[1])),
        ]),
        # Geometric augmentations applied before swap/unswap branches.
        'common_aug': transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.RandomRotation(degrees=15),
            transforms.RandomCrop((crop_reso, crop_reso)),
            transforms.RandomHorizontalFlip(),
        ]),
        'train_totensor': transforms.Compose([
            transforms.Resize((crop_reso, crop_reso)),
            transforms.ToTensor(),
            imagenet_normalize,
        ]),
        'val_totensor': transforms.Compose([
            transforms.Resize((crop_reso, crop_reso)),
            transforms.ToTensor(),
            imagenet_normalize,
        ]),
        # Test path: resize then a deterministic center crop.
        'test_totensor': transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.CenterCrop((crop_reso, crop_reso)),
            transforms.ToTensor(),
            imagenet_normalize,
        ]),
        'None': None,
    }
    return data_transforms
Code example #2 (score: 0)
File: dataset.py — Project: yuryfomichov/kgl-sttl
 def _train_image_transform(self):
     """Return the augmentation + tensor-conversion pipeline for training images."""
     # ImageNet channel statistics used for normalization.
     mean = [0.485, 0.456, 0.406]
     std = [0.229, 0.224, 0.225]
     steps = [
         transforms.Resize(256),
         transforms.RandomCrop(224),
         transforms.RandomHorizontalFlip(),
         transforms.RandomVerticalFlip(),
         transforms.RandomRotation(0.2),
         transforms.ColorJitter(0.1, 0, 0, 0),
         transforms.ToTensor(),
         transforms.Normalize(mean=mean, std=std),
     ]
     return transforms.Compose(steps)
Code example #3 (score: 0)
# Unpack parsed command-line arguments into module-level settings.
data_dir = args.data_dir
save_dir = args.save_dir
num_classes = args.num_classes
channel = args.channel
clip_min_value = args.clip_min_value  # lower bound used by T.Clip / T.Normalize
clip_max_value = args.clip_max_value  # upper bound used by T.Clip / T.Normalize
mean = args.mean
std = args.std
num_epochs = args.num_epochs
train_batch_size = args.train_batch_size
lr = args.lr

# Define the transforms used during training and validation.
# Training adds random flips, multi-scale resizing, and padded random crops;
# both pipelines clip raw values into [clip_min_value, clip_max_value]
# before normalizing with the supplied mean/std.
train_transforms = T.Compose([
    T.RandomVerticalFlip(0.5),
    T.RandomHorizontalFlip(0.5),
    T.ResizeStepScaling(0.5, 2.0, 0.25),
    T.RandomPaddingCrop(1000),
    T.Clip(min_val=clip_min_value, max_val=clip_max_value),
    T.Normalize(
        min_val=clip_min_value, max_val=clip_max_value, mean=mean, std=std),
])

# Validation: clipping + normalization only (no random augmentation).
eval_transforms = T.Compose([
    T.Clip(min_val=clip_min_value, max_val=clip_max_value),
    T.Normalize(
        min_val=clip_min_value, max_val=clip_max_value, mean=mean, std=std),
])

# File lists are expected at <data_dir>/train.txt and <data_dir>/val.txt.
train_list = osp.join(data_dir, 'train.txt')
val_list = osp.join(data_dir, 'val.txt')
Code example #4 (score: 0)
def load_data_transformers(resize_reso=512, crop_reso=448, swap_num=(7, 7)):
    """Build the dict of torchvision transform pipelines used by DCL.

    Args:
        resize_reso: Side length images are resized to before cropping.
        crop_reso: Side length of the crop fed to the network.
        swap_num: (rows, cols) grid used by the patch-shuffling
            ``Randomswap`` augmentation. A tuple default avoids the
            shared-mutable-default pitfall of the original ``[7, 7]``.

    Returns:
        Dict mapping stage name to a ``transforms.Compose`` pipeline
        (or ``None`` for the 'None' key). '*_swap' pipelines end with the
        DCL patch shuffle; '*_unswap' are the same augmentations without
        it; 'Centered_*' first take a fixed 600x600 center crop; 'food_*'
        use stronger rotation/flip/resized-crop augmentation.
    """
    center_resize = 600  # fixed pre-crop side for the 'Centered_*' pipelines
    # ImageNet channel statistics, shared by every *_totensor pipeline
    # (Normalize is stateless, so one instance can be reused safely).
    imagenet_normalize = transforms.Normalize([0.485, 0.456, 0.406],
                                              [0.229, 0.224, 0.225])
    data_transforms = {
        # Standard augmentation + patch shuffle (DCL "destruction").
        'swap':
        transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.RandomRotation(degrees=15),
            transforms.RandomCrop((crop_reso, crop_reso)),
            transforms.RandomHorizontalFlip(),
            transforms.Randomswap((swap_num[0], swap_num[1])),
        ]),
        # Stronger augmentation variant (food imagery) + patch shuffle.
        'food_swap':
        transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.RandomRotation(degrees=90),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.RandomResizedCrop(size=crop_reso, scale=(0.75, 1)),
            transforms.Randomswap((swap_num[0], swap_num[1])),
        ]),
        # Same as 'food_swap' without the final patch shuffle.
        'food_unswap':
        transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.RandomRotation(degrees=90),
            transforms.RandomHorizontalFlip(),
            transforms.RandomVerticalFlip(),
            transforms.RandomResizedCrop(size=crop_reso, scale=(0.75, 1)),
        ]),
        # Same as 'swap' without the final patch shuffle.
        'unswap':
        transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.RandomRotation(degrees=15),
            transforms.RandomCrop((crop_reso, crop_reso)),
            transforms.RandomHorizontalFlip(),
        ]),
        'train_totensor':
        transforms.Compose([
            transforms.Resize((crop_reso, crop_reso)),
            transforms.ToTensor(),
            imagenet_normalize,
        ]),
        'val_totensor':
        transforms.Compose([
            transforms.Resize((crop_reso, crop_reso)),
            transforms.ToTensor(),
            imagenet_normalize,
        ]),
        # Test path: resize then a deterministic center crop.
        'test_totensor':
        transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.CenterCrop((crop_reso, crop_reso)),
            transforms.ToTensor(),
            imagenet_normalize,
        ]),
        'None':
        None,
        # 'swap' preceded by a fixed 600x600 center crop.
        'Centered_swap':
        transforms.Compose([
            transforms.CenterCrop((center_resize, center_resize)),
            transforms.Resize((resize_reso, resize_reso)),
            transforms.RandomRotation(degrees=15),
            transforms.RandomCrop((crop_reso, crop_reso)),
            transforms.RandomHorizontalFlip(),
            transforms.Randomswap((swap_num[0], swap_num[1])),
        ]),
        # 'unswap' preceded by a fixed 600x600 center crop.
        'Centered_unswap':
        transforms.Compose([
            transforms.CenterCrop((center_resize, center_resize)),
            transforms.Resize((resize_reso, resize_reso)),
            transforms.RandomRotation(degrees=15),
            transforms.RandomCrop((crop_reso, crop_reso)),
            transforms.RandomHorizontalFlip(),
        ]),
        # Ten-crop evaluation: stack the 10 crops into one tensor batch.
        'Tencrop':
        transforms.Compose([
            transforms.Resize((resize_reso, resize_reso)),
            transforms.TenCrop((crop_reso, crop_reso)),
            transforms.Lambda(lambda crops: torch.stack(
                [transforms.ToTensor()(crop) for crop in crops])),
        ])
    }

    return data_transforms
Code example #5 (score: 0)
 print("------------Preparing Model...----------------")
 # Build the ACCNN classifier and move it to the target device.
 net = ACCNN(n_classes=n_classes,
             pre_trained=True,
             dataset=dataset,
             fold=fold,
             virtualize=True,
             using_fl=fl).to(DEVICE)
 print("------------%s Model Already be Prepared------------" % net_name)
 input_img_size = net.input_size
 # Single-channel (grayscale) normalization statistics.
 IMG_MEAN = [0.449]
 IMG_STD = [0.226]
 # Training pipeline: resize, random flip/rotation, then normalize.
 transform_train = transforms.Compose([
     transforms.Resize(
         input_img_size
     ),  # Resize scales the shortest side to input_img_size, so a non-square input stays non-square
     transforms.RandomHorizontalFlip(),
     transforms.RandomRotation(30),
     transforms.ToTensor(),
     transforms.Normalize(IMG_MEAN, IMG_STD),
 ])
 # Test pipeline: deterministic — resize and normalize only.
 transform_test = transforms.Compose([
     transforms.Resize(
         input_img_size
     ),  # Resize scales the shortest side to input_img_size, so a non-square input stays non-square
     transforms.ToTensor(),
     transforms.Normalize(IMG_MEAN, IMG_STD),
 ])
 print("------------Preparing Data...----------------")
 if dataset == "JAFFE":
     test_data = JAFFE(is_train=False,
                       transform=transform_test,
Code example #6 (score: 0)
                        help='learning rate',
                        default=0.01,
                        type=float)
    return parser.parse_args()


args = parse_args()
# Unpack parsed command-line arguments into module-level settings.
data_dir = args.data_dir
save_dir = args.save_dir
channel = args.channel
num_epochs = args.num_epochs
train_batch_size = args.train_batch_size
lr = args.lr

# Define the transforms used during training and validation.
# Training adds a random horizontal flip; both pipelines normalize.
train_transforms = T.Compose([T.RandomHorizontalFlip(0.5), T.Normalize()])

eval_transforms = T.Compose([T.Normalize()])

# File lists are expected under data_dir.
train_list = osp.join(data_dir, 'train.txt')
val_list = osp.join(data_dir, 'val.txt')
label_list = osp.join(data_dir, 'labels.txt')

# Define the data readers (training reader shuffles each epoch).
train_reader = Reader(data_dir=data_dir,
                      file_list=train_list,
                      label_list=label_list,
                      transforms=train_transforms,
                      shuffle=True)

eval_reader = Reader(data_dir=data_dir,
Code example #7 (score: 0)
    # Single-channel (grayscale) normalization statistics.
    IMG_MEAN = [0.449]
    IMG_STD = [0.226]
    # for RGB images
    # IMG_MEAN = [0.485, 0.456, 0.406]
    # IMG_STD = [0.229, 0.224, 0.225]

    # Resize target is 20% larger than the crop, leaving margin for TenCrop.
    crop_img_size = int(net.input_size * 1.2)
    input_img_size = net.input_size
    transform_using_crop = opt.tr_using_crop
    if transform_using_crop:
        # Ten-crop training: each of the 10 crops is independently rotated,
        # flipped, converted to a tensor, and normalized, then all 10 are
        # stacked into one tensor.
        transform_train = transforms.Compose([
            transforms.Resize(crop_img_size),
            transforms.TenCrop(input_img_size),
            transforms.Lambda(lambda crops: torch.stack([
                transforms.Normalize(IMG_MEAN, IMG_STD)
                (transforms.ToTensor()(transforms.RandomHorizontalFlip()(
                    transforms.RandomRotation(30)(crop)))) for crop in crops
            ])),
        ])
        # Ten-crop test: deterministic — tensor conversion + normalization
        # only, stacked the same way.
        transform_test = transforms.Compose([
            transforms.Resize(crop_img_size),
            transforms.TenCrop(input_img_size),
            transforms.Lambda(lambda crops: torch.stack([
                transforms.Normalize(IMG_MEAN, IMG_STD)
                (transforms.ToTensor()(crop)) for crop in crops
            ])),
        ])
    else:
        transform_train = transforms.Compose([
            transforms.Resize(
                input_img_size
            ),  # 缩放将图片的最小边缩放为 input_img_size,因此如果输入是非正方形的,那么输出也不是正方形的