Exemplo n.º 1
0
def get_transform(train):
    """Build the segmentation transform pipeline.

    Args:
        train: truthy selects the training preset (random-crop augmented),
            falsy selects the evaluation preset.

    Returns:
        A ``presets.SegmentationPresetTrain`` or ``presets.SegmentationPresetEval``
        instance configured with the fixed base/crop sizes used by this script.
    """
    base_size = 520
    crop_size = 480

    if train:
        return presets.SegmentationPresetTrain(base_size, crop_size)
    return presets.SegmentationPresetEval(base_size)
Exemplo n.º 2
0
def get_transform(train, args):
    """Select the segmentation transform for this run.

    Args:
        train: truthy selects the training preset.
        args: parsed CLI namespace; only ``args.weights`` is read here.

    Returns:
        The training preset when training; otherwise the transforms bundled
        with the named pretrained weights if ``args.weights`` is set, else
        the default evaluation preset.
    """
    if train:
        return presets.SegmentationPresetTrain(base_size=520, crop_size=480)

    # Evaluation: prefer the transforms shipped with the requested weights.
    if args.weights:
        weights = PM.get_weight(args.weights)
        return weights.transforms()
    return presets.SegmentationPresetEval(base_size=520)
Exemplo n.º 3
0
def get_transform(train, args=None):
    """Build the segmentation transform pipeline.

    Bug fix: the original body read ``args.contrast`` / ``args.brightness`` /
    ``args.sigma`` from a free variable ``args`` that was never a parameter,
    so the evaluation path raised ``NameError`` unless a module-level ``args``
    happened to exist. ``args`` is now an explicit, optional parameter
    (default ``None`` keeps existing ``get_transform(train)`` calls working
    for the training path).

    Args:
        train: truthy selects the training preset.
        args: namespace providing ``contrast``, ``brightness`` and ``sigma``
            for the evaluation preset; required when ``train`` is falsy.

    Returns:
        A ``presets.SegmentationPresetTrain`` or ``presets.SegmentationPresetEval``.

    Raises:
        ValueError: if the evaluation preset is requested without ``args``.
    """
    base_size = 520
    crop_size = 480

    if train:
        return presets.SegmentationPresetTrain(base_size, crop_size)

    if args is None:
        # Fail loudly with a clear message instead of the original NameError.
        raise ValueError(
            "args (with contrast, brightness and sigma) is required "
            "to build the evaluation transform"
        )
    return presets.SegmentationPresetEval(
        base_size,
        contrast=args.contrast,
        brightness=args.brightness,
        sigma=args.sigma)
Exemplo n.º 4
0
def get_transform(train, args):
    """Select the segmentation transform for this run.

    Args:
        train: truthy selects the training preset.
        args: parsed CLI namespace; reads ``args.prototype`` and ``args.weights``.

    Returns:
        The training preset when training. Otherwise the classic evaluation
        preset unless the prototype API is enabled, in which case either the
        transforms bundled with the named weights or the prototype
        semantic-segmentation evaluation transform.
    """
    if train:
        return presets.SegmentationPresetTrain(base_size=520, crop_size=480)

    if not args.prototype:
        return presets.SegmentationPresetEval(base_size=520)

    # Prototype API path.
    if args.weights:
        return prototype.models.get_weight(args.weights).transforms()
    return prototype.transforms.SemanticSegmentationEval(resize_size=520)
Exemplo n.º 5
0
def get_transform(train, args):
    """Select the segmentation transform for this run.

    Args:
        train: truthy selects the training preset.
        args: parsed CLI namespace; reads ``args.weights`` and ``args.test_only``.

    Returns:
        The training preset when training. For a test-only run with named
        pretrained weights, a ``preprocessing(img, target)`` callable that
        applies the weights' bundled transforms to the image and resizes the
        target mask to match. Otherwise the default evaluation preset.
    """
    if train:
        return presets.SegmentationPresetTrain(base_size=520, crop_size=480)

    if args.weights and args.test_only:
        weights_enum = torchvision.models.get_weight(args.weights)
        weight_transforms = weights_enum.transforms()

        def preprocessing(img, target):
            # Transform the image with the weights' own pipeline, then resize
            # the mask to the resulting spatial size with nearest-neighbour
            # interpolation so class ids are not blended.
            img = weight_transforms(img)
            size = F.get_dimensions(img)[1:]
            target = F.resize(target, size, interpolation=InterpolationMode.NEAREST)
            return img, F.pil_to_tensor(target)

        return preprocessing

    return presets.SegmentationPresetEval(base_size=520)