Code example #1
def transform_test_2():
    return Compose([
        T.ToFloatTensorInZeroOne(),
        T.Resize((128, 228)),
        normalize,
        #T.CenterCrop((112, 112))
    ])
Code example #2
def transform_train_optical_flow():
    return Compose([
        T.transform_optical_flow_raw,
        T.Resize((128, 228)),
        T.RandomHorizontalFlip(),
        T.RandomCrop((112, 112))
    ])
Code example #3
def transform_train_reference():
    return Compose([
        T.ToFloatTensorInZeroOne(),
        T.Resize((128, 171)),
        T.RandomHorizontalFlip(), normalize,
        T.RandomCrop((112, 112))
    ])
Code example #4
def transform_train_3():
    return Compose([
        T.ToFloatTensorInZeroOne(),
        T.Resize((128, 228)),
        T.RandomHorizontalFlip(), normalize,
        T.RandomCrop((112, 160))
    ])
Code example #5
'''
Here we assume the videos have already been resized to the value appearing in T.Resize.
'''

import torchvision
import video_yyz.transforms as T
from torchvision import get_video_backend

video_backend = get_video_backend()

normalize = T.Normalize(mean=[0.43216, 0.394666, 0.37645],
                        std=[0.22803, 0.22145, 0.216989])

# Training pipeline: float tensor in [0, 1], resize, random horizontal flip,
# normalize, then a random 112x112 crop.
transform_train = torchvision.transforms.Compose([
    T.ToFloatTensorInZeroOne(),
    T.Resize((128, 228)),
    T.RandomHorizontalFlip(), normalize,
    T.RandomCrop((112, 112))
])

# Test pipeline: same preprocessing, but with a deterministic center crop.
transform_test = torchvision.transforms.Compose([
    T.ToFloatTensorInZeroOne(),
    T.Resize((128, 228)), normalize,
    T.CenterCrop((112, 112))
])
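
As a quick sanity check, the two pipelines above can be run on a dummy clip. The sketch below is assumption-laden: it assumes T.ToFloatTensorInZeroOne behaves like the torchvision video-classification reference transform, taking a uint8 clip of shape (T, H, W, C) and returning a float tensor of shape (C, T, H, W) in [0, 1]; the actual video_yyz.transforms implementation may differ.

import torch

# Dummy uint8 clip already at the assumed pre-resized resolution (see the
# docstring above): 16 frames of 128x228 RGB in (T, H, W, C) layout.
dummy_clip = torch.randint(0, 256, (16, 128, 228, 3), dtype=torch.uint8)

train_clip = transform_train(dummy_clip)
test_clip = transform_test(dummy_clip)

# Under the assumed (C, T, H, W) output layout, both clips end up 112x112.
print(train_clip.shape)  # expected: torch.Size([3, 16, 112, 112])
print(test_clip.shape)   # expected: torch.Size([3, 16, 112, 112])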
Code example #6
def transform_test_optical_flow():
    return Compose([
        T.transform_optical_flow_raw,
        T.Resize((128, 228)),
        T.CenterCrop((112, 112))
    ])
Code example #7
def transform_test_1_right():
    return Compose([
        T.ToFloatTensorInZeroOne(),
        T.Resize((128, 228)), normalize,
        T.RightCrop((112, 112))
    ])
Code example #8
def transform_test_reference():
    return Compose([
        T.ToFloatTensorInZeroOne(),
        T.Resize((128, 171)), normalize,
        T.CenterCrop((112, 112))
    ])
Code example #9
'''
Provide some reference "objects"

Code in this folder should not be "imported". Copy & paste is preferred
until proper abstractions for these objects are found.
'''

import torchvision
import video_yyz.transforms as T
from torchvision import get_video_backend

video_backend = get_video_backend()

normalize = T.Normalize(mean=[0.43216, 0.394666, 0.37645],
                        std=[0.22803, 0.22145, 0.216989])

# Reference pipelines: same steps as above, but resizing to (128, 171).
transform_train = torchvision.transforms.Compose([
    T.ToFloatTensorInZeroOne(),
    T.Resize((128, 171)),
    T.RandomHorizontalFlip(), normalize,
    T.RandomCrop((112, 112))
])

transform_test = torchvision.transforms.Compose([
    T.ToFloatTensorInZeroOne(),
    T.Resize((128, 171)), normalize,
    T.CenterCrop((112, 112))
])
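
In a training script these reference transforms are typically passed to a video dataset. The snippet below is only a sketch: it assumes a torchvision-style video dataset such as torchvision.datasets.Kinetics400 (available in older torchvision releases), whose raw clips are uint8 tensors of shape (T, H, W, C); the path and clip parameters are hypothetical, and the project may well use its own dataset class instead.

from torchvision.datasets import Kinetics400  # assumption: older torchvision dataset API

# Hypothetical root directory and clip parameters, for illustration only.
train_set = Kinetics400(
    "data/videos/train",
    frames_per_clip=16,
    step_between_clips=16,
    transform=transform_train,  # applied to each raw uint8 clip on access
)

video, audio, label = train_set[0]  # video has already been transformed here

When wrapping such a dataset in a DataLoader, the variable-length audio tensors usually have to be dropped in a custom collate_fn, as in the torchvision video-classification reference script.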