Code example #1
import os
import sys

import cv2

# `aug` (rotation augmentation module) and `is_image` (image-extension
# check) are project-local helpers assumed to be importable here.

def aug_train(split_dir, mode):
    train_dir = os.path.join(split_dir, "train")

    for subdir, dirs, files in os.walk(train_dir):
        for f in files:
            file_path = os.path.join(subdir, f)
            if is_image(f):
                print(file_path)
                name, ext = os.path.splitext(f)
                img = cv2.imread(file_path)
                for i in range(1, 4):
                    # Output path for the copy rotated by i * 90 degrees.
                    rot_dir = os.path.join(subdir,
                                           name + "_aug_" + str(i * 90) + ext)
                    if mode == 'random':
                        cv2.imwrite(rot_dir, aug.rotation(img, 0, 'random'))
                    elif mode == 'strict':
                        cv2.imwrite(rot_dir, aug.rotation(img, i, 'strict'))
                    else:
                        print("mode must be either 'random' or 'strict'")
                        sys.exit(1)
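
For context, a minimal way this function might be driven. `is_image` and `aug.rotation` are project-local helpers that are not shown, so the extension check and the path below are hypothetical stand-ins, not the project's actual implementation:

def is_image(filename):
    # Hypothetical stand-in for the project's `is_image` helper (assumption).
    return filename.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp'))

# Write three rotated copies (90/180/270 degrees) next to each training image.
aug_train("dataset/split_0", mode='strict')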
Code example #2
File: dataloader.py Project: haohao11/AMENet
import os
import time

import numpy as np

# `Maps`, `concat_maps`, `circle_group_grid`, and `rotation` are
# project-local helpers from the AMENet code base.

def preprocess_data(seq_length,
                    size,
                    dirname,
                    path=None,
                    data=None,
                    aug_num=1,
                    save=True):
    '''
    Parameters
    ----------
    seq_length : int
        The complete length of each trajectory's offsets and occupancy.
        Note: offsets and occupancy are one step shorter than traj_data.
    size : [height, width, channels]
        The occupancy grid size and channels:
            orientation, speed, and position of the neighbors in the vicinity.
    dirname : string
        "train" or "challenge"
    path : string, optional
        Only used to extract offsets, traj_data, and occupancy from the
        original data files.
    data : numpy array, optional
        The complete predicted trajectories after the first prediction,
        used to calculate the occupancy at the predicted time steps.
    aug_num : int, optional
        The number of rotated copies used to augment the data.
    save : boolean, optional
        If True, save the processed data instead of returning it.
        The default is True.

    Returns
    -------
    offsets : numpy array
        [frameId, userId, x, y, delta_x, delta_y, theta, velocity].
    traj_data : numpy array
        [frameId, userId, x, y]
        Note: this is one step longer than offsets.
    occupancy : numpy array
        [height, width, channels].
    '''
    start = time.time()
    if data is None:
        data = np.genfromtxt(path, delimiter='')
        # The challenge dataset has NaN for the prediction time steps.
        data = data[~np.isnan(data).any(axis=1)]
        # Derive the output name portably (the original split on '\\').
        # NB: `dataname` is only set on this branch, so saving needs `path`.
        dataname = os.path.splitext(os.path.basename(path))[0]
        print("process data %s ..." % dataname)

    for r in range(aug_num):
        # Augment the data by rotation if the augmentation number is more than one.
        if r > 0:
            data[:, 2:4] = rotation(data[:, 2:4], r / aug_num)

        # Get the environment maps
        maps = Maps(data)
        traj_map = maps.trajectory_map()
        orient_map, speed_map = maps.motion_map(max_speed=10)
        map_info = [traj_map, orient_map, speed_map]
        enviro_maps = concat_maps(map_info)
        print("enviro_maps shape", enviro_maps.shape)

        offsets = np.reshape(maps.offsets, (-1, seq_length, 8))
        print("offsets shape", offsets.shape)
        traj_data = np.reshape(maps.sorted_data, (-1, seq_length + 1, 4))
        print("traj_data shape", traj_data.shape)
        occupancy = circle_group_grid(offsets, maps.sorted_data, size)
        print("occupancy shape", occupancy.shape)

        if save:
            if r == 0:
                # Save the original one
                np.savez("../processed_data/%s/%s" % (dirname, dataname),
                         offsets=offsets,
                         traj_data=traj_data,
                         occupancy=occupancy)
                end = time.time()

            else:
                # Save the rotated one(s)
                np.savez("../processed_data/%s/%s_%.0f" %
                         (dirname, dataname, r),
                         offsets=offsets,
                         traj_data=traj_data,
                         occupancy=occupancy)
                end = time.time()
            print("It takes ", round(end - start, 2), "seconds!\n")

        else:
            return offsets, traj_data, occupancy
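
A hedged usage sketch showing both call patterns; the sequence length, grid size, and input path below are illustrative assumptions inferred from the docstring and the np.savez paths, not values from the project:

# First pass: read raw trajectories from a file, then save the original and
# the rotated augmentations under ../processed_data/train/ (save=True).
preprocess_data(seq_length=20,                        # assumed length
                size=[32, 32, 3],                     # assumed grid size
                dirname="train",
                path="../data/train/some_scene.txt",  # hypothetical file
                aug_num=3)

# Second pass: pass predicted trajectories in memory and get the arrays back
# instead of saving them (save=False). `predicted_trajs` stands for the
# complete predicted trajectories described in the docstring.
offsets, traj_data, occupancy = preprocess_data(
    seq_length=20, size=[32, 32, 3], dirname="train",
    data=predicted_trajs, save=False)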
Code example #3
from torchvision import datasets, transforms

# `cifar10_train`, `cifar10_test`, `cifar10_normalize`, and the augmentation
# factories used below (rotation, resized_crop, ...) are defined earlier in
# the script.

test_loader = loader_from_dataset(cifar10_test)

cifar10_normalize_rotate = transforms.Compose([
    # transforms.Grayscale(num_output_channels=1),
    transforms.RandomRotation((-5, 5)),
    transforms.ToTensor(),
    transforms.Normalize((0.5, ), (0.5, ))
])
cifar10_test_rotated = datasets.CIFAR10('../data',
                                        train=False,
                                        download=True,
                                        transform=cifar10_normalize_rotate)
test_loader_rotated = loader_from_dataset(cifar10_test_rotated)

augmentations = [
    rotation(cifar10_train, cifar10_normalize, angles=range(-5, 6, 1)),
    resized_crop(cifar10_train, cifar10_normalize, size=32),
    blur(cifar10_train, cifar10_normalize),
    rotation_crop_blur(cifar10_train, cifar10_normalize, size=32),
    hflip(cifar10_train, cifar10_normalize),
    hflip_vflip(cifar10_train, cifar10_normalize),
    brightness(cifar10_train, cifar10_normalize),
    contrast(cifar10_train, cifar10_normalize)
]

n_channels = 3
size = 32
n_features = n_channels * size * size
n_classes = 10
gamma = 0.003  # gamma hyperparam for RBF kernel exp(-gamma ||x - y||^2). Best gamma is around 0.001--0.003
n_components = 10000
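
The gamma and n_components values point at an explicit random-feature approximation of the RBF kernel exp(-gamma ||x - y||^2) followed by a linear model. A minimal sketch of that pattern with scikit-learn's RBFSampler, using random arrays as stand-ins for flattened CIFAR-10 batches:

import numpy as np
from sklearn.kernel_approximation import RBFSampler
from sklearn.linear_model import SGDClassifier

# Map n_features-dimensional inputs to n_components random Fourier features
# that approximate the RBF kernel with the gamma chosen above.
rbf = RBFSampler(gamma=0.003, n_components=10000, random_state=0)

X = np.random.rand(256, 3 * 32 * 32)    # stand-in for flattened images
y = np.random.randint(0, 10, size=256)  # stand-in labels

Z = rbf.fit_transform(X)                # shape: (256, 10000)
clf = SGDClassifier().fit(Z, y)         # linear classifier on the features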
Code example #4
from torchvision import datasets, transforms

# Construct loaders from the MNIST dataset, then construct loaders for the
# augmented datasets (one per transformation). `loader_from_dataset`,
# `get_train_valid_datasets`, and the augmentation factories are
# project-local helpers defined elsewhere in the project.
mnist_normalize = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307, ), (0.3081, ))
])
mnist_train = datasets.MNIST(
    '../data', train=True, download=True, transform=mnist_normalize)
mnist_test = datasets.MNIST(
    '../data', train=False, download=True, transform=mnist_normalize)
mnist_train, mnist_valid = get_train_valid_datasets(mnist_train)
train_loader = loader_from_dataset(mnist_train)
valid_loader = loader_from_dataset(mnist_valid)
test_loader = loader_from_dataset(mnist_test)

augmentations = [rotation(mnist_train, mnist_normalize),
                 resized_crop(mnist_train, mnist_normalize),
                 blur(mnist_train, mnist_normalize),
                 rotation_crop_blur(mnist_train, mnist_normalize),
                 hflip(mnist_train, mnist_normalize),
                 hflip_vflip(mnist_train, mnist_normalize),
                 brightness(mnist_train, mnist_normalize),
                 contrast(mnist_train, mnist_normalize)]

n_features = 28 * 28
n_classes = 10
gamma = 0.003  # gamma hyperparam for RBF kernel exp(-gamma ||x - y||^2). Best gamma is around 0.001--0.003
n_components = 10000
sgd_n_epochs = 15
n_trials = 10
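
Both the MNIST and CIFAR-10 snippets rely on two helpers that are not shown, loader_from_dataset and get_train_valid_datasets. A plausible minimal sketch, assuming a fixed batch size and a 90/10 train/validation split; the actual project code may differ:

from torch.utils.data import DataLoader, random_split

def loader_from_dataset(dataset, batch_size=128):
    # Wrap a dataset in a shuffling DataLoader (the batch size is assumed).
    return DataLoader(dataset, batch_size=batch_size, shuffle=True)

def get_train_valid_datasets(dataset, valid_fraction=0.1):
    # Hold out a validation split from the training set (fraction assumed).
    n_valid = int(len(dataset) * valid_fraction)
    n_train = len(dataset) - n_valid
    return random_split(dataset, [n_train, n_valid])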