Example #1
def make_transformed_samples(dataset, args):
    '''
    Picks a random sample from the given dataset and returns a batch of
    (kGeneratedExamplesPerImage + 1) samples: the true sample from the
    dataset plus kGeneratedExamplesPerImage samples generated artificially
    by augmenting it with the GOTURN smooth motion model.
    '''
    idx = np.random.randint(dataset.len, size=1)[0]
    # unscaled original sample (single image and bb)
    orig_sample = dataset.get_orig_sample(idx)
    # cropped scaled sample (two frames and bb)
    true_sample, _ = dataset.get_sample(idx)
    true_tensor = transform(true_sample)
    x1_batch = torch.Tensor(kGeneratedExamplesPerImage + 1, 3, input_size,
                            input_size)
    x2_batch = torch.Tensor(kGeneratedExamplesPerImage + 1, 3, input_size,
                            input_size)
    x1_batch_x2 = torch.Tensor(kGeneratedExamplesPerImage + 1, 3,
                               input_size * 2, input_size * 2)
    x2_batch_x2 = torch.Tensor(kGeneratedExamplesPerImage + 1, 3,
                               input_size * 2, input_size * 2)
    y_batch = torch.Tensor(kGeneratedExamplesPerImage + 1, 4)

    # initialize batch with the true sample
    x1_batch[0] = true_tensor['previmg']
    x2_batch[0] = true_tensor['currimg']
    x1_batch_x2[0] = true_tensor['previmg_x2']
    x2_batch_x2[0] = true_tensor['currimg_x2']
    y_batch[0] = true_tensor['currbb']

    scale = Rescale((input_size, input_size))
    for i in range(kGeneratedExamplesPerImage):
        sample = orig_sample
        # unscaled current image crop with box
        curr_sample, opts_curr = shift_crop_training_sample(sample, bb_params)
        # unscaled previous image crop with box
        prev_sample, opts_prev = crop_sample(sample)
        prev_sample_x2, opts_prev_x2 = crop_sample(sample, contextFactor=4)
        prev_sample['image_x2'] = prev_sample_x2['image']
        scaled_curr_obj = scale(curr_sample, opts_curr)
        scaled_prev_obj = scale(prev_sample, opts_prev)
        training_sample = {
            'previmg': scaled_prev_obj['image'],
            'currimg': scaled_curr_obj['image'],
            'previmg_x2': scaled_prev_obj['image_x2'],
            'currimg_x2': scaled_curr_obj['image_x2'],
            'currbb': scaled_curr_obj['bb']
        }
        sample = transform(training_sample)
        x1_batch[i + 1] = sample['previmg']
        x2_batch[i + 1] = sample['currimg']
        x1_batch_x2[i + 1] = sample['previmg_x2']
        x2_batch_x2[i + 1] = sample['currimg_x2']
        y_batch[i + 1] = sample['currbb']

    return x1_batch, x2_batch, x1_batch_x2, x2_batch_x2, y_batch
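
A minimal, self-contained sketch of the batch-assembly pattern used above: slot 0 of each preallocated tensor holds the true sample and slots 1..k hold artificially generated variants. The dummy image tensors, the value of k and the jitter-based augment() below are stand-ins for the dataset sample and the GOTURN smooth-motion-model augmentation; they are not part of the original code.

import torch

# Hypothetical stand-ins for kGeneratedExamplesPerImage, input_size and
# the GOTURN motion-model augmentation used in the real code.
k = 10
input_size = 227

def augment(img, bb):
    # Dummy augmentation: add a little noise to the crop and jitter the box.
    return img + 0.01 * torch.randn_like(img), bb + 0.05 * torch.randn(4)

# One "true" previous/current pair and its bounding box (dummy data).
prev_img = torch.rand(3, input_size, input_size)
curr_img = torch.rand(3, input_size, input_size)
curr_bb = torch.tensor([2.5, 2.5, 7.5, 7.5])

# Preallocate the batch; slot 0 holds the true sample.
x1_batch = torch.empty(k + 1, 3, input_size, input_size)
x2_batch = torch.empty(k + 1, 3, input_size, input_size)
y_batch = torch.empty(k + 1, 4)

x1_batch[0] = prev_img
x2_batch[0] = curr_img
y_batch[0] = curr_bb

# Slots 1..k hold generated variants of the same sample.
for i in range(k):
    aug_img, aug_bb = augment(curr_img, curr_bb)
    x1_batch[i + 1] = prev_img
    x2_batch[i + 1] = aug_img
    y_batch[i + 1] = aug_bb

print(x1_batch.shape, x2_batch.shape, y_batch.shape)
# torch.Size([11, 3, 227, 227]) torch.Size([11, 3, 227, 227]) torch.Size([11, 4])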
Example #2
def make_transformed_samples(dataset, args):
    '''
    Same batch construction as above, but without the 2x-context crops:
    returns a batch of (kGeneratedExamplesPerImage + 1) previous/current
    frame pairs together with their target bounding boxes.
    '''
    idx = np.random.randint(dataset.len, size=1)[0]
    # unscaled original sample (single image and bb)
    orig_sample = dataset.get_orig_sample(idx)
    # cropped scaled sample (two frames and bb)
    true_sample, _ = dataset.get_sample(idx)
    true_tensor = transform(true_sample)
    x1_batch = torch.Tensor(kGeneratedExamplesPerImage + 1, 3, input_size,
                            input_size)
    x2_batch = torch.Tensor(kGeneratedExamplesPerImage + 1, 3, input_size,
                            input_size)
    y_batch = torch.Tensor(kGeneratedExamplesPerImage + 1, 4)

    # initialize batch with the true sample
    x1_batch[0, :, :, :] = true_tensor['previmg']
    x2_batch[0, :, :, :] = true_tensor['currimg']
    y_batch[0, :] = true_tensor['currbb']

    scale = Rescale((input_size, input_size))
    for i in range(kGeneratedExamplesPerImage):
        sample = orig_sample
        # unscaled current image crop with box
        curr_sample, opts_curr = shift_crop_training_sample(sample, bb_params)
        # unscaled previous image crop with box
        prev_sample, opts_prev = crop_sample(sample)
        scaled_curr_obj = scale(curr_sample, opts_curr)
        scaled_prev_obj = scale(prev_sample, opts_prev)
        training_sample = {
            'previmg': scaled_prev_obj['image'],
            'currimg': scaled_curr_obj['image'],
            'currbb': scaled_curr_obj['bb']
        }
        sample = transform(training_sample)
        x1_batch[i + 1, :, :, :] = sample['previmg']
        x2_batch[i + 1, :, :, :] = sample['currimg']
        y_batch[i + 1, :] = sample['currbb']

    return x1_batch, x2_batch, y_batch
Example #3
    def get_sample(self, idx):
        """
        Returns sample without transformation for visualization.

        Sample consists of resized previous and current frame with target
        which is passed to the network. Bounding box values are normalized
        between 0 and 1 with respect to the target frame and then scaled by
        factor of 10.
        """
        sample = self.get_orig_sample(idx)
        # unscaled current image crop with box
        curr_sample, opts_curr = shift_crop_training_sample(
            sample, self.bb_params)
        # unscaled previous image crop with box
        prev_sample, opts_prev = crop_sample(sample)
        scale = Rescale((self.sz, self.sz))
        scaled_curr_obj = scale(curr_sample, opts_curr)
        scaled_prev_obj = scale(prev_sample, opts_prev)
        training_sample = {
            'previmg': scaled_prev_obj['image'],
            'currimg': scaled_curr_obj['image'],
            'currbb': scaled_curr_obj['bb']
        }
        return training_sample, opts_curr
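
To make the bounding-box convention in the docstring above concrete, here is a small worked example. The 227 x 227 crop size and the pixel coordinates are made up for illustration; only the normalize-to-[0, 1]-then-scale-by-10 step follows the description in get_sample.

import numpy as np

# Hypothetical crop: the current (target) frame resized to 227 x 227.
crop_w, crop_h = 227, 227

# Bounding box in pixel coordinates within that crop: [x1, y1, x2, y2].
bb_pixels = np.array([56.75, 56.75, 170.25, 170.25])

# Normalize to [0, 1] with respect to the target frame, then scale by 10.
bb_normalized = bb_pixels / np.array([crop_w, crop_h, crop_w, crop_h])
bb_scaled = bb_normalized * 10

print(bb_normalized)  # [0.25 0.25 0.75 0.75]
print(bb_scaled)      # [2.5 2.5 7.5 7.5]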