def fetch_from_dataset(self, batch_to_load):
    """Return *batches* of 5D sequences/clips or 4D images.

    `batch_to_load` contains the indices of the first frame/image of
    each element of the batch.
    `load_sequence` should return a dict of 2 or more elements, the
    'data' entry 4-dimensional (frame, 0, 1, c) or (frame, c, 0, 1)
    containing the data and the 'labels' entry 3D or 4D containing
    the label.
    """
    batch_ret = {}

    # Create batches
    for el in batch_to_load:
        if el is None:
            # The first element cannot be None, or we wouldn't have
            # this batch in the first place, so we can safely copy
            # the last element of the batch for each filename that
            # is None until we fill the batch.
            if self.fill_last_batch:
                # Plain dict iteration instead of the Python2-only
                # iterkeys(), so the code also runs on Python 3.
                for k in batch_ret:
                    batch_ret[k].append(batch_ret[k][-1])
            continue

        # Load sequence, format is (s, 0, 1, c)
        ret = self.load_sequence(el)
        assert all(el in ret.keys()
                   for el in ('data', 'labels', 'filenames', 'subset')), (
            'Keys: {}'.format(ret.keys()))
        assert all(isinstance(el, np.ndarray)
                   for el in (ret['data'], ret['labels']))
        # Keep an untouched copy of the loaded data, returned later as
        # 'raw_data' (before normalization/augmentation).
        raw_data = ret['data'].copy()
        seq_x, seq_y = ret['data'], ret['labels']

        # Per-image normalization (over every axis but the channel one)
        if self.remove_per_img_mean:
            seq_x -= seq_x.mean(axis=tuple(range(seq_x.ndim - 1)),
                                keepdims=True)
        if self.divide_by_per_img_std:
            seq_x /= seq_x.std(axis=tuple(range(seq_x.ndim - 1)),
                               keepdims=True)
        # Dataset statistics normalization
        if self.remove_mean:
            seq_x -= getattr(self, 'mean', 0)
        if self.divide_by_std:
            seq_x /= getattr(self, 'std', 1)

        # Make sure data is in 4D
        if seq_x.ndim == 3:
            seq_x = seq_x[np.newaxis, ...]
            raw_data = raw_data[np.newaxis, ...]
        assert seq_x.ndim == 4
        # and labels in 3D
        if self.set_has_GT:
            if seq_y.ndim == 2:
                seq_y = seq_y[np.newaxis, ...]
            assert seq_y.ndim == 3

        # Perform data augmentation, if needed
        seq_x, seq_y = random_transform(
            seq_x, seq_y,
            nclasses=self.nclasses,
            void_label=self.void_labels,
            **self.data_augm_kwargs)

        # NOTE(review): parentheses make the original operator
        # precedence explicit -- `and` binds tighter than `or`, so the
        # remapping also runs when the cmap exceeds the number of valid
        # classes even if the set has no GT. Confirm this is intended
        # (the simpler 5D variant in this file tests only
        # `set_has_GT and _void_labels != []`).
        if ((self.set_has_GT and self._void_labels != []) or
                max(self._cmap.keys()) > self.non_void_nclasses - 1):
            # Map all void classes to non_void_nclasses and shift the
            # other values accordingly, so that the valid values are
            # between 0 and non_void_nclasses-1 and the void_classes
            # are all equal to non_void_nclasses.
            void_l = self._void_labels
            void_l.sort(reverse=True)  # in-place: sorts self._void_labels
            mapping = self._mapping

            # Apply the mapping: park non_void_nclasses in a temporary
            # class id so it is not clobbered while shifting the others.
            tmp_class = (-1 if not hasattr(self, 'GTclasses') else
                         max(self.GTclasses) + 1)
            seq_y[seq_y == self.non_void_nclasses] = tmp_class
            for i in sorted(mapping.keys()):
                if i == self.non_void_nclasses:
                    continue
                seq_y[seq_y == i] = mapping[i]
            try:
                seq_y[seq_y == tmp_class] = mapping[self.non_void_nclasses]
            except KeyError:
                # none of the original classes was self.non_void_nclasses
                pass

        # Transform targets seq_y to one hot code if return_one_hot
        # is True
        if self.set_has_GT and self.return_one_hot:
            nc = (self.non_void_nclasses if self._void_labels == [] else
                  self.non_void_nclasses + 1)
            sh = seq_y.shape
            seq_y = seq_y.flatten()
            seq_y_hot = np.zeros((seq_y.shape[0], nc), dtype='int32')
            seq_y = seq_y.astype('int32')
            seq_y_hot[range(seq_y.shape[0]), seq_y] = 1
            seq_y_hot = seq_y_hot.reshape(sh + (nc,))
            seq_y = seq_y_hot

        # Dimshuffle if return_01c is False
        if not self.return_01c:
            # s,0,1,c --> s,c,0,1
            seq_x = seq_x.transpose([0, 3, 1, 2])
            if self.set_has_GT and self.return_one_hot:
                seq_y = seq_y.transpose([0, 3, 1, 2])
                # NOTE(review): 'regions' is only transposed when
                # one-hot labels are returned -- confirm intended.
                if 'regions' in ret.keys():
                    ret['regions'] = ret['regions'].transpose([0, 3, 1, 2])
            raw_data = raw_data.transpose([0, 3, 1, 2])
            # Drop a singleton trailing spatial axis, if any
            if seq_x.shape[3] == 1:
                seq_x = seq_x.squeeze(3)
                seq_y = seq_y.squeeze(3) if self.return_one_hot else \
                    seq_y.squeeze(2)
                if 'regions' in ret.keys():
                    ret['regions'] = \
                        ret['regions'].squeeze(2) if self.return_one_hot \
                        else ret['regions'].squeeze(1)
                raw_data = raw_data.squeeze(3)
        else:
            # Drop a singleton spatial axis, if any
            if seq_x.shape[2] == 1:
                seq_x = seq_x.squeeze(2)
                seq_y = seq_y.squeeze(2) if self.return_one_hot else \
                    seq_y.squeeze(1)
                if 'regions' in ret.keys():
                    ret['regions'] = \
                        ret['regions'].squeeze(2) if self.return_one_hot \
                        else ret['regions'].squeeze(1)
                raw_data = raw_data.squeeze(2)

        # Return 4D images
        if not self.return_sequence:
            seq_x = seq_x[0, ...]
            if self.set_has_GT:
                seq_y = seq_y[0, ...]
            if 'regions' in ret.keys():
                ret['regions'] = ret['regions'][0, ...]
            raw_data = raw_data[0, ...]

        if self.return_0_255:
            seq_x = (seq_x * 255).astype('uint8')
        ret['data'], ret['labels'] = seq_x, seq_y
        ret['raw_data'] = raw_data
        # Append the data of this batch to the minibatch array
        # (.items() instead of Python2-only iteritems()).
        for k, v in ret.items():
            batch_ret.setdefault(k, []).append(v)

    for k, v in batch_ret.items():
        try:
            batch_ret[k] = np.array(v)
        except ValueError:
            # Variable shape: cannot wrap with a numpy array
            pass
    if self.seq_length > 0 and self.return_middle_frame_only:
        batch_ret['labels'] = batch_ret['labels'][:, self.seq_length // 2]
    if self.return_list:
        return [batch_ret['data'], batch_ret['labels']]
    else:
        return batch_ret
def fetch_from_dataset(self, batch_to_load):
    """Return *batches* of 1D data.

    `batch_to_load` contains the indices of the lines to load in the
    batch. Data (raw and/or smoothed) and labels are read line by line
    from text files of space-separated values.
    """
    batch_ret = {}
    batch_to_load = [el for el in batch_to_load if el is not None]
    batch_to_load = [element[1] for tupl in batch_to_load
                     for element in tupl]
    # Set for O(1) membership tests while scanning the files line by
    # line; batch_to_load stays a list since its length/identity are
    # used below.
    lines_to_load = set(batch_to_load)

    # Create batches
    ret = {}
    # Load data
    ret['data'] = []
    ret['indices'] = []

    def _read_lines(path, cast, dtype):
        # Collect the lines of `path` whose index is in
        # `lines_to_load`, each parsed as an array of `cast`ed values
        # of type `dtype`; stop as soon as the batch is complete.
        out = []
        with open(path) as fp:
            for i, line in enumerate(fp):
                if i in lines_to_load:
                    values = np.array([cast(el)
                                       for el in re.split(' ', line)])
                    out.append(values.astype(dtype))
                    if len(out) == len(batch_to_load):
                        break
        return np.vstack(out)

    if self.smooth_raw_both in ('raw', 'both'):
        raw = _read_lines(self.image_path_raw, float, floatX)
        # b,0 to b,0,c
        raw = np.expand_dims(raw, axis=2)

    if self.smooth_raw_both in ('smooth', 'both'):
        smooth = _read_lines(self.image_path_smooth, float, floatX)
        # b,0 to b,0,c
        smooth = np.expand_dims(smooth, axis=2)

    if self.smooth_raw_both == 'raw':
        ret['data'] = raw
    elif self.smooth_raw_both == 'smooth':
        ret['data'] = smooth
    elif self.smooth_raw_both == 'both':
        ret['data'] = np.concatenate([smooth, raw], axis=2)

    # Load mask. Segmentation and classification labels are read
    # identically (the original duplicated this loop verbatim).
    ret['labels'] = []
    if self.task in ('segmentation', 'classification'):
        ret['labels'] = _read_lines(self.mask_path, int, 'int32')

    ret['filenames'] = batch_to_load
    ret['subset'] = 'default'

    assert all(el in ret.keys()
               for el in ('data', 'labels', 'filenames', 'subset')), (
        'Keys: {}'.format(ret.keys()))
    assert all(isinstance(el, np.ndarray)
               for el in (ret['data'], ret['labels']))
    # Untouched copy of the data, returned later as 'raw_data'.
    raw_data = ret['data'].copy()
    seq_x, seq_y = ret['data'], ret['labels']

    # Per-data normalization
    if self.remove_per_img_mean:
        seq_x -= seq_x.mean(axis=1, keepdims=True)
    if self.divide_by_per_img_std:
        seq_x /= seq_x.std(axis=1, keepdims=True)
    # Dataset statistics normalization
    if self.remove_mean:
        seq_x -= getattr(self, 'mean', 0)
    if self.divide_by_std:
        seq_x /= getattr(self, 'std', 1)

    assert seq_x.ndim == 3
    assert seq_y.ndim == 2
    # from b,0(,c) to b,0,1(,c): add a dummy spatial axis so the 2D
    # augmentation routine can be reused on 1D data.
    seq_x = np.expand_dims(seq_x, axis=2)
    seq_y = np.expand_dims(seq_y, axis=2)

    # Perform data augmentation, if needed
    seq_x, seq_y = random_transform(
        seq_x, seq_y,
        nclasses=self.nclasses,
        void_label=self.void_labels,
        **self.data_augm_kwargs)

    # from b,0,1(,c) to b,0(,c)
    sh = seq_x.shape
    seq_x = seq_x.reshape((sh[0], sh[1], sh[3]))
    if self.task == 'segmentation':
        seq_y = seq_y.reshape((sh[0], sh[1]))
    elif self.task == 'classification':
        seq_y = seq_y.reshape((sh[0]))

    if self.set_has_GT and self._void_labels != []:
        # Map all void classes to non_void_nclasses and shift the other
        # values accordingly, so that the valid values are between 0
        # and non_void_nclasses-1 and the void_classes are all equal to
        # non_void_nclasses.
        void_l = self._void_labels
        void_l.sort(reverse=True)  # in-place: sorts self._void_labels
        mapping = self._mapping

        # Apply the mapping: park non_void_nclasses in a temporary
        # class id so it is not clobbered while shifting the others.
        tmp_class = (-1 if not hasattr(self, 'GTclasses') else
                     max(self.GTclasses) + 1)
        seq_y[seq_y == self.non_void_nclasses] = tmp_class
        for i in sorted(mapping.keys()):
            if i == self.non_void_nclasses:
                continue
            seq_y[seq_y == i] = mapping[i]
        try:
            seq_y[seq_y == tmp_class] = mapping[self.non_void_nclasses]
        except KeyError:
            # none of the original classes was self.non_void_nclasses
            pass
    elif max(self._cmap.keys()) > self.non_void_nclasses - 1:
        # Shift values of labels, so that the valid values are between
        # 0 and non_void_nclasses-1.
        mapping = self._mapping

        # Apply the mapping (same parking trick as above)
        tmp_class = (-1 if not hasattr(self, 'GTclasses') else
                     max(self.GTclasses) + 1)
        seq_y[seq_y == self.non_void_nclasses] = tmp_class
        for i in sorted(mapping.keys()):
            if i == self.non_void_nclasses:
                continue
            seq_y[seq_y == i] = mapping[i]
        try:
            seq_y[seq_y == tmp_class] = mapping[self.non_void_nclasses]
        except KeyError:
            # none of the original classes was self.non_void_nclasses
            pass

    # Transform targets seq_y to one hot code if return_one_hot
    # is True
    if self.set_has_GT and self.return_one_hot:
        nc = (self.non_void_nclasses if self._void_labels == [] else
              self.non_void_nclasses + 1)
        sh = seq_y.shape
        seq_y = seq_y.flatten()
        seq_y_hot = np.zeros((seq_y.shape[0], nc), dtype='int32')
        seq_y = seq_y.astype('int32')
        seq_y_hot[range(seq_y.shape[0]), seq_y] = 1
        seq_y_hot = seq_y_hot.reshape(sh + (nc,))
        seq_y = seq_y_hot

    # Dimshuffle if return_01c is False
    if not self.return_01c:
        # b,0,c --> b,c,0
        seq_x = seq_x.transpose([0, 2, 1])
        if self.set_has_GT and self.return_one_hot:
            seq_y = seq_y.transpose([0, 2, 1])
        raw_data = raw_data.transpose([0, 2, 1])

    if self.return_0_255:
        seq_x = (seq_x * 255).astype('uint8')
    ret['data'], ret['labels'] = seq_x, seq_y
    ret['raw_data'] = raw_data
    # Append the data of this batch to the minibatch array
    # (.items() instead of Python2-only iteritems()).
    for k, v in ret.items():
        batch_ret.setdefault(k, []).append(v)

    for k, v in batch_ret.items():
        try:
            batch_ret[k] = np.array(v)
        except ValueError:
            # Variable shape: cannot wrap with a numpy array
            pass
    # Drop the leading axis added by the minibatch wrapping above
    batch_ret['data'] = batch_ret['data'].squeeze(0)
    batch_ret['labels'] = batch_ret['labels'].squeeze(0)
    if self.seq_length > 0 and self.return_middle_frame_only:
        batch_ret['labels'] = batch_ret['labels'][:, self.seq_length // 2]
    if self.return_list:
        return [batch_ret['data'], batch_ret['labels']]
    else:
        return batch_ret
def fetch_from_dataset(self, batch_to_load):
    """Return *batches* of 5D sequences/clips or 4D images.

    `batch_to_load` contains the indices of the first frame/image of
    each element of the batch.
    `load_sequence` should return a dict of 2 or more elements, the
    'data' entry 4-dimensional (frame, 0, 1, c) or (frame, c, 0, 1)
    containing the data and the 'labels' entry 3D or 4D containing
    the label.
    """
    batch_ret = {}

    # Create batches
    for el in batch_to_load:
        if el is None:
            # The first element cannot be None, or we wouldn't have
            # this batch in the first place, so we can safely copy
            # the last element of the batch for each filename that
            # is None until we fill the batch.
            if self.fill_last_batch:
                # Plain dict iteration instead of the Python2-only
                # iterkeys(), so the code also runs on Python 3.
                for k in batch_ret:
                    batch_ret[k].append(batch_ret[k][-1])
            continue

        # Load sequence, format is (s, 0, 1, c)
        ret = self.load_sequence(el)
        assert all(el in ret.keys()
                   for el in ('data', 'labels', 'filenames', 'subset')), (
            'Keys: {}'.format(ret.keys()))
        assert all(isinstance(el, np.ndarray)
                   for el in (ret['data'], ret['labels']))
        # Untouched copy of the loaded data, returned as 'raw_data'.
        raw_data = ret['data'].copy()
        seq_x, seq_y = ret['data'], ret['labels']

        # Per-image normalization (over every axis but the channel one)
        if self.remove_per_img_mean:
            seq_x -= seq_x.mean(axis=tuple(range(seq_x.ndim - 1)),
                                keepdims=True)
        if self.divide_by_per_img_std:
            seq_x /= seq_x.std(axis=tuple(range(seq_x.ndim - 1)),
                               keepdims=True)
        # Dataset statistics normalization
        if self.remove_mean:
            seq_x -= getattr(self, 'mean', 0)
        if self.divide_by_std:
            seq_x /= getattr(self, 'std', 1)

        # Make sure data is in 4D
        if seq_x.ndim == 3:
            seq_x = seq_x[np.newaxis, ...]
            raw_data = raw_data[np.newaxis, ...]
        assert seq_x.ndim == 4
        # and labels in 3D
        if self.set_has_GT:
            if seq_y.ndim == 2:
                seq_y = seq_y[np.newaxis, ...]
            assert seq_y.ndim == 3

        # Perform data augmentation, if needed
        seq_x, seq_y = random_transform(
            seq_x, seq_y,
            nclasses=self.nclasses,
            void_label=self.void_labels,
            **self.data_augm_kwargs)

        if self.set_has_GT and self._void_labels != []:
            # Map all void classes to non_void_nclasses and shift the
            # other values accordingly, so that the valid values are
            # between 0 and non_void_nclasses-1 and the void_classes
            # are all equal to non_void_nclasses.
            void_l = self._void_labels
            void_l.sort(reverse=True)  # in-place: sorts self._void_labels
            mapping = self._mapping

            # Apply the mapping: park non_void_nclasses in a temporary
            # class id so it is not clobbered while shifting the others.
            tmp_class = (-1 if not hasattr(self, 'GTclasses') else
                         max(self.GTclasses) + 1)
            seq_y[seq_y == self.non_void_nclasses] = tmp_class
            for i in sorted(mapping.keys()):
                if i == self.non_void_nclasses:
                    continue
                seq_y[seq_y == i] = mapping[i]
            try:
                seq_y[seq_y == tmp_class] = mapping[self.non_void_nclasses]
            except KeyError:
                # none of the original classes was self.non_void_nclasses
                pass

        # Transform targets seq_y to one hot code if return_one_hot
        # is True
        if self.set_has_GT and self.return_one_hot:
            nc = (self.non_void_nclasses if self._void_labels == [] else
                  self.non_void_nclasses + 1)
            sh = seq_y.shape
            seq_y = seq_y.flatten()
            seq_y_hot = np.zeros((seq_y.shape[0], nc), dtype='int32')
            seq_y = seq_y.astype('int32')
            seq_y_hot[range(seq_y.shape[0]), seq_y] = 1
            seq_y_hot = seq_y_hot.reshape(sh + (nc,))
            seq_y = seq_y_hot

        # Dimshuffle if return_01c is False
        if not self.return_01c:
            # s,0,1,c --> s,c,0,1
            seq_x = seq_x.transpose([0, 3, 1, 2])
            if self.set_has_GT and self.return_one_hot:
                seq_y = seq_y.transpose([0, 3, 1, 2])
            raw_data = raw_data.transpose([0, 3, 1, 2])

        # Return 4D images
        if not self.return_sequence:
            seq_x = seq_x[0, ...]
            if self.set_has_GT:
                seq_y = seq_y[0, ...]
            raw_data = raw_data[0, ...]

        if self.return_0_255:
            seq_x = (seq_x * 255).astype('uint8')
        ret['data'], ret['labels'] = seq_x, seq_y
        ret['raw_data'] = raw_data
        # Append the data of this batch to the minibatch array
        # (.items() instead of Python2-only iteritems()).
        for k, v in ret.items():
            batch_ret.setdefault(k, []).append(v)

    for k, v in batch_ret.items():
        try:
            batch_ret[k] = np.array(v)
        except ValueError:
            # Variable shape: cannot wrap with a numpy array
            pass
    if self.seq_length > 0 and self.return_middle_frame_only:
        batch_ret['labels'] = batch_ret['labels'][:, self.seq_length // 2]
    if self.return_list:
        return [batch_ret['data'], batch_ret['labels']]
    else:
        return batch_ret
def fetch_from_dataset(self, batch_to_load):
    """Return *batches* of 1D data.

    `batch_to_load` contains the indices of the lines to load in the
    batch. Data (raw and/or smoothed) and labels are either taken from
    the preloaded arrays (`self.preload`) or read line by line from
    text files of space-separated values.
    """
    batch_ret = {}
    batch_to_load = [el for el in batch_to_load if el is not None]
    batch_to_load = [element[1] for tupl in batch_to_load
                     for element in tupl]
    # Set for O(1) membership tests while scanning the files line by
    # line; batch_to_load stays a list since its length/identity are
    # used below (including as a fancy index when preloading).
    lines_to_load = set(batch_to_load)

    # Create batches
    ret = {}
    # Load data
    ret['data'] = []
    ret['indices'] = []

    def _read_lines(path, cast, dtype):
        # Collect the lines of `path` whose index is in
        # `lines_to_load`, each parsed as an array of `cast`ed values
        # of type `dtype`; stop as soon as the batch is complete.
        out = []
        with open(path) as fp:
            for i, line in enumerate(fp):
                if i in lines_to_load:
                    values = np.array([cast(el)
                                       for el in re.split(' ', line)])
                    out.append(values.astype(dtype))
                    if len(out) == len(batch_to_load):
                        break
        return np.vstack(out)

    if self.smooth_raw_both in ('raw', 'both'):
        if self.preload:
            raw = self.image_raw[batch_to_load]
        else:
            raw = _read_lines(self.image_path_raw, float, floatX)
        # b,0 to b,0,c
        raw = np.expand_dims(raw, axis=2)

    if self.smooth_raw_both in ('smooth', 'both'):
        if self.preload:
            smooth = self.image_smooth[batch_to_load]
        else:
            smooth = _read_lines(self.image_path_smooth, float, floatX)
        # b,0 to b,0,c
        smooth = np.expand_dims(smooth, axis=2)

    if self.smooth_raw_both == 'raw':
        ret['data'] = raw
    elif self.smooth_raw_both == 'smooth':
        ret['data'] = smooth
    elif self.smooth_raw_both == 'both':
        ret['data'] = np.concatenate([smooth, raw], axis=2)

    # Load mask. Segmentation and classification labels are read
    # identically (the original duplicated this block verbatim).
    ret['labels'] = []
    if self.task in ('segmentation', 'classification'):
        if self.preload:
            ret['labels'] = self.mask[batch_to_load]
        else:
            ret['labels'] = _read_lines(self.mask_path, int, 'int32')

    ret['filenames'] = batch_to_load
    ret['subset'] = 'default'

    assert all(el in ret.keys()
               for el in ('data', 'labels', 'filenames', 'subset')), (
        'Keys: {}'.format(ret.keys()))
    assert all(isinstance(el, np.ndarray)
               for el in (ret['data'], ret['labels']))
    # Untouched copy of the data, returned later as 'raw_data'.
    raw_data = ret['data'].copy()
    seq_x, seq_y = ret['data'], ret['labels']

    # Per-data normalization
    if self.remove_per_img_mean:
        seq_x -= seq_x.mean(axis=1, keepdims=True)
    if self.divide_by_per_img_std:
        seq_x /= seq_x.std(axis=1, keepdims=True)
    # Dataset statistics normalization
    if self.remove_mean:
        seq_x -= getattr(self, 'mean', 0)
    if self.divide_by_std:
        seq_x /= getattr(self, 'std', 1)

    assert seq_x.ndim == 3
    assert seq_y.ndim == 2
    # from b,0(,c) to b,0,1(,c): add a dummy spatial axis so the 2D
    # augmentation routine can be reused on 1D data.
    seq_x = np.expand_dims(seq_x, axis=2)
    seq_y = np.expand_dims(seq_y, axis=2)

    # Perform data augmentation, if needed
    seq_x, seq_y = random_transform(
        seq_x, seq_y,
        nclasses=self.nclasses,
        void_label=self.void_labels,
        **self.data_augm_kwargs)

    # from b,0,1(,c) to b,0(,c)
    sh = seq_x.shape
    seq_x = seq_x.reshape((sh[0], sh[1], sh[3]))
    if self.task == 'segmentation':
        seq_y = seq_y.reshape((sh[0], sh[1]))
    elif self.task == 'classification':
        seq_y = seq_y.reshape((sh[0]))

    if self.set_has_GT and self._void_labels != []:
        # Map all void classes to non_void_nclasses and shift the other
        # values accordingly, so that the valid values are between 0
        # and non_void_nclasses-1 and the void_classes are all equal to
        # non_void_nclasses.
        void_l = self._void_labels
        void_l.sort(reverse=True)  # in-place: sorts self._void_labels
        mapping = self._mapping

        # Apply the mapping: park non_void_nclasses in a temporary
        # class id so it is not clobbered while shifting the others.
        tmp_class = (-1 if not hasattr(self, 'GTclasses') else
                     max(self.GTclasses) + 1)
        seq_y[seq_y == self.non_void_nclasses] = tmp_class
        for i in sorted(mapping.keys()):
            if i == self.non_void_nclasses:
                continue
            seq_y[seq_y == i] = mapping[i]
        try:
            seq_y[seq_y == tmp_class] = mapping[self.non_void_nclasses]
        except KeyError:
            # none of the original classes was self.non_void_nclasses
            pass
    elif max(self._cmap.keys()) > self.non_void_nclasses - 1:
        # Shift values of labels, so that the valid values are between
        # 0 and non_void_nclasses-1.
        mapping = self._mapping

        # Apply the mapping (same parking trick as above)
        tmp_class = (-1 if not hasattr(self, 'GTclasses') else
                     max(self.GTclasses) + 1)
        seq_y[seq_y == self.non_void_nclasses] = tmp_class
        for i in sorted(mapping.keys()):
            if i == self.non_void_nclasses:
                continue
            seq_y[seq_y == i] = mapping[i]
        try:
            seq_y[seq_y == tmp_class] = mapping[self.non_void_nclasses]
        except KeyError:
            # none of the original classes was self.non_void_nclasses
            pass

    # Transform targets seq_y to one hot code if return_one_hot
    # is True
    if self.set_has_GT and self.return_one_hot:
        nc = (self.non_void_nclasses if self._void_labels == [] else
              self.non_void_nclasses + 1)
        sh = seq_y.shape
        seq_y = seq_y.flatten()
        seq_y_hot = np.zeros((seq_y.shape[0], nc), dtype='int32')
        seq_y = seq_y.astype('int32')
        seq_y_hot[range(seq_y.shape[0]), seq_y] = 1
        seq_y_hot = seq_y_hot.reshape(sh + (nc,))
        seq_y = seq_y_hot

    # Dimshuffle if return_01c is False
    if not self.return_01c:
        # b,0,c --> b,c,0
        seq_x = seq_x.transpose([0, 2, 1])
        if self.set_has_GT and self.return_one_hot:
            seq_y = seq_y.transpose([0, 2, 1])
        raw_data = raw_data.transpose([0, 2, 1])

    if self.return_0_255:
        seq_x = (seq_x * 255).astype('uint8')
    ret['data'], ret['labels'] = seq_x, seq_y
    ret['raw_data'] = raw_data
    # Append the data of this batch to the minibatch array
    # (.items() instead of Python2-only iteritems()).
    for k, v in ret.items():
        batch_ret.setdefault(k, []).append(v)

    for k, v in batch_ret.items():
        try:
            batch_ret[k] = np.array(v)
        except ValueError:
            # Variable shape: cannot wrap with a numpy array
            pass
    # Drop the leading axis added by the minibatch wrapping above
    batch_ret['data'] = batch_ret['data'].squeeze(0)
    batch_ret['labels'] = batch_ret['labels'].squeeze(0)
    if self.seq_length > 0 and self.return_middle_frame_only:
        batch_ret['labels'] = batch_ret['labels'][:, self.seq_length // 2]
    if self.return_list:
        return [batch_ret['data'], batch_ret['labels']]
    else:
        return batch_ret
face = face[None, ...] # b01c face = face / 255. # Show def show(img, title=''): plt.imshow(img[0]) plt.title(title) plt.show() if False: show(face, 'face') # Rotation x, _ = random_transform(face, None, rotation_range=150., fill_mode='constant', chan_idx=3, rows_idx=1, cols_idx=2, void_label=0) show(x, 'rotation') # Width shift x, _ = random_transform(face, None, width_shift_range=0.3, fill_mode='constant', chan_idx=3, rows_idx=1, cols_idx=2, void_label=0) show(x, 'width shift')