Example #1
 def __init__(self,
              num_features: int,
              eps: float = 1e-5,
              momentum: float = 0.1,
              affine: bool = True,
              track_running_stats: bool = True) -> None:
     super(_NormBase, self).__init__()
     self.num_features = num_features
     self.eps = eps
     self.momentum = momentum
     self.affine = affine
     self.track_running_stats = track_running_stats
     if self.affine:
         self.weight = Parameter(torch.Tensor(num_features))
         self.bias = Parameter(torch.Tensor(num_features))
     else:
         self.register_parameter('weight', None)
         self.register_parameter('bias', None)
     if self.track_running_stats:
         self.register_buffer('running_mean', torch.zeros(num_features))
         self.register_buffer('running_var', torch.ones(num_features))
         self.register_buffer('num_batches_tracked',
                              torch.tensor(0, dtype=torch.long))
     else:
         self.register_parameter('running_mean', None)
         self.register_parameter('running_var', None)
         self.register_parameter('num_batches_tracked', None)
     self.reset_parameters()
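
A minimal usage sketch: _NormBase is the shared base of the BatchNorm family, and a concrete subclass such as torch.nn.BatchNorm2d exposes exactly the arguments above (the sample sizes are illustrative).

import torch
import torch.nn as nn

# BatchNorm2d forwards these arguments to the _NormBase constructor above.
bn = nn.BatchNorm2d(num_features=16, eps=1e-5, momentum=0.1,
                    affine=True, track_running_stats=True)
x = torch.randn(8, 16, 32, 32)    # (batch, channels, height, width)
y = bn(x)                         # normalized per channel
print(bn.running_mean.shape)      # torch.Size([16]), the registered buffer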
Example #2
def crop_as(x: Array, y: tuple, center: tuple, fill: Scalar=0) -> Array:
    x = tp.Tensor(x)
    size_x = x.shape
    size_y = y

    if isinstance(size_y, tp.Size) and size_x.nspace == size_y.nspace:
        size_y = tuple(size_y.space)
    size_y = tuple(size_y)
    if len(size_y) == len(size_x): pass
    elif len(size_y) == size_x.nspace: size_y = add_special(size_y, size_x.special, -1)
    else: raise TypeError("Mismatched dimensions in 'crop_as'; use -1 for dimensions that do not need cropping.")
    assert len(size_y) == len(size_x)
    size_y = tuple(a if b == -1 else b for a, b in zip(size_x, size_y))

    if len(center) == len(size_x): pass
    elif len(center) == size_x.nspace: center = add_special(center, size_x.special, -1)
    elif len([x for x in center if x >= 0]) == len([x for x in size_y if x >= 0]):
        center = tuple(a if b >= 0 else -1 for a, b in zip(center, size_y))
    else: raise TypeError("Mismatched dimensions for the center in 'crop_as'; use -1 for dimensions that are centered or do not need cropping.")
    assert len(center) == len(size_x)
    center = tuple(a / 2 if b == -1 else b for a, b in zip(size_x, center))

    z = fill * tp.ones(*size_y).type_as(x)
    def intersect(u, v):
        return max(u[0], v[0]), min(u[1], v[1])
    # offset of the crop window inside x along each dimension
    offsets = [round(float(m) - float(ly) / 2) for m, ly in zip(center, size_y)]
    z_box = [intersect((0, ly), (-o, -o + lx)) for o, lx, ly in zip(offsets, size_x, size_y)]
    x_box = [intersect((0, lx), (o, o + ly)) for o, lx, ly in zip(offsets, size_x, size_y)]
    # if the two boxes are separated, there is no overlap to copy
    if any(r[0] >= r[1] for r in z_box) or any(r[0] >= r[1] for r in x_box):
        z.roi = None
        return z
    region_z = tuple(slice(u, v) for u, v in z_box)
    region_x = tuple(slice(u, v) for u, v in x_box)
    z[region_z] = x[region_x]
    z.roi = region_x
    z.special_from_(x)
    return z
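
A hypothetical call, assuming torchplus (tp) accepts numpy input; the -1 convention follows the error messages above: -1 keeps a dimension uncropped, or centers it when used in `center`.

import numpy as np

img = np.random.rand(100, 120)
patch = crop_as(img, (64, 64), center=(50, 60))   # 64x64 window around (50, 60)
column = crop_as(img, (-1, 64), center=(-1, 60))  # crop the second axis only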
Example #3
def create_nii(dcm, creation):
    data = tp.Tensor(dcm)
    if not isinstance(dcm, DCM) and hasattr(dcm, 'bundle'): bundle = dcm.bundle
    else:
        header = nib.Nifti1Header()
        header['regular'] = b'r'
        header['dim'] = [data.ndim, *data.shape] + [1] * (7 - data.ndim)
        bits = len(data.flatten()[0].tobytes()) * 8
        header['bitpix'] = bits
        header.set_data_dtype(data.numpy().dtype)
        if isinstance(dcm, DCM): meta = dcm.bundle
        else: meta = None
        if meta and 'PixelSpacing' in meta:
            spacing = [float(x) for x in meta.PixelSpacing]
        else:
            spacing = [1.0, 1.0]
        if meta and 'SliceThickness' in meta: dz = [float(meta.SliceThickness)]
        else: dz = [1.0]
        header['pixdim'] = [1.0] + spacing + dz + [1.0] * (7 - data.ndim)
        header['qform_code'] = 1
        header['xyzt_units'] = 2
        if meta:
            header['qoffset_x'] = -float(meta.ImagePositionPatient[0])
            header['qoffset_y'] = -float(meta.ImagePositionPatient[1])
            header['qoffset_z'] = float(meta.ImagePositionPatient[2])
            qa, qb, qc, qd = orn2quatern(
                *[float(x) for x in meta.ImageOrientationPatient])
            header['quatern_b'] = qb
            header['quatern_c'] = qc
            header['quatern_d'] = qd
        bundle = nib.Nifti1Image(data, None, header=header)
    instance = creation(data)
    instance.bundle = bundle
    instance.path = getattr(dcm, 'path', 'Unknown')
    return instance
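
A hypothetical usage, assuming a DCM volume and an NII class as the `creation` callable (both names are assumptions); the resulting `bundle` is a nib.Nifti1Image, so it can be saved with nibabel directly.

nii_image = create_nii(dcm_volume, NII)      # dcm_volume and NII are illustrative
nib.save(nii_image.bundle, 'volume.nii.gz')  # bundle is a nib.Nifti1Image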
Example #4
    def __init__(self,
                 embed_dim,
                 num_heads,
                 dropout=0.,
                 bias=True,
                 add_bias_kv=False,
                 add_zero_attn=False,
                 kdim=None,
                 vdim=None):
        super(MultiheadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim

        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"

        if self._qkv_same_embed_dim is False:
            self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
            self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
            self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
            self.register_parameter('in_proj_weight', None)
        else:
            self.in_proj_weight = Parameter(
                torch.empty(3 * embed_dim, embed_dim))
            self.register_parameter('q_proj_weight', None)
            self.register_parameter('k_proj_weight', None)
            self.register_parameter('v_proj_weight', None)

        if bias:
            self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = _LinearWithBias(embed_dim, embed_dim)

        if add_bias_kv:
            self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
            self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None

        self.add_zero_attn = add_zero_attn

        self._reset_parameters()
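
The constructor above matches torch.nn.MultiheadAttention; a quick shape check with the stock module (inputs are (seq_len, batch, embed_dim) by default):

import torch
import torch.nn as nn

mha = nn.MultiheadAttention(embed_dim=64, num_heads=8, dropout=0.1)
q = torch.randn(10, 4, 64)       # (target_len, batch, embed_dim)
k = v = torch.randn(20, 4, 64)   # (source_len, batch, embed_dim)
out, weights = mha(q, k, v)
print(out.shape)                 # torch.Size([10, 4, 64])
print(weights.shape)             # torch.Size([4, 10, 20]), averaged over heads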
Example #5
 def __init__(self, input_size: int, hidden_size: int, bias: bool,
              num_chunks: int) -> None:
     super(RNNCellBase, self).__init__()
     self.input_size = input_size
     self.hidden_size = hidden_size
     self.bias = bias
     self.weight_ih = Parameter(
         torch.Tensor(num_chunks * hidden_size, input_size))
     self.weight_hh = Parameter(
         torch.Tensor(num_chunks * hidden_size, hidden_size))
     if bias:
         self.bias_ih = Parameter(torch.Tensor(num_chunks * hidden_size))
         self.bias_hh = Parameter(torch.Tensor(num_chunks * hidden_size))
     else:
         self.register_parameter('bias_ih', None)
         self.register_parameter('bias_hh', None)
     self.reset_parameters()
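
RNNCellBase is subclassed with num_chunks = 4 (LSTM), 3 (GRU) or 1 (plain RNN); for example, torch.nn.LSTMCell allocates weight_ih of shape (4 * hidden_size, input_size):

import torch
import torch.nn as nn

cell = nn.LSTMCell(input_size=10, hidden_size=20, bias=True)
print(cell.weight_ih.shape)   # torch.Size([80, 10]) = (4 * 20, 10)
x = torch.randn(5, 10)        # (batch, input_size)
h, c = cell(x)                # zero initial state by default
print(h.shape, c.shape)       # torch.Size([5, 20]) twice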
Example #6
    def __init__(self,
                 in1_features: int,
                 in2_features: int,
                 out_features: int,
                 bias: bool = True) -> None:
        super(Bilinear, self).__init__()
        self.in1_features = in1_features
        self.in2_features = in2_features
        self.out_features = out_features
        self.weight = Parameter(
            torch.Tensor(out_features, in1_features, in2_features))

        if bias:
            self.bias = Parameter(torch.Tensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
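
The weight above implements y = x1^T A x2 + b with A of shape (out_features, in1_features, in2_features); a quick shape check with torch.nn.Bilinear:

import torch
import torch.nn as nn

layer = nn.Bilinear(in1_features=20, in2_features=30, out_features=40)
x1 = torch.randn(128, 20)
x2 = torch.randn(128, 30)
print(layer(x1, x2).shape)   # torch.Size([128, 40])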
Example #7
 def __new__(cls, data=None, requires_grad=True):
     if data is None:
         data = torchplus.Tensor()
     assert isinstance(data, torch.Tensor)
     self = torchplus.Tensor._make_subclass(
         cls,
         data,
         auto_device=torchplus.is_autodevice(),
         requires_grad=requires_grad)
     return self
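
A minimal sketch, assuming the __new__ above belongs to a Parameter-like subclass of torchplus.Tensor (the class name Parameter is illustrative):

import torch

p = Parameter(torch.randn(3, 3))   # wrapped with requires_grad=True
q = Parameter()                    # data defaults to an empty torchplus.Tensor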
Example #8
 def _create_bundle(self, data, use_header_size=False, spacing=None):
     data = tp.Tensor(data)
     header = self.bundle.header.copy()
     if spacing is not None:
         header['pixdim'] = [1.0] + list(spacing) + [1.0] * (7 - data.ndim)
     if use_header_size:
         raise NotImplementedError(
             "It appears that the developers forgot to implement keyword use_header_size! Please contact us to remind us. "
         )
         # if any([s != 1 for s in scaling]):
         #     raise_rescale()
         #     dt = np.dtype(self.dtype)
         #     mode = 'Nearest' if dt.kind == np.dtype(np.int).kind or dt.kind == np.dtype(np.uint).kind else 'Linear'
         #     data = rescale_to(data.astype(np.float32), header['dim'][1: dimof(data) + 1], mode = mode).astype(data.dtype)
     else:
         header['dim'] = [data.ndim] + list(
             data.shape) + [1] * (7 - data.ndim)
     return nib.Nifti1Image(data.transpose(1, 0), None, header)
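
A hypothetical usage, assuming `self` is an NII-like object carrying a nibabel bundle (the names nii and resampled_data are illustrative): rebuild the bundle for resampled data with new voxel spacing.

new_img = nii._create_bundle(resampled_data, spacing=[0.5, 0.5, 1.0])
nib.save(new_img, 'resampled.nii.gz')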
Example #9
    def forward(ctx, I1, I2, nbin=100):
        with tp.no_grad():
            if hasattr(ctx, 'JH'): del ctx.JH
            nbin = tp.tensor(nbin)
            data_pair = tp.stack(I1.flatten(1), I2.flatten(1), dim={1})
            nbatch, nhist, ndata = data_pair.ishape
            indices = []
            values = []
            ctx.window = (tp.image_grid(4, 4) - 1).flatten(1).transpose(0, 1)
            for shift in ctx.window:
                # [nbatch] x {nhist} x ndata
                hist_pos = data_pair * nbin
                index = tp.clamp(
                    tp.floor(hist_pos).long() + shift, 0, nbin - 1)
                batch_idx = tp.arange(nbatch).expand_to([nbatch], {1}, ndata)
                index = tp.cat(batch_idx, index, 1)
                value = Bspline(shift.expand_to(data_pair),
                                tp.decimal(hist_pos)).prod(1)
                indices.append(index)
                values.append(value)
            # n_batch x (1 + n_hist) x (n_data x 4 ** n_hist)
            Mindices = tp.cat(indices, -1)
            # n_batch x (n_data x 4 ** n_hist)
            Mvalues = tp.cat(values, -1)
            # (1 + n_hist) x (n_batch x n_data x 4 ** n_hist)
            indices = Mindices.transpose(0, 1).flatten(1)
            # (n_batch x n_data x 4 ** n_hist)
            values = Mvalues.flatten(0)
            if tp.Device == tp.DeviceCPU: creator = torch.sparse.FloatTensor
            else: creator = torch.cuda.sparse.FloatTensor
            collected = creator(indices, values,
                                (nbatch, nbin, nbin)).to_dense()
            collected = tp.Tensor(collected, batch_dim=0)

            ctx.nbin = nbin
            ctx.Ishape = I1.shape
            ctx.data_pair = data_pair
            ctx.JH = collected / ndata
        return ctx.JH
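
A hypothetical invocation, assuming this forward belongs to a torch.autograd.Function subclass (the name JointHistogram and both image tensors are illustrative) that estimates a soft joint histogram, e.g. for mutual-information losses:

JH = JointHistogram.apply(moving_image, fixed_image, 100)  # nbatch x nbin x nbin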
Example #10
    def __init__(self,
                 mode: str,
                 input_size: int,
                 hidden_size: int,
                 num_layers: int = 1,
                 bias: bool = True,
                 batch_first: bool = False,
                 dropout: float = 0.,
                 bidirectional: bool = False) -> None:
        super(RNNBase, self).__init__()
        self.mode = mode
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = float(dropout)
        self.bidirectional = bidirectional
        num_directions = 2 if bidirectional else 1

        if not isinstance(dropout, numbers.Number) or not 0 <= dropout <= 1 or \
                isinstance(dropout, bool):
            raise ValueError(
                "dropout should be a number in range [0, 1] "
                "representing the probability of an element being "
                "zeroed")
        if dropout > 0 and num_layers == 1:
            warnings.warn("dropout option adds dropout after all but last "
                          "recurrent layer, so non-zero dropout expects "
                          "num_layers greater than 1, but got dropout={} and "
                          "num_layers={}".format(dropout, num_layers))

        if mode == 'LSTM':
            gate_size = 4 * hidden_size
        elif mode == 'GRU':
            gate_size = 3 * hidden_size
        elif mode == 'RNN_TANH':
            gate_size = hidden_size
        elif mode == 'RNN_RELU':
            gate_size = hidden_size
        else:
            raise ValueError("Unrecognized RNN mode: " + mode)

        self._flat_weights_names = []
        self._all_weights = []
        for layer in range(num_layers):
            for direction in range(num_directions):
                layer_input_size = input_size if layer == 0 else hidden_size * num_directions

                w_ih = Parameter(torch.Tensor(gate_size, layer_input_size))
                w_hh = Parameter(torch.Tensor(gate_size, hidden_size))
                b_ih = Parameter(torch.Tensor(gate_size))
                # Second bias vector included for CuDNN compatibility. Only one
                # bias vector is needed in standard definition.
                b_hh = Parameter(torch.Tensor(gate_size))
                layer_params = (w_ih, w_hh, b_ih, b_hh)

                suffix = '_reverse' if direction == 1 else ''
                param_names = ['weight_ih_l{}{}', 'weight_hh_l{}{}']
                if bias:
                    param_names += ['bias_ih_l{}{}', 'bias_hh_l{}{}']
                param_names = [x.format(layer, suffix) for x in param_names]

                for name, param in zip(param_names, layer_params):
                    setattr(self, name, param)
                self._flat_weights_names.extend(param_names)
                self._all_weights.append(param_names)

        self._flat_weights = [(lambda wn: getattr(self, wn)
                               if hasattr(self, wn) else None)(wn)
                              for wn in self._flat_weights_names]
        self.flatten_parameters()
        self.reset_parameters()
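
RNNBase backs nn.LSTM, nn.GRU and nn.RNN; for example, an LSTM allocates gate_size = 4 * hidden_size and one (w_ih, w_hh, b_ih, b_hh) group per layer and direction:

import torch
import torch.nn as nn

rnn = nn.LSTM(input_size=10, hidden_size=20, num_layers=2,
              bidirectional=True, batch_first=True)
print(rnn.weight_ih_l0.shape)          # torch.Size([80, 10])
print(rnn.weight_ih_l1_reverse.shape)  # torch.Size([80, 40]): layer 1 sees 2 * 20 inputs
x = torch.randn(4, 7, 10)              # (batch, seq_len, input_size)
out, (h, c) = rnn(x)
print(out.shape)                       # torch.Size([4, 7, 40])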
Example #11
 def __init__(self, num_parameters: int = 1, init: float = 0.25) -> None:
     self.num_parameters = num_parameters
     super(PReLU, self).__init__()
     self.weight = Parameter(torch.Tensor(num_parameters).fill_(init))
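
PReLU computes y = max(0, x) + weight * min(0, x), with the learnable slope initialized to `init`:

import torch
import torch.nn as nn

act = nn.PReLU(num_parameters=1, init=0.25)
x = torch.tensor([-2.0, 3.0])
print(act(x))   # tensor([-0.5000, 3.0000], grad_fn=...)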
Example #12
 def __new__(cls, instance, slice_only=False):
     if isinstance(instance, str):
         p = path(instance)
         if not p.isdir():
             if not slice_only: p = p @ path.Folder
         else: slice_only = False
         dcmBundle = dcm.filereader.dcmread(
             path(__file__) @ path.Folder / "template.dcm")
         slice_arrays = {}
         slices = {}
         zs = {}
         readable = False
         direction_down = True
         for p in ([p] if slice_only else p):
             if not p.ext.lower() in ('dcm', 'ima'): continue
             try:
                 image_slice = dcm.filereader.dcmread(p)
              except Exception:
                  continue
             readable = True
             n_slice = int(image_slice.InstanceNumber)
             if 'SeriesNumber' in image_slice:
                 n_series = int(image_slice.SeriesNumber)
             else:
                 n_series = 0
             try:
                 slice_array = image_slice.pixel_array
              except Exception:
                 try:
                     p_dicom = (p @ path.Folder //
                                'dicom').mkdir() / p @ path.File
                     if not p_dicom.exists():
                         _, stderr = shell(f"dcmdjpeg {p} {p_dicom}")
                     else:
                         stderr = ''
                     if stderr: raise TypeError("Unknown encoding: %s." % p)
                  except Exception:
                     raise TypeError("Unknown encoding: %s." % p)
                 image_slice = dcm.filereader.dcmread(p_dicom)
                 try:
                     slice_array = image_slice.pixel_array
                  except Exception:
                     raise TypeError("Unknown encoding: %s." % p)
             if n_series not in slices:
                 slice_arrays[n_series] = {}
                 slices[n_series] = {}
                 zs[n_series] = {}
             slice_arrays[n_series][n_slice] = slice_array
             slices[n_series][n_slice] = image_slice
             if image_slice.ImageOrientationPatient[2] != 0: iz = 0
             elif image_slice.ImageOrientationPatient[5] != 0: iz = 1
             else: iz = 2
             if 'ImagePositionPatient' in image_slice:
                 z = float(image_slice.ImagePositionPatient[iz])
             elif 'TablePosition' in image_slice:
                 z = image_slice.TablePosition
             elif 'SliceLocation' in image_slice:
                 z = float(image_slice.SliceLocation)
             else:
                 z = 0.
             zs[n_series][n_slice] = z
          if not readable:
              raise TypeError(f"Could not create a DICOM object from {p}.")
         sorted_series = sorted([(n_series, slices[n_series])
                                 for n_series in slices],
                                key=lambda x: -len(x[1]))
         n_series = sorted_series[0][0]
         possible_series = [s[1] for s in sorted_series if s[0] == n_series]
         if len(possible_series) >= 8: series = possible_series[7]
         elif len(possible_series) >= 3: series = possible_series[2]
         else: series = possible_series[0]
         min_slice = 1000, None
         max_slice = 0, None
         top_slices = -float('inf'), {}
         bottom_slices = float('inf'), {}
         for n_slice in series:
             image_slice = series[n_slice]
             z = zs[n_series][n_slice]
             if n_slice < min_slice[0]:
                 min_slice = n_slice, image_slice
             if n_slice > max_slice[0]:
                 max_slice = n_slice, image_slice
              if z > top_slices[0]:
                  top_slices = z, {n_slice: image_slice}
              elif z == top_slices[0]:
                  top_slices[1][n_slice] = image_slice
              if z < bottom_slices[0]:
                  bottom_slices = z, {n_slice: image_slice}
              elif z == bottom_slices[0]:
                  bottom_slices[1][n_slice] = image_slice
         N = min(len(top_slices[1].keys()), len(bottom_slices[1].keys()))
         if N >= 8: i_series = 7
         elif N >= 3: i_series = 2
         else: i_series = 0
         bound1 = sorted(top_slices[1].keys())[i_series]
         bound2 = sorted(bottom_slices[1].keys())[i_series]
         if bound1 > bound2:
             zs = {
                 k: v
                 for k, v in zs[n_series].items() if bound2 <= k <= bound1
             }
             slices = {
                 k: v
                 for k, v in slice_arrays[n_series].items()
                 if bound2 <= k <= bound1
             }
             max_slice = bound1, top_slices[1][bound1]
             min_slice = bound2, bottom_slices[1][bound2]
         elif bound1 < bound2:
             zs = {
                 k: v
                 for k, v in zs[n_series].items() if bound1 <= k <= bound2
             }
             slices = {
                 k: v
                 for k, v in slice_arrays[n_series].items()
                 if bound1 <= k <= bound2
             }
             max_slice = bound2, bottom_slices[1][bound2]
             min_slice = bound1, top_slices[1][bound1]
         else:
             zs = {k: v for k, v in zs[n_series].items()}
             slices = {k: v for k, v in slice_arrays[n_series].items()}
             bound = sorted(series.keys())[0]
             max_slice = min_slice = bound, series[bound]
         direction_down = zs[max_slice[0]] < zs[min_slice[0]]
         typical_slice = max_slice[1] if direction_down else min_slice[1]
         for key in dir(typical_slice):
             if key == 'PixelData' or '_' in key: continue
             if key.capitalize() != key[0] + key[1:].lower(): continue
             dcmBundle[key] = typical_slice[key]
         ozs = tp.Tensor(sorted(zs.values()))
         if len(set(ozs)) > 1:
             volume = tp.stack(
                 orderedValue({zs[i]: slices[i]
                               for i in slices}), -1)
             dcmBundle.SliceThickness = str(
                 tp.abs(tp.mean(ozs[1:] - ozs[:-1])).item())
         else:
             volume = tp.stack(orderedValue(slices), -1)
         volume = volume.astype(
             toU(volume.dtype) if dcmBundle.
             PixelRepresentation else toI(volume.dtype))
         dcmBundle.PixelData = volume.tobytes()
         self = super().__new__(cls, volume)
         self.bundle = dcmBundle
          self.path = path(instance)  # p is reused by the loop above; rebuild the input path
         self.slice_only = slice_only
         self.update()
         return self
      elif hasattr(instance, 'shape'):
          if instance.ndim == 0: return instance
          if isinstance(instance, DCM): return instance
          data = tp.Tensor(instance)
          self = super().__new__(cls, data)
          if isinstance(instance, NII):
              self.bundle = nii2dcmBundle(instance)
              self.path = getattr(instance, 'path', 'Unknown')
          else:
              # raw arrays carry no DICOM metadata
              self.bundle = None
              self.path = 'Unknown'
          self.slice_only = False
          self.update()
          return self
     else:
         raise TypeError(f"Unknown input for DCM: {instance}. ")
Example #13
def nii2dcm(nii, creation):
    data = tp.Tensor(nii)
    if not isinstance(nii, NII) and hasattr(nii, 'bundle'): bundle = nii.bundle
    elif hasattr(nii, 'bundle'):
        header = nii.bundle.header
        # assumed: nii also carries the DICOM template attributes used below
        slice_only = getattr(nii, 'slice_only', False)
        if data.ndim > 3:
            raise TypeError(
                "Dicom is unable to store high dimensional data [%dD]." %
                data.ndim)
        while data.ndim < 3:
            data = data.unsqueeze(-1)
        # use_meta_size = header.get('use_meta_size', False)
        # if use_meta_size:
        #     if dimof(data) <= 2: tosize = (self.bundle.Rows, self.bundle.Columns)[:dimof(data)]
        #     else: tosize = (self.bundle.Rows, self.bundle.Columns) + data.shape[2:]
        #     if any([s != 1 for s in scaling]):
        #         raise_rescale()
        #         dt = np.dtype(self.dtype)
        #         mode = 'Nearest' if dt.kind == np.dtype(np.int).kind or dt.kind == np.dtype(np.uint).kind else 'Linear'
        #         data = rescale_to(data.astype(np.float32), tosize, mode = mode).astype(data.dtype)
        # else: tosize = data.shape
        b, c, d = header.get('quatern', (0.0, 0.0, 0.0))
        origin = header.get('origin', (0.0, 0.0, 0.0))
        spacing = header.get('spacing', [1.0] * 8)
        modality = header.get('modality', 'CT')
        if 'header' in header:
            if 'quatern' not in header:
                b, c, d = [
                    header['header'].get('quatern_b', 0.0),
                    header['header'].get('quatern_c', 0.0),
                    header['header'].get('quatern_d', 0.0)
                ]
            if 'origin' not in header:
                origin = [
                    header['header'].get('qoffset_x', 0.0),
                    header['header'].get('qoffset_y', 0.0),
                    header['header'].get('qoffset_z', 0.0)
                ]
            if 'spacing' not in header:
                spacing = header['header'].get('pixdim', [1.0] * 8)
        spacing = spacing[1:4]
        from math import sqrt
        a = sqrt(max(0.0, 1 - b * b - c * c - d * d))  # clamp against rounding error
        orn = quatern2orn(a, b, c, d)
        # orn = [-x for x in orn]
        origin = [str(-origin[0]), str(-origin[1]), str(origin[2])]
        slice_thickness = header.get('slice_thickness', spacing[2])
        if 'header' not in header:
            if 'quatern' not in header:
                orn = [float(x) for x in nii.bundle.ImageOrientationPatient]
            if 'origin' not in header:
                origin = nii.bundle.ImagePositionPatient
            if 'spacing' not in header:
                slice_thickness = float(nii.bundle.SliceThickness)
                spacing = [float(x) for x in nii.bundle.PixelSpacing] + \
                    [header.get('slice_thickness', abs(slice_thickness))]
            if 'Modality' in nii.bundle: modality = nii.bundle.Modality
        if 'InstanceCreationTime' in nii.bundle:
            ctime = nii.bundle.InstanceCreationTime
        if 'SOPInstanceUID' in nii.bundle: UID = nii.bundle.SOPInstanceUID
        if 'ContentTime' in nii.bundle: time = nii.bundle.ContentTime
        if 'TriggerTime' in nii.bundle: ttime = nii.bundle.TriggerTime
        if 'ReconstructionTargetCenterPatient' in nii.bundle:
            center = nii.bundle.ReconstructionTargetCenterPatient
        bits = len(data.flatten()[0].tobytes()) * 8
        traditional_origin = [
            -float(origin[0]), -float(origin[1]),
            float(origin[2])
        ]
        if np.abs(orn[2]) > 0: iz = 0
        elif np.abs(orn[5]) > 0: iz = 1
        else: iz = 2
        position = np.dot(
            quatern2mat(*orn2quatern(*orn)).T,
            np.array([traditional_origin]).T)[-1, 0]
        bundles = {}
        typical_slice = float('inf'), None
        Nslice = min(data.shape[-1], header.get('max_slice', float('inf')))
        for slice in range(Nslice):
            # sdcm = dcm.filereader.dcmread(self.bundle.filename, stop_before_pixels=True)
            if not header.get('generate_slices',
                              True) and 0 < slice < Nslice - 1:
                continue
            sdcm = deepcopy(nii.bundle)
            if test(lambda: UID): *segs, tail = UID.split('.')
            if 'SOPInstanceUID' in sdcm:
                sdcm.SOPInstanceUID = '.'.join(segs + [str(int(tail) + slice)])
            if 'ReconstructionTargetCenterPatient' in sdcm and not slice_only:
                sdcm.ReconstructionTargetCenterPatient = [
                    0.0, 0.0, center[-1] + slice * slice_thickness
                ]
            if 'TablePosition' in sdcm and not slice_only:
                sdcm.TablePosition = position + slice * slice_thickness
            if 'InstanceNumber' in sdcm and not slice_only:
                sdcm.InstanceNumber = str(slice + 1)
            if 'ImagePositionPatient' in sdcm and not slice_only:
                sdcm.ImagePositionPatient = origin[:iz] + [
                    str(float(origin[iz]) + slice * slice_thickness)
                ] + origin[iz + 1:]
            if 'SliceLocation' in sdcm and not slice_only:
                sdcm.SliceLocation = str(position + slice * slice_thickness)
            if 'SliceThickness' in sdcm:
                sdcm.SliceThickness = str(abs(slice_thickness))
            if 'InStackPositionNumber' in sdcm and not slice_only:
                sdcm.InStackPositionNumber = slice + 1
            if 'ImageOrientationPatient' in sdcm:
                sdcm.ImageOrientationPatient = [str(x) for x in orn]
            # if 'InPlanePhaseEncodingDirection' in sdcm:
            #     del sdcm['InPlanePhaseEncodingDirection']
            if 'Modality' in sdcm and modality:
                sdcm.Modality = modality
            if 'PixelSpacing' in sdcm:
                sdcm.PixelSpacing = [str(x) for x in spacing[:2]]
            if 'BitsStored' in sdcm:
                sdcm.BitsStored = bits
            if 'HighBit' in sdcm:
                sdcm.HighBit = bits - 1
            if 'BitsAllocated' in sdcm:
                sdcm.BitsAllocated = bits
            if 'PixelRepresentation' in sdcm:
                sdcm.PixelRepresentation = int(data.dtype.kind == 'u')
            try:
                # propagate the private tag (0x7005, 0x1018) when present
                nii.bundle[0x7005, 0x1018]
                try:
                    sdcm[0x7005, 0x1018]
                except KeyError:
                    sdcm[0x7005, 0x1018] = nii.bundle[0x7005, 0x1018]
                sdcm[0x7005,
                     0x1018].value = chr(slice + 1).encode() + chr(0).encode()
            except KeyError:
                pass
            if 'LargestImagePixelValue' in sdcm:
                sdcm.LargestImagePixelValue = np.max(data[..., slice])
            if 'PixelData' in sdcm:
                sdcm.PixelData = data[..., slice].tobytes()
                sdcm['PixelData'].VR = 'OB'
            if 'Rows' in sdcm and 'Columns' in sdcm:
                sdcm.Rows, sdcm.Columns = data.shape[:2]
            if float(sdcm.ImagePositionPatient[2]) < typical_slice[0]:
                typical_slice = float(sdcm.ImagePositionPatient[2]), sdcm
            bundles[slice] = sdcm
        return bundles if header.get('generate_slices',
                                     True) else typical_slice[1]
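
A hypothetical usage, assuming `nii_volume` carries a DICOM template in its bundle and DCM is the target class (both names are assumptions): by default the function returns one pydicom dataset per slice, which can be written out individually.

slices = nii2dcm(nii_volume, DCM)
for i, sdcm in slices.items():
    sdcm.save_as(f'slice_{i:04d}.dcm')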