Example #1
    def data(self, index, role=Qt.DisplayRole):
        """Returns the data stored under the given role for the item
        referred to by the index.

        This is an overridden method.

        :Parameters:

        - `index`: the index of a data item
        - `role`: the role being returned
        """
        row, col = index.row(), index.column()

        if not index.isValid() or not (0 <= row < self.numrows):
            return None

        if role == Qt.DisplayRole:

            aq = ismrmrd.Acquisition(self.rbuffer.getCell(row)['head'])

            if col < self.numcolsIdx:  # index fields
                cell = getattr(aq.idx, self.colnames[col])
            else:  # header fields
                cell = getattr(aq, self.colnames[col])

            # check if the current cell is the encoding counter field
            if isinstance(cell, ismrmrd.EncodingCounters):
                return None
            else:  # otherwise no special treatment
                # check what kind of data we have at hand (array or scalar)
                try:
                    # if iteration succeeds => we have an array => format as such
                    ret = '['
                    cellIterator = iter(cell)
                    firstItem = next(cellIterator)
                    ret += str(firstItem)

                    for item in cellIterator:
                        ret += ',' + str(item)

                    ret += ']'

                except (TypeError, StopIteration):
                    # not iterable (or empty) => treat it as a scalar
                    ret = str(cell)

                return ret

        if role == Qt.TextAlignmentRole:
            return Qt.AlignLeft | Qt.AlignVCenter

        return None
Example #2
def test_flags():
    acq = ismrmrd.Acquisition()
    eq_(acq.flags, 0)

    for i in range(1, 65):
        eq_(acq.isFlagSet(i), False)

    for i in range(1, 65):
        acq.setFlag(i)
        eq_(acq.isFlagSet(i), True)

    for i in range(1, 65):
        eq_(acq.isFlagSet(i), True)

    for i in range(1, 65):
        acq.clearFlag(i)
        eq_(acq.isFlagSet(i), False)

    eq_(acq.flags, 0)

    for i in range(1, 65):
        acq.setFlag(i)
    acq.clearAllFlags()
    for i in range(1, 65):
        eq_(acq.isFlagSet(i), False)
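
# A minimal companion sketch (assuming the camelCase API used above and the
# ISMRMRD convention that flag n maps to bit n-1 of the 64-bit `flags` mask):
import ismrmrd

acq = ismrmrd.Acquisition()
flag = ismrmrd.ACQ_IS_NOISE_MEASUREMENT
acq.setFlag(flag)
assert acq.isFlagSet(flag)
assert acq.flags & (1 << (flag - 1))  # the same check done directly on the bitmask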
Example #3
def test_flags():
    acq = ismrmrd.Acquisition()

    for i in range(1, 65):
        assert not acq.is_flag_set(i), \
            "Expected flag {} to not be set.".format(i)

    for i in range(1, 65):
        acq.set_flag(i)
        assert acq.is_flag_set(i), \
            "Expected flag {} to be set.".format(i)

    for i in range(1, 65):
        acq.clear_flag(i)
        assert not acq.is_flag_set(i), \
            "Expected flag {} to not be set.".format(i)

    assert acq.flags == 0, "Expected all flags to be cleared."

    for i in range(1, 65):
        acq.set_flag(i)

    acq.clear_all_flags()

    for i in range(1, 65):
        assert not acq.is_flag_set(i), \
            "Expected flag {} to not be set.".format(i)
Example #4
def test_new_instance():
    acq = ismrmrd.Acquisition()
    eq_(type(acq.getHead()), ismrmrd.AcquisitionHeader)
    eq_(type(acq.data), np.ndarray)
    eq_(acq.data.dtype, np.complex64)
    eq_(type(acq.traj), np.ndarray)
    eq_(acq.traj.dtype, np.float32)
Example #5
def test_resize():
    acq = ismrmrd.Acquisition()
    nsamples, nchannels, ntrajdims = 128, 8, 3
    acq.resize(nsamples, nchannels, ntrajdims)
    eq_(acq.data.shape, (nchannels, nsamples))
    eq_(acq.traj.shape, (nsamples, ntrajdims))
    head = acq.getHead()
    eq_(head.number_of_samples, nsamples)
    eq_(head.active_channels, nchannels)
    eq_(head.trajectory_dimensions, ntrajdims)
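
# A short follow-on sketch (illustrative; assumes `data` and `traj` remain
# NumPy arrays after resize, as Example #4 indicates) that fills the resized
# buffers by slice assignment:
import numpy as np
import ismrmrd

acq = ismrmrd.Acquisition()
acq.resize(128, 8, 3)
acq.data[:] = np.zeros((8, 128), dtype=np.complex64)  # (channels, samples)
acq.traj[:] = np.zeros((128, 3), dtype=np.float32)    # (samples, trajectory dims)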
Example #6
def test_clearing_unset_flag_does_not_set_other_flags():
    acquisition = ismrmrd.Acquisition()

    assert acquisition.flags == 0, \
        "Fresh acquisitions should not have any flags set."

    acquisition.clearFlag(ismrmrd.ACQ_FIRST_IN_ENCODE_STEP1)

    assert acquisition.flags == 0, \
        "Clearing an unset flag sets other flags."
Example #7
def test_read_only_fields():
    acq = ismrmrd.Acquisition()

    for field in ['number_of_samples', 'active_channels', 'trajectory_dimensions']:
        try:
            setattr(acq, field, None)
        except AttributeError:
            pass
        else:
            assert False, "assigned to read-only field of Acquisition"
Example #8
def test_set_head():
    acq = ismrmrd.Acquisition()
    head = ismrmrd.AcquisitionHeader()
    nsamples, nchannels, ntrajdims = 128, 8, 3
    head.number_of_samples = nsamples
    head.active_channels = nchannels
    head.trajectory_dimensions = ntrajdims

    acq.setHead(head)

    eq_(acq.data.shape, (nchannels, nsamples))
    eq_(acq.traj.shape, (nsamples, ntrajdims))
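
# A hedged variant of the same pattern starting from the acquisition's own
# header; getHead() is assumed to return a copy, so the modified header has to
# be pushed back with setHead() before the buffers are reallocated.
import ismrmrd

acq = ismrmrd.Acquisition()
head = acq.getHead()
head.number_of_samples = 64
head.active_channels = 4
acq.setHead(head)
assert acq.data.shape == (4, 64)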
Example #9
    def read_acquisition(self, acqnum):
        if 'data' not in self._dataset:
            raise LookupError("Acquisition data not found in the dataset.")
        
        # create an acquisition
        # and fill with the header for this acquisition
        acq = ismrmrd.Acquisition(self._dataset['data'][acqnum]['head'])

        # copy the data as complex float
        acq.data[:] = self._dataset['data'][acqnum]['data'].view(
            np.complex64).reshape(
                (acq.active_channels, acq.number_of_samples))[:]

        # copy the trajectory as float
        if acq.traj.size > 0:
            acq.traj[:] = self._dataset['data'][acqnum]['traj'].reshape(
                (acq.number_of_samples, acq.trajectory_dimensions))[:]

        return acq
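
# For comparison, a minimal usage sketch of the stock ismrmrd.Dataset reader
# (assuming an HDF5 file such as the one written in Example #11 below):
import ismrmrd

dset = ismrmrd.Dataset('testdata.h5', 'dataset', create_if_needed=False)
acq = dset.read_acquisition(0)   # first stored acquisition
print(acq.data.shape)            # (active_channels, number_of_samples)
dset.close()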
Example #10
# Definitions section in seq file
seq.set_definition("Name", seq_name) # metadata file name is saved in Siemens header for reco
seq.set_definition("FOV", [1e-3*fov, 1e-3*fov, slice_res]) # for FOV positioning
seq.set_definition("Slice_Thickness", "%f" % (slice_res*(1+dist_fac*1e-2)*(slices-1)+slice_res)) # we misuse this to show the total covered head area in the GUI
if num_segments > 1:
    seq.set_definition("MaxAdcSegmentLength", "%d" % int(num_samples/num_segments+0.5)) # for automatic ADC segment length setting

# Noise scans
noise_samples = 256
noise_dwelltime = 2e-6
noise_adc = make_adc(system=system, num_samples=noise_samples, dwell=noise_dwelltime, delay=system.adc_dead_time)
noise_delay = make_delay(d=ph.round_up_to_raster(calc_duration(noise_adc)+1e-3,decimals=5)) # add some more time to the ADC delay to be safe
for k in range(noisescans):
    seq.add_block(noise_adc, noise_delay)
    acq = ismrmrd.Acquisition()
    acq.setFlag(ismrmrd.ACQ_IS_NOISE_MEASUREMENT)
    meta_file.append_acquisition(acq)

# Perform cartesian reference scan: if selected / for accelerated spirals / for long readouts
if not refscan:
    if redfac > 1:
        refscan = True
        print("Accelerated scan: Activate Cartesian reference scan.")

if refscan:
    res_refscan = res*1e-3 * 2
    flip_refscan = 15
    bw_refscan = 800
    params_ref = {"fov": fov*1e-3, "res": res_refscan, "slices": slices,
                  "slice_res": slice_res, "dist_fac": dist_fac,
                  "flip_angle": flip_refscan, "rf_dur": rf_dur,
                  "tbp": tbp_exc, "readout_bw": bw_refscan}
Example #11
def create(filename='testdata.h5',
           matrix_size=256,
           coils=8,
           oversampling=2,
           repetitions=1,
           acceleration=1,
           noise_level=0.05):

    # Generate the phantom and coil sensitivity maps
    phan = simulation.phantom(matrix_size)
    csm = simulation.generate_birdcage_sensitivities(matrix_size, coils)
    coil_images = np.tile(phan, (coils, 1, 1)) * csm

    # Oversample if needed
    if oversampling > 1:
        padding = round((oversampling * phan.shape[1] - phan.shape[1]) / 2)
        phan = np.pad(phan, ((0, 0), (padding, padding)), mode='constant')
        csm = np.pad(csm, ((0, 0), (0, 0), (padding, padding)),
                     mode='constant')
        coil_images = np.pad(coil_images, ((0, 0), (0, 0), (padding, padding)),
                             mode='constant')

    # The number of points in x,y,kx,ky
    nx = matrix_size
    ny = matrix_size
    nkx = oversampling * nx
    nky = ny

    # Open the dataset
    dset = ismrmrd.Dataset(filename, "dataset", create_if_needed=True)

    # Create the XML header and write it to the file
    header = ismrmrd.xsd.ismrmrdHeader()

    # Experimental Conditions
    exp = ismrmrd.xsd.experimentalConditionsType()
    exp.H1resonanceFrequency_Hz = 128000000
    header.experimentalConditions = exp

    # Acquisition System Information
    sys = ismrmrd.xsd.acquisitionSystemInformationType()
    sys.receiverChannels = coils
    header.acquisitionSystemInformation = sys

    # Encoding
    encoding = ismrmrd.xsd.encoding()
    encoding.trajectory = ismrmrd.xsd.trajectoryType.cartesian

    # encoded and recon spaces
    efov = ismrmrd.xsd.fieldOfView_mm()
    efov.x = oversampling * 256
    efov.y = 256
    efov.z = 5
    rfov = ismrmrd.xsd.fieldOfView_mm()
    rfov.x = 256
    rfov.y = 256
    rfov.z = 5

    ematrix = ismrmrd.xsd.matrixSize()
    ematrix.x = nkx
    ematrix.y = nky
    ematrix.z = 1
    rmatrix = ismrmrd.xsd.matrixSize()
    rmatrix.x = nx
    rmatrix.y = ny
    rmatrix.z = 1

    espace = ismrmrd.xsd.encodingSpaceType()
    espace.matrixSize = ematrix
    espace.fieldOfView_mm = efov
    rspace = ismrmrd.xsd.encodingSpaceType()
    rspace.matrixSize = rmatrix
    rspace.fieldOfView_mm = rfov

    # Set encoded and recon spaces
    encoding.encodedSpace = espace
    encoding.reconSpace = rspace

    # Encoding limits
    limits = ismrmrd.xsd.encodingLimitsType()

    limits1 = ismrmrd.xsd.limitType()
    limits1.minimum = 0
    limits1.center = round(ny / 2)
    limits1.maximum = ny - 1
    limits.kspace_encoding_step_1 = limits1

    limits_rep = ismrmrd.xsd.limitType()
    limits_rep.minimum = 0
    limits_rep.center = round(repetitions / 2)
    limits_rep.maximum = repetitions - 1
    limits.repetition = limits_rep

    limits_rest = ismrmrd.xsd.limitType()
    limits_rest.minimum = 0
    limits_rest.center = 0
    limits_rest.maximum = 0
    limits.kspace_encoding_step_0 = limits_rest
    limits.slice = limits_rest
    limits.average = limits_rest
    limits.contrast = limits_rest
    limits.kspace_encoding_step_2 = limits_rest
    limits.phase = limits_rest
    limits.segment = limits_rest
    limits.set = limits_rest

    encoding.encodingLimits = limits
    header.encoding.append(encoding)

    dset.write_xml_header(header.toxml('utf-8'))

    # Synthesize the k-space data
    Ktrue = transform.transform_image_to_kspace(coil_images, (1, 2))

    # Create an acquisition and reuse it
    acq = ismrmrd.Acquisition()
    acq.resize(nkx, coils)
    acq.version = 1
    acq.available_channels = coils
    acq.center_sample = round(nkx / 2)
    acq.read_dir[0] = 1.0
    acq.phase_dir[1] = 1.0
    acq.slice_dir[2] = 1.0

    # Initialize an acquisition counter
    counter = 0

    # Write out a few noise scans
    for n in range(32):
        noise = noise_level * (np.random.randn(coils, nkx) +
                               1j * np.random.randn(coils, nkx))
        # here's where we would make the noise correlated
        acq.scan_counter = counter
        acq.clearAllFlags()
        acq.setFlag(ismrmrd.ACQ_IS_NOISE_MEASUREMENT)
        acq.data[:] = noise
        dset.append_acquisition(acq)
        counter += 1  # increment the scan counter

    # Loop over the repetitions, add noise and write to disk
    # simulating a T-SENSE type scan
    for rep in range(repetitions):
        noise = noise_level * (np.random.randn(coils, nky, nkx) +
                               1j * np.random.randn(coils, nky, nkx))
        # here's where we would make the noise correlated
        K = Ktrue + noise
        acq.idx.repetition = rep
        for acc in range(acceleration):
            for line in np.arange(acc, nky, acceleration):
                # set some fields in the header
                acq.scan_counter = counter
                acq.idx.kspace_encode_step_1 = line
                acq.clearAllFlags()
                if line == 0:
                    acq.setFlag(ismrmrd.ACQ_FIRST_IN_ENCODE_STEP1)
                    acq.setFlag(ismrmrd.ACQ_FIRST_IN_SLICE)
                    acq.setFlag(ismrmrd.ACQ_FIRST_IN_REPETITION)
                elif line == nky - 1:
                    acq.setFlag(ismrmrd.ACQ_LAST_IN_ENCODE_STEP1)
                    acq.setFlag(ismrmrd.ACQ_LAST_IN_SLICE)
                    acq.setFlag(ismrmrd.ACQ_LAST_IN_REPETITION)
                # set the data and append
                acq.data[:] = K[:, line, :]
                dset.append_acquisition(acq)
                counter += 1

    # Clean up
    dset.close()
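
# A usage sketch (hedged; assumes the `simulation` and `transform` helpers
# imported by this module are available): write a small synthetic dataset.
if __name__ == '__main__':
    create(filename='testdata.h5', matrix_size=64, coils=4, repetitions=2)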
Example #12
        head.flags = 0
        if line == 0:
            # flag n corresponds to bit (n - 1) of the 64-bit flag field
            head.flags |= 1 << (ismrmrd.ACQ_FIRST_IN_ENCODE_STEP1 - 1)
            head.flags |= 1 << (ismrmrd.ACQ_FIRST_IN_SLICE - 1)
            head.flags |= 1 << (ismrmrd.ACQ_FIRST_IN_REPETITION - 1)
        elif line == nY - 1:
            head.flags |= 1 << (ismrmrd.ACQ_LAST_IN_ENCODE_STEP1 - 1)
            head.flags |= 1 << (ismrmrd.ACQ_LAST_IN_SLICE - 1)
            head.flags |= 1 << (ismrmrd.ACQ_LAST_IN_REPETITION - 1)

        # Generate k-space data (take this line of the simulated k-space)
        data = np.asarray(K[:, line, :, rep])

        # Construct acquisition object from header
        acq = ismrmrd.Acquisition(head=head)

        # Fill in the internal data array
        acq.data[:] = data

        # Append to HDF5 dataset
        dset.append_acquisition(acq)

# Fill the XML header
try:
    import ismrmrd_xsd
    HAS_XSD = True
except ImportError:
    HAS_XSD = False

if HAS_XSD:
Example #13
    def updatePlot(self, *args):

        rawIndex = self.rawCB.currentIndex()
        trajIndex = self.trajCB.currentIndex()

        # get current acquisition if data or trajectory plot enabled
        if rawIndex != 0 or trajIndex != 0:
            # get currently selected row from table view
            row = self.tableView.currentIndex().row()

            # read corresponding acquisition from table model buffer
            aq = ismrmrd.Acquisition(
                self.tableModel.rbuffer.getCell(row)['head'])

        # update raw data plot
        if rawIndex != 0:
            # get the data
            data = self.tableModel.rbuffer.getCell(row)['data'].view(
                np.complex64).reshape(
                    (aq.active_channels, aq.number_of_samples))[:]

            # modify data depending on selected visualization
            if self.rawCB.currentText() == 'Real':
                dataOut = np.real(data)
            elif self.rawCB.currentText() == 'Imag':
                dataOut = np.imag(data)
            elif self.rawCB.currentText() == 'FFT (magnitude)':
                dataOut = abs(np.fft.fftshift(np.fft.fft(data)))
            elif self.rawCB.currentText() == 'Phase':
                dataOut = np.angle(data)
            elif self.rawCB.currentText() == 'Phase (unwrapped)':
                dataOut = np.unwrap(np.angle(data))
            else:
                dataOut = abs(data)

            # remove old plots and legend entries
            for item in self.rawPlot.items():
                self.rawPlot.removeItem(item)
            try:
                self.rawPlot.legend.scene().removeItem(self.rawPlot.legend)
            except Exception as e:
                print(e)

            #self.plotWidget.rawPlot.clear()
            self.rawPlot.setTitle('Coil data')
            self.rawPlot.legend = self.rawPlot.addLegend()

            for ind in range(0, len(dataOut)):
                color = pg.intColor(ind)
                self.rawPlot.plot(dataOut[ind, :],
                                  pen=pg.mkPen(color),
                                  name=' Channel ' + str(ind))

            self.rawPlot.show()
        else:
            self.rawPlot.hide()

        # update trajectory plot
        if self.trajCB.currentIndex() != 0 and aq.traj.size > 0:
            # get the data
            data = self.tableModel.rbuffer.getCell(row)['traj'].reshape(
                (aq.number_of_samples, aq.trajectory_dimensions))[:]

            # modify data depending on selected visualization
            if self.trajCB.currentText() == 'FFT (magnitude)':
                dataOut = abs(np.fft.fft(data))
            else:
                dataOut = data

            # remove old plots and legend entries
            for item in self.trajPlot.items():
                self.trajPlot.removeItem(item)
            try:
                self.trajPlot.legend.scene().removeItem(self.trajPlot.legend)
            except Exception as e:
                print(e)

            #self.plotWidget.trajPlot.clear()
            self.trajPlot.setTitle('Trajectory data')
            self.trajPlot.legend = self.trajPlot.addLegend()

            for ind in range(0, dataOut.shape[1]):
                color = pg.intColor(ind)
                self.trajPlot.plot(dataOut[:, ind],
                                   pen=pg.mkPen(color),
                                   name=' Channel ' + str(ind))

            self.trajPlot.show()
        else:
            self.trajPlot.hide()
Example #14
def _transform_from_legacy_acquisition(items):
    header, data = items
    header.number_of_samples, header.active_channels = data.shape
    acquisition = ismrmrd.Acquisition(header)
    acquisition.data[:] = numpy.transpose(data)
    return acquisition
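
# A hedged usage sketch with synthetic inputs; the legacy layout is assumed to
# be (samples, channels), which is why the helper transposes before copying.
import numpy
import ismrmrd

legacy_header = ismrmrd.AcquisitionHeader()
legacy_data = numpy.zeros((128, 8), dtype=numpy.complex64)  # samples x channels

acq = _transform_from_legacy_acquisition((legacy_header, legacy_data))
print(acq.data.shape)  # (8, 128): channels x samples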
Example #15
    def compute(self):

        data = self.getData('data')
        noise = self.getData('noise')
        header = self.getData('header')
        filename = self.getData('filename')
        
        #We will delete the file if it exists
        try:
            os.remove(filename)
        except OSError:
            pass
        
        # Expected dimension order:
        #   nr_measured_channels
        #   nr_mixes
        #   nr_dynamic_scans
        #   nr_echoes
        #   nr_cardiac_phases
        #   nr_locations
        #   nr_rows
        #   nr_extra_attr_values
        #   nr_measurements
        #   nr_e3_profiles
        #   nr_e2_profiles
        #   nr_e1_profiles
        #   nr_samples

        dimension_keys = ['nr_measured_channels',
                          'nr_mixes',
                          'nr_dynamic_scans',
                          'nr_echoes',
                          'nr_cardiac_phases',
                          'nr_locations',
                          'nr_rows',
                          'nr_extra_attr_values',
                          'nr_measurements',
                          'nr_e3_profiles',
                          'nr_e2_profiles',
                          'nr_e1_profiles',
                          'nr_samples']
        
        sin = header['sin']
        lab = header['lab']
        
        data_dimensions = np.concatenate(
            [[int(sin[x][0][0]) for x in dimension_keys[0:9]],
             [sin[x] for x in dimension_keys[9:]]])
        data = data.reshape(data_dimensions)

        # Open the dataset
        dset = ismrmrd.Dataset(filename, "dataset", create_if_needed=True)
    
        # Create the XML header and write it to the file
        header = ismrmrd.xsd.ismrmrdHeader()
    
        # Experimental Conditions
        exp = ismrmrd.xsd.experimentalConditionsType()
        exp.H1resonanceFrequency_Hz = 128000000
        header.experimentalConditions = exp
    
        # Acquisition System Information
        sys = ismrmrd.xsd.acquisitionSystemInformationType()
        sys.receiverChannels = data.shape[0]
        header.acquisitionSystemInformation = sys

        # Encoding
        encoding = ismrmrd.xsd.encoding()
        encoding.trajectory = ismrmrd.xsd.trajectoryType.cartesian
    
        # encoded and recon spaces
        ematrix = ismrmrd.xsd.matrixSize()
        ematrix.x = data.shape[-1]
        ematrix.y = data.shape[-2]
        ematrix.z = data.shape[-3]
        rmatrix = ismrmrd.xsd.matrixSize()
        rmatrix.x = int(sin['output_resolutions'][0][0])
        rmatrix.y = int(sin['output_resolutions'][0][1])
        rmatrix.z = int(sin['output_resolutions'][0][2])
        efov = ismrmrd.xsd.fieldOfView_mm()
        efov.x = ematrix.x * float(sin['voxel_sizes'][0][0])
        efov.y = rmatrix.y * float(sin['voxel_sizes'][0][1])
        efov.z = rmatrix.z * float(sin['voxel_sizes'][0][2])
        rfov = ismrmrd.xsd.fieldOfView_mm()
        rfov.x = rmatrix.x * float(sin['voxel_sizes'][0][0])
        rfov.y = rmatrix.y * float(sin['voxel_sizes'][0][1])
        rfov.z = rmatrix.y * float(sin['voxel_sizes'][0][2])
    

        espace = ismrmrd.xsd.encodingSpaceType()
        espace.matrixSize = ematrix
        espace.fieldOfView_mm = efov
        rspace = ismrmrd.xsd.encodingSpaceType()
        rspace.matrixSize = rmatrix
        rspace.fieldOfView_mm = rfov
    
        # Set encoded and recon spaces
        encoding.encodedSpace = espace
        encoding.reconSpace = rspace
    
        # Encoding limits
        limits = ismrmrd.xsd.encodingLimitsType()
    
        limits1 = ismrmrd.xsd.limitType()
        limits1.minimum = 0
        limits1.center = data.shape[-2] // 2
        limits1.maximum = data.shape[-2] - 1
        limits.kspace_encoding_step_1 = limits1

        limits2 = ismrmrd.xsd.limitType()
        limits2.minimum = 0
        limits2.center = data.shape[-3] // 2
        limits2.maximum = data.shape[-3] - 1
        limits.kspace_encoding_step_2 = limits2

        #limits3 = ismrmrd.xsd.limitType()
        #limits3.minimum = 0
        #limits3.center = data.shape[-4]/2
        #limits3.maximum = data.shape[-4]
        #limits.kspace_encoding_step_3 = limits3

        limits_average = ismrmrd.xsd.limitType()
        limits_average.minimum = 0
        limits_average.center = data.shape[-5] // 2
        limits_average.maximum = data.shape[-5] - 1
        limits.average = limits_average

        limits_slice = ismrmrd.xsd.limitType()
        limits_slice.minimum = 0
        limits_slice.center = data.shape[-8] // 2
        limits_slice.maximum = data.shape[-8] - 1
        limits.slice = limits_slice

        limits_contrast = ismrmrd.xsd.limitType()
        limits_contrast.minimum = 0
        limits_contrast.center = data.shape[-9] // 2
        limits_contrast.maximum = data.shape[-9] - 1
        limits.contrast = limits_contrast

        limits_phase = ismrmrd.xsd.limitType()
        limits_phase.minimum = 0
        limits_phase.center = data.shape[-10] // 2
        limits_phase.maximum = data.shape[-10] - 1
        limits.phase = limits_phase

        limits_rep = ismrmrd.xsd.limitType()
        limits_rep.minimum = 0
        limits_rep.center = data.shape[-11] // 2
        limits_rep.maximum = data.shape[-11] - 1
        limits.repetition = limits_rep
    
        limits_rest = ismrmrd.xsd.limitType()
        limits_rest.minimum = 0
        limits_rest.center = 0
        limits_rest.maximum = 0
        limits.segment = limits_rest
        limits.set = limits_rest
    
        encoding.encodingLimits = limits
        header.encoding.append(encoding)

        dset.write_xml_header(header.toxml('utf-8'))           
        
        # # Write out a few noise scans
        # for n in range(32):
        #     noise = noise_level * (np.random.randn(coils, nkx) + 1j * np.random.randn(coils, nkx))
        #     # here's where we would make the noise correlated
        #     acq.scan_counter = counter
        #     acq.clearAllFlags()
        #     acq.setFlag(ismrmrd.ACQ_IS_NOISE_MEASUREMENT)
        #     acq.data[:] = noise
        #     dset.append_acquisition(acq)
        #     counter += 1 # increment the scan counter

        acq = ismrmrd.Acquisition()
        acq.clearAllFlags()
        acq.version = 1
        acq.setFlag(ismrmrd.ACQ_IS_NOISE_MEASUREMENT)
        acq.available_channels = noise.shape[0]
        acq.resize(noise.shape[1],noise.shape[0])
        acq.data[:] = noise[:]
        dset.append_acquisition(acq)
        
        counter = 0
        for l in range(len(lab['control'])):
            if lab['label_type'][l] == 'LABEL_TYPE_STANDARD':
                if lab['control'][l] == 'CTRL_NORMAL_DATA':
                    counter += 1
                    acq = ismrmrd.Acquisition()
                    acq.clearAllFlags()
                    
                    acq.resize(data.shape[-1], data.shape[0])
                    acq.version = 1
                    acq.available_channels = data.shape[0]
                    acq.center_sample = data.shape[-1] // 2
                    acq.read_dir[0] = 1.0
                    acq.phase_dir[1] = 1.0
                    acq.slice_dir[2] = 1.0

                    
                    e1 = int(lab['e1_profile_nr'][l])
                    e2 = int(lab['e2_profile_nr'][l])
                    e3 = int(lab['e3_profile_nr'][l])
                    meas = int(lab['measurement_nr'][l])
                    location = int(lab['location_nr'][l])
                    echo = int(lab['echo_nr'][l])
                    phase = int(lab['cardiac_phase_nr'][l])
                    dyn = int(lab['dynamic_scan_nr'][l])
                    mix = int(lab['mix_nr'][l])
                    row = int(lab['mix_nr'][l])
                    extra = int(lab['extra_attr_nr'][l])
                    acq.idx.kspace_encode_step_1 = e1
                    acq.idx.kspace_encode_step_2 = e2
                    acq.idx.contrast = echo
                    acq.idx.average = meas
                    acq.idx.repetition = dyn
                    acq.idx.slice = location
                    acq.idx.phase = phase
                    acq.data[:] = np.squeeze(data[:,mix,dyn,echo,phase,location,row,extra,meas,e3,e2,e1,:])

                    #TODO:
                    #Set some flags
                    #deal with ignored dimensions
                    #add some noise sample
                    #deal with orientation

                    dset.append_acquisition(acq)                
        return 0
Example #16
def gre_refscan(seq, meta_file=None, system=Opts(), params=None):

    # decrease slew rate a bit
    save_slew = system.max_slew
    system.max_slew = 100 * system.gamma
    if params is None:
        params = {
            "fov": 210e-3,
            "res": 3e-3,
            "flip_angle": 12,
            "rf_dur": 1e-3,
            "tbp": 2,
            "slices": 1,
            "slice_res": 2e-3,
            "dist_fac": 0,
            "readout_bw": 600
        }

    # RF
    rf, gz, gz_reph, rf_del = make_sinc_pulse(
        flip_angle=params["flip_angle"] * math.pi / 180,
        duration=params["rf_dur"],
        slice_thickness=params["slice_res"],
        apodization=0.5,
        time_bw_product=params["tbp"],
        system=system,
        return_gz=True,
        return_delay=True)

    # Calculate readout gradient and ADC parameters
    delta_k = 1 / params["fov"]
    Nx = Ny = int(params["fov"] / params["res"] + 0.5)
    samples = 2 * Nx  # 2x oversampling
    gx_flat_time_us = int(1e6 / params["readout_bw"])  # readout_bw is in Hz/Px
    dwelltime = ph.trunc_to_raster(1e-6 * gx_flat_time_us / samples,
                                   decimals=7)
    gx_flat_time = round(dwelltime * samples, 5)
    if (1e5 * gx_flat_time % 2 == 1):
        gx_flat_time += 10e-6  # even flat time
    diff_flat_adc = gx_flat_time - (dwelltime * samples)

    # Gradients
    gx_flat_area = Nx * delta_k * (
        gx_flat_time /
        (dwelltime * samples))  # compensate for longer flat time than ADC
    gx = make_trapezoid(channel='x',
                        flat_area=gx_flat_area,
                        flat_time=gx_flat_time,
                        system=system)
    gx_pre = make_trapezoid(channel='x',
                            area=-gx.area / 2,
                            duration=1.4e-3,
                            system=system)
    phase_areas = (np.arange(Ny) - Ny / 2) * delta_k

    # reduce slew rate of spoilers to avoid stimulation
    gx_spoil = make_trapezoid(channel='x',
                              area=2 * Nx * delta_k,
                              system=system,
                              max_slew=120 * system.gamma)
    gz_spoil = make_trapezoid(channel='z',
                              area=4 / params["slice_res"],
                              system=system,
                              max_slew=120 * system.gamma)

    # take minimum TE rounded up to .1 ms
    min_TE = np.ceil(
        (gz.fall_time + gz.flat_time / 2 + calc_duration(gx_pre) +
         calc_duration(gx) / 2) / seq.grad_raster_time) * seq.grad_raster_time
    TE = ph.round_up_to_raster(min_TE, decimals=4)
    delay_TE = TE - min_TE

    # take minimum TR rounded up to .1 ms
    min_TR = calc_duration(gx_pre) + calc_duration(gz) + calc_duration(
        gx) + delay_TE + calc_duration(gx_spoil, gz_spoil)
    TR = ph.round_up_to_raster(min_TR, decimals=4)
    delay_TR = TR - min_TR

    # ADC with 2x oversampling
    adc = make_adc(num_samples=samples,
                   dwell=dwelltime,
                   delay=gx.rise_time + diff_flat_adc / 2,
                   system=system)

    # RF spoiling
    rf_spoiling_inc = 117
    rf_phase = 0
    rf_inc = 0

    # build sequence
    prepscans = 40  # number of dummy preparation scans

    if params["slices"] % 2 == 1:
        slc = 0
    else:
        slc = 1
    for s in range(params["slices"]):
        if s == int(params["slices"] / 2 + 0.5):
            if params["slices"] % 2 == 1:
                slc = 1
            else:
                slc = 0
        rf.freq_offset = gz.amplitude * params["slice_res"] * (
            slc - (params["slices"] - 1) / 2) * (1 + params["dist_fac"] * 1e-2)

        # prepscans
        for d in range(prepscans):
            rf.phase_offset = rf_phase / 180 * np.pi
            adc.phase_offset = rf_phase / 180 * np.pi
            rf_inc = divmod(rf_inc + rf_spoiling_inc, 360.0)[1]
            rf_phase = divmod(rf_phase + rf_inc, 360.0)[1]

            seq.add_block(rf, gz, rf_del)
            gy_pre = make_trapezoid(channel='y',
                                    area=phase_areas[0],
                                    duration=1.4e-3,
                                    system=system)
            seq.add_block(gx_pre, gy_pre, gz_reph)
            seq.add_block(make_delay(delay_TE))
            seq.add_block(gx)
            gy_pre.amplitude = -gy_pre.amplitude
            seq.add_block(make_delay(delay_TR), gx_spoil, gy_pre, gz_spoil)

        # imaging scans
        for i in range(Ny):
            rf.phase_offset = rf_phase / 180 * np.pi
            adc.phase_offset = rf_phase / 180 * np.pi
            rf_inc = divmod(rf_inc + rf_spoiling_inc, 360.0)[1]
            rf_phase = divmod(rf_phase + rf_inc, 360.0)[1]

            seq.add_block(rf, gz, rf_del)
            gy_pre = make_trapezoid(channel='y',
                                    area=phase_areas[i],
                                    duration=1.4e-3,
                                    system=system)
            seq.add_block(gx_pre, gy_pre, gz_reph)
            seq.add_block(make_delay(delay_TE))
            seq.add_block(gx, adc)
            gy_pre.amplitude = -gy_pre.amplitude
            seq.add_block(make_delay(delay_TR), gx_spoil, gy_pre, gz_spoil)

            if meta_file is not None:
                acq = ismrmrd.Acquisition()
                acq.idx.kspace_encode_step_1 = i
                acq.idx.kspace_encode_step_2 = 0  # only 2D atm
                acq.idx.slice = slc
                # acq.idx.average = avg
                acq.setFlag(ismrmrd.ACQ_IS_PARALLEL_CALIBRATION)
                if i == Ny - 1:
                    acq.setFlag(ismrmrd.ACQ_LAST_IN_SLICE)
                meta_file.append_acquisition(acq)

        slc += 2  # acquire every 2nd slice, afterwards fill slices in between

    # add a delay after the reference scan to allow for relaxation
    delay_end = make_delay(d=2)
    seq.add_block(delay_end)
    system.max_slew = save_slew
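
# A hedged usage sketch (the import paths are assumptions and differ between
# pypulseq versions; module-level helpers used by gre_refscan, e.g. `ph`, are
# assumed to be imported as in the original file):
from pypulseq.Sequence.sequence import Sequence
from pypulseq.opts import Opts

seq = Sequence()
gre_refscan(seq, meta_file=None, system=Opts(), params=None)  # defaults from above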