Example 1
 def test_write(self):
     with tempfile.TemporaryDirectory() as tempdir:
         outbase = os.path.join(tempdir, 'trashme')
         nimsdata.write(self.ds,
                        self.ds.data,
                        outbase=outbase,
                        filetype='png')
         assert len(glob.glob(outbase + '*')) >= 1
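These write tests assume a fixture: self.ds is a parsed dataset whose pixel data has already been loaded. A minimal setUp sketch for a hypothetical test class, assuming a local test tarball and the nimsdata.parse signature used in the pipeline examples further down (the fixture path is illustrative only):

 import os

 import nimsdata


 class TestWrite(object):

     def setUp(self):
         # hypothetical fixture path; any dicom tarball that nimsdata can parse
         test_tgz = os.path.join('testdata', 'example_dicoms.tgz')
         # load_data=True populates self.ds.data, which the tests pass to nimsdata.write
         self.ds = nimsdata.parse(test_tgz, filetype='dicom',
                                  load_data=True, ignore_json=True)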
Example 2
 def test004_get_size(self):
     with tempfile.TemporaryDirectory() as tempdir:
         outbase = os.path.join(tempdir, 'trashme')
         nimsdata.write(self.ds,
                        self.ds.data,
                        outbase=outbase,
                        filetype='montage')
         outfile = os.path.join(tempdir, os.listdir(tempdir)[0])
         ok_(nimsdata.medimg.nimsmontage.get_info(outfile))  # gets made?
Example 3
 def test005_get_tile(self):
     with tempfile.TemporaryDirectory() as tempdir:
         outbase = os.path.join(tempdir, 'trashme')
         nimsdata.write(self.ds,
                        self.ds.data,
                        outbase=outbase,
                        filetype='montage')
         outfile = os.path.join(tempdir, os.listdir(tempdir)[0])
         ok_(nimsdata.medimg.nimsmontage.get_tile(
             outfile, 0, 0, 0))  # all montage have 0, 0, 0
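A hedged usage sketch built on the same call; it assumes, as the test above only implies, that get_tile returns the tile image as a byte string that can be written straight to disk:

 from nimsdata.medimg import nimsmontage

 def save_first_tile(montage_path, out_png):
     # assumption: get_tile(path, z, x, y) returns raw image bytes; the test
     # above only checks that the return value is truthy
     tile = nimsmontage.get_tile(montage_path, 0, 0, 0)
     with open(out_png, 'wb') as fp:
         fp.write(tile)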
Example 4
 def test003_write_pyrdb(self):
     with tempfile.TemporaryDirectory() as tempdir:
         outbase = os.path.join(tempdir, 'trashme')
         ok_(
             nimsdata.write(self.ds,
                            self.ds.data,
                            outbase=outbase,
                            filetype='montage',
                            mtype='sqlite'))
Example 5
 def test002_write_dir(self):
     """directory jpeg montage"""
     with tempfile.TemporaryDirectory() as tempdir:
         outbase = os.path.join(tempdir, 'trashme')
         ok_(
             nimsdata.write(self.ds,
                            self.ds.data,
                            outbase=outbase,
                            filetype='montage',
                            mtype='dir'))
Example 6
 def test_nifti_write(self):
     with tempfile.TemporaryDirectory() as tempdir:
         outbase = os.path.join(tempdir, 'trashme')
         nimsdata.write(self.ds, self.ds.data, filetype='nifti', outbase=outbase)
         print(glob.glob(outbase + '*'))
         assert (len(glob.glob(outbase + '*')) >= 1)
Example 7
    def process(self):
        """"
        Convert a pfile.

        Extracts a pfile.tgz into a temporary directory and full_parses the pfile.7.  If an error occurs
        during parsing, no exception gets raised, instead the exception is saved into dataset.failure_reason.
        This is to allow find() to attempt to locate physio, even if the input pfile not be loaded.  After
        locating physio has been attempted, the PFilePipeline will attempt to convert the dataset into

        a nifti, and then a montage.

        Parameters
        ---------
        None : NoneType
            The PFilePipeline works has a job and dataset assigned to it.  No additional parameters are required.

        """
        super(PFilePipeline, self).process()

        ds = self.job.data_container.primary_dataset

        with nimsutil.TempDir(dir=self.tempdir) as outputdir:
            log.debug('parsing')
            outbase = os.path.join(outputdir, ds.container.name)
            pfile_tgz = glob.glob(os.path.join(self.nims_path, ds.relpath, '*_pfile.tgz'))
            pfile_7gz = glob.glob(os.path.join(self.nims_path, ds.relpath, 'P?????.7*'))
            if pfile_tgz:
                log.debug('input format: tgz')
                with tarfile.open(pfile_tgz[0]) as archive:
                    archive.extractall(path=outputdir)
                temp_datadir = os.path.join(outputdir, os.listdir(outputdir)[0])
                input_pfile = os.path.join(temp_datadir, glob.glob(os.path.join(temp_datadir, 'P?????.7'))[0])
            elif pfile_7gz:
                log.debug('input format: directory')
                input_pfile = pfile_7gz[0]
            else:
                log.warning('no pfile input found in %s' % os.path.join(self.nims_path, ds.relpath))
                raise Exception('no pfile input found in %s' % os.path.join(self.nims_path, ds.relpath))

            # perform full parse, which doesn't attempt to load the data
            pf = nimsdata.parse(input_pfile, filetype='pfile', ignore_json=True, load_data=False, full_parse=True, tempdir=outputdir, num_jobs=self.max_recon_jobs)

            try:
                self.find(pf.slice_order, pf.num_slices)
            except Exception as exc:  # XXX, specific exceptions
                pass

            # MUX HACK, identify a group of aux candidates and determine the single best aux_file.
            # Certain mux_epi scans will return a dictionary of parameters to use as query filters to
            # help locate an aux_file that contains necessary calibration scans.
            criteria = pf.prep_convert()
            aux_file = None
            if criteria is not None:  # if criteria: this is definitely mux of some sort
                log.debug('pfile aux criteria %s' % str(criteria.keys()))
                q = Epoch.query.filter(Epoch.session==self.job.data_container.session).filter(Epoch.trashtime==None)
                for fieldname, value in criteria.iteritems():
                    q = q.filter(getattr(Epoch, fieldname)==unicode(value))  # filter by psd_name

                if pf.num_mux_cal_cycle >= 2:
                    log.debug('looking for num_bands = 1')
                    epochs = [e for e in q.all() if (e != self.job.data_container and e.num_bands == 1)]
                else:
                    log.debug('looking for num_mux_cal_cycle >= 2')
                    epochs = [e for e in q.all() if (e != self.job.data_container and e.num_mux_cal_cycle >= 2)]
                log.debug('candidates: %s' % str([e.primary_dataset.filenames for e in epochs]))

                # REALLY BAD MUX HACK!
                # prefer pe0 scans. Ideally, we'd check the pfile headers and find matching pepolar scans.
                # But that would take forever, so we'll assume target scans are always pe0 and further assume that
                # the pepolar is correctly indicated in the description.
                epochs_pe0 = [e for e in epochs if 'pe0' in e.description]
                if len(epochs_pe0)>0:
                    epochs = epochs_pe0
                    log.debug('Selecting only epochs with "pe0" in the description')
                else:
                    epochs_pe0 = [e for e in epochs if not 'pe1' in e.description]
                    if len(epochs_pe0)>0:
                        epochs = epochs_pe0
                        log.debug('Selecting only epochs without "pe1" in the description')
                    # At this point, just give up, use them all, and hope for the best.

                # which epoch has the closest series number
                series_num_diff = np.array([e.series for e in epochs]) - pf.series_no
                closest = np.min(np.abs(series_num_diff))==np.abs(series_num_diff)
                # there may be more than one. We prefer the prior scan.
                closest = np.where(np.min(series_num_diff[closest])==series_num_diff)[0][0]
                candidate = epochs[closest]
                # auxfile could be either P7.gz with adjacent files or a pfile tgz
                aux_tgz = glob.glob(os.path.join(self.nims_path, candidate.primary_dataset.relpath, '*_pfile.tgz'))
                aux_7gz = glob.glob(os.path.join(self.nims_path, candidate.primary_dataset.relpath, 'P?????.7*'))
                if aux_tgz:
                    aux_file = aux_tgz[0]
                elif aux_7gz:
                    aux_file = aux_7gz[0]
                # aux_file = os.path.join(self.nims_path, candidate.primary_dataset.relpath, candidate.primary_dataset.filenames[0])
                log.debug('identified aux_file: %s' % os.path.basename(aux_file))

                self.job.activity = (u'Found aux file: %s' % os.path.basename(aux_file))[:255]
                log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))

            else:
                log.debug('no special criteria')
                aux_file = None

            # provide aux_files and db_description to pfile.load_data.  aux_file will be used for calibration scan,
            # db_desc passes the database description to the pfile.load_data fxn, allowing pfile.load_data() to
            # make additional decisions based on the description stored in the database.
            # This allows user-edits to the description to affect jobs.
            pf.load_data(aux_file=aux_file, db_desc=self.job.data_container.description)
            if pf.failure_reason:   # implies pf.data = None
                self.job.activity = (u'error loading pfile: %s' % str(pf.failure_reason))
                transaction.commit()
                DBSession.add(self.job)
                raise pf.failure_reason

            # attempt to write nifti, if write fails, let exception bubble up to pipeline process()
            # exception will cause job to be marked as 'fail'
            if pf.is_non_image:    # implies dcm_acq.data = None
                # non-image is an "expected" outcome, job has succeeded
                # no error should be raised, job status should end up 'done'
                self.job.activity = (u'pfile %s is a non-image type' % input_pfile)
                transaction.commit()
            else:
                conv_file = nimsdata.write(pf, pf.data, outbase, filetype='nifti')
                if conv_file:
                    outputdir_list = [f for f in os.listdir(outputdir) if not os.path.isdir(os.path.join(outputdir, f))]
                    self.job.activity = (u'generated %s' % (', '.join([f for f in outputdir_list])))[:255]
                    log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                    dataset = Dataset.at_path(self.nims_path, u'nifti')
                    DBSession.add(self.job)
                    DBSession.add(self.job.data_container)
                    dataset.kind = u'derived'
                    dataset.container = self.job.data_container
                    dataset.container.size = pf.size
                    dataset.container.mm_per_vox = pf.mm_per_vox
                    dataset.container.num_slices = pf.num_slices
                    dataset.container.num_timepoints = pf.num_timepoints
                    dataset.container.duration = datetime.timedelta(seconds=pf.duration)
                    filenames = []
                    for f in outputdir_list:
                        filenames.append(f)
                        shutil.copy2(os.path.join(outputdir, f), os.path.join(self.nims_path, dataset.relpath))
                    dataset.filenames = filenames
                    transaction.commit()

                    pyramid_ds = Dataset.at_path(self.nims_path, u'img_pyr')
                    DBSession.add(self.job)
                    DBSession.add(self.job.data_container)
                    outpath = os.path.join(self.nims_path, pyramid_ds.relpath, self.job.data_container.name)
                    nims_montage = nimsdata.write(pf, pf.data, outpath, filetype='montage')
                    self.job.activity = u'generated image pyramid %s' % nims_montage
                    log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                    pyramid_ds.kind = u'web'
                    pyramid_ds.container = self.job.data_container
                    pyramid_ds.filenames = os.listdir(os.path.join(self.nims_path, pyramid_ds.relpath))
                    transaction.commit()

            DBSession.add(self.job)

        DBSession.add(self.job)
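The aux-file selection above picks the candidate epoch whose series number is closest to the pfile's own, breaking ties in favor of the prior scan. A standalone numpy sketch of that selection logic (the function name and the sample numbers are illustrative, not part of the pipeline):

import numpy as np

def pick_closest_series(candidate_series, target_series):
    # signed difference between each candidate's series number and the target's;
    # negative means the candidate was acquired before the target
    diff = np.array(candidate_series) - target_series
    # mask of candidates tied for the smallest absolute difference
    tied = np.min(np.abs(diff)) == np.abs(diff)
    # among the tied candidates, the minimum signed difference is the most
    # negative one, i.e. the prior scan; return its index in the original list
    return np.where(np.min(diff[tied]) == diff)[0][0]

# illustrative example: target series 5, candidates [3, 4, 6] -> index 1 (series 4, the prior scan)
print(pick_closest_series([3, 4, 6], 5))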
Example 8
    def process(self):
        """"
        Convert a dicom file.

        Parse a dicom file and load the data. If an error occurs during parsing, no exception gets raised,
        instead the exception is saved into dataset.failure_reason.  This is to allow find() to attempt to
        locate physio, even if the input dicom files could not be loaded.  After the locating physio has been
        attempted, the DicomPipeline will attempt to convert the dataset into various output files.

        Parameters
        ---------
        None : NoneType
            The DicomPipeline works has a job and dataset assigned to it.  No additional parameters are required.

        """
        super(DicomPipeline, self).process()

        ds = self.job.data_container.primary_dataset
        with nimsutil.TempDir(dir=self.tempdir) as outputdir:
            outbase = os.path.join(outputdir, ds.container.name)
            dcm_tgz = os.path.join(self.nims_path, ds.relpath, os.listdir(os.path.join(self.nims_path, ds.relpath))[0])
            dcm_acq = nimsdata.parse(dcm_tgz, filetype='dicom', load_data=True, ignore_json=True)   # store exception for later...

            # if physio was not found, wait 30 seconds and search again.
            # this should only run when the job activity is u'no physio files found'
            # if physio not recorded, or physio invalid, don't try again
            try:
                self.find(dcm_acq.slice_order, dcm_acq.num_slices)
            except Exception as e:
                # this catches some of the non-image scans that do not have
                # dcm_acq.slice_order and/or dcm_acq.num_slices
                log.info(str(e))  # do we need this logging message?
            if self.job.activity == u'no physio files found':
                self.job.activity = u'no physio files found; searching again in 30 seconds'
                log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                time.sleep(30)
                try:
                    self.find(dcm_acq.slice_order, dcm_acq.num_slices)
                except Exception as e:
                    # this catches some of the non-image scans that do not have
                    # dcm_acq.slice_order and/or dcm_acq.num_slices
                    log.info(str(e))  # do we need this logging message?

            if dcm_acq.failure_reason:   # implies dcm_acq.data = None
                # if dcm_acq.failure_reason is set, job has failed
                # raising an error should cause job.status to end up 'failed'
                self.job.activity = (u'load dicom data failed; %s' % str(dcm_acq.failure_reason))
                transaction.commit()
                DBSession.add(self.job)
                raise dcm_acq.failure_reason

            if dcm_acq.is_non_image:    # implies dcm_acq.data = None
                # non-image is an "expected" outcome, job has succeeded
                # no error should be raised, job status should end up 'done'
                self.job.activity = (u'dicom %s is a non-image type' % dcm_tgz)
                transaction.commit()
            else:
                if dcm_acq.is_screenshot:
                    conv_files = nimsdata.write(dcm_acq, dcm_acq.data, outbase, filetype='png')
                    if conv_files:
                        outputdir_list = os.listdir(outputdir)
                        self.job.activity = (u'generated %s' % (', '.join([f for f in outputdir_list])))[:255]
                        log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                        conv_ds = Dataset.at_path(self.nims_path, u'bitmap')
                        DBSession.add(self.job)
                        DBSession.add(self.job.data_container)
                        conv_ds.kind = u'derived'
                        conv_ds.container = self.job.data_container
                        conv_ds.container.size = dcm_acq.size
                        conv_ds.container.mm_per_vox = dcm_acq.mm_per_vox
                        conv_ds.container.num_slices = dcm_acq.num_slices
                        conv_ds.container.num_timepoints = dcm_acq.num_timepoints
                        conv_ds.container.duration = dcm_acq.duration
                        filenames = []
                        for f in outputdir_list:
                            filenames.append(f)
                            shutil.copy2(os.path.join(outputdir, f), os.path.join(self.nims_path, conv_ds.relpath))
                        conv_ds.filenames = filenames
                        transaction.commit()
                else:
                    conv_files = nimsdata.write(dcm_acq, dcm_acq.data, outbase, filetype='nifti')
                    if conv_files:
                        # if nifti was successfully created
                        outputdir_list = os.listdir(outputdir)
                        self.job.activity = (u'generated %s' % (', '.join([f for f in outputdir_list])))[:255]
                        log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                        conv_ds = Dataset.at_path(self.nims_path, u'nifti')
                        DBSession.add(self.job)
                        DBSession.add(self.job.data_container)
                        conv_ds.kind = u'derived'
                        conv_ds.container = self.job.data_container
                        filenames = []
                        for f in outputdir_list:
                            filenames.append(f)
                            shutil.copy2(os.path.join(outputdir, f), os.path.join(self.nims_path, conv_ds.relpath))
                        conv_ds.filenames = filenames
                        transaction.commit()
                        pyramid_ds = Dataset.at_path(self.nims_path, u'img_pyr')
                        DBSession.add(self.job)
                        DBSession.add(self.job.data_container)
                        outpath = os.path.join(self.nims_path, pyramid_ds.relpath, self.job.data_container.name)
                        voxel_order = None if dcm_acq.is_localizer else 'LPS'
                        nims_montage = nimsdata.write(dcm_acq, dcm_acq.data, outpath, filetype='montage', voxel_order=voxel_order)
                        self.job.activity = (u'generated %s' % (', '.join([os.path.basename(f) for f in nims_montage])))[:255]
                        log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                        pyramid_ds.kind = u'web'
                        pyramid_ds.container = self.job.data_container
                        pyramid_ds.filenames = os.listdir(os.path.join(self.nims_path, pyramid_ds.relpath))
                        transaction.commit()

            DBSession.add(self.job)

        DBSession.add(self.job)
Example 9
    def process(self):
        """"
        Convert a pfile.

        Extracts a pfile.tgz into a temporary directory and full_parses the pfile.7.  If an error occurs
        during parsing, no exception gets raised, instead the exception is saved into dataset.failure_reason.
        This is to allow find() to attempt to locate physio, even if the input pfile not be loaded.  After
        locating physio has been attempted, the PFilePipeline will attempt to convert the dataset into

        a nifti, and then a montage.

        Parameters
        ---------
        None : NoneType
            The PFilePipeline works has a job and dataset assigned to it.  No additional parameters are required.

        """
        super(PFilePipeline, self).process()

        ds = self.job.data_container.primary_dataset
        log.info('Processing ' + ds.container.description)

        with nimsutil.TempDir(dir=self.tempdir) as outputdir:
            log.debug('parsing')
            outbase = os.path.join(outputdir, ds.container.name)
            pfile_tgz = glob.glob(
                os.path.join(self.nims_path, ds.relpath, '*_pfile.tgz'))
            pfile_7gz = glob.glob(
                os.path.join(self.nims_path, ds.relpath, 'P?????.7*'))
            if pfile_tgz:
                log.debug('input format: tgz')
                from subprocess import call
                call([
                    'tar', '--use-compress-program=pigz', '-xf', pfile_tgz[0],
                    '-C', outputdir
                ])
                #with tarfile.open(pfile_tgz[0]) as archive:
                #    archive.extractall(path=outputdir)
                temp_datadir = os.path.join(outputdir,
                                            os.listdir(outputdir)[0])
                input_pfile = os.path.join(
                    temp_datadir,
                    glob.glob(os.path.join(temp_datadir, 'P?????.7'))[0])
            elif pfile_7gz:
                log.debug('input format: directory')
                input_pfile = pfile_7gz[0]
            else:
                log.warning('no pfile input found in %s' %
                            os.path.join(self.nims_path, ds.relpath))
                raise Exception('no pfile input found in %s' %
                                os.path.join(self.nims_path, ds.relpath))

            # perform full parse, which doesn't attempt to load the data
            if ds.container.description.lower().endswith(
                    '_ssg') or ds.container.description.lower().endswith(
                        '_sbref'):
                recon_type = 'split-slice-grappa_sense1'
            elif 'sense1' in ds.container.description.lower():
                recon_type = '1Dgrappa_sense1'
            else:
                recon_type = None
            log.info('Selecting recon_type %s...' % recon_type)
            pf = nimsdata.parse(input_pfile,
                                filetype='pfile',
                                ignore_json=True,
                                load_data=False,
                                full_parse=True,
                                tempdir=outputdir,
                                num_jobs=self.max_recon_jobs,
                                recon_type=recon_type)

            try:
                self.find(pf.slice_order, pf.num_slices)
            except Exception as exc:  # XXX, specific exceptions
                pass

            # MUX HACK, identify a group of aux candidates and determine the single best aux_file.
            # Certain mux_epi scans will return a dictionary of parameters to use as query filters to
            # help locate an aux_file that contains necessary calibration scans.
            aux_file = None
            if pf.psd_type == 'muxepi' and pf.num_bands > 1:
                q = Epoch.query.filter(
                    Epoch.session == self.job.data_container.session).filter(
                        Epoch.trashtime == None).filter(
                            Epoch.psd.startswith(u'mux')).filter(
                                Epoch.size_x == pf.size[0]).filter(
                                    Epoch.size_y == pf.size[1])

                log.info('looking for single-band mux calibration scans...')
                epochs = [
                    e for e in q.all()
                    if e != self.job.data_container and e.num_bands == 1
                ]
                if len(epochs) == 0:
                    if pf.num_mux_cal_cycle < 2:
                        epochs = [
                            e for e in q.all() if e != self.job.data_container
                            and e.num_mux_cal_cycle >= 2
                        ]
                        log.info(
                            'No single-band scan found; %d mux candidates found...'
                            % len(epochs))
                    else:
                        log.info(
                            'No single-band cal scan found-- using internal calibration.'
                        )
                else:
                    log.info('Single-band calibration candidates: %s' %
                             str([e.primary_dataset.filenames
                                  for e in epochs]))

                # REALLY BAD MUX HACK!
                # prefer pe0 scans. Ideally, we'd check the pfile headers and find matching pepolar scans.
                # But that would take forever, so we'll assume target scans are always pe0 and further assume that
                # the pepolar is correctly indicated in the description.
                epochs_pe_match = [
                    e for e in epochs
                    if (not 'pe1' in e.description
                        and pf.phase_encode_direction == 0) or
                    ('pe1' in e.description and pf.phase_encode_direction == 1)
                ]
                if len(epochs_pe_match) > 0:
                    epochs = epochs_pe_match
                    log.debug(
                        'Selecting only epochs with "pe0" in the description')
                else:
                    epochs = []
                    #epochs_pe0 = [e for e in epochs if not 'pe1' in e.description]
                    #if len(epochs_pe0)>0:
                    #    epochs = epochs_pe0
                    #    log.debug('Selecting only epochs without "pe1" in the description')

                if len(epochs) > 0:
                    # which epoch has the closest series number
                    series_num_diff = np.array([e.series for e in epochs
                                                ]) - pf.series_no
                    closest = np.min(
                        np.abs(series_num_diff)) == np.abs(series_num_diff)
                    # there may be more than one. We prefer the prior scan.
                    closest = np.where(
                        np.min(series_num_diff[closest]) ==
                        series_num_diff)[0][0]
                    candidate = epochs[closest]
                    # auxfile could be either P7.gz with adjacent files or a pfile tgz
                    aux_tgz = glob.glob(
                        os.path.join(self.nims_path,
                                     candidate.primary_dataset.relpath,
                                     '*_pfile.tgz'))
                    aux_7gz = glob.glob(
                        os.path.join(self.nims_path,
                                     candidate.primary_dataset.relpath,
                                     'P?????.7*'))
                    if aux_tgz:
                        aux_file = aux_tgz[0]
                    elif aux_7gz:
                        aux_file = aux_7gz[0]
                    # aux_file = os.path.join(self.nims_path, candidate.primary_dataset.relpath, candidate.primary_dataset.filenames[0])
                    log.info('identified aux_file: %s' %
                             os.path.basename(aux_file))

                    self.job.activity = (u'Found aux file: %s' %
                                         os.path.basename(aux_file))[:255]
                    log.info(u'%d %s %s' %
                             (self.job.id, self.job, self.job.activity))
                else:
                    log.info('no matching external cal scans found.')
                    aux_file = None

            else:
                aux_file = None

            # provide aux_files and db_description to pfile.load_data.  aux_file will be used for calibration scan,
            # db_desc passes the database description to the pfile.load_data fxn, allowing pfile.load_data() to
            # make additional decisions based on the description stored in the database.
            # This allows user-edits to the description to affect jobs.
            pf.load_data(aux_file=aux_file,
                         db_desc=self.job.data_container.description)
            if pf.failure_reason:  # implies pf.data = None
                self.job.activity = (u'error loading pfile: %s' %
                                     str(pf.failure_reason))
                transaction.commit()
                DBSession.add(self.job)
                raise pf.failure_reason

            # attempt to write nifti, if write fails, let exception bubble up to pipeline process()
            # exception will cause job to be marked as 'fail'
            if pf.is_non_image:  # implies dcm_acq.data = None
                # non-image is an "expected" outcome, job has succeeded
                # no error should be raised, job status should end up 'done'
                self.job.activity = (u'pfile %s is a non-image type' %
                                     input_pfile)
                transaction.commit()
            else:
                conv_file = nimsdata.write(pf,
                                           pf.data,
                                           outbase,
                                           filetype='nifti')
                if conv_file:
                    outputdir_list = [
                        f for f in os.listdir(outputdir)
                        if not os.path.isdir(os.path.join(outputdir, f))
                    ]
                    self.job.activity = (
                        u'generated %s' %
                        (', '.join([f for f in outputdir_list])))[:255]
                    log.info(u'%d %s %s' %
                             (self.job.id, self.job, self.job.activity))
                    dataset = Dataset.at_path(self.nims_path, u'nifti')
                    DBSession.add(self.job)
                    DBSession.add(self.job.data_container)
                    dataset.kind = u'derived'
                    dataset.container = self.job.data_container
                    dataset.container.size = pf.size
                    dataset.container.mm_per_vox = pf.mm_per_vox
                    dataset.container.num_slices = pf.num_slices
                    dataset.container.num_timepoints = pf.num_timepoints
                    dataset.container.duration = datetime.timedelta(
                        seconds=pf.duration)
                    filenames = []
                    for f in outputdir_list:
                        filenames.append(f)
                        shutil.copy2(
                            os.path.join(outputdir, f),
                            os.path.join(self.nims_path, dataset.relpath))
                    dataset.filenames = filenames
                    transaction.commit()

                    pyramid_ds = Dataset.at_path(self.nims_path, u'img_pyr')
                    DBSession.add(self.job)
                    DBSession.add(self.job.data_container)
                    outpath = os.path.join(self.nims_path, pyramid_ds.relpath,
                                           self.job.data_container.name)
                    nims_montage = nimsdata.write(pf,
                                                  pf.data,
                                                  outpath,
                                                  filetype='montage')
                    self.job.activity = u'generated image pyramid %s' % nims_montage
                    log.info(u'%d %s %s' %
                             (self.job.id, self.job, self.job.activity))
                    pyramid_ds.kind = u'web'
                    pyramid_ds.container = self.job.data_container
                    pyramid_ds.filenames = os.listdir(
                        os.path.join(self.nims_path, pyramid_ds.relpath))
                    transaction.commit()

            DBSession.add(self.job)

        DBSession.add(self.job)
Example 10
    def process(self):
        """"
        Convert a dicom file.

        Parse a dicom file and load the data. If an error occurs during parsing, no exception gets raised,
        instead the exception is saved into dataset.failure_reason.  This is to allow find() to attempt to
        locate physio, even if the input dicom files could not be loaded.  After the locating physio has been
        attempted, the DicomPipeline will attempt to convert the dataset into various output files.

        Parameters
        ---------
        None : NoneType
            The DicomPipeline works has a job and dataset assigned to it.  No additional parameters are required.

        """
        super(DicomPipeline, self).process()

        ds = self.job.data_container.primary_dataset
        with nimsutil.TempDir(dir=self.tempdir) as outputdir:
            outbase = os.path.join(outputdir, ds.container.name)
            dcm_tgz = os.path.join(
                self.nims_path, ds.relpath,
                os.listdir(os.path.join(self.nims_path, ds.relpath))[0])
            dcm_acq = nimsdata.parse(
                dcm_tgz, filetype='dicom', load_data=True,
                ignore_json=True)  # store exception for later...

            # if physio was not found, wait 30 seconds and search again.
            # this should only run when the job activity is u'no physio files found'
            # if physio not recorded, or physio invalid, don't try again
            try:
                self.find(dcm_acq.slice_order, dcm_acq.num_slices)
            except Exception as e:
                # this catches some of the non-image scans that do not have
                # dcm_acq.slice_order and/or dcm_acq.num_slices
                log.info(str(e))  # do we need this logging message?
            if self.job.activity == u'no physio files found':
                self.job.activity = u'no physio files found; searching again in 30 seconds'
                log.info(u'%d %s %s' %
                         (self.job.id, self.job, self.job.activity))
                time.sleep(30)
                try:
                    self.find(dcm_acq.slice_order, dcm_acq.num_slices)
                except Exception as e:
                    # this catches some of the non-image scans that do not have
                    # dcm_acq.slice_order and/or dcm_acq.num_slices
                    log.info(str(e))  # do we need this logging message?

            if dcm_acq.failure_reason:  # implies dcm_acq.data = None
                # if dcm_acq.failure_reason is set, job has failed
                # raising an error should cause job.status to end up 'failed'
                self.job.activity = (u'load dicom data failed; %s' %
                                     str(dcm_acq.failure_reason))
                transaction.commit()
                DBSession.add(self.job)
                raise dcm_acq.failure_reason

            if dcm_acq.is_non_image:  # implies dcm_acq.data = None
                # non-image is an "expected" outcome, job has succeeded
                # no error should be raised, job status should end up 'done'
                self.job.activity = (u'dicom %s is a non-image type' % dcm_tgz)
                transaction.commit()
            else:
                if dcm_acq.is_screenshot:
                    conv_files = nimsdata.write(dcm_acq,
                                                dcm_acq.data,
                                                outbase,
                                                filetype='png')
                    if conv_files:
                        outputdir_list = os.listdir(outputdir)
                        self.job.activity = (
                            u'generated %s' %
                            (', '.join([f for f in outputdir_list])))[:255]
                        log.info(u'%d %s %s' %
                                 (self.job.id, self.job, self.job.activity))
                        conv_ds = Dataset.at_path(self.nims_path, u'bitmap')
                        DBSession.add(self.job)
                        DBSession.add(self.job.data_container)
                        conv_ds.kind = u'derived'
                        conv_ds.container = self.job.data_container
                        conv_ds.container.size = dcm_acq.size
                        conv_ds.container.mm_per_vox = dcm_acq.mm_per_vox
                        conv_ds.container.num_slices = dcm_acq.num_slices
                        conv_ds.container.num_timepoints = dcm_acq.num_timepoints
                        conv_ds.container.duration = dcm_acq.duration
                        filenames = []
                        for f in outputdir_list:
                            filenames.append(f)
                            shutil.copy2(
                                os.path.join(outputdir, f),
                                os.path.join(self.nims_path, conv_ds.relpath))
                        conv_ds.filenames = filenames
                        transaction.commit()
                else:
                    conv_files = nimsdata.write(dcm_acq,
                                                dcm_acq.data,
                                                outbase,
                                                filetype='nifti')
                    if conv_files:
                        # if nifti was successfully created
                        outputdir_list = os.listdir(outputdir)
                        self.job.activity = (
                            u'generated %s' %
                            (', '.join([f for f in outputdir_list])))[:255]
                        log.info(u'%d %s %s' %
                                 (self.job.id, self.job, self.job.activity))
                        conv_ds = Dataset.at_path(self.nims_path, u'nifti')
                        DBSession.add(self.job)
                        DBSession.add(self.job.data_container)
                        conv_ds.kind = u'derived'
                        conv_ds.container = self.job.data_container
                        filenames = []
                        for f in outputdir_list:
                            filenames.append(f)
                            shutil.copy2(
                                os.path.join(outputdir, f),
                                os.path.join(self.nims_path, conv_ds.relpath))
                        conv_ds.filenames = filenames
                        transaction.commit()
                        pyramid_ds = Dataset.at_path(self.nims_path,
                                                     u'img_pyr')
                        DBSession.add(self.job)
                        DBSession.add(self.job.data_container)
                        outpath = os.path.join(self.nims_path,
                                               pyramid_ds.relpath,
                                               self.job.data_container.name)
                        voxel_order = None if dcm_acq.is_localizer else 'LPS'
                        nims_montage = nimsdata.write(dcm_acq,
                                                      dcm_acq.data,
                                                      outpath,
                                                      filetype='montage',
                                                      voxel_order=voxel_order)
                        self.job.activity = (u'generated %s' % (', '.join(
                            [os.path.basename(f)
                             for f in nims_montage])))[:255]
                        log.info(u'%d %s %s' %
                                 (self.job.id, self.job, self.job.activity))
                        pyramid_ds.kind = u'web'
                        pyramid_ds.container = self.job.data_container
                        pyramid_ds.filenames = os.listdir(
                            os.path.join(self.nims_path, pyramid_ds.relpath))
                        transaction.commit()

            DBSession.add(self.job)

        DBSession.add(self.job)
Example 11
 def test005_get_tile(self):
     with tempfile.TemporaryDirectory() as tempdir:
         outbase = os.path.join(tempdir, 'trashme')
         nimsdata.write(self.ds, self.ds.data, outbase=outbase, filetype='montage')
         outfile = os.path.join(tempdir, os.listdir(tempdir)[0])
         ok_(nimsdata.medimg.nimsmontage.get_tile(outfile, 0, 0, 0))     # all montage have 0, 0, 0
Example 12
 def test004_get_size(self):
     with tempfile.TemporaryDirectory() as tempdir:
         outbase = os.path.join(tempdir, 'trashme')
         nimsdata.write(self.ds, self.ds.data, outbase=outbase, filetype='montage')
         outfile = os.path.join(tempdir, os.listdir(tempdir)[0])
         ok_(nimsdata.medimg.nimsmontage.get_info(outfile))              # gets made?
Example 13
 def test003_write_pyrdb(self):
     with tempfile.TemporaryDirectory() as tempdir:
         outbase = os.path.join(tempdir, 'trashme')
         ok_(nimsdata.write(self.ds, self.ds.data, outbase=outbase, filetype='montage', mtype='sqlite'))
Example 14
 def test002_write_dir(self):
     """directory jpeg montage"""
     with tempfile.TemporaryDirectory() as tempdir:
         outbase = os.path.join(tempdir, 'trashme')
         ok_(nimsdata.write(self.ds, self.ds.data, outbase=outbase, filetype='montage', mtype='dir'))
Example 15
    else:
        logging.basicConfig(level=logging.INFO)

    if not os.path.exists(args.input):
        raise NIMSDataError('could not find input file %s' % args.input)

    outbase = args.outbase or os.path.basename(os.path.splitext(args.input.rstrip('/'))[0])

    p_kwargs = {}
    if args.parser_kwarg:
        for item in args.parser_kwarg:
            kw, val = item.split('=')
            p_kwargs[kw] = val
    log.debug(p_kwargs)

    w_kwargs = {}
    if args.writer_kwarg:
        for item in args.writer_kwarg:
            kw, val = item.split('=')
            w_kwargs[kw] = val
    log.debug(w_kwargs)

    ds = nimsdata.parse(args.input, load_data=True, ignore_json=args.ignore_json, filetype=args.parser, **p_kwargs)

    if not ds:
        raise NIMSDataError('%s could not be parsed' % args.input)
    if ds.data is None:
        raise NIMSDataError('%s has no data' % args.input)

    nimsdata.write(ds, ds.data, outbase, filetype=args.writer, **w_kwargs)
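The snippet above reads several attributes from args; a minimal argparse sketch that would supply them follows. The flag names and the debug/verbosity handling are assumptions inferred from the attribute names and the logging setup at the top of the excerpt, not the project's actual command line:

import argparse
import logging

# hypothetical option names, inferred from the attributes the snippet reads
arg_parser = argparse.ArgumentParser(description='parse a dataset and write it out with nimsdata')
arg_parser.add_argument('input', help='input file or directory')
arg_parser.add_argument('outbase', nargs='?', help='output basename; defaults to the input basename')
arg_parser.add_argument('-p', '--parser', help='parser filetype, e.g. dicom or pfile')
arg_parser.add_argument('-w', '--writer', help='writer filetype, e.g. nifti or montage')
arg_parser.add_argument('--parser_kwarg', action='append', help='extra parser kwarg, key=value (repeatable)')
arg_parser.add_argument('--writer_kwarg', action='append', help='extra writer kwarg, key=value (repeatable)')
arg_parser.add_argument('--ignore_json', action='store_true', help='ignore json sidecar metadata')
arg_parser.add_argument('--debug', action='store_true', help='log at DEBUG instead of INFO')
args = arg_parser.parse_args()

if args.debug:
    logging.basicConfig(level=logging.DEBUG)

# illustrative invocation:
#   python convert.py -p dicom -w nifti --writer_kwarg voxel_order=LPS input.tgz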