Example 1
0
    def find(self):
        """Locate GE physio recordings for this job's container and archive them.

        Clears any existing peripheral datasets, then — when the container
        recorded physio — searches the physio path for matching files,
        validates them, and stores a .tgz bundle of the raw files as a new
        peripheral u'physio' dataset, plus a gzipped regressors CSV when
        regressor generation succeeds. Every outcome is recorded in
        job.activity and logged.
        """
        # Start clean: drop peripheral datasets from any previous run.
        self.clean(self.job.data_container, u'peripheral')
        transaction.commit()
        DBSession.add(self.job)  # re-attach the job to the session after the commit

        container = self.job.data_container
        primary_ds = self.job.data_container.primary_dataset
        if not container.physio_recorded:
            self.job.activity = u'physio not recorded'
            log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
        else:
            candidates = nimsutil.find_ge_physio(
                self.physio_path, container.timestamp + container.prescribed_duration, container.psd.encode('utf-8'))
            if not candidates:
                self.job.activity = u'no physio files found'
                log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
            else:
                physio_data = nimsdata.nimsphysio.NIMSPhysio(candidates, container.tr, container.num_timepoints)
                if not physio_data.is_valid():
                    self.job.activity = u'invalid physio found and discarded'
                    log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                else:
                    self.job.activity = u'valid physio found'
                    log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                    # Slice order is expensive, so it was skipped at construction;
                    # now that validity is known, parse the primary file for it.
                    parsed = nimsdata.parse(os.path.join(self.nims_path, primary_ds.primary_file_relpath))
                    physio_data.slice_order = parsed.get_slice_order()  # TODO: should probably write a set method
                    physio_dataset = Dataset.at_path(self.nims_path, u'physio')
                    DBSession.add(self.job)
                    DBSession.add(self.job.data_container)
                    physio_dataset.kind = u'peripheral'
                    physio_dataset.container = self.job.data_container
                    with nimsutil.TempDir(dir=self.tempdir) as scratch_dir:
                        # Stage the raw files into a named directory so the tar
                        # archive has a sensible top-level entry.
                        staging_dir = os.path.join(scratch_dir, '%s_physio' % self.job.data_container.name)
                        os.mkdir(staging_dir)
                        for src in candidates:
                            shutil.copy2(src, staging_dir)
                        tgz_name = '%s_physio.tgz' % self.job.data_container.name
                        physio_dataset.filenames = [tgz_name]
                        tgz_path = os.path.join(self.nims_path, physio_dataset.relpath, tgz_name)
                        with tarfile.open(tgz_path, 'w:gz', compresslevel=6) as tarball:
                            tarball.add(staging_dir, arcname=os.path.basename(staging_dir))
                        try:
                            regressors_name = '%s_physio_regressors.csv.gz' % self.job.data_container.name
                            physio_data.write_regressors(os.path.join(self.nims_path, physio_dataset.relpath, regressors_name))
                        except nimsdata.nimsphysio.NIMSPhysioError:
                            self.job.activity = u'error generating regressors from physio data'
                            log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                        else:
                            physio_dataset.filenames += [regressors_name]
        transaction.commit()
        DBSession.add(self.job)
Example 2
0
 def find(self):
     """Locate GE physio files for this job's data container and copy them in.

     When the container's physio flag is set, search ``self.physio_path`` for
     GE physio files matching the container's psd name and timestamp window,
     and copy any matches into a new secondary u'Physio Data' dataset,
     tracking progress via the dataset's file counters. The job's
     ``activity`` is updated and logged in both the found and not-found
     cases.
     """
     dc = self.job.data_container
     # (Fixed: removed an unused local that fetched dc.primary_dataset.)
     if dc.physio_flag:
         physio_files = nimsutil.find_ge_physio(self.physio_path, dc.timestamp+dc.duration, dc.psd.encode('utf-8'))
         if physio_files:
             self.job.activity = u'physio found %s' % (', '.join([os.path.basename(pf) for pf in physio_files]))
             self.log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
             dataset = Dataset.at_path(self.nims_path, None, u'Physio Data')
             DBSession.add(self.job)
             DBSession.add(self.job.data_container)
             # Counters expose copy progress (actual vs. target file count).
             dataset.file_cnt_act = 0
             dataset.file_cnt_tgt = len(physio_files)
             dataset.kind = u'secondary'
             dataset.container = self.job.data_container
             for f in physio_files:
                 shutil.copy2(f, os.path.join(self.nims_path, dataset.relpath))
                 dataset.file_cnt_act += 1
         else:
             self.job.activity = u'no physio files found'
             self.log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
     transaction.commit()
     DBSession.add(self.job)
Example 3
0
    def find(self, slice_order, num_slices):
        """
        Locate physio and generate physio regressors.

        Find is called from within each pipeline's process() method after the file's slice_order
        and num_slices attributes have been set, but before preparing the data to be written out.
        This will use the num_slices and slice_order to create an array of the slice numbers in the
        sequence they were acquired. Nimsphysio will use both the current data container's metadata,
        like timestamp and duration, and metadata obtained by parsing the primary dataset file to
        determine if physio is valid.

        This method should never raise any exceptions.

        Parameters
        ----------
        slice_order : int
            integer that corresponds to the appropriate NIFTI slice order code. 0 for unknown.
        num_slices : int
            number of slices

        """
        # Remove peripheral datasets left over from any previous run of this job.
        self.clean(self.job.data_container, u'peripheral')
        transaction.commit()
        DBSession.add(self.job)  # re-attach the job to the session after the commit

        if self.physio_path is None: return             # can't search w/o phys path
        if not slice_order or not num_slices: return    # need both slice order AND num_slices to create regressors
        if self.job.data_container.scanner_name == 'IRC MRC35068': return   # hack to ignore Davis files

        dc = self.job.data_container
        if dc.physio_recorded:
            # 2015.02.03 RFD: apparently the old rule was incorrect. The physio files are not timestamped
            # sometime after the Rxed duration, but rather sometime after the actual duration! We don't yet
            # know the actual duration, so we'll just make shit up and hope for the best.
            physio_lag = datetime.timedelta(seconds=30)
            physio_files = nimsutil.find_ge_physio(self.physio_path, dc.timestamp+physio_lag, dc.psd.encode('utf-8'))
            #physio_files = nimsutil.find_ge_physio(self.physio_path, dc.timestamp+dc.prescribed_duration, dc.psd.encode('utf-8'))
            if physio_files:
                # Expand (slice-order code, slice count) into the acquisition order and let
                # nimsphysio judge validity against the container's tr and num_timepoints.
                physio = nimsphysio.NIMSPhysio(physio_files, dc.tr, dc.num_timepoints, nimsdata.medimg.medimg.get_slice_order(slice_order, num_slices))
                if physio.is_valid():
                    self.job.activity = u'valid physio found (%s...)' % os.path.basename(physio_files[0])
                    log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                    # Create the output dataset record and attach it to the container.
                    dataset = Dataset.at_path(self.nims_path, u'physio')
                    DBSession.add(self.job)
                    DBSession.add(self.job.data_container)
                    dataset.kind = u'peripheral'
                    dataset.container = self.job.data_container
                    with nimsutil.TempDir(dir=self.tempdir) as tempdir_path:
                        # Stage raw files in a named directory so the .tgz has a single top-level entry.
                        arcdir_path = os.path.join(tempdir_path, '%s_physio' % self.job.data_container.name)
                        os.mkdir(arcdir_path)
                        for f in physio_files:
                            shutil.copy2(f, arcdir_path)
                        filename = '%s_physio.tgz' % self.job.data_container.name
                        dataset.filenames = [filename]
                        with tarfile.open(os.path.join(self.nims_path, dataset.relpath, filename), 'w:gz', compresslevel=6) as archive:
                            archive.add(arcdir_path, arcname=os.path.basename(arcdir_path))
                        # Regressor generation can fail on odd recordings; the failure is logged, not raised.
                        try:
                            reg_filename = '%s_physio_regressors.csv.gz' % self.job.data_container.name
                            physio.write_regressors(os.path.join(self.nims_path, dataset.relpath, reg_filename))
                            self.job.activity = u'physio regressors %s written' % reg_filename
                            log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                        except nimsphysio.NIMSPhysioError:
                            self.job.activity = u'error generating regressors from physio data'
                            log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                        else:
                            dataset.filenames += [reg_filename]
                else:
                    self.job.activity = u'invalid physio found and discarded'
                    log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
            else:
                self.job.activity = u'no physio files found'
                log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
        else:
            self.job.activity = u'physio not recorded'
            log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
        transaction.commit()
        DBSession.add(self.job)
Example 4
0
    def find(self, slice_order, num_slices):
        """Find matching GE physio recordings and generate physio regressors.

        Called from each pipeline's process() once the primary file's
        slice_order and num_slices are known, but before the data is written
        out. The slice-order code and slice count are expanded into the
        acquisition sequence of slice indices, which nimsphysio combines with
        the container's metadata (timestamp, tr, number of timepoints) to
        decide whether the located physio is valid. Valid recordings are
        archived as a peripheral u'physio' dataset; when possible a gzipped
        regressors CSV is written alongside.

        This method should never raise any exceptions.

        Parameters
        ----------
        slice_order : int
            integer that corresponds to the appropriate NIFTI slice order code. 0 for unknown.
        num_slices : int
            number of slices

        """
        # Wipe peripheral datasets from any earlier run before regenerating.
        self.clean(self.job.data_container, u'peripheral')
        transaction.commit()
        DBSession.add(self.job)  # re-attach the job after the commit

        if self.physio_path is None:
            return  # can't search w/o phys path
        if not slice_order or not num_slices:
            return  # need both slice order AND num_slices to create regressors
        if self.job.data_container.scanner_name == 'IRC MRC35068':
            return  # hack to ignore Davis files

        container = self.job.data_container
        if not container.physio_recorded:
            self.job.activity = u'physio not recorded'
            log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
        else:
            # 2015.02.03 RFD: the old rule (search at timestamp + prescribed
            # duration) was wrong — the physio files are timestamped sometime
            # after the *actual* duration, which we don't know yet, so a fixed
            # 30 s lag is used as a best guess.
            search_lag = datetime.timedelta(seconds=30)
            matches = nimsutil.find_ge_physio(self.physio_path, container.timestamp + search_lag, container.psd.encode('utf-8'))
            if not matches:
                self.job.activity = u'no physio files found'
                log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
            else:
                acq_order = nimsdata.medimg.medimg.get_slice_order(slice_order, num_slices)
                recording = nimsphysio.NIMSPhysio(matches, container.tr, container.num_timepoints, acq_order)
                if not recording.is_valid():
                    self.job.activity = u'invalid physio found and discarded'
                    log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                else:
                    self.job.activity = u'valid physio found (%s...)' % os.path.basename(matches[0])
                    log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                    out_dataset = Dataset.at_path(self.nims_path, u'physio')
                    DBSession.add(self.job)
                    DBSession.add(self.job.data_container)
                    out_dataset.kind = u'peripheral'
                    out_dataset.container = self.job.data_container
                    with nimsutil.TempDir(dir=self.tempdir) as work_dir:
                        # Stage the raw files under a named directory so the
                        # archive gets a single sensible top-level entry.
                        bundle_dir = os.path.join(work_dir, '%s_physio' % self.job.data_container.name)
                        os.mkdir(bundle_dir)
                        for src_path in matches:
                            shutil.copy2(src_path, bundle_dir)
                        archive_name = '%s_physio.tgz' % self.job.data_container.name
                        out_dataset.filenames = [archive_name]
                        archive_path = os.path.join(self.nims_path, out_dataset.relpath, archive_name)
                        with tarfile.open(archive_path, 'w:gz', compresslevel=6) as tgz:
                            tgz.add(bundle_dir, arcname=os.path.basename(bundle_dir))
                        # Regressor generation may fail; log rather than raise.
                        try:
                            csv_name = '%s_physio_regressors.csv.gz' % self.job.data_container.name
                            recording.write_regressors(os.path.join(self.nims_path, out_dataset.relpath, csv_name))
                            self.job.activity = u'physio regressors %s written' % csv_name
                            log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                        except nimsphysio.NIMSPhysioError:
                            self.job.activity = u'error generating regressors from physio data'
                            log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                        else:
                            out_dataset.filenames += [csv_name]
        transaction.commit()
        DBSession.add(self.job)
Example 5
0
    def find(self):
        """Locate physio recordings for the job's container and archive them.

        If the container recorded physio, search the physio path for GE physio
        files, validate them against the container's tr and timepoint count,
        and store them as a peripheral u'physio' dataset: a .tgz bundle of the
        raw files plus, when regressor generation succeeds, a gzipped CSV of
        physio regressors. Each outcome is noted in job.activity and logged.
        """
        # Remove peripheral datasets from any previous run of this job.
        self.clean(self.job.data_container, u'peripheral')
        transaction.commit()
        DBSession.add(self.job)  # re-attach the job to the session after the commit

        container = self.job.data_container
        primary = self.job.data_container.primary_dataset
        if container.physio_recorded:
            found = nimsutil.find_ge_physio(
                self.physio_path,
                container.timestamp + container.prescribed_duration,
                container.psd.encode('utf-8'))
            if found:
                physio_rec = nimsdata.nimsphysio.NIMSPhysio(found, container.tr, container.num_timepoints)
                if physio_rec.is_valid():
                    self.job.activity = u'valid physio found'
                    log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                    # Slice order is costly to compute, so it was not set at
                    # construction; do it now that validity is established.
                    parsed_primary = nimsdata.parse(os.path.join(self.nims_path, primary.primary_file_relpath))
                    physio_rec.slice_order = parsed_primary.get_slice_order()  # TODO: should probably write a set method
                    target = Dataset.at_path(self.nims_path, u'physio')
                    DBSession.add(self.job)
                    DBSession.add(self.job.data_container)
                    target.kind = u'peripheral'
                    target.container = self.job.data_container
                    with nimsutil.TempDir(dir=self.tempdir) as tmp_root:
                        bundle = os.path.join(tmp_root, '%s_physio' % self.job.data_container.name)
                        os.mkdir(bundle)
                        for physio_file in found:
                            shutil.copy2(physio_file, bundle)
                        tgz_name = '%s_physio.tgz' % self.job.data_container.name
                        target.filenames = [tgz_name]
                        with tarfile.open(os.path.join(self.nims_path, target.relpath, tgz_name), 'w:gz', compresslevel=6) as bundle_tgz:
                            bundle_tgz.add(bundle, arcname=os.path.basename(bundle))
                        # Best-effort regressor generation: failures are logged.
                        try:
                            csv_name = '%s_physio_regressors.csv.gz' % self.job.data_container.name
                            physio_rec.write_regressors(os.path.join(self.nims_path, target.relpath, csv_name))
                        except nimsdata.nimsphysio.NIMSPhysioError:
                            self.job.activity = u'error generating regressors from physio data'
                            log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
                        else:
                            target.filenames += [csv_name]
                else:
                    self.job.activity = u'invalid physio found and discarded'
                    log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
            else:
                self.job.activity = u'no physio files found'
                log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
        else:
            self.job.activity = u'physio not recorded'
            log.info(u'%d %s %s' % (self.job.id, self.job, self.job.activity))
        transaction.commit()
        DBSession.add(self.job)