Example #1
    def run(self):
        old_err = sys.stderr
        sys.stderr = self.err
        self.parser = self.__make_parser()
        options, args = self.parser.parse_args(self.argv[1:])
        if len(args) != 1:
            self.parser.error(
                "Must specify ACQ_FILE.\n" +
                "Try --help for more instructions.")
        try:
            infile = args[0]
            if infile == '-':
                infile = StringIO.StringIO(sys.stdin.read())
            data = bioread.read_file(infile)
        except Exception:
            sys.stderr.write("Error reading %s\n" % args[0])
            sys.exit(1)
        try:
            chan = data.channels[options.channel]
        except Exception:
            sys.stderr.write(
                "Channel %s out of bounds -- max: %s\n" %
                (options.channel, len(data.channels)))
            sys.exit(2)
        try:
            TxtWriter.write_file(chan, sys.stdout)
        except Exception:
            sys.stderr.write("Notice: Not all data written\n")
            sys.exit(-1)

        sys.stderr = old_err
Example #2
    def run(self):
        old_err = sys.stderr
        sys.stderr = self.err
        self.parser = self.__make_parser()
        options, args = self.parser.parse_args(self.argv[1:])
        if len(args) != 1:
            self.parser.error(
                "Must specify ACQ_FILE.\n" +
                "Try --help for more instructions.")
        try:
            infile = args[0]
            if infile == '-':
                infile = StringIO.StringIO(sys.stdin.read())
            data = bioread.read_file(infile)
        except Exception:
            sys.stderr.write("Error reading %s\n" % args[0])
            sys.exit(1)
        try:
            chan = data.channels[options.channel]
        except Exception:
            sys.stderr.write("Channel %s out of bounds -- max: %s\n" %
                (options.channel, len(data.channels)))
            sys.exit(2)
        try:
            TxtWriter.write_file(chan, sys.stdout)
        except Exception:
            sys.stderr.write("Notice: Not all data written\n")
            sys.exit(-1)

        sys.stderr = old_err
Example #3
 def get_file(self, path):
     ACQ = bioread.read_file(str(path)) # can't handle unicode
     
     # filter channels to those of matching samplingrate
     channels = [c for c in ACQ.channels if c.samples_per_second == self._samplingrate]
     
     # read data
     n_channels = len(channels)
     if n_channels == 0:
         srs = np.unique([c.samples_per_second for c in ACQ.channels])
         err = ("No channels found. Change self._samplingrate to one of "
                "%s?" % srs)
         raise ValueError(err)
     
     c0 = channels[0]
     length = c0.point_count
     if n_channels > 1:
         assert all(c.point_count == length for c in channels[1:])
     data = np.empty((length, n_channels))
     for i, c in enumerate(channels):
         data[:, i] = c.data
     # properties
     properties = dict(samplingrate=self._samplingrate)
     return data, properties
Example #4
def acq_to_text(data_in_from, filename, data_out_to=None):
    """
    Reads in an .acq file and converts the data to a .txt file of the same format as
    Biopac's own conversion to .txt. The output file will have the same name as the input file,
    but with .txt extension instead of .acq
    :param data_in_from: full path to file where the data can be found
    :param filename: name of the .acq file to convert.
    :param data_out_to: path to write the output to. If not set, defaults to data_in_from
    """

    if data_out_to is None:
        data_out_to = data_in_from

    # read in the data with the help of the bioread package
    if not filename.endswith('.acq'):
        filename = filename + '.acq'

    if not data_in_from.endswith('/'):
        data_in_from = data_in_from + '/'

    try:
        data = bioread.read_file(data_in_from + filename)
    except Exception:
        print('Problems reading data from file ' + data_in_from + filename)
        raise

    # compile output to the same format as Biopac's own .txt data output
    output = ''
    output += filename + '\n'
    # msec per sample is 1000 divided by the sampling rate in Hz
    output += str(
        1000 / data.channels[0].samples_per_second) + ' msec/sample\n'
    output += str(len(data.channels)) + ' channels\n'
    for channel in data.channels:
        output += channel.name + '\n'
        output += channel.units + '\n'

    names = ''
    datapoints = ''
    data_matrix = np.zeros((data.channels[0].point_count, len(data.channels)))

    for i, channel in enumerate(data.channels):
        names += 'CH' + str(channel.order_num) + ','
        datapoints += str(channel.data_length) + ','
        data_matrix[:, i] = channel.data

    data_matrix = data_matrix.round(decimals=8)  # round() returns a new array
    output += names + '\n'
    output += datapoints + '\n'

    # write out the results
    save_to = data_out_to + '/' + os.path.splitext(filename)[0] + '.txt'
    with open(save_to, 'w') as f:
        f.write(output)
    with open(save_to, 'ab') as f:
        np.savetxt(f, data_matrix, delimiter=",", fmt='%.8g')

    return 'Done'
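A short usage sketch for the converter above (paths and file names hypothetical); the '.acq' suffix is appended automatically when missing, and the output directory defaults to the input directory:

acq_to_text('/data/physio', 'sub-01_run-1')
acq_to_text('/data/physio', 'sub-01_run-1.acq', data_out_to='/data/physio_txt')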
Example #5
 def __init__(self, **traits):
     super(AcqImporter, self).__init__(**traits)
     if not os.path.exists(self.path):
         fail("No such file: " + self.path)
     
     acqdata = bioread.read_file(self.path)
     self.channels = [
         Channel(name=chan.name,
                 sampling_rate=chan.samples_per_second,
                 decimate=chan.samples_per_second > DEFAULT_SAMPLING_RATE)
         for chan in acqdata.channels]
     self.guess_channel_contents()
     self.acqdata = acqdata
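A brief usage sketch (assuming path is a trait on AcqImporter and that DEFAULT_SAMPLING_RATE is defined in the same module; the file name is hypothetical):

importer = AcqImporter(path='recording.acq')
for chan in importer.channels:
    print(chan.name, chan.sampling_rate, chan.decimate)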
Example #6
def load_acq(filename, chtrig=0):
    """
    Populate object phys_input from acq extension files.

    Parameters
    ----------
    filename : str
        path to the AcqKnowledge (.acq) file
    chtrig : int, optional
        index of trigger channel. Default is 0.

    Returns
    -------
    BlueprintInput

    Note
    ----
    chtrig is not a 0-based Python index - instead, it's human readable (i.e., 1-based).
    This is handy because, when initialising the class, a new channel corresponding
    to time is added at the beginning - that is already taken into account!

    See Also
    --------
    physio_obj.BlueprintInput
    """
    from bioread import read_file
    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=DeprecationWarning)
        data = read_file(filename).channels

    freq = [
        data[0].samples_per_second,
    ]
    timeseries = [
        data[0].time_index,
    ]
    units = [
        's',
    ]
    names = [
        'time',
    ]

    for k, ch in enumerate(data):
        LGR.info(f'{k:02d}. {ch}')
        timeseries.append(ch.data)
        freq.append(ch.samples_per_second)
        units.append(ch.units)
        names.append(ch.name)

    return BlueprintInput(timeseries, freq, names, units, chtrig)
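A minimal usage sketch (file name hypothetical). Because the function prepends a 'time' channel, the human-readable 1-based chtrig lines up with the channel order of the returned BlueprintInput:

phys = load_acq('sub-01_physio.acq', chtrig=1)  # trigger recorded as channel 1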
Example #7
def my_load_acq(fname):

    data = bioread.read_file(fname)
    out_dict = dict()
    names, units, sf = [], [], []

    # np.stack assumes every channel has the same number of samples
    signals = np.stack([ch.data for ch in data.channels])
    for ch in data.channels:
        names.append(ch.name.rstrip())
        units.append(ch.units)
        sf.append(ch.samples_per_second)

    out_dict['data'] = signals
    out_dict['labels'] = names
    out_dict['units'] = units
    out_dict['sampling_rate'] = sf
    return out_dict
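A short usage sketch for the loader above (file name hypothetical):

rec = my_load_acq('recording.acq')
print(rec['labels'], rec['sampling_rate'])
first_signal = rec['data'][0]  # samples of the first channel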
Example #8
def populate_phys_input(filename, chtrig):
    """
    Populate object phys_input from acq files.

    Parameters
    ----------
    filename : str
        path to the AcqKnowledge (.acq) file
    chtrig : int
        index of trigger channel

    Returns
    -------
    BlueprintInput

    See Also
    --------
    physio_obj.BlueprintInput
    """

    with warnings.catch_warnings():
        warnings.filterwarnings('ignore', category=DeprecationWarning)
        data = read_file(filename).channels

    freq = [
        data[chtrig].samples_per_second,
    ]
    timeseries = [
        data[chtrig].time_index,
    ]
    units = [
        's',
    ]
    names = [
        'time',
    ]

    for k, ch in enumerate(data):
        LGR.info(f'{k:02d}. {ch}')
        timeseries.append(ch.data)
        freq.append(ch.samples_per_second)
        units.append(ch.units)
        names.append(ch.name)

    return BlueprintInput(timeseries, freq, names, units)
Example #9
def populate_phys_input(filename, chtrig):
    """
    Populate object phys_input
    """

    data = read_file(filename).channels

    freq = [data[chtrig].samples_per_second] * 2
    timeseries = [data[chtrig].time_index, data[chtrig].data]
    units = ['s', data[chtrig].units]
    names = ['time', 'trigger']

    for k, ch in enumerate(data):
        if k != chtrig:
            print(f'{k:02d}. {ch}')
            timeseries.append(ch.data)
            freq.append(ch.samples_per_second)
            units.append(ch.units)
            names.append(ch.name)

    return BlueprintInput(timeseries, freq, names, units)
Example #10
 def readACQ(self, fileAddress):
     '''
     Notes:
     1. If the channels differ in length, EEG() fails to build the channel
        matrix; with a well-formed example file it works fine.
     2. The filter variable could not be found in the file, so it is left
        as None below.
     '''
     try:
         acq = bioread.read_file(fileAddress)
         channels = acq.channels
         labels = []
         signals = []
         for ch in channels:
             labels.append(ch.name)
             signals.append(ch.data)
     except Exception:
         self.setError(3)
         return None
     frequency = channels[0].samples_per_second
     time = np.amax(acq.time_index)
     # the filter metadata could not be located in the file (see note above)
     filtr = None
     return EEGData(frequency, time, signals, filtr, labels)
Example #11
 def load_from_template(self, infile):
     infile = path.abspath(path.expanduser(infile))
     error_folder = infile.split('/')
     error_folder = '/'.join(error_folder[:-2])
     if path.isfile(infile):
         with open(infile) as json_data:
             d = json.load(json_data)
         for task in self.tasklist:
             taskfile = path.abspath(path.expanduser(d[task]))
             if d[task] != '' and path.isfile(taskfile):
                 try:
                     print("loading: %s" % taskfile)
                     data = br.read_file(taskfile)
                     pulse, resp, idx = self.get_channels(data)
                     self.run[task].resp = resp
                     self.run[task].pulse = pulse
                 except Exception:
                     print('Error loading {}, {}'.format(self.subid, task))
                     f = open(error_folder + '/error_log.txt', 'a')
                     f.write('{} : {} : {} : {}\n'.format(
                         datetime.datetime.now(), 'proc-biopac-coins',
                         self.subid, 'load error'))
                     f.close()
                 try:
                     print("Processing: %s, %s" % (self.subid, task))
                     self.samples_per_second = int(data.samples_per_second)
                     self.run[task].pulse = signal.medfilt(
                         self.run[task].pulse, 3)
                     self.run[task].resp = signal.medfilt(
                         self.run[task].resp, 3)
                     if self.samples_per_second != self.target_sampling_rate:
                         scale = int(self.samples_per_second /
                                     self.target_sampling_rate)
                         self.run[task].pulse = signal.decimate(
                             self.run[task].pulse, scale, zero_phase=True)
                         self.run[task].resp = signal.decimate(
                             self.run[task].resp, scale, zero_phase=True)
                     self.run[task].hr_idx = signal.find_peaks_cwt(
                         self.run[task].pulse, np.arange(1, 35))
                     self.run[task].hr = int(
                         len(self.run[task].hr_idx) /
                         ((len(self.run[task].pulse) /
                           self.target_sampling_rate) / 60.0))
                     self.run[task].rr_idx = signal.find_peaks_cwt(
                         moving_average(self.run[task].resp, 50),
                         np.arange(1, 70))
                     self.run[task].rr = int(
                         len(self.run[task].rr_idx) /
                         ((len(self.run[task].resp) /
                           self.target_sampling_rate) / 60.0))
                 except IOError:
                     print('Error processing {}, {}'.format(
                         self.subid, task))
                     f = open(error_folder + '/error_log.txt', 'a')
                     f.write('{} : {} : {} : {}\n'.format(
                         datetime.datetime.now(), 'proc-biopac-coins',
                         self.subid, 'processing error'))
                     f.close()
                 except ValueError:
                     print('Error processing {}, {}'.format(
                         self.subid, task))
                     f = open(error_folder + '/error_log.txt', 'a')
                     f.write('{} : {} : {} : {}\n'.format(
                         datetime.datetime.now(), 'proc-biopac-coins',
                         self.subid,
                         'processing error - channel missing data'))
                     f.close()
         self.hasloaded = True
     else:
         print('There is no json file for this subject')
         f = open(error_folder + '/error_log.txt', 'a')
         f.write('{} : {} : {} : {}\n'.format(datetime.datetime.now(),
                                              'proc-biopac-coins',
                                              self.subid, 'no json file'))
         f.close()
Example #12
def read():
    name = e1.get()
    data = bioread.read_file(name)
    total = len(data.channels[0].data)
    equation.set(str(total))
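A minimal sketch of the Tk wiring this callback assumes; e1 and equation are the names used in the snippet above, everything else here is an assumption:

import tkinter as tk
import bioread

root = tk.Tk()
equation = tk.StringVar()           # displays the sample count
e1 = tk.Entry(root)                 # the .acq path is typed here
e1.pack()
tk.Button(root, text='Read', command=read).pack()
tk.Label(root, textvariable=equation).pack()
root.mainloop()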
Example #13
def call():
    file_name = e1.get()
    file_length = e2.get()
    data = bioread.read_file(file_name)
    n = len(data.channels)

    for i in range(n):
        print('The name of the Channel', data.channels[i].name)
        a = data.channels[i].name
        print('Samples per second', data.channels[i].samples_per_second)
        print('Total number of data', len(data.channels[i].data))

        ref_size = int(file_length)
        print('Data', data.channels[i].data)

        plt.figure()
        plt.plot(data.channels[i].data[0:ref_size])
        plt.title(a, fontdict=None, loc='center')
        plt.xlabel("Time (ms)")
        plt.ylabel("Voltage(mV)")

        printPeaks(data.channels[i].data[0:ref_size], ref_size)

        length = len(peaks)
        rr.append(0)

        for j in range(length - 1):
            rr.append(peaks[j + 1] - peaks[j])

        print("Peaks :\n")
        print(peaks)
        print("\n")
        print("The RR interval for the given ECG is\n")
        print(rr)
        print("\n")

        #x and y are the cleaned version of  peaks and rr
        x = []
        y = []

        for k in range(1, length - 1):
            # a 1-sample interval is skipped by moving to the next element;
            # reassigning k only affects the current iteration
            if rr[k] == 1:
                k = k + 1

            x.append(peaks[k])
            y.append(rr[k])

        print(x)
        print("\n")
        print(y)
        print("\n")

        plt.figure()
        plt.plot(x, y, 'ro')
        plt.title("RR Interval plot", fontdict=None, loc='center')
        plt.xlabel("Time(ms)")
        plt.ylabel("RR interval(ms)")
        plt.show(block=False)

        new_x = np.array(x)
        new_y = np.array(y)

        b = estimate_coef(new_x, new_y)
        plt.figure()
        plt.title("Plots over interval Single interval",
                  fontdict=None,
                  loc='center')
        print("Estimated coefficients:\nb_0 = {} \nb_1 = {}".format(
            b[0], b[1]))
        plot_regression_line(new_x, new_y, b)
        plt.show(block=False)

        for k in range(50, 201, 50):

            interval = k
            loop = len(new_x) // interval  # integer division so range() gets an int
            val = 0
            plt.figure()

            for i in range(loop - 1):

                b = estimate_coef(new_x[val:val + k], new_y[val:val + k])
                print("Estimated coefficients:\nb_0 = {}\nb_1 = {}".format(
                    b[0], b[1]))
                plot_regression_line(new_x[val:val + k], new_y[val:val + k], b)
                val = val + interval

            b = estimate_coef(new_x[val:len(new_x)], new_y[val:len(new_y)])
            plt.title("Plots over interval %i" % k,
                      fontdict=None,
                      loc='center')
            print("Estimated coefficients:\nb_0 = {}\nb_1 = {}".format(
                b[0], b[1]))
            plot_regression_line(new_x[val:len(new_x)], new_y[val:len(new_y)],
                                 b)
            plt.show(block=False)
Example #14
import os
from os import path

import numpy as np
import bioread as br

# args, new_physio_struct and proc_acq are defined elsewhere in this script
sourceDir = args.project_directory
outDir = args.output_directory
sublist = [fil.strip() for fil in open(args.subject_list)]
tasklist = [fil.strip() for fil in open(args.task_list)]

for sub in sublist:
    physio_data = new_physio_struct(tasklist)
    physio_data['subject_id'] = sub
    for task in tasklist:
        print(task)
        acqName = sub + "_" + task + ".acq"
        infile = path.join(sourceDir, sub, 'originals', "01+physio", acqName)
        if os.path.isfile(infile):
            print("Processing: \n\t"+infile)
            data = br.read_file(infile)
            if len(data.channels[0].data) > 1000:
                pulse, resp, hr, rr, hr_idx, rr_idx = proc_acq(data)
                physio_data[task].append(hr)
                physio_data[task].append(pulse)
                physio_data[task].append(hr_idx)
                physio_data[task].append(rr)
                physio_data[task].append(resp)
                physio_data[task].append(rr_idx)
            else:
                print("File Did not Load or was corrupt. \n\t"+acqName)
                for k in np.arange(0,6):
                    physio_data[task].append(np.nan)
        else:
            print("File Not Found: \n\t"+acqName)
            for k in np.arange(0,6):
                physio_data[task].append(np.nan)
Example #15
import os
import configparser

import numpy as np
import bioread

import ecutils.eventhandler

config = configparser.ConfigParser()
config.read('emocon_config.ini')
SOURCE_DIR = config['DEFAULT']['SOURCE_DIR']
OUT_FOLDER = 'out/raw_data'

file_names = os.listdir(SOURCE_DIR)
file_names = [f for f in file_names if f.endswith('.acq')]
file_names = [f for f in file_names if not f.startswith('._')]

if not os.path.exists(OUT_FOLDER):
    os.makedirs(OUT_FOLDER)

for f_name in file_names:
    data = bioread.read_file(os.path.join(SOURCE_DIR, f_name))

    print('Extracting from', f_name)
    ec = ecutils.eventhandler.EventCollection.from_acq(data)

    eda = data.named_channels['EDA100C'].data
    emg = data.named_channels['EMG100C'].data

    subject_code = os.path.splitext(f_name)[0]

    np.save(os.path.join(OUT_FOLDER, subject_code + '_eda'), eda)
    np.save(os.path.join(OUT_FOLDER, subject_code + '_emg'), emg)

    ec.to_txt(os.path.join(OUT_FOLDER, subject_code + '_events.txt'))
Example #16
    def load_from_template(self, infile):
        infile = path.abspath(path.expanduser(infile))
        error_folder = infile.split('/')
        error_folder = '/'.join(error_folder[:-2])
        if path.isfile(infile):
            with open(infile) as json_data:
                d = json.load(json_data)
            for task in self.tasklist:
                taskfile = path.abspath(path.expanduser(d[task]))
                if d[task] != '' and path.isfile(taskfile):
                    try:
                        print("loading: %s" % taskfile)
                        data = br.read_file(taskfile)
                        pulse, resp, idx = self.get_channels(data)
                        self.run[task].resp = resp
                        self.run[task].pulse = pulse
                    except Exception:
                        print('Error loading {}, {}'.format(self.subid, task))
                        f = open(error_folder + '/error_log.txt', 'a')
                        f.write('{} : {} : {} : {}\n'.format(
                            datetime.datetime.now(), 'proc-biopac-coins',
                            self.subid, 'load error'))
                        f.close()
                    try:
                        print("Processing: %s, %s" % (self.subid, task))
                        self.samples_per_second = int(data.samples_per_second)
                        self.run[task].pulse = signal.medfilt(
                            self.run[task].pulse, 3)
                        self.run[task].resp = signal.medfilt(
                            self.run[task].resp, 3)
                        if self.samples_per_second != self.target_sampling_rate:
                            scale = int(self.samples_per_second /
                                        self.target_sampling_rate)
                            self.run[task].pulse = signal.decimate(
                                self.run[task].pulse, scale, zero_phase=True)
                            self.run[task].resp = signal.decimate(
                                self.run[task].resp, scale, zero_phase=True)
                        self.run[task].hr_idx = signal.find_peaks_cwt(
                            self.run[task].pulse, np.arange(1, 35))
                        self.run[task].hr = int(
                            len(self.run[task].hr_idx) /
                            ((len(self.run[task].pulse) /
                              self.target_sampling_rate) / 60.0))
                        #original np.arange(1,70), moving average
                        self.run[task].rr_idx = signal.find_peaks_cwt(
                            self.run[task].resp, np.arange(10, 110))
                        self.run[task].rr = int(
                            len(self.run[task].rr_idx) /
                            ((len(self.run[task].resp) /
                              self.target_sampling_rate) / 60.0))

                        time = np.arange(
                            0,
                            len(self.run[task].pulse) *
                            self.target_sampling_rate,
                            self.target_sampling_rate)

                        hr_distances = distance_btw_peaks(
                            time, self.run[task].hr_idx)
                        rr_distances = distance_btw_peaks(
                            time, self.run[task].rr_idx)

                        self.run[task].hr_skew = scipy.stats.skew(hr_distances)
                        self.run[task].hr_kurtosis = scipy.stats.kurtosis(
                            hr_distances)
                        self.run[task].rr_skew = scipy.stats.skew(rr_distances)
                        self.run[task].rr_kurtosis = scipy.stats.kurtosis(
                            rr_distances)

                        t_hr = 0
                        good_hr = 0
                        t_rr = 0
                        good_rr = 0

                        for i in hr_distances:
                            ins_HR = i / 60
                            if ins_HR >= 60 and ins_HR <= 130:
                                t_hr += 1
                                good_hr += 1
                            else:
                                t_hr += 1

                        self.run[task].hr_perc = good_hr / t_hr

                        for i in rr_distances:
                            ins_RR = i / 60
                            if ins_RR >= 15 and ins_RR <= 40:
                                t_rr += 1
                                good_rr += 1
                            else:
                                t_rr += 1

                        self.run[task].rr_perc = good_rr / t_rr

                    except IOError:
                        print('Error processing {}, {}'.format(
                            self.subid, task))
                        f = open(error_folder + '/error_log.txt', 'a')
                        f.write('{} : {} : {} : {}\n'.format(
                            datetime.datetime.now(), 'proc-biopac-coins',
                            self.subid, 'processing error'))
                        f.close()
                    except ValueError:
                        print('Error processing {}, {}'.format(
                            self.subid, task))
                        f = open(error_folder + '/error_log.txt', 'a')
                        f.write('{} : {} : {} : {}\n'.format(
                            datetime.datetime.now(), 'proc-biopac-coins',
                            self.subid,
                            'processing error - channel missing data'))
                        f.close()
            self.hasloaded = True
        else:
            print('There is no json file for this subject')
            f = open(error_folder + '/error_log.txt', 'a')
            f.write('{} : {} : {} : {}\n'.format(datetime.datetime.now(),
                                                 'proc-biopac-coins',
                                                 self.subid, 'no json file'))
            f.close()
Example #17
#!/usr/bin/env python

# Example taken from the README file.

import bioread

data = bioread.read_file('myfile.acq')

data.graph_header.file_revision

len(data.channels)

data.channels[1].samples_per_second
len(data.channels[1].data)
len(data.channels[1].upsampled_data)

data.channels[0].samples_per_second
len(data.channels[0].data)

data.channels[0].data[0]
data.channels[0].raw_data[0]

data.channels[0].name
# let's assume the output is "CO2"
data.named_channels['CO2'].data[0]

from bioread.writers import MatlabWriter
MatlabWriter.write_file(data, "myfile.mat")
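The writers module also provides TxtWriter, which Example #1 above calls with a single channel and an output stream; a minimal sketch (output file name hypothetical):

from bioread.writers import TxtWriter

with open('myfile_co2.txt', 'w') as f:
    TxtWriter.write_file(data.named_channels['CO2'], f)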
Example #18
def _main(argv=None):
    options = _get_parser().parse_args(argv)
    # Check options to make them internally coherent
    # #!# This can probably be done while parsing?
    # #!# Make filename check better somehow.
    options.indir = check_input_dir(options.indir)
    options.outdir = check_input_dir(options.outdir)
    options.filename = check_input_ext(options.filename, '.acq')
    ftype = 'acq'
    if not os.path.isfile(os.path.join(options.indir, options.filename)):
        options.filename = check_input_ext(options.filename[:-4], '.txt')
        ftype = 'txt'

    print('File extension is .' + ftype)
    options.heur_file = check_input_ext(options.heur_file, '.py')

    infile = options.indir + '/' + options.filename
    outfile = options.outdir + '/' + options.filename[:-4]

    check_file_exists(infile)
    print('File exists')

    # Read infos from file
    if ftype == 'acq':
        from bioread import read_file

        data = read_file(infile).channels
        print_info_acq(options.filename, data)
    elif ftype == 'txt':
        header = print_info_txt(options.filename)

    # If file has to be processed, process it
    if not options.info:
        if options.heur_file and options.sub:
            check_file_exists(options.heur_file)
            print(f'Preparing BIDS output using {options.heur_file}')
            outfile = use_heuristic(options.heur_file, options.sub,
                                    options.ses, options.filename,
                                    options.outdir)
        elif options.heur_file and not options.sub:
            print('While "-heur" was specified, option "-sub" was not.\n'
                  'Skipping BIDS formatting.')

        # #!# Get option of no trigger! (which is wrong practice or Respiract)
        print('Reading trigger data and time index')
        if ftype == 'acq':
            trigger = data[options.chtrig].data
            time = data[options.chtrig].time_index
        elif ftype == 'txt':
            # Read the full file (from the checked infile path) and extract the right columns.
            data = np.genfromtxt(infile, skip_header=HEADERLENGTH)
            trigger = data[:, options.chtrig+1]
            time = data[:, 0]

        print('Counting trigger points')
        trigger_deriv = np.diff(trigger)
        tps = trigger_deriv > options.thr
        num_tps_found = tps.sum()
        time_offset = time[tps.argmax()]

        if options.num_tps_expected:
            print('Checking number of tps')
            if num_tps_found > options.num_tps_expected:
                tps_extra = num_tps_found - options.num_tps_expected
                print('Found ' + str(tps_extra) + ' tps more than expected!\n',
                      'Assuming extra tps are at the end (try again with a ',
                      'more conservative thr)')
            elif num_tps_found < options.num_tps_expected:
                tps_missing = options.num_tps_expected - num_tps_found
                print('Found ' + str(tps_missing) + ' tps less than expected!')
                if options.tr:
                    print('Correcting time offset, assuming missing tps '
                          'are at the beginning')
                    # time_offset = time_offset - (tps_missing * options.tr)
                    time_offset = time[tps.argmax()] - (tps_missing * options.tr)
                else:
                    print("Can't correct time offset (try again specifying "
                          "tr or with a more liberal thr)")

            else:
                print('Found just the right amount of tps!')

        else:
            print('Not checking the number of tps')

        time = time - time_offset
        # time = data[options.chtrig].time_index - time_offset

        path_exists_or_make_it(options.outdir)

        def time2ntr(x):
            return x / options.tr

        def ntr2time(x):
            return x * options.tr

        thrline = np.ones(time.shape) * options.thr
        fig = plt.figure(figsize=FIGSIZE, dpi=SET_DPI)
        subplot = fig.add_subplot(211)
        subplot.set_title('trigger and time')
        subplot.set_ylim([-0.2, options.thr*10])
        subplot.plot(time, trigger, '-', time, thrline, 'r-.', time, time, '-')
        subplot = fig.add_subplot(223)
        subplot.set_xlim([-options.tr*4, options.tr*4])
        subplot.set_ylim([-0.2, options.thr*3])
        subplot.secondary_xaxis('top', functions=(time2ntr, ntr2time))
        subplot.plot(time, trigger, '-', time, time, '-')
        subplot = fig.add_subplot(224)
        subplot.set_xlim([options.tr*(options.num_tps_expected-4), options.tr*(options.num_tps_expected+4)])
        subplot.set_ylim([-0.2, options.thr*3])
        subplot.secondary_xaxis('top', functions=(time2ntr, ntr2time))
        subplot.plot(time, trigger, '-', time, time, '-')
        plt.savefig(outfile + '_trigger_time.png', dpi=SET_DPI)
        plt.close()

        # #!# The following few lines could be a function on its own for use in python
        table = pd.DataFrame(index=time)

        if ftype == 'txt':
            col_names = header[1].split('\t')
            col_names[-1] = col_names[-1][:-1]

        if options.chsel:
            print('Extracting desired channels')
            for ch in options.chsel:
                if ftype == 'acq':
                    table[data[ch].name] = data[ch].data
                elif ftype == 'txt':
                    # preparing channel names from txt file
                    table[col_names[ch+1]] = data[:, ch+1]

        else:
            # #!# Needs a check on different channel frequency!
            print('Extracting all channels')
            if ftype == 'acq':
                for ch in range(0, len(data)):
                    table[data[ch].name] = data[ch].data
            elif ftype == 'txt':
                for ch in range(0, (data.shape[1]-1)):
                    table[col_names[ch+1]] = data[:, ch+1]

        print('Extracting additional information')
        if ftype == 'acq':
            samp_freq = data[0].samples_per_second
        elif ftype == 'txt':
            freq_list = header[0].split('\t')
            samp_freq = 1 / float(freq_list[-1][:-2])

        table.index.names = ['time']
        table_width = len(table.columns)

        if options.table_header:
            if 'time' in options.table_header:
                ignored_headers = 1
            else:
                ignored_headers = 0

            n_headers = len(options.table_header)
            if table_width < n_headers - ignored_headers:
                print(f'Too many table headers specified!\n'
                      f'{options.table_header}\n'
                      f'Ignoring the last '
                      f'{n_headers - table_width - ignored_headers}')
                options.table_header = options.table_header[:(table_width + ignored_headers)]
            elif table_width > n_headers - ignored_headers:
                missing_headers = table_width - (n_headers - ignored_headers)
                print(f'Not enough table headers specified!\n'
                      f'{options.table_header}\n'
                      f'Appending {missing_headers} placeholder headers')
                for i in range(missing_headers):
                    options.table_header.append(f'missing n.{i+1}')

            table.columns = options.table_header[ignored_headers:]
            # #!# this should be iterative!
            print_plot(table, 'respiratory_CO2', outfile)

        print('Printing file')
        table.to_csv(outfile + '.tsv.gz', sep='\t', index=True, header=False, compression='gzip')
        # #!# Definitely needs check on samp_freq!
        print_json(outfile, samp_freq, time_offset, options.table_header)
        print_summary(options.filename, options.num_tps_expected,
                      num_tps_found, samp_freq, time_offset, outfile)
Example #19
 def _get_physio_acq_time(physio_file):
     # Return the acq_time for a .acq file (the earliest marker creation time):
     return bioread.read_file(physio_file).earliest_marker_created_at
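A short usage sketch (path hypothetical); bioread exposes the creation time of the earliest event marker on the object returned by read_file:

acq_time = _get_physio_acq_time('/data/sub-01_physio.acq')
print(acq_time)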