Beispiel #1
0
def stats_to_xls_png(config_file, stats_filename, oilgas=outputPartType.all):
    '''Summarise stats into two excel sheets: a time-series PSD and an
    averaged PSD.

    Args:
        config_file (string)            : Path of the config file for this data
        stats_filename (string)         : Path of the stats csv file
        oilgas=oc_pp.outputPartType.all : the oilgas enum if you want to just make the figure for oil, or just gas (defaults to all particles)

    Returns:
        dataframe: of time series
        files: in the proc folder)
    '''
    settings = PySilcamSettings(config_file)

    stats = pd.read_csv(stats_filename)
    stats.sort_values(by='timestamp', inplace=True)

    # Optionally restrict the stats to oil-only or gas-only particles,
    # tagging the output filenames accordingly.
    oilgasTxt = ''
    if oilgas == outputPartType.oil:
        from pysilcam.oilgas import extract_oil
        stats = extract_oil(stats)
        oilgasTxt = 'oil'
    elif oilgas == outputPartType.gas:
        from pysilcam.oilgas import extract_gas
        stats = extract_gas(stats)
        oilgasTxt = 'gas'

    base_name = stats_filename.replace('-STATS.csv', '')

    # Time-series volume distribution, exported to xlsx
    df = make_timeseries_vd(stats, settings)
    df.to_excel(base_name + '-TIMESERIES' + oilgasTxt + '.xlsx')

    # Dataset-average volume distribution, normalised by the total
    # sampled volume (per-image sample volume * number of images)
    sample_volume = get_sample_volume(
        settings.PostProcess.pix_size,
        path_length=settings.PostProcess.path_length)

    dias, vd = vd_from_stats(stats, settings.PostProcess)
    nims = count_images_in_stats(stats)
    vd /= sample_volume * nims

    dfa = pd.DataFrame(data=[vd], columns=dias)
    dfa['d50'] = d50_from_vd(vd, dias)
    # stamp the average with the earliest time in the series
    dfa['Time'] = np.min(pd.to_datetime(df['Time']))

    dfa.to_excel(base_name + '-AVERAGE' + oilgasTxt + '.xlsx')

    return df
Beispiel #2
0
def _delete_header_line(filename):
    '''Rewrite *filename* in-place with its first (header) line removed.'''
    with open(filename, 'r') as fin:
        data = fin.read().splitlines(True)
    with open(filename, 'w') as fout:
        fout.writelines(data[1:])


def convert_to_pj_format(stats_csv_file, config_file):
    '''converts stats files into a total, and gas-only time-series csvfile which can be read by the old matlab
    SummaryPlot exe

    Args:
        stats_csv_file (str) : path of the *-STATS.csv file created by silcam process
        config_file (str)    : path of the config ini file associated with the data
    '''

    settings = PySilcamSettings(config_file)
    logger.info('Loading stats....')
    stats = pd.read_csv(stats_csv_file)

    base_name = stats_csv_file.replace('-STATS.csv', '-PJ.csv')
    gas_name = base_name.replace('-PJ.csv', '-PJ-GAS.csv')

    ogdatafile = DataLogger(base_name, ogdataheader())
    ogdatafile_gas = DataLogger(gas_name, ogdataheader())

    stats['timestamp'] = pd.to_datetime(stats['timestamp'])
    u = stats['timestamp'].unique()
    sample_volume = sc_pp.get_sample_volume(
        settings.PostProcess.pix_size,
        path_length=settings.PostProcess.path_length)

    logger.info('Analysing time-series')
    for s in tqdm(u):
        substats = stats[stats['timestamp'] == s]
        nims = sc_pp.count_images_in_stats(substats)
        sv = sample_volume * nims  # total volume sampled at this timestamp

        oil = extract_oil(substats)
        dias, vd_oil = sc_pp.vd_from_stats(oil, settings.PostProcess)
        vd_oil /= sv

        gas = extract_gas(substats)
        dias, vd_gas = sc_pp.vd_from_stats(gas, settings.PostProcess)
        vd_gas /= sv
        d50_gas = sc_pp.d50_from_vd(vd_gas, dias)

        # total distribution is the sum of the oil and gas distributions
        vd_total = vd_oil + vd_gas
        d50_total = sc_pp.d50_from_vd(vd_total, dias)

        data_total = cat_data_pj(s, vd_total, d50_total, len(oil) + len(gas))
        ogdatafile.append_data(data_total)

        data_gas = cat_data_pj(s, vd_gas, d50_gas, len(gas))
        ogdatafile_gas.append_data(data_gas)

    logger.info('  OK.')

    # the matlab SummaryPlot exe expects headerless csv files
    logger.info('Deleting header!')
    _delete_header_line(base_name)
    _delete_header_line(gas_name)
    logger.info('Conversion complete.')
Beispiel #3
0
    def load_data(self):
        '''Handle loading of data, depending on what is available.

        Asks the user for a *-STATS.csv file. If a previously exported
        TIMESERIES xlsx file exists next to it, that is loaded directly;
        otherwise the user is asked whether to load the raw stats (slow)
        or convert them to a timeseries file first.
        '''
        self.datadir = os.path.split(self.configfile)[0]

        self.stats_filename = ''
        self.stats_filename = QFileDialog.getOpenFileName(
            self,
            caption='Load a *-STATS.csv file',
            directory=self.datadir,
            filter=(('*-STATS.csv')))[0]
        if self.stats_filename == '':
            # user cancelled the dialog
            return

        timeseriesgas_file = self.stats_filename.replace(
            '-STATS.csv', '-TIMESERIESgas.xlsx')

        if os.path.isfile(timeseriesgas_file):
            self.load_from_timeseries()
        else:

            msgBox = QMessageBox()
            # fixed typos: 'TIMSERIES' -> 'TIMESERIES' and '.xls' ->
            # '.xlsx', so the dialog names the file actually created
            msgBox.setText(
                'The STATS data appear not to have been exported to TIMESERIES.xlsx'
                +
                '\n We can use the STATS file anyway (which might take a while)'
                + '\n or we can convert the data to TIMESERIES.xlsx now,'
                '\n which can be used quickly if you want to load these data another time.'
            )
            msgBox.setIcon(QMessageBox.Question)
            msgBox.setWindowTitle('What to do?')
            load_stats_button = msgBox.addButton('Load stats anyway',
                                                 QMessageBox.ActionRole)
            convert_stats_button = msgBox.addButton(
                'Convert and save timeseries', QMessageBox.ActionRole)
            msgBox.addButton(QMessageBox.Cancel)
            msgBox.exec_()
            # both options need a config file; ask for one if not set
            if self.configfile == '':
                self.configfile = QFileDialog.getOpenFileName(
                    self,
                    caption='Load config ini file',
                    directory=self.datadir,
                    filter=(('*.ini')))[0]
                if self.configfile == '':
                    return
            if (msgBox.clickedButton() == load_stats_button):
                self.settings = PySilcamSettings(self.configfile)
                self.av_window = pd.Timedelta(
                    seconds=self.settings.PostProcess.window_size)
                self.load_from_stats()
            elif (msgBox.clickedButton() == convert_stats_button):
                export_timeseries(self.configfile, self.stats_filename)
                self.load_from_timeseries()
            else:
                # Cancel: leave the current state untouched
                return

        self.setup_figure()
Beispiel #4
0
 def status_update(self, string, uselog=False):
     '''Show *string* in the status bar, suffixed with the current
     directory and config file.

     Args:
         string (str)  : message to display
         uselog (bool) : if True, replace *string* with the last line of
                         the logfile named in the process config
     '''
     if uselog:
         # best-effort: fall back to the given string on any failure
         # (missing config, unreadable or empty logfile).
         # was a bare `except:`, which also swallowed KeyboardInterrupt
         try:
             settings = PySilcamSettings(self.process.configfile)
             with open(settings.General.logfile, 'r') as a:
                 lines = a.readlines()
                 string = str(lines[-1])
         except Exception:
             pass
     string = string + '  |  Directory: ' + self.datadir + '  |  Config file: ' + self.configfile
     self.ui.statusBar.setText(string)
     app.processEvents()
Beispiel #5
0
def test_settings():
    '''Check the example config loads and exposes all expected sections.'''
    config_path = os.path.join(os.path.dirname(__file__), '..',
                               'config_example.ini')
    settings = PySilcamSettings(config_path)

    assert hasattr(settings, 'General')
    assert hasattr(settings.General, 'version')
    # every processing stage must have its own section
    for section in ('Background', 'Process', 'PostProcess',
                    'ExportParticles', 'NNClassify'):
        assert hasattr(settings, section)
def particle_generator(stn='STN12', mindepth=1, maxdepth=70,
                       config_file='/mnt/ARRAY/ENTICE/Data/configs/config.ini',
                       proc_dir='/mnt/ARRAY/ENTICE/Data/proc',
                       export_dir='/mnt/ARRAY/ENTICE/Data/export/'):
    '''Generator of enhanced particle images, largest first.

    Generalised from a hard-coded version: station, depth range and data
    locations are now parameters whose defaults preserve the previous
    behaviour.

    Args:
        stn (str)         : station name used to locate the STATS file
        mindepth (int)    : minimum depth for particle selection
        maxdepth (int)    : maximum depth for particle selection
        config_file (str) : path of the config ini file
        proc_dir (str)    : directory containing <stn>-STATS.csv
        export_dir (str)  : directory containing exported particle images

    Yields:
        image: contrast-enhanced, brightness-normalised particle image
    '''
    print('load ctd')
    ctd_all = load_ctd()
    print(' ok')

    stats_csv_file = os.path.join(proc_dir, stn + '-STATS.csv')

    time = ctd_all['Date_Time']
    depth = ctd_all['Depth']

    conf = load_config(config_file)
    settings = PySilcamSettings(conf)

    stats = pd.read_csv(stats_csv_file)

    # attach a depth to every particle via the CTD time series
    stats = scpp.add_depth_to_stats(stats, time, depth)
    print('all stats:', len(stats))

    # keep only particles inside the requested depth band
    sstats = stats[(stats['Depth'] > mindepth) & (stats['Depth'] < maxdepth)]
    print('selected stats:', len(sstats))

    index = 0

    while True:
        # nth-largest particle; n increases on every yield so the
        # generator walks down the size distribution
        sstats_ = scpp.extract_nth_largest(sstats, settings, n=index)
        print(sstats_)

        filename = os.path.join(export_dir, sstats_['export name'])

        im = skiio.imread(filename)

        # enhance for display
        im = scpp.explode_contrast(im)
        im = scpp.bright_norm(im)

        index += 1

        yield im
Beispiel #7
0
    def plot_trimmed_stats(self):
        '''Trim the loaded stats to the GUI-selected time range and plot
        oil, gas and total volume distributions on the embedded canvas.

        Reads start/end times from the dateTimeStart/dateTimeEnd widgets,
        trims self.stats (write_new=False, so no file is written), and
        warns and returns early if the trimmed range contains no data.
        '''
        start_time = pd.to_datetime(
            self.ui.dateTimeStart.dateTime().toPyDateTime())
        end_time = pd.to_datetime(
            self.ui.dateTimeEnd.dateTime().toPyDateTime())
        self.trimmed_stats, self.output_filename = scpp.trim_stats(
            self.stats_filename,
            start_time,
            end_time,
            write_new=False,
            stats=self.stats)

        # no rows, or an all-NaN diameter column, means nothing to plot
        if np.isnan(self.trimmed_stats.equivalent_diameter.max()) or len(
                self.trimmed_stats) == 0:
            QMessageBox.warning(
                self, "No data in this segment!",
                'No data was found within the specified time range.',
                QMessageBox.Ok)
            return

        settings = PySilcamSettings(self.config_file)
        plt.figure(self.figure.number)
        plt.clf()
        stats_oil = scog.extract_oil(self.trimmed_stats)
        stats_gas = scog.extract_gas(self.trimmed_stats)

        # per-image sample volume; multiplied by image count below to
        # normalise distributions into volume concentrations
        sample_volume = scpp.get_sample_volume(
            settings.PostProcess.pix_size,
            path_length=settings.PostProcess.path_length)

        nims = scpp.count_images_in_stats(self.trimmed_stats)
        dias, vd = scpp.vd_from_stats(self.trimmed_stats, settings.PostProcess)
        dias, vd_oil = scpp.vd_from_stats(stats_oil, settings.PostProcess)
        dias, vd_gas = scpp.vd_from_stats(stats_gas, settings.PostProcess)

        # NOTE(review): vd (all particles) is normalised but never
        # plotted; the TOTAL curve shown is vd_oil + vd_gas -- confirm
        # this is intended.
        sv = sample_volume * nims
        vd /= sv
        vd_oil /= sv
        vd_gas /= sv

        plt.plot(dias, vd_oil + vd_gas, 'k', label='TOTAL')
        plt.plot(dias, vd_oil, 'r', label='OIL')
        plt.plot(dias, vd_gas, 'b', label='GAS')
        plt.xscale('log')
        plt.xlabel('Equiv. diam (um)')
        plt.ylabel('Volume concentration (uL/L)')
        plt.xlim(10, 12000)
        plt.legend()

        self.canvas.draw()
Beispiel #8
0
def liveview_acquire(datapath, config_filename, writeToDisk=False):
    '''Aquire images from the SilCam and yield them for live viewing.

    Args:
       datapath              (str)          :  Path to the image storage
       config_filename       (str)          :  Camera config file
       writeToDisk=False     (Bool)         :  True will enable writing of raw data to disc
                                               False will disable writing of raw data to disc

    Yields:
       timestamp : acquisition timestamp of the frame
       imraw     : the raw image

    NOTE(review): an earlier docstring listed a ``gui`` argument that is
    not in this function's signature; removed here. Also, ``rest_time``
    is computed below but there is no ``time.sleep(rest_time)`` (unlike
    silcam_acquire), so the requested frame rate is not actually
    enforced -- confirm whether that is intended.
    '''

    #Load the configuration, create settings object
    settings = PySilcamSettings(config_filename)

    #Print configuration to screen
    print('---- CONFIGURATION ----\n')
    settings.config.write(sys.stdout)
    print('-----------------------\n')

    if (writeToDisk):
        # Copy config file
        configFile2Copy = datetime.datetime.now().strftime(
            'D%Y%m%dT%H%M%S.%f') + os.path.basename(config_filename)
        copyfile(config_filename, os.path.join(datapath, configFile2Copy))

    configure_logger(settings.General)

    # update path_length
    updatePathLength(settings, logger)

    acq = Acquire(USE_PYMBA=True)  # ini class
    t1 = time.time()

    aqgen = acq.get_generator(datapath,
                              camera_config_file=config_filename,
                              writeToDisk=writeToDisk)

    for i, (timestamp, imraw) in enumerate(aqgen):
        t2 = time.time()
        # measured acquisition frequency from the inter-frame interval
        aq_freq = np.round(1.0 / (t2 - t1), 1)
        requested_freq = settings.Camera.acquisitionframerateabs
        rest_time = (1 / requested_freq) - (1 / aq_freq)
        rest_time = np.max([rest_time, 0.])
        actual_aq_freq = 1 / (1 / aq_freq + rest_time)
        logger.info('Image {0} acquired at frequency {1:.1f} Hz'.format(
            i, actual_aq_freq))
        t1 = time.time()

        yield timestamp, imraw
def test_small_standards():
    '''Testing that the small standards are sized correctly'''
    conf_file = os.path.join(ROOTPATH, 'STANDARDS',
                             'config_glass_standards.ini')
    conf_file_out = os.path.join(ROOTPATH, 'STANDARDS',
                                 'config_glass_standards_generated.ini')
    conf = load_config(conf_file)

    data_file = os.path.join(ROOTPATH, 'STANDARDS/StandardsSmall')
    conf.set('General', 'datafile', os.path.join(ROOTPATH, 'STANDARDS',
                                                 'proc'))
    conf.set('General', 'logfile',
             os.path.join(ROOTPATH, 'STANDARDS', 'log.log'))
    if MODEL_PATH is not None:
        conf.set('NNClassify', 'model_path', MODEL_PATH)
    # write the modified config; context manager guarantees the handle
    # is closed even if conf.write raises (was an explicit open/close)
    with open(conf_file_out, 'w') as conf_file_hand:
        conf.write(conf_file_hand)

    stats_file = os.path.join(ROOTPATH,
                              'STANDARDS/proc/StandardsSmall-STATS.csv')

    # if csv file already exists, it has to be deleted
    if os.path.isfile(stats_file):
        os.remove(stats_file)

    # call process function
    silcam_process(conf_file_out, data_file, multiProcess=False, nbImages=10)

    # check that csv file has been created
    assert os.path.isfile(stats_file), 'stats_file not created'

    # check that csv file has been properly built
    # (previously opened without ever being closed)
    with open(stats_file) as csvfile:
        lines = csvfile.readlines()
    numline = len(lines)
    assert numline > 1, 'csv file empty'

    # check the columns
    assert lines[0] == 'particle index,major_axis_length,minor_axis_length,equivalent_diameter,solidity,minr,minc,maxr,maxc,'\
            'probability_oil,probability_other,probability_bubble,probability_faecal_pellets,probability_copepod,'\
            'probability_diatom_chain,probability_oily_gas,export name,timestamp,saturation\n', 'columns not properly built'

    settings = PySilcamSettings(conf_file_out)
    stats = pd.read_csv(stats_file)
    d50 = scpp.d50_from_stats(stats, settings.PostProcess)
    print('Small d50:', d50)
    # glass standards d50 must land in the expected size band
    assert (d50 > 70 and d50 < 90), 'incorrect d50'
Beispiel #10
0
def loop(config_filename, inputQueue, outputQueue, gui=None):
    '''
    Main processing loop, run for each image

    Args:
        config_filename (str)   : path of the config ini file
        inputQueue  ()          : queue where the images are added for processing
                                  initilised using defineQueues()
        outputQueue ()          : queue where information is retrieved from processing
                                  initilised using defineQueues()
        gui=None (Class object) : Queue used to pass information between process thread and GUI
                                  initialised in ProcThread within guicals.py
    '''
    settings = PySilcamSettings(config_filename)
    configure_logger(settings.General)
    logger = logging.getLogger(__name__ + '.silcam_process')

    # load the model for particle classification and keep it for later

    # a tensorflow session must be started on each process in order to function reliably in multiprocess.
    # This also includes the import of tensorflow on each process
    # @todo the loading of the model and prediction functions should be within a class that is initialized by starting a
    #  tensorflow session, then this will be cleaner.
    import tensorflow as tf
    sess = tf.Session()

    nnmodel, class_labels = sccl.load_model(
        model_path=settings.NNClassify.model_path)

    while True:
        task = inputQueue.get()
        if task is None:
            # sentinel: propagate shutdown to the consumer and stop
            outputQueue.put(None)
            break
        stats_all = processImage(nnmodel, class_labels, task, settings, logger,
                                 gui)

        # was `not stats_all is None`; `is not None` is the PEP 8 form
        if stats_all is not None:
            outputQueue.put(stats_all)
        else:
            logger.info('No stats found.')

    # close of the tensorflow session when everything is finished.
    # unsure of behaviour if things crash or are stoppped before reaching this point
    sess.close()
    return
Beispiel #11
0
        def checkTFModel(self):
            '''Verify the classification model header file exists.

            Returns -1 when the run type does not need a model, True
            when header.tfl.txt is found next to the model, and False
            (after a critical dialog) when it is missing.
            '''
            if self.run_type not in (process_mode.process,
                                     process_mode.real_time):
                return -1

            settings = PySilcamSettings(self.configfile)
            model_dir = os.path.split(settings.NNClassify.model_path)[0]
            headerfile = os.path.join(model_dir, 'header.tfl.txt')
            if os.path.isfile(headerfile):
                return True
            QMessageBox.critical(
                self, "Config error!",
                'The path to the classification model cannot be found.\n\n'
                +
                'Please edit the config file in a text editor and make settings.NNClassify.model_path point to the silcam model',
                QMessageBox.Ok)
            return False
Beispiel #12
0
    def fillInConfigEditor(self, inputFile):
        '''Populate the config-editor widgets from the settings in
        *inputFile*.'''
        self.ui.configPathLabel.setText(self.configfileToModify)
        self.settings = PySilcamSettings(inputFile)

        general = self.settings.General
        self.ui.datafileEdit.setText(general.datafile)
        # select the matching log level, defaulting to the first entry
        idx = self.ui.loglevelEdit.findText(general.loglevel,
                                            QtCore.Qt.MatchFixedString)
        self.ui.loglevelEdit.setCurrentIndex(0 if idx == -1 else idx)
        self.ui.logfileEdit.setText(general.logfile)

        self.ui.real_time_statsEdit.setCurrentIndex(
            1 if self.settings.Process.real_time_stats == True else 0)
        self.ui.path_lengthEdit.setText(
            str(self.settings.PostProcess.path_length))

        # rebuild the COM-port list with the configured port first
        self.ui.com_portEdit.clear()
        available_ports = scog.getListPortCom()
        configured_port = self.settings.PostProcess.com_port
        self.ui.com_portEdit.addItem(configured_port)
        self.ui.com_portEdit.setCurrentIndex(0)
        for port in available_ports:
            if port != configured_port:
                self.ui.com_portEdit.addItem(port)

        self.ui.window_sizeEdit.setText(
            str(self.settings.PostProcess.window_size))

        self.ui.export_imagesEdit.setCurrentIndex(
            1 if self.settings.ExportParticles.export_images == True else 0)
        self.ui.outputpathEdit.setText(
            self.settings.ExportParticles.outputpath)
        self.ui.min_lengthEdit.setText(
            str(self.settings.ExportParticles.min_length))
        self.ui.num_imagesEdit.setText(str(
            self.settings.Background.num_images))
        self.ui.thresholdEdit.setText(str(self.settings.Process.threshold))
        self.ui.max_particlesEdit.setText(
            str(self.settings.Process.max_particles))
Beispiel #13
0
def silcam_acquire(datapath, config_filename, writeToDisk=True, gui=None):
    '''Aquire images from the SilCam

    Args:
       datapath              (str)          :  Path to the image storage
       config_filename=None  (str)          :  Camera config file
       writeToDisk=True      (Bool)         :  True will enable writing of raw data to disc
                                               False will disable writing of raw data to disc
       gui=None          (Class object)     :  Queue used to pass information between process thread and GUI
                                               initialised in ProcThread within guicals.py
    '''

    # Load the configuration, create settings object
    settings = PySilcamSettings(config_filename)

    # Print configuration to screen
    print('---- CONFIGURATION ----\n')
    settings.config.write(sys.stdout)
    print('-----------------------\n')

    if (writeToDisk):
        # Copy config file into the data directory, timestamped
        configFile2Copy = datetime.datetime.now().strftime(
            'D%Y%m%dT%H%M%S.%f') + os.path.basename(config_filename)
        copyfile(config_filename, os.path.join(datapath, configFile2Copy))

    configure_logger(settings.General)
    logger = logging.getLogger(__name__ + '.silcam_acquire')

    # update path_length
    updatePathLength(settings, logger)

    acq = Acquire(USE_PYMBA=True)  # ini class
    t1 = time.time()

    aqgen = acq.get_generator(datapath,
                              camera_config_file=config_filename,
                              writeToDisk=writeToDisk)

    for i, (timestamp, imraw) in enumerate(aqgen):
        t2 = time.time()
        aq_freq = np.round(1.0 / (t2 - t1), 1)
        requested_freq = settings.Camera.acquisitionframerateabs
        # sleep off the surplus time so we track the requested frame rate
        rest_time = (1 / requested_freq) - (1 / aq_freq)
        rest_time = np.max([rest_time, 0.])
        time.sleep(rest_time)
        actual_aq_freq = 1 / (1 / aq_freq + rest_time)
        print('Image {0} acquired at frequency {1:.1f} Hz'.format(
            i, actual_aq_freq))
        t1 = time.time()

        # was `not gui == None`; identity test is the idiom for None
        if gui is not None:
            # drain stale items so the GUI only ever sees the newest
            # frame; was a bare `except:`, which also caught
            # KeyboardInterrupt/SystemExit
            while (gui.qsize() > 0):
                try:
                    gui.get_nowait()
                    time.sleep(0.001)
                except Exception:
                    continue
            rtdict = {
                'dias': 0,
                'vd_oil': 0,
                'vd_gas': 0,
                'oil_d50': 0,
                'gas_d50': 0,
                'saturation': 0
            }
            gui.put_nowait((timestamp, imraw, imraw, rtdict))
Beispiel #14
0
def summarise_fancy_stats(stats_csv_file, config_file, monitor=False,
        maxlength=100000, msize=2048, oilgas=sc_pp.outputPartType.all):
    '''
    Plots a summary figure of a dataset which shows
    the volume distribution, number distribution and a montage of randomly selected particles

    Args:
        stats_csv_file (str)            : path of the *-STATS.csv file created by silcam process
        config_file (str)               : path of the config ini file associated with the data
        monitor=False (Bool)            : if True then this function will run forever, continuously reading the stats_csv_file and plotting the data
                                          might be useful in monitoring the progress of processing, for example
        maxlength=100000 (int)          : particles longer than this number will not be put in the montage
        msize=2048 (int)                : the montage created will have a canvas size of msize x msize pixels
        oilgas=oc_pp.outputPartType.all : the oilgas enum if you want to just make the figure for oil, or just gas (defaults to all particles)
    '''
    sns.set_style('ticks')

    settings = PySilcamSettings(config_file)

    min_length = settings.ExportParticles.min_length + 1

    # layout: PSD top-left, number distribution bottom-left,
    # montage spanning the right-hand column
    ax1 = plt.subplot2grid((2,2),(0, 0))
    ax2 = plt.subplot2grid((2,2),(1, 0))
    ax3 = plt.subplot2grid((2,2), (0, 1), rowspan=2)
    logger = logging.getLogger(__name__)

    while True:
        # best-effort montage: fall back to a white canvas if the h5
        # files are missing (was a bare `except:`, which also caught
        # KeyboardInterrupt)
        try:
            montage = sc_pp.make_montage(stats_csv_file,
                    settings.PostProcess.pix_size,
                    roidir=settings.ExportParticles.outputpath,
                    auto_scaler=msize*2, msize=msize,
                    maxlength=maxlength,
                    oilgas=oilgas)
        except Exception:
            montage = np.zeros((msize, msize, 3), dtype=np.uint8) + 255
            logger.warning('Unable to make montage. Check: {0} folder for h5 files'.format(settings.ExportParticles.outputpath))
            logger.warning('  in config file ExportParticles.export_images is {0}'.format(settings.ExportParticles.export_images))

        stats = pd.read_csv(stats_csv_file)
        stats = stats[(stats['major_axis_length'] *
                settings.PostProcess.pix_size) < maxlength]

        # average numer and volume concentrations
        nc, vc, sv_total, junge = sc_pp.nc_vc_from_stats(stats,
                settings.PostProcess, oilgas=oilgas)

        # extract only wanted particle stats
        if oilgas == sc_pp.outputPartType.oil:
            from pysilcam.oilgas import extract_oil
            stats = extract_oil(stats)
        elif oilgas == sc_pp.outputPartType.gas:
            from pysilcam.oilgas import extract_gas
            stats = extract_gas(stats)

        d50 = sc_pp.d50_from_stats(stats, settings.PostProcess)
        total_measured_particles = len(stats['major_axis_length'])

        plt.sca(ax1)
        plt.cla()
        psd(stats, settings.PostProcess, plt.gca())
        plt.title('Volume conc.: {0:.2f}uL/L  d50: {1:.0f}um'.format(vc, d50))

        plt.sca(ax2)
        plt.cla()
        nd(stats, settings.PostProcess, plt.gca(), sample_volume=sv_total)
        plt.title('Number conc.: {0:.0f}#/L  Junge exp.: {1:.2f}'.format(nc,
            junge))

        plt.sca(ax3)
        plt.cla()
        montage_plot(montage, settings.PostProcess.pix_size)
        plt.title('Volume processed: {0:.1f}L  {1:.0f} particles measured'.format(sv_total,
            total_measured_particles))

        plt.draw()
        if monitor:
            plt.pause(1)
        else:
            break
Beispiel #15
0
        def export_summary_data_slow(self):
            '''Export timeseries/average xlsx files from a STATS file and
            save a d50 + GOR time-series figure.

            Asks the user for config and STATS files if not already set,
            exports all/oil/gas data via stats_to_xls_png, then plots oil
            and gas d50 time-series with a GOR series on a twin axis.
            '''

            if self.configfile == '':
                self.status_update('Asking user for config file')
                self.load_sc_config()
                if self.configfile == '':
                    # fixed: previously reported 'Did not get STATS file'
                    # here, although it is the config file that is missing
                    self.status_update('Did not get config file')
                    return

            settings = PySilcamSettings(self.configfile)

            self.stats_filename = ''
            self.status_update('Asking user for *-STATS.csv file')
            self.load_stats_filename()
            if self.stats_filename == '':
                self.status_update('Did not get STATS file')
                return
            stats = pd.read_csv(self.stats_filename)
            stats.sort_values(by=['timestamp'], inplace=True)

            self.status_update('Exporting all data....')
            df = scpp.stats_to_xls_png(self.configfile, self.stats_filename)

            plt.figure(figsize=(20, 10))

            self.status_update('Exporting oil data....')
            df = scpp.stats_to_xls_png(self.configfile,
                                       self.stats_filename,
                                       oilgas=scpp.outputPartType.oil)
            plt.plot(df['Time'], df['D50'], 'ro')
            d50, time = scpp.d50_timeseries(scog.extract_oil(stats),
                                            settings.PostProcess)
            lns1 = plt.plot(time, d50, 'r-', label='OIL')

            self.status_update('Exporting gas data....')
            df = scpp.stats_to_xls_png(self.configfile,
                                       self.stats_filename,
                                       oilgas=scpp.outputPartType.gas)
            plt.plot(df['Time'], df['D50'], 'bo')
            d50, time = scpp.d50_timeseries(scog.extract_gas(stats),
                                            settings.PostProcess)
            lns2 = plt.plot(time, d50, 'b-', label='GAS')
            plt.ylabel('d50 [um]')
            plt.ylim(0, max(plt.gca().get_ylim()))

            self.status_update('Calculating GOR time series....')
            gor, time = scog.gor_timeseries(stats, settings.PostProcess)
            # GOR shares the x-axis but gets its own y-axis
            ax = plt.gca().twinx()
            plt.sca(ax)
            plt.ylabel('GOR')
            plt.ylim(0, max([max(gor), max(plt.gca().get_ylim())]))
            lns3 = ax.plot(time, gor, 'k', label='GOR')

            # combine the legends from both axes
            lns = lns1 + lns2 + lns3
            labs = [l.get_label() for l in lns]
            plt.legend(lns, labs)

            plt.savefig(self.stats_filename.replace('-STATS.csv', '') +
                        '-d50_TimeSeries.png',
                        dpi=600,
                        bbox_inches='tight')

            self.status_update('Export finished.')

            plt.figure(self.fig_main.number)
Beispiel #16
0
def _update_gui_queue(gui, rts, timestamp, imc, imraw, logger):
    '''Push the latest image and real-time stats onto the GUI queue.

    Any items the GUI has not yet consumed are discarded first, so the GUI
    always displays the most recent frame rather than a backlog.

    Args:
      gui       (Queue)    :  Queue shared with the GUI (see ProcThread in guicals.py)
      rts       (rt_stats) :  Real-time statistics object holding the current size distributions
      timestamp            :  Acquisition time of the image
      imc       (ndarray)  :  Background-corrected image
      imraw     (ndarray)  :  Raw image
      logger    (Logger)   :  Logger used for debug messages
    '''
    logger.debug('Putting data on GUI Queue')
    # Drain stale items so the GUI always gets the freshest data.
    while (gui.qsize() > 0):
        try:
            gui.get_nowait()
            time.sleep(0.001)
        except Exception:
            # Queue was emptied concurrently (e.g. queue.Empty); re-check size.
            continue
    rtdict = {
        'dias': rts.dias,
        'vd_oil': rts.vd_oil,
        'vd_gas': rts.vd_gas,
        'oil_d50': rts.oil_d50,
        'gas_d50': rts.gas_d50,
        'saturation': rts.saturation
    }
    gui.put_nowait((timestamp, imc, imraw, rtdict))
    logger.debug('GUI queue updated')


def silcam_process(config_filename,
                   datapath,
                   multiProcess=True,
                   realtime=False,
                   discWrite=False,
                   nbImages=None,
                   gui=None,
                   overwriteSTATS=True):
    '''Run processing of SilCam images

    Args:
      config_filename   (str)               :  The filename (including path) of the config.ini file
      datapath          (str)               :  Path to the data directory
      multiProcess=True (bool)              :  If True, multiprocessing is used
      realtime=False    (bool)              :  If True, a faster but less accurate method is used for segmentation and rts stats become active
      discWrite=False   (bool)              :  True will enable writing of raw data to disc
                                               False will disable writing of raw data to disc
      nbImages=None     (int)               :  Maximum number of images to process (None processes everything)
      gui=None          (Class object)      :  Queue used to pass information between process thread and GUI
                                               initialised in ProcThread within guicals.py
      overwriteSTATS=True (bool)            :  Passed to adminSTATS; controls whether an existing
                                               -STATS.csv output for this dataset is overwritten
    '''
    print(config_filename)

    print('')
    # ---- SETUP ----

    # Load the configuration, create settings object
    settings = PySilcamSettings(config_filename)

    # Print configuration to screen
    print('---- CONFIGURATION ----\n')
    settings.config.write(sys.stdout)
    print('-----------------------\n')

    # Configure logging
    configure_logger(settings.General)
    logger = logging.getLogger(__name__ + '.silcam_process')

    logger.info('Processing path: ' + datapath)

    if realtime:
        if discWrite:
            # copy config file into data path so raw data stays traceable
            configFile2Copy = datetime.datetime.now().strftime(
                'D%Y%m%dT%H%M%S.%f') + os.path.basename(config_filename)
            copyfile(config_filename, os.path.join(datapath, configFile2Copy))

        # update path_length
        updatePathLength(settings, logger)

    # make datafilename autogenerated for easier batch processing
    if not os.path.isdir(settings.General.datafile):
        logger.info('Folder ' + settings.General.datafile +
                    ' was not found and is created')
        os.mkdir(settings.General.datafile)

    procfoldername = os.path.split(datapath)[-1]
    datafilename = os.path.join(settings.General.datafile, procfoldername)
    logger.info('output stats to: ' + datafilename)

    adminSTATS(logger, settings, overwriteSTATS, datafilename, datapath)

    # Initialize the image acquisition generator
    if 'REALTIME_DISC' in os.environ.keys():
        print('acq = Acquire(USE_PYMBA=False)')
        aq = Acquire(USE_PYMBA=False)
    else:
        aq = Acquire(USE_PYMBA=realtime)
    aqgen = aq.get_generator(datapath,
                             writeToDisk=discWrite,
                             camera_config_file=config_filename)

    # Get number of images to use for background correction from config
    print('* Initializing background image handler')
    bggen = backgrounder(
        settings.Background.num_images,
        aqgen,
        bad_lighting_limit=settings.Process.bad_lighting_limit,
        real_time_stats=settings.Process.real_time_stats)

    # Create export directory if needed
    if settings.ExportParticles.export_images:
        if not os.path.isdir(settings.ExportParticles.outputpath):
            logger.info('Export folder ' +
                        settings.ExportParticles.outputpath +
                        ' was not found and is created')
            os.mkdir(settings.ExportParticles.outputpath)

    # ---- END SETUP ----

    # ---- RUN PROCESSING ----

    # If only one core is available, no multiprocessing will be done
    multiProcess = multiProcess and (multiprocessing.cpu_count() > 1)

    print('* Commencing image acquisition and processing')

    # initialise realtime stats class regardless of whether it is used later
    rts = scog.rt_stats(settings)

    if multiProcess:
        proc_list = []
        mem = psutil.virtual_memory()
        memAvailableMb = mem.available >> 20
        # Bound the queue by available memory, but never beyond 4 items per core.
        distributor_q_size = min(int(memAvailableMb / 2 * 1 / 15),
                                 multiprocessing.cpu_count() * 4)

        logger.debug('setting up processing queues')
        inputQueue, outputQueue = defineQueues(realtime, distributor_q_size)

        logger.debug('setting up processing distributor')
        distributor(inputQueue, outputQueue, config_filename, proc_list, gui)

        # iterate on the bggen generator to obtain images
        logger.debug('Starting acquisition loop')
        t2 = time.time()
        for i, (timestamp, imc, imraw) in enumerate(bggen):
            t1 = t2
            t2 = time.time()
            print(t2 - t1, 'Acquisition loop time')
            logger.debug('Corrected image ' + str(timestamp) +
                         ' acquired from backgrounder')

            # stop once the requested number of images has been processed
            if nbImages is not None and nbImages <= i:
                break

            logger.debug('Adding image to processing queue: ' + str(timestamp))
            addToQueue(
                realtime, inputQueue, i, timestamp, imc
            )  # the tuple (i, timestamp, imc) is added to the inputQueue
            logger.debug('Processing queue updated')

            # write the images that are available for the moment into the csv file
            logger.debug('Running collector')
            collector(inputQueue,
                      outputQueue,
                      datafilename,
                      proc_list,
                      False,
                      settings,
                      rts=rts)
            logger.debug('Data collected')

            if gui is not None:
                _update_gui_queue(gui, rts, timestamp, imc, imraw, logger)

            if 'REALTIME_DISC' in os.environ.keys():
                scog.realtime_summary(datafilename + '-STATS.csv',
                                      config_filename)

        logger.debug('Acquisition loop completed')
        if not realtime:
            logger.debug('Halting processes')
            # one sentinel per worker tells each process to terminate
            for p in proc_list:
                inputQueue.put(None)

        # some images might still be waiting to be written to the csv file
        logger.debug('Running collector on left over data')
        collector(inputQueue,
                  outputQueue,
                  datafilename,
                  proc_list,
                  True,
                  settings,
                  rts=rts)
        logger.debug('All data collected')

        for p in proc_list:
            p.join()
            logger.info('%s.exitcode = %s' % (p.name, p.exitcode))

    else:  # no multiprocessing
        # load the model for particle classification and keep it for later
        nnmodel = []
        nnmodel, class_labels = sccl.load_model(
            model_path=settings.NNClassify.model_path)

        # iterate on the bggen generator to obtain images
        for i, (timestamp, imc, imraw) in enumerate(bggen):
            # stop once the requested number of images has been processed
            if nbImages is not None and nbImages <= i:
                break

            image = (i, timestamp, imc)
            # one single image is processed at a time
            stats_all = processImage(nnmodel, class_labels, image, settings,
                                     logger, gui)

            if stats_all is not None:  # if frame processed
                # write the image into the csv file
                writeCSV(datafilename, stats_all)
                if 'REALTIME_DISC' in os.environ.keys():
                    scog.realtime_summary(datafilename + '-STATS.csv',
                                          config_filename)

            if gui is not None:
                collect_rts(settings, rts, stats_all)
                _update_gui_queue(gui, rts, timestamp, imc, imraw, logger)

    print('PROCESSING COMPLETE.')
Beispiel #17
0
 def load_settings(self, configfile):
     """Parse *configfile* and store the resulting settings object on the instance."""
     loaded = PySilcamSettings(configfile)
     self.settings = loaded
Beispiel #18
0
def export_timeseries(configfile: str, statsfile: str):
    '''Export time-series and averaged size distributions from a -STATS.csv file.

    Reads the particle statistics, splits them into oil and gas fractions,
    builds per-timestamp volume distributions and d50 values, writes six Excel
    files (-TIMESERIES and -AVERAGE, each for all/oil/gas) next to the stats
    file, and saves a d50/GOR time-series figure as a PNG.

    Args:
        configfile (str) : Path of the config.ini file for this dataset
        statsfile  (str) : Path of the -STATS.csv file to summarise
    '''

    settings = PySilcamSettings(configfile)

    print('Loading STATS data: ', statsfile)
    stats = pd.read_csv(statsfile)

    stats['timestamp'] = pd.to_datetime(stats['timestamp'])

    stats.sort_values(by='timestamp', inplace=True)

    print('Extracting oil and gas')
    stats_oil = scog.extract_oil(stats)
    stats_gas = scog.extract_gas(stats)

    print('Calculating timeseries')
    # unique image timestamps; one time-series sample is produced per timestamp
    u = pd.to_datetime(stats['timestamp']).unique()

    sample_volume = sc_pp.get_sample_volume(settings.PostProcess.pix_size, path_length=settings.PostProcess.path_length)

    # half the averaging window (in seconds), used for the rolling-average stats
    td = pd.to_timedelta('00:00:' + str(settings.PostProcess.window_size / 2.))

    vdts_all = []
    vdts_oil = []
    vdts_gas = []
    d50_all = []
    d50_oil = []
    d50_gas = []
    timestamp = []
    d50_av_all = []
    d50_av_oil = []
    d50_av_gas = []
    gor = []
    for s in tqdm(u):
        timestamp.append(pd.to_datetime(s))
        dt = pd.to_datetime(s)

        # volume distributions for the particles recorded at this exact timestamp
        dias, vd_all = sc_pp.vd_from_stats(stats[stats['timestamp'] == s],
                                 settings.PostProcess)
        dias, vd_oil = sc_pp.vd_from_stats(stats_oil[stats_oil['timestamp'] == s],
                                 settings.PostProcess)
        dias, vd_gas = sc_pp.vd_from_stats(stats_gas[stats_gas['timestamp'] == s],
                                 settings.PostProcess)

        # normalise by the total sampled volume (per-image volume * image count)
        nims = sc_pp.count_images_in_stats(stats[stats['timestamp'] == s])
        sv = sample_volume * nims
        vd_all /= sv
        vd_oil /= sv
        vd_gas /= sv
        d50_all.append(sc_pp.d50_from_vd(vd_all, dias))
        d50_oil.append(sc_pp.d50_from_vd(vd_oil, dias))
        d50_gas.append(sc_pp.d50_from_vd(vd_gas, dias))

        vdts_all.append(vd_all)
        vdts_oil.append(vd_oil)
        vdts_gas.append(vd_gas)

        # window-averaged stats centred on this timestamp (dt-td, dt+td)
        stats_av = stats[(stats['timestamp']<(dt+td)) & (stats['timestamp']>(dt-td))]
        stats_av_oil = scog.extract_oil(stats_av)
        stats_av_gas = scog.extract_gas(stats_av)
        d50_av_all.append(sc_pp.d50_from_stats(stats_av, settings.PostProcess))
        d50_av_oil.append(sc_pp.d50_from_stats(stats_av_oil, settings.PostProcess))
        d50_av_gas.append(sc_pp.d50_from_stats(stats_av_gas, settings.PostProcess))

        dias, vdts_av = sc_pp.vd_from_stats(stats_av, settings.PostProcess)
        dias, vdts_av_oil = sc_pp.vd_from_stats(stats_av_oil, settings.PostProcess)
        dias, vdts_av_gas = sc_pp.vd_from_stats(stats_av_gas, settings.PostProcess)
        nims = sc_pp.count_images_in_stats(stats_av)
        sv = sample_volume * nims
        vdts_av /= sv
        vdts_av_oil /= sv
        vdts_av_gas /= sv

        # gas-oil ratio from the window-averaged total volumes
        # NOTE(review): divides by the oil volume sum — yields inf/nan if no oil in window
        gor.append(np.sum(vdts_av_gas)/np.sum(vdts_av_oil))

    # output files share the stats file's location and base name
    outpath, outfile = os.path.split(statsfile)
    outfile = outfile.replace('-STATS.csv','')
    outfile = os.path.join(outpath, outfile)

    # write per-timestamp volume distributions (one Excel sheet per fraction)
    time_series = pd.DataFrame(data=np.squeeze(vdts_all), columns=dias)
    time_series['D50'] = d50_all
    time_series['Time'] = timestamp
    time_series.to_excel(outfile +
            '-TIMESERIES' + '' + '.xlsx')

    time_series = pd.DataFrame(data=np.squeeze(vdts_oil), columns=dias)
    time_series['D50'] = d50_oil
    time_series['Time'] = timestamp
    time_series.to_excel(outfile +
            '-TIMESERIES' + 'oil' + '.xlsx')

    time_series = pd.DataFrame(data=np.squeeze(vdts_gas), columns=dias)
    time_series['D50'] = d50_gas
    time_series['Time'] = timestamp
    time_series.to_excel(outfile +
            '-TIMESERIES' + 'gas' + '.xlsx')

    # d50 time-series figure: oil (red) and gas (blue), GOR on a twin axis
    plt.figure(figsize=(20, 10))

    # skip plotting only when every value is NaN (np.min over the bool mask)
    if not np.min(np.isnan(d50_oil)):
        plt.plot(timestamp, d50_oil, 'ro')
    if not np.min(np.isnan(d50_av_oil)):
        plt.plot(timestamp, d50_av_oil, 'r-')
    # invisible line used only to create the legend entry
    lns1 = plt.plot(np.nan, np.nan, 'r-', label='OIL')

    if not np.min(np.isnan(d50_gas)):
        plt.plot(timestamp, d50_gas, 'bo')
    if not np.min(np.isnan(d50_av_gas)):
        plt.plot(timestamp, d50_av_gas, 'b-')
    lns2 = plt.plot(np.nan, np.nan, 'b-', label='GAS')

    plt.ylabel('d50 [um]')
    plt.ylim(0, max(plt.gca().get_ylim()))

    # second y-axis for the gas-oil ratio
    ax = plt.gca().twinx()
    plt.sca(ax)
    plt.ylabel('GOR')
    if not np.min(np.isnan(gor)):
        plt.plot(timestamp, gor, 'k')
    lns3 = plt.plot(np.nan, np.nan, 'k', label='GOR')
    plt.ylim(0, max(plt.gca().get_ylim()))

    # combined legend for both axes
    lns = lns1 + lns2 + lns3
    labs = [l.get_label() for l in lns]
    plt.legend(lns, labs)

    plt.savefig(outfile +
                '-d50_TimeSeries.png', dpi=600, bbox_inches='tight')

    plt.close()
    print('Export figure made. ')
    print('Exporting averages... ')

    # average all
    dias, vd = sc_pp.vd_from_stats(stats,
                             settings.PostProcess)
    nims = sc_pp.count_images_in_stats(stats)
    sv = sample_volume * nims
    vd /= sv
    d50 = sc_pp.d50_from_vd(vd, dias)
    dfa = pd.DataFrame(data=[vd], columns=dias)
    dfa['d50'] = d50
    # stamp the average with the earliest time in the dataset
    timestamp = np.min(pd.to_datetime(stats['timestamp']))
    dfa['Time'] = timestamp
    dfa.to_excel(statsfile.replace('-STATS.csv', '') +
                 '-AVERAGE' + '' + '.xlsx')

    # average oil
    dias, vd = sc_pp.vd_from_stats(stats_oil,
                             settings.PostProcess)
    vd /= sv # sample volume remains the same as 'all'
    d50 = sc_pp.d50_from_vd(vd, dias)
    dfa = pd.DataFrame(data=[vd], columns=dias)
    dfa['d50'] = d50
    timestamp = np.min(pd.to_datetime(stats['timestamp'])) # still use total stats for this time
    dfa['Time'] = timestamp
    dfa.to_excel(statsfile.replace('-STATS.csv', '') +
                 '-AVERAGE' + 'oil' + '.xlsx')

    # average gas
    dias, vd = sc_pp.vd_from_stats(stats_gas,
                             settings.PostProcess)
    vd /= sv # sample volume remains the same as 'all'
    d50 = sc_pp.d50_from_vd(vd, dias)
    dfa = pd.DataFrame(data=[vd], columns=dias)
    dfa['d50'] = d50
    timestamp = np.min(pd.to_datetime(stats['timestamp'])) # still use total stats for this time
    dfa['Time'] = timestamp
    dfa.to_excel(statsfile.replace('-STATS.csv', '') +
                 '-AVERAGE' + 'gas' + '.xlsx')

    print('Export done: ', outfile)