Example #1
def load_darkfield(self):
    """
    Reads the darkfield file and saves the frame as a class member.

    Parameters
    ----------
    none

    Returns
    -------
    nothing
    """
    try:
        self.darkfield = ut.read_tif(self.darkfield_filename)
    except:
        print("Darkfield filename not set for TIM1, will not correct")
Example #2
def load_whitefield(self):
    """
    Reads the whitefield file and saves the frame as a class member.

    Parameters
    ----------
    none

    Returns
    -------
    nothing
    """
    try:
        self.whitefield = ut.read_tif(self.whitefield_filename)
        # replace low whitefield counts with a very large sentinel value
        self.whitefield = np.where(self.whitefield < 100, 1e20, self.whitefield)
    except:
        print("Whitefield filename not set for TIM2")
        raise
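The thresholding above only makes sense if the whitefield is later used as a divisor, which is not shown in these examples. The following is a minimal sketch of that assumption, with made-up pixel values, showing how the 1e20 sentinel drives low-count (dead) pixels toward zero after normalization.

import numpy as np

whitefield = np.array([5., 250., 300.])                      # counts; the first pixel is "dead"
whitefield = np.where(whitefield < 100, 1e20, whitefield)    # same sentinel trick as above
frame = np.array([40., 500., 600.])
corrected = frame / whitefield                               # the dead pixel is driven toward 0
print(corrected)                                             # roughly [4e-19, 2.0, 2.0]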
Example #3
def get_frame(self, filename, roi, Imult):
    """
    Reads a raw frame from a file and applies the correction for the concrete detector.

    Parameters
    ----------
    filename : str
        data file name
    roi : list
        detector area used to take the image. If None, the entire detector area will be used.
    Imult : float
        intensity multiplier

    Returns
    -------
    raw_frame : ndarray
        frame after instrument correction
    """
    self.raw_frame = ut.read_tif(filename)
    return self.raw_frame
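In this base implementation get_frame only reads the TIFF and stores it; the roi and Imult arguments are accepted but not applied. Below is a purely hypothetical subclass sketch of where detector-specific corrections could go; it is not one of the package's actual detector classes, and the darkfield handling, roi layout, and Imult usage are assumptions.

class HypotheticalDetector:
    # illustrative only; relies on the same ut.read_tif helper used throughout these examples
    def get_frame(self, filename, roi, Imult):
        frame = ut.read_tif(filename)
        if getattr(self, 'darkfield', None) is not None:
            frame = frame - self.darkfield            # assumed darkfield subtraction
        if roi is not None:
            # assumed roi handling; the real roi layout is detector specific
            frame = frame[roi[0]:roi[1], roi[2]:roi[3]]
        self.raw_frame = frame * Imult                # assumed use of the intensity multiplier
        return self.raw_frame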
Example #4
    def init_dev(self, device_id):
        """
        Sets the compute device and loads and prepares the data from a tif/tiff or npy file.
        Returns 0 on success and -1 on failure.
        """
        self.dev = device_id
        if device_id != -1:
            try:
                devlib.set_device(device_id)
            except Exception as e:
                print(e)
                print('may need to restart GUI')
                return -1
        if self.data_file.endswith('tif') or self.data_file.endswith('tiff'):
            try:
                data_np = ut.read_tif(self.data_file)
                data = devlib.from_numpy(data_np)
            except Exception as e:
                print(e)
                return -1
        elif self.data_file.endswith('npy'):
            try:
                data = devlib.load(self.data_file)
            except Exception as e:
                print(e)
                return -1
        else:
            print('no data file found')
            return -1

        # in the formatted data the max is in the center; we want it in the corner, so do an fft shift
        self.data = devlib.fftshift(devlib.absolute(data))
        self.dims = devlib.dims(self.data)
        print('data shape', self.dims)

        if self.need_save_data:
            self.saved_data = devlib.copy(self.data)
            self.need_save_data = False

        return 0
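A minimal calling sketch, assuming pars and datafile have been prepared the way Examples #7 and #8 prepare them; callers check the return code before proceeding, as Example #8 does.

worker = calc.Rec(pars, datafile)     # pars: parsed configuration, datafile: path to the data file
if worker.init_dev(0) < 0:            # try device 0; pass -1 to skip explicit device selection
    raise RuntimeError('device initialization or data loading failed')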
Example #5
def get_raw_frame(self, filename):
    try:
        self.raw_frame = ut.read_tif(filename)
    except:
        print("problem reading raw file ", filename)
        raise
Example #6
def manage_reconstruction(proc, experiment_dir, rec_id=None):
    """
    This function starts the interruption discovery process and continues the reconstruction processing.

    It reads the configuration file defined as <experiment_dir>/conf/config_rec.
    If multiple generations are configured, or separate scans are discovered, it will start concurrent reconstructions.
    It creates an image.npy file for each successful reconstruction.

    Parameters
    ----------
    proc : str
        processing library; choices are: auto, np, cp, af, cpu, cuda, opencl
    experiment_dir : str
        directory where the experiment files are located
    rec_id : str
        optional; if given, an alternate configuration file will be used for reconstruction (i.e. <rec_id>_config_rec)

    Returns
    -------
    nothing
    """
    print('starting reconstruction')

    # the rec_id is a prefix added to the config_rec configuration file name. If defined, use this configuration.
    conf_dir = os.path.join(experiment_dir, 'conf')
    if rec_id is None:
        conf_file = os.path.join(conf_dir, 'config_rec')
    else:
        conf_file = os.path.join(conf_dir, rec_id + '_config_rec')

    # check if file exists
    if not os.path.isfile(conf_file):
        print('no configuration file ' + conf_file + ' found')
        return

    # verify the configuration file
    if not ver.ver_config_rec(conf_file):
        # if not verified, the ver will print message
        return

    try:
        config_map = ut.read_config(conf_file)
        if config_map is None:
            print("can't read configuration file " + conf_file)
            return
    except Exception as e:
        print('Cannot parse configuration file ' + conf_file +
              ' , check for matching parenthesis and quotations')
        print(str(e))
        return

    # find which library to run on; the default is numpy ('np')
    lib = 'np'
    if proc == 'auto':
        try:
            import cupy
            lib = 'cp'
        except:
            # currently arrayfire cannot be installed on linux, so numpy is the second choice
            pass
    elif proc == 'cp':
        try:
            import cupy
            lib = 'cp'
        except:
            print('cupy is not installed, select different library (proc)')
            return
    elif proc == 'np':
        pass  # lib set to 'np'
    elif proc == 'af' or proc == 'cpu' or proc == 'cuda' or proc == 'opencl':
        try:
            import arrayfire
            lib = proc
        except:
            print(
                'arrayfire is not installed, select different library (proc)')
            return
    else:
        print('invalid "proc" value', proc, 'is not supported')
        return

    # the exp_dirs_data list holds (datafile, directory) pairs, where the directory is the root of the
    # data/data.tif file and datafile is the data.tif file in that directory
    exp_dirs_data = []
    # experiment may be multi-scan in which case reconstruction will run for each scan
    for dir in os.listdir(experiment_dir):
        if dir.startswith('scan'):
            datafile = os.path.join(experiment_dir, dir, 'data', 'data.tif')
            if os.path.isfile(datafile):
                exp_dirs_data.append(
                    (datafile, os.path.join(experiment_dir, dir)))
    # if there are no scan directories, assume it is a combined-scans experiment
    if len(exp_dirs_data) == 0:
        # in the typical scenario data_dir is not configured and defaults to <experiment_dir>/data
        # the data_dir is ignored in the multi-scan scenario
        try:
            data_dir = config_map.data_dir
        except AttributeError:
            data_dir = os.path.join(experiment_dir, 'data')
        datafile = os.path.join(data_dir, 'data.tif')
        if os.path.isfile(datafile):
            exp_dirs_data.append((datafile, experiment_dir))
    no_runs = len(exp_dirs_data)
    if no_runs == 0:
        print('did not find data.tif or data.npy file(s)')
        return
    try:
        generations = config_map.generations
    except:
        generations = 0
    try:
        reconstructions = config_map.reconstructions
    except:
        reconstructions = 1
    device_use = []
    if lib == 'cpu' or lib == 'np':
        cpu_use = [-1] * reconstructions
        if no_runs > 1:
            for _ in range(no_runs):
                device_use.append(cpu_use)
        else:
            device_use = cpu_use
    else:
        try:
            devices = config_map.device
        except:
            devices = [-1]

        if no_runs * reconstructions > 1:
            data_shape = ut.read_tif(exp_dirs_data[0][0]).shape
            device_use = get_gpu_use(devices, no_runs, reconstructions,
                                     data_shape)
        else:
            device_use = devices

    if no_runs == 1:
        if len(device_use) == 0:
            device_use = [-1]
        dir_data = exp_dirs_data[0]
        datafile = dir_data[0]
        dir = dir_data[1]
        if generations > 1:
            gen_rec.reconstruction(lib, conf_file, datafile, dir, device_use)
        elif reconstructions > 1:
            mult_rec.reconstruction(lib, conf_file, datafile, dir, device_use)
        else:
            rec.reconstruction(lib, conf_file, datafile, dir, device_use)
    else:
        if len(device_use) == 0:
            device_use = [[-1]]
        else:
            # check whether it is worth using the last chunk
            if lib != 'cpu' and lib != 'np' and len(
                    device_use[0]) > len(device_use[-1]) * 2:
                device_use = device_use[0:-1]
        if generations > 1:
            r = 'g'
        elif reconstructions > 1:
            r = 'm'
        else:
            r = 's'
        q = Queue()
        for gpus in device_use:
            q.put((None, gpus))
        # index keeps track of the multiple directories
        index = 0
        processes = {}
        while index < no_runs:
            pid, gpus = q.get()
            if pid is not None:
                os.kill(pid, signal.SIGKILL)
                del processes[pid]
            datafile = exp_dirs_data[index][0]
            dir = exp_dirs_data[index][1]
            p = Process(target=rec_process,
                        args=(lib, conf_file, datafile, dir, gpus, r, q))
            p.start()
            processes[p.pid] = index
            index += 1

        # wait for the remaining processes to finish, then close the queue
        while len(processes.items()) > 0:
            pid, gpus = q.get()
            os.kill(pid, signal.SIGKILL)
            time.sleep(.1)
            del processes[pid]
        q.close()

    print('finished reconstruction')
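manage_reconstruction takes the processing library, the experiment directory, and an optional rec_id. Below is a hypothetical command-line wrapper; it is not the package's own entry point, and the argument names are illustrative only.

import argparse

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('experiment_dir', help='directory holding conf/config_rec and the data')
    parser.add_argument('--proc', default='auto',
                        help='processing library: auto, np, cp, af, cpu, cuda or opencl')
    parser.add_argument('--rec_id', default=None,
                        help='prefix selecting an alternate <rec_id>_config_rec file')
    args = parser.parse_args()
    manage_reconstruction(args.proc, args.experiment_dir, args.rec_id)

if __name__ == '__main__':
    main()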
Example #7
def reconstruction(lib, conf_file, datafile, dir, devices):
    """
    This function controls the reconstruction utilizing a genetic algorithm.

    Parameters
    ----------
    lib : str
        library to run the reconstruction on (np, cp, af, cpu, opencl, or cuda)

    conf_file : str
        configuration file with reconstruction parameters

    datafile : str
        name of the file with initial data

    dir : str
        a parent directory that holds the generations. It can be experiment directory or scan directory.

    devices : list
        list of GPUs available for these reconstructions

    Returns
    -------
    nothing
    """
    pars = Params(conf_file)
    er_msg = pars.set_params()
    if er_msg is not None:
        return er_msg

    if lib == 'af' or lib == 'cpu' or lib == 'opencl' or lib == 'cuda':
        if datafile.endswith('tif') or datafile.endswith('tiff'):
            try:
                data = ut.read_tif(datafile)
            except:
                print('could not load data file', datafile)
                return
        elif datafile.endswith('npy'):
            try:
                data = np.load(datafile)
            except:
                print('could not load data file', datafile)
                return
        else:
            print('no data file found')
            return
        set_lib('af', len(data.shape))
        if lib != 'af':
            devlib.set_backend(lib)
    else:
        set_lib(lib)

    try:
        reconstructions = pars.reconstructions
    except:
        reconstructions = 1

    try:
        save_dir = pars.save_dir
    except AttributeError:
        filename = conf_file.split('/')[-1]
        save_dir = os.path.join(dir, filename.replace('config_rec', 'results'))
        #    temp_dir = os.path.join(save_dir, 'temp')

    generations = pars.generations

    # init starting values
    # if multiple reconstructions configured (typical for genetic algorithm), use "reconstruction_multi" module
    if reconstructions > 1:
        if pars.ga_fast:  # in fast GA the number of processes equals the number of available GPU slots (the same GPU may appear more than once if it can fit more reconstructions)
            reconstructions = min(reconstructions, len(devices))
            workers = [
                calc.Rec(pars, datafile) for _ in range(reconstructions)
            ]
            #            for worker in workers:
            #                worker.init_dev(devices.pop())
            processes = {}

            for worker in workers:
                worker_qin = mp.Queue()
                worker_qout = mp.Queue()
                process = mp.Process(target=worker.fast_ga,
                                     args=(worker_qin, worker_qout))
                process.start()
                processes[process.pid] = [worker_qin, worker_qout]

            prev_dirs = None
            for g in range(generations):
                print('starting generation', g)
                if g == 0:
                    for pid in processes:
                        worker_qin = processes[pid][0]
                        worker_qin.put(('init_dev', devices.pop()))
                    bad_processes = []
                    for pid in processes:
                        worker_qout = processes[pid][1]
                        ret = worker_qout.get()
                        if ret < 0:
                            worker_qin = processes[pid][0]
                            worker_qin.put('done')
                            bad_processes.append(pid)
                    # remove bad processes from dict (in the future we may reuse them)
                    for pid in bad_processes:
                        processes.pop(pid)
                for pid in processes:
                    worker_qin = processes[pid][0]
                    if prev_dirs is None:
                        prev_dir = None
                    else:
                        prev_dir = prev_dirs[pid]
                    worker_qin.put(('init', prev_dir, g))
                for pid in processes:
                    worker_qout = processes[pid][1]
                    ret = worker_qout.get()
                if g > 0:
                    for pid in processes:
                        worker_qin = processes[pid][0]
                        worker_qin.put('breed')
                    for pid in processes:
                        worker_qout = processes[pid][1]
                        ret = worker_qout.get()
                for pid in processes:
                    worker_qin = processes[pid][0]
                    worker_qin.put('iterate')
                bad_processes = []
                for pid in processes:
                    worker_qout = processes[pid][1]
                    ret = worker_qout.get()
                    if ret < 0:
                        worker_qin = processes[pid][0]
                        worker_qin.put('done')
                        bad_processes.append(pid)
                # remove bad processes from dict (in the future we may reuse them)
                for pid in bad_processes:
                    processes.pop(pid)
                # get the metric, i.e. the goodness of reconstruction, from each run
                proc_metrics = {}
                for pid in processes:
                    worker_qin = processes[pid][0]
                    metric_type = pars.metrics[g]
                    worker_qin.put(('get_metric', metric_type))
                for pid in processes:
                    worker_qout = processes[pid][1]
                    metric = worker_qout.get()
                    proc_metrics[pid] = metric
                # order processes by metric
                proc_ranks = order_processes(proc_metrics, metric_type)
                # cull
                culled_proc_ranks = cull(proc_ranks,
                                         pars.ga_reconstructions[g])
                # remove culled processes from list (in the future we may reuse them)
                for i in range(len(culled_proc_ranks), len(proc_ranks)):
                    pid = proc_ranks[i][0]
                    worker_qin = processes[pid][0]
                    worker_qin.put('done')
                    processes.pop(pid)
                # save the results; we may modify this later to save only some
                gen_save_dir = os.path.join(save_dir, 'g_' + str(g))
                prev_dirs = {}
                for i in range(len(culled_proc_ranks)):
                    pid = culled_proc_ranks[i][0]
                    worker_qin = processes[pid][0]
                    worker_qin.put(
                        ('save_res', os.path.join(gen_save_dir, str(i))))
                    prev_dirs[pid] = os.path.join(gen_save_dir, str(i))
                for pid in processes:
                    worker_qout = processes[pid][1]
                    ret = worker_qout.get()
                if len(processes) == 0:
                    break
            for pid in processes:
                worker_qin = processes[pid][0]
                worker_qin.put('done')
        else:  # not fast GA
            rec = multi
            prev_dirs = []
            for _ in range(reconstructions):
                prev_dirs.append(None)
            for g in range(generations):
                print('starting generation', g)
                gen_save_dir = os.path.join(save_dir, 'g_' + str(g))
                metric_type = pars.metrics[g]
                workers = [
                    calc.Rec(pars, datafile) for _ in range(len(prev_dirs))
                ]
                prev_dirs, evals = rec.multi_rec(gen_save_dir, devices,
                                                 workers, prev_dirs,
                                                 metric_type, g)

                # the results are saved in a list of directories under save_dir;
                # they will be ranked and moved to temporary ranked directories
                order_dirs(prev_dirs, evals, metric_type)
                prev_dirs = cull(prev_dirs, pars.ga_reconstructions[g])
    else:
        print("GA not implemented for a single reconstruction")

    print('done gen')
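The GA path above reads several attributes from the parsed configuration. The names below are taken from the attribute accesses in the code; the values, and the actual config_rec syntax expected by ut.read_config and Params, are illustrative only.

# attributes referenced by the GA reconstruction (values are made up for illustration)
ga_related_pars = {
    'reconstructions': 4,                    # pars.reconstructions: population size per generation
    'generations': 3,                        # pars.generations
    'ga_fast': True,                         # pars.ga_fast: one long-lived worker process per device slot
    'metrics': ['<metric_type>'] * 3,        # pars.metrics[g]: metric used to rank generation g
    'ga_reconstructions': [4, 2, 1],         # pars.ga_reconstructions[g]: how many survive the cull
    'save_dir': '<experiment_dir>/results',  # pars.save_dir (optional; defaulted if absent)
}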
Example #8
def reconstruction(lib, conf_file, datafile, dir, dev):
    """
    Controls single reconstruction.

    This function checks whether the reconstruction is a continuation or an initial reconstruction. If it is a continuation, the image, support, and coherence arrays are read from the continue directory; otherwise they are initialized to None.
    It starts the reconstruction and saves the results.

    Parameters
    ----------
    lib : str
        library to run the reconstruction on (np, cp, af, cpu, opencl, or cuda)

    conf_file : str
        configuration file name

    datafile : str
        data file name

    dir : str
        a parent directory that holds the reconstructions. It can be experiment directory or scan directory.

    dev : list
        list containing the id of the GPU this reconstruction will use, or -1 if running on CPU or leaving the GPU assignment to the OS


    Returns
    -------
    nothing
    """
    pars = Params(conf_file)
    er_msg = pars.set_params()
    if er_msg is not None:
        return er_msg

    if lib == 'af' or lib == 'cpu' or lib == 'opencl' or lib == 'cuda':
        if datafile.endswith('tif') or datafile.endswith('tiff'):
            try:
                data = ut.read_tif(datafile)
            except:
                print('could not load data file', datafile)
                return
        elif datafile.endswith('npy'):
            try:
                data = np.load(datafile)
            except:
                print('could not load data file', datafile)
                return
        else:
            print('no data file found')
            return
        print('data shape', data.shape)
        set_lib('af', len(data.shape))
        if lib != 'af':
            devlib.set_backend(lib)
    else:
        set_lib(lib)

    if not pars.cont:
        continue_dir = None
    else:
        continue_dir = pars.continue_dir

    try:
        save_dir = pars.save_dir
    except AttributeError:
        filename = conf_file.split('/')[-1]
        save_dir = os.path.join(dir, filename.replace('config_rec', 'results'))

    worker = calc.Rec(pars, datafile)

    if worker.init_dev(dev[0]) < 0:
        return

    worker.init(continue_dir)
    ret_code = worker.iterate()
    if ret_code == 0:
        worker.save_res(save_dir)
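A minimal calling sketch, mirroring the single-run branch of Example #6; the paths are placeholders.

lib = 'np'                                            # or 'cp', 'af', 'cpu', 'cuda', 'opencl'
conf_file = '<experiment_dir>/conf/config_rec'
datafile = '<experiment_dir>/data/data.tif'
reconstruction(lib, conf_file, datafile, '<experiment_dir>', [-1])   # [-1]: CPU / OS-assigned device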