Example #1
def read_dsg_vars_hdf5( filename ):
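    # 'pt' and 'nm' are presumably PyTables and numpy (sfepy-style
    # aliases: import tables as pt; import numpy as nm).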
    if not pt.is_hdf5_file( filename ):
        raise ValueError('not an HDF5 file! (%s)' % filename)

    fd = pt.open_file( filename, mode = 'r' )
    aux1 = fd.get_node( '/dsgvar/inx' ).read()
    aux2 = fd.get_node( '/dsgvar/val' ).read()
    aux3 = fd.get_node( '/dsgvar/nsbdsg' ).read()
    dsg_vars = DesignVariables( indx = nm.asarray( aux1, dtype = nm.int32 ),
                               cxyz =  nm.asarray( aux2 ),
                               null_space_b = nm.asarray( aux3 ) )
    dsg_vars.indx = dsg_vars.indx.transpose()
    dsg_vars.indx[:,1] -= 1

    # No. of design variables.
    dsg_vars.n_dsg = dsg_vars.null_space_b.shape[1]
    # No. of control points.
    dsg_vars.n_cp = dsg_vars.indx.shape[0]
    # Design vector and initial design vector.
    dsg_vars.val0 = nm.zeros( (dsg_vars.n_dsg,), dtype = nm.float64 )
    dsg_vars.val = dsg_vars.val0.copy()

    fd.close()

    return dsg_vars
Example #2
def read_dsg_vars_hdf5(filename):
    if not pt.is_hdf5_file(filename):
        raise ValueError('not an HDF5 file! (%s)' % filename)

    fd = pt.open_file(filename, mode='r')
    aux1 = fd.get_node('/dsgvar/inx').read()
    aux2 = fd.get_node('/dsgvar/val').read()
    aux3 = fd.get_node('/dsgvar/nsbdsg').read()
    dsg_vars = DesignVariables(indx=nm.asarray(aux1, dtype=nm.int32),
                               cxyz=nm.asarray(aux2),
                               null_space_b=nm.asarray(aux3))
    dsg_vars.indx = dsg_vars.indx.transpose()
    dsg_vars.indx[:, 1] -= 1

    # No. of design variables.
    dsg_vars.n_dsg = dsg_vars.null_space_b.shape[1]
    # No. of control points.
    dsg_vars.n_cp = dsg_vars.indx.shape[0]
    # Design vector and initial design vector.
    dsg_vars.val0 = nm.zeros((dsg_vars.n_dsg, ), dtype=nm.float64)
    dsg_vars.val = dsg_vars.val0.copy()

    fd.close()

    return dsg_vars
Example #3
    def __init__(self, h5parmFile, readonly=True, complevel=0, complib='zlib'):

        self.H = None  # variable to store the PyTables object
        self.fileName = h5parmFile

        if os.path.isfile(h5parmFile):
            if not tables.is_hdf5_file(h5parmFile):
                logging.critical('Not an HDF5 file: '+h5parmFile+'.')
                raise Exception('Not an HDF5 file: '+h5parmFile+'.')
            if readonly:
                logging.debug('Reading from '+h5parmFile+'.')
                self.H = tables.open_file(h5parmFile, 'r', IO_BUFFER_SIZE=1024*1024*10, BUFFER_TIMES=500)
            else:
                logging.debug('Appending to '+h5parmFile+'.')
                self.H = tables.open_file(h5parmFile, 'r+', IO_BUFFER_SIZE=1024*1024*10, BUFFER_TIMES=500)

            # Check if it's a valid H5parm file: the attribute h5parm_version should be defined in every solset
            is_h5parm = True
            for node in self.H.root:
                if 'h5parm_version' not in node._v_attrs:
                    is_h5parm=False
                    break
            if not is_h5parm:
                logging.warning('Missing H5parm version. Is this a properly made h5parm?')

        else:
            if readonly:
                raise Exception('Missing file '+h5parmFile+'.')
            else:
                logging.debug('Creating '+h5parmFile+'.')
                # add a compression filter
                f = tables.Filters(complevel=complevel, complib=complib)
                self.H = tables.open_file(h5parmFile, filters=f, mode='w', IO_BUFFER_SIZE=1024*1024*10, BUFFER_TIMES=500)
Example #4
    def write(self, fname):
        '''write data to file'''
        self._filename = fname
        if os.path.exists(self._filename):
            self._ishdf = tables.is_hdf5_file(self._filename)
        else:
            self._ishdf = (os.path.splitext(self._filename)[1]
                           in ['.h5', '.hdf5', '.hdf', '.he5'])

        if self._ishdf:
            if self._debug:
                print('Writing to HDF-5', file=sys.stderr)
            filters = tables.Filters(complevel=6,
                                     complib='zlib',
                                     fletcher32=True)
            self._filehandle = tables.open_file(self._filename,
                                                mode='w',
                                                title=self.name(),
                                                filters=filters)
            self.write_hdf()
        else:
            if self._debug:
                print('Writing to ASCII', file=sys.stderr)
            self._filehandle = open(self._filename, 'w')
            self.write_ascii()
        self._filehandle.close()
        self._filehandle = None
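
A hypothetical driver for the writer above (write_hdf()/write_ascii() and name() are hooks from the example's class; the subclass and file names are assumed):

store = SomeDatastore()      # hypothetical subclass implementing the hooks
store.write('results.h5')    # new file: HDF5 picked from the extension
store.write('results.txt')   # any other extension falls back to ASCII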
Example #5
def get_homog_coefs_linear(ts, coor, mode,
                           micro_filename=None, regenerate=False,
                           coefs_filename=None, define_args=None):

    oprefix = output.prefix
    output.prefix = 'micro:'

    required, other = get_standard_keywords()
    required.remove('equations')

    conf = ProblemConf.from_file(micro_filename, required, other,
                                 verbose=False, define_args=define_args)
    if coefs_filename is None:
        coefs_filename = conf.options.get('coefs_filename', 'coefs')
        coefs_filename = op.join(conf.options.get('output_dir', '.'),
                                 coefs_filename) + '.h5'

    if not regenerate:
        if op.exists(coefs_filename):
            if not pt.is_hdf5_file(coefs_filename):
                regenerate = True
        else:
            regenerate = True

    if regenerate:
        options = Struct(output_filename_trunk=None)

        app = HomogenizationApp(conf, options, 'micro:')
        coefs = app()
        if type(coefs) is tuple:
            coefs = coefs[0]

        coefs.to_file_hdf5(coefs_filename)
    else:
        coefs = Coefficients.from_file_hdf5(coefs_filename)

    out = {}
    if mode is None:
        for key, val in six.iteritems(coefs.__dict__):
            out[key] = val

    elif mode == 'qp':
        for key, val in six.iteritems(coefs.__dict__):
            if type(val) == nm.ndarray or type(val) == nm.float64:
                out[key] = nm.tile(val, (coor.shape[0], 1, 1))
            elif type(val) == dict:
                for key2, val2 in six.iteritems(val):
                    if type(val2) == nm.ndarray or type(val2) == nm.float64:
                        out[key+'_'+key2] = \
                                          nm.tile(val2, (coor.shape[0], 1, 1))

    else:
        out = None

    output.prefix = oprefix

    return out
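
The cache-validation idiom above (regenerate when the coefficients file is missing or not readable HDF5) can be distilled into a standalone sketch; the helper name is hypothetical, pt being PyTables as in the example:

import os.path as op
import tables as pt

def coefs_need_regeneration(path):
    # Regenerate when the cached file is missing or is not an HDF5 file.
    return not (op.exists(path) and pt.is_hdf5_file(path))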
Example #6
def sysmat_processing(files,
                      npix,
                      *args,
                      interp=True,
                      smooth=True,
                      fname='processed',
                      **kwargs):
    """*args = sze of gaussian filter and **kwargs is fhwm of filter. Files is list of sysmat_files and npix is list
    of dimensions of the spaces"""

    if isinstance(files, str):
        files = [files]

    if len(files) > np.array(npix).size // 2:
        print("Checked")
        npix = [npix] * len(files)

    if len(files) == 1:
        npix = [npix]

    print("npix length: ", len(npix))
    print("npix: ", npix)
    print("first entry: ", npix[0])
    store_list = []
    append_str = ''

    for fid, file in enumerate(files):
        npix_x, npix_y = npix[fid]
        if interp:
            sysmat, sysmat_file = system_matrix_interpolate(file, x_dim=npix_x)
            # print("Sysmat shape now:", sysmat.shape)
            npix_x = npix_x + (npix_x - 1)
            npix_y = npix_y + (npix_y - 1)
            sysmat_file.close()
            print("Interpolation successful!")
        else:
            if tables.is_hdf5_file(file):
                sysmat_file = load_h5file(file)
                sysmat = sysmat_file.root.sysmat[:]
                sysmat_file.close()  # TODO: Will this cause problems? Copy otherwise
            else:
                sysmat = np.load(file)  # .npy
        print("Interpolation successful!")
        print("sysmat.shape: ", sysmat.shape)
        if smooth:
            sysmat, append_str = smooth_point_response(sysmat, npix_x, *args,
                                                       **kwargs)

        store_list.append(sysmat)

    fname += append_str
    # if len(store_list) == 1:
    #   processed_array = store_list[0]
    # else:
    processed_array = np.vstack(store_list)
    print("Final shape: ", processed_array.shape)
    np.save(fname, processed_array)
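
A hypothetical invocation of the function above (file names, pixel counts, filter size and fwhm are illustrative only):

sysmat_processing(['sysmat_a.h5', 'sysmat_b.h5'],
                  [(101, 101), (101, 101)],
                  1,                # *args: size of the gaussian filter
                  interp=False,
                  smooth=True,
                  fname='processed_ab',
                  fwhm=2.0)         # **kwargs: fwhm of the filter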
Example #7
def append_responses(files, save_name='appended'):  # sysmat files
    """Append responses that are adjacent in the second dimension"""
    tmp_list = list(range(len(files)))
    for fid, file in enumerate(files):
        if tables.is_hdf5_file(file):
            sysmat_file = load_h5file(file)
            tmp_list[fid] = sysmat_file.root.sysmat[:]
            sysmat_file.close()
        else:
            tmp_list[fid] = np.load(file)

        print("File {f} shape: {s}".format(f=fid, s=tmp_list[fid].shape))
    np.save(save_name, np.vstack(tmp_list))
    print("Final shape: ", np.vstack(tmp_list).shape)
Example #8
    def checkOpening(self, filepath):
        """
        Check whether a database file can be opened.

        :Parameter filepath: the full path of the file
        """

        try:
            # Check if file doesn't exist
            if os.path.isdir(filepath):
                error = translate('DBsTreeModel',
                                  'Opening cancelled: {0} is a folder.',
                                  'A logger error message').format(filepath)
                raise ValueError

            elif not os.path.isfile(filepath):
                error = translate('DBsTreeModel',
                                  'Opening failed: file {0} cannot be found.',
                                  'A logger error message').format(filepath)
                raise ValueError

            # Check if file is already open.
            elif self.getDBDoc(filepath) is not None:
                error = translate('DBsTreeModel',
                                  'Opening cancelled: file {0} already open.',
                                  'A logger error message').format(filepath)

                raise ValueError

        except ValueError:
            print(error)
            return False

        # Check the file format
        try:
            if not tables.is_hdf5_file(filepath):
                error = translate(
                    'DBsTreeModel',
                    'Opening cancelled: file {0} is not in HDF5 format.',
                    'A logger error message').format(filepath)
                print(error)
                return False
        except (tables.NodeError, OSError):
            error = translate(
                'DBsTreeModel',
                'Opening failed: cannot determine whether file {0} is in '
                'HDF5 format.', 'A logger error message').format(filepath)
            print(error)
            return False
        else:
            return True
Example #9
def fileinfo(args):
    """
    Display information about ctapipe output files (DL1 or DL2 in HDF5 format).
    Optionally create an index table from all headers
    """

    files = []  # accumulated info for table output

    for filename in args.files:
        info = {}

        # prevent failure if a non-file is given (e.g. a directory)
        if not Path(filename).is_file():
            info[filename] = "not a file"

        elif not tables.is_hdf5_file(filename):
            info[filename] = "unknown file type"
        else:
            try:
                with tables.open_file(filename, mode="r") as infile:
                    # pylint: disable=W0212,E1101
                    attrs = {
                        name: str(infile.root._v_attrs[name])
                        for name in infile.root._v_attrs._f_list()
                    }
                    if args.flat:
                        info[filename] = attrs.copy()
                    else:
                        info[filename] = unflatten(attrs)

                    if args.output_table:
                        attrs["PATH"] = filename
                        files.append(attrs)

            except tables.exceptions.HDF5ExtError as err:
                info[filename] = f"ERROR {err}"

        print(yaml.dump(info, indent=4))

    if args.output_table:
        if args.output_table.endswith(".fits") or args.output_table.endswith(
                ".fits.gz"):
            files = [{k: v.encode("utf-8")
                      for k, v in info.items()} for info in files]

        table = Table(files)
        table.write(args.output_table,
                    format=args.table_format,
                    overwrite=True)
Example #10
    def checkOpening(self, filepath):
        """
        Check whether a database file can be opened.

        :Parameter filepath: the full path of the file
        """
        try:
            # Check if file doesn't exist
            if os.path.isdir(filepath):
                error = translate('DBsTreeModel',
                                  'Opening cancelled: {0} is a folder.',
                                  'A logger error message').format(filepath)
                raise ValueError

            elif not os.path.isfile(filepath):
                error = translate('DBsTreeModel',
                                  'Opening failed: file {0} cannot be found.',
                                  'A logger error message').format(filepath)
                raise ValueError

            # Check if file is already open.
            elif self.getDBDoc(filepath) is not None:
                error = translate('DBsTreeModel',
                                  'Opening cancelled: file {0} already open.',
                                  'A logger error message').format(filepath)

                raise ValueError

        except ValueError:
            log.error(error)
            return False

        # Check the file format
        try:
            if not tables.is_hdf5_file(filepath):
                error = translate('DBsTreeModel',
                                  'Opening cancelled: file {0} is not in HDF5 format.',
                                  'A logger error message').format(filepath)
                log.error(error)
                return False
        except (tables.NodeError, OSError):
            error = translate('DBsTreeModel',
                              'Opening failed: cannot determine whether file '
                              '{0} is in HDF5 format.',
                              'A logger error message').format(filepath)
            log.error(error)
            return False
        else:
            return True
Example #11
def check_for_integral_db(beam_type_id, purge_cache=False):
    if purge_cache:
        purge_integral_db_cache()

    db_filename = get_integral_db_filename(beam_type_id)
    try:
        if tb.is_hdf5_file(db_filename):
            logger.info("Valid integral db file '%s' found in cache",
                        db_filename)
            return
    except (IOError, tb.HDF5ExtError):
        pass

    logger.warning("'%s' is not a valid integral db file!", db_filename)
    download_integral_db(beam_type_id)
Example #12
def read_spline_box_hdf5(filename):
    if not pt.is_hdf5_file(filename):
        raise ValueError('not an HDF5 file! (%s)' % filename)

    fd = pt.open_file(filename, mode='r')
    boxes = fd.list_nodes('/box')
    n_box = len(boxes)
    dim = len(fd.list_nodes(boxes[0].ax))

    sp_boxes = SplineBoxes(dim=dim,
                           n_box=n_box,
                           n_vertex=0,
                           spbs=OneTypeList(SplineBox))
    for box in boxes:
        spb = SplineBox()
        sp_boxes.spbs.append(spb)

        spb.ib = int(box._v_name)
        spb.cpi = nm.asarray(box.cpi.read()) - 1
        spb.gpi = nm.asarray(box.gpi.read()) - 1
        spb.cxyz = nm.asarray(box.cxyz.read()).transpose()
        spb.cxyz0 = spb.cxyz.copy()
        spb.ax = []
        for axi in fd.list_nodes(box.ax):
            spb.ax.append(nm.asarray(axi.bsc.read()))

        sp_boxes.n_vertex = max(sp_boxes.n_vertex, nm.amax(spb.gpi) + 1)
        print(nm.amin(spb.gpi), nm.amax(spb.gpi))

        ##
        # Fix cpi by rebuilding :).
        off = 0
        n0, n1, n2 = spb.cpi.shape
        aux = nm.arange(n0 * n1).reshape(n1, n0).transpose()
        for ii in range(n2):
            spb.cpi[:, :, ii] = aux + off
            off += n0 * n1

    fd.close()

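    # Sanity check: the global point index sets (gpi) of distinct boxes must
    # be disjoint. Note that cycle here is presumably sfepy's linalg helper
    # that yields all (i, j) index pairs within the given bounds, not
    # itertools.cycle.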
    for perm in cycle([n_box] * 2):
        if perm[0] == perm[1]: continue
        gpi1 = sp_boxes.spbs[perm[0]].gpi
        gpi2 = sp_boxes.spbs[perm[1]].gpi
        assert_(len(nm.intersect1d(gpi1, gpi2)) == 0)

    return sp_boxes
Example #13
def fileinfo(args):
    """
    Display information about ctapipe output files (DL1 or DL2 in HDF5 format).
    Optionally create an index table from all headers
    """

    info_total = {}  # accumulated info for table output

    for filename in args.files:
        info = {}

        # prevent failure if a non-file is given (e.g. a directory)
        if Path(filename).is_file() is False:
            info[filename] = "not a file"

        elif tables.is_hdf5_file(filename) is not True:
            info[filename] = "unknown file type"
        else:
            try:
                with tables.open_file(filename, mode="r") as infile:
                    # pylint: disable=W0212,E1101
                    attrs = {
                        name: str(infile.root._v_attrs[name])
                        for name in infile.root._v_attrs._f_list()
                    }
                    if args.flat:
                        info[filename] = attrs
                    else:
                        info[filename] = unflatten(attrs)

                    if args.output_table:
                        info_total[filename] = attrs
            except tables.exceptions.HDF5ExtError as err:
                info[filename] = f"ERROR {err}"

        print(yaml.dump(info, indent=4))

    if args.output_table:
        # use pandas' ability to convert a dict of flat values to a table
        import pandas as pd  #  pylint: disable=C0415

        dataframe = pd.DataFrame(info_total)
        Table.from_pandas(dataframe.T,
                          index=True).write(args.output_table,
                                            format=args.table_format,
                                            overwrite=True)
Example #14
def read_spline_box_hdf5( filename ):
    if not pt.is_hdf5_file( filename ):
        raise ValueError('not an HDF5 file! (%s)' % filename)

    fd = pt.open_file( filename, mode = 'r' )
    boxes = fd.list_nodes( '/box' )
    n_box = len( boxes )
    dim = len( fd.list_nodes( boxes[0].ax ) )

    sp_boxes = SplineBoxes( dim = dim, n_box = n_box, n_vertex = 0,
                           spbs = OneTypeList( SplineBox ) )
    for box in boxes:
        spb = SplineBox()
        sp_boxes.spbs.append( spb )

        spb.ib = int( box._v_name )
        spb.cpi = nm.asarray( box.cpi.read() ) - 1
        spb.gpi = nm.asarray( box.gpi.read() ) - 1
        spb.cxyz = nm.asarray( box.cxyz.read() ).transpose()
        spb.cxyz0 = spb.cxyz.copy()
        spb.ax = []
        for axi in fd.list_nodes( box.ax ):
            spb.ax.append( nm.asarray( axi.bsc.read() ) )

        sp_boxes.n_vertex = max( sp_boxes.n_vertex, nm.amax( spb.gpi ) + 1 )
        print(nm.amin( spb.gpi ), nm.amax( spb.gpi ))

        ##
        # Fix cpi by rebuilding :).
        off = 0
        n0, n1, n2 = spb.cpi.shape
        aux = nm.arange( n0 * n1 ).reshape( n1, n0 ).transpose()
        for ii in range( n2 ):
            spb.cpi[:,:,ii] = aux + off
            off += n0 * n1

    fd.close()

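    # Sanity check: the global point index sets (gpi) of distinct boxes must
    # be disjoint; cycle here is presumably sfepy's own index-pair iterator,
    # not itertools.cycle.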
    for perm in cycle( [n_box] * 2 ):
        if perm[0] == perm[1]: continue
        gpi1 = sp_boxes.spbs[perm[0]].gpi
        gpi2 = sp_boxes.spbs[perm[1]].gpi
        assert_( len( nm.intersect1d( gpi1, gpi2 ) ) == 0 )

    return sp_boxes
Example #15
def append_FoVs(files,
                save_name='appended',
                first_dim_pxls=(101, 101),
                after=True):  # TODO: Rewrite this
    """Append responses that are adjacent in the first dimension. After means each file is appended after previous.
    first_dim_pxls is the number of pixels in appended direction for each file"""

    tmp_list = list(range(len(files)))
    second_dim_pxls = list(range(
        len(files)))  # should all be the same, double check
    tot_pxls = 0
    det_pxls = 0
    meas_pxls = 0

    for fid, file in enumerate(files):
        if tables.is_hdf5_file(file):
            sysmat_file = load_h5file(file)
            arr = sysmat_file.root.sysmat[:]
            sysmat_file.close()
        else:
            arr = np.load(file)
        meas_pxls, det_pxls = arr.shape
        tot_pxls += meas_pxls
        second_dim_pxls[fid] = meas_pxls // first_dim_pxls[fid]
        print("det_pxl: ", det_pxls)
        print("second_dim_pxls: ", second_dim_pxls[fid])
        print("first dim pxls: ", first_dim_pxls[fid])
        tmp_list[fid] = arr.T.reshape(
            [det_pxls, second_dim_pxls[fid], first_dim_pxls[fid]])

        print("File {f} shape: {s}".format(f=fid, s=tmp_list[fid].shape))

    assert np.all(np.array(second_dim_pxls) == second_dim_pxls[0]
                  ), "Files don't have same shape in second dimension"
    if after:
        tot_arr = np.concatenate(tmp_list, axis=2)
    else:
        tot_arr = np.concatenate(tmp_list[::-1], axis=2)

    reshaped_arr = tot_arr.transpose((1, 2, 0)).reshape([tot_pxls, det_pxls])
    np.save(save_name, reshaped_arr)
    print("Final shape: ",
          reshaped_arr.shape)  # TODO: Test this with table measurements
Example #16
    def read(self, fname):
        '''Read data from file'''
        self._filename = fname
        if os.path.exists(self._filename):
            self._ishdf = tables.is_hdf5_file(self._filename)
        else:
            raise DatastoreError("File {0} not found".format(self._filename))

        if self._ishdf:
            if self._debug:
                print('Reading from HDF-5', file=sys.stderr)
            self._filehandle = tables.open_file(self._filename, mode='r')
            self.read_hdf()
        else:
            if self._debug:
                print('Reading from ASCII', file=sys.stderr)
            self._filehandle = open(self._filename, 'r')
            self.read_ascii()
        self._filehandle.close()
        self._filehandle = None
Example #17
    def readFile(self, files):
        """
        Read the given file.

        If all the passed files are empty (i.e., no data) then the `len`
        of the class is 0.

        Parameters
        ----------
        files : str or list of str
            The file(s) to be read in.

        """

        files = np.atleast_1d(files)

        sources = list()

        for _file in files:
            if tables.is_hdf5_file(_file):
                this_data = self._readHDF(_file)
                # We need to modify flash IDs when reading multiple files
                # to ensure they are unique
                if len(sources) != 0:
                    _ctr = sources[-1].flash_id.max()
                    this_data.flash_id += _ctr+1
            else:
                # Assume it's ASCII
                this_data = self._readASCII(_file)

            if len(this_data) != 0:
                # This should catch empty files
                sources.append(this_data)

        try:
            self._add_record(pd.concat(sources, ignore_index=True))
        except ValueError:
            # This can happen when we have nothing to concat (all files empty)
            print('No data in these files')
        else:
            self._data.alt /= 1e3  # convert to km
Example #18
    def connect(self, filename):
        """
        Opens / initialises a new HDF5 file.
        We rely on PyTables and keep all session management stuff there.
        """
        if not self.connected:
            try:
                if tb.is_hdf5_file(filename):
                    self._data = tb.open_file(filename, mode="a", title=filename)
                    self.connected = True
                else:
                    raise TypeError('"%s" is not an HDF5 file format.' % filename)
            except IOError:
                # create a new file if the specified file is not found
                self._data = tb.open_file(filename, mode="w", title=filename)
                self.connected = True
            except Exception:
                raise NameError("Incorrect file path, couldn't find or create a file.")
            self.objects_by_ref = {}
            self.name_indices = {}
        else:
            logger.info("Already connected.")
Example #19
    def __init__(self, h5parmFile, readonly=True, complevel=5, complib='zlib'):
        """
        Keyword arguments:
        h5parmFile -- H5parm filename
        readonly -- if True the table is open in readonly mode (default=True)
        complevel -- compression level from 0 to 9 (default=5) when creating the file
        complib -- library for compression: lzo, zlib, bzip2 (default=zlib)
        """
        if os.path.isfile(h5parmFile):
            if not tables.is_hdf5_file(h5parmFile):
                logging.critical('Not an HDF5 file: '+h5parmFile+'.')
                raise Exception('Not an HDF5 file: '+h5parmFile+'.')
            if readonly:
                logging.debug('Reading from '+h5parmFile+'.')
                self.H = tables.open_file(h5parmFile, 'r', IO_BUFFER_SIZE=1024*1024*10, BUFFER_TIMES=500)
            else:
                logging.warning('Appending to '+h5parmFile+'.')
                self.H = tables.open_file(h5parmFile, 'r+', IO_BUFFER_SIZE=1024*1024*10, BUFFER_TIMES=500)
            # Check if it's a valid H5parm file: attribute h5parm_version should be defined in any node
            is_h5parm = False
            for node in self.H.walk_nodes("/"):
                if 'h5parm_version' in node._v_attrs:
                    is_h5parm=True
                    break
            if not is_h5parm:
                self.close()
                logging.critical('Not an H5parm file: '+h5parmFile+'.')
                raise Exception('Not an H5parm file: '+h5parmFile+'.')
        else:
            if readonly:
                raise Exception('Missing file '+h5parmFile+'.')
            else:
                logging.debug('Creating '+h5parmFile+'.')
                # add a compression filter
                f = tables.Filters(complevel=complevel, complib=complib)
                self.H = tables.open_file(h5parmFile, filters=f, mode='w', IO_BUFFER_SIZE=1024*1024*10, BUFFER_TIMES=500)

        self.fileName = h5parmFile
Example #20
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('filename', nargs='+',
                        help='name of flydra .hdf5 file',
                        )

    parser.add_argument("--stim-xml",
                        type=str,
                        default=None,
                        help="name of XML file with stimulus info",
                        required=True,
                        )

    parser.add_argument("--align-json",
                        type=str,
                        default=None,
                        help="previously exported json file containing s,R,T",
                        )

    parser.add_argument("--radius", type=float,
                      help="radius of line (in meters)",
                      default=0.002,
                      metavar="RADIUS")

    parser.add_argument("--obj-only", type=str)

    parser.add_argument("--obj-filelist", type=str,
                      help="use object ids from list in text file",
                      )

    parser.add_argument(
        "-r", "--reconstructor", dest="reconstructor_path",
        type=str,
        help=("calibration/reconstructor path (if not specified, "
              "defaults to FILE)"))

    args = parser.parse_args()
    options = args # optparse OptionParser backwards compatibility

    reconstructor_path = args.reconstructor_path
    fps = None

    ca = core_analysis.get_global_CachingAnalyzer()
    by_file = {}

    for h5_filename in args.filename:
        assert(tables.is_hdf5_file(h5_filename))
        obj_ids, use_obj_ids, is_mat_file, data_file, extra = ca.initial_file_load(
            h5_filename)
        this_fps = result_utils.get_fps( data_file, fail_on_error=False )
        if fps is None:
            if this_fps is not None:
                fps = this_fps
        if reconstructor_path is None:
            reconstructor_path = data_file
        by_file[h5_filename] = (use_obj_ids, data_file)
    del h5_filename
    del obj_ids, use_obj_ids, is_mat_file, data_file, extra

    if options.obj_only is not None:
        obj_only = core_analysis.parse_seq(options.obj_only)
    else:
        obj_only = None

    if reconstructor_path is None:
        raise RuntimeError('must specify reconstructor from CLI if not using .h5 files')

    R = reconstruct.Reconstructor(reconstructor_path)

    if fps is None:
        fps = 100.0
        warnings.warn('Setting fps to default value of %f'%fps)
    else:
        fps = 1.0

    if options.stim_xml is None:
        raise ValueError(
            'stim_xml must be specified (how else will you align the data?)')

    if 1:
        stim_xml = xml_stimulus.xml_stimulus_from_filename(
            options.stim_xml,
            )
        try:
            fanout = xml_stimulus.xml_fanout_from_filename( options.stim_xml )
        except xml_stimulus.WrongXMLTypeError:
            pass
        else:
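            # NOTE: file_timestamp is not defined anywhere in this snippet;
            # it presumably comes from elsewhere in the original module.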
            include_obj_ids, exclude_obj_ids = fanout.get_obj_ids_for_timestamp(
                timestamp_string=file_timestamp )
            if include_obj_ids is not None:
                use_obj_ids = include_obj_ids
            if exclude_obj_ids is not None:
                use_obj_ids = list( set(use_obj_ids).difference(
                    exclude_obj_ids ) )
            print('using object ids specified in fanout .xml file')
        if stim_xml.has_reconstructor():
            stim_xml.verify_reconstructor(R)

    x = []
    y = []
    z = []
    speed = []

    if options.obj_filelist is not None:
        obj_filelist=options.obj_filelist
    else:
        obj_filelist=None

    if obj_filelist is not None:
        obj_only = 1

    if obj_only is not None:
        if len(by_file) != 1:
            raise RuntimeError("specifying obj_only can only be done for a single file")
        if obj_filelist is not None:
            data = np.loadtxt(obj_filelist,delimiter=',')
            obj_only = np.array(data[:,0], dtype='int')
            print(obj_only)

        use_obj_ids = numpy.array(obj_only)
        h5_filename = list(by_file.keys())[0]
        (prev_use_ob_ids, data_file) = by_file[h5_filename]
        by_file[h5_filename] = (use_obj_ids, data_file)

    for h5_filename in by_file:
        (use_obj_ids, data_file) = by_file[h5_filename]
        for obj_id_enum,obj_id in enumerate(use_obj_ids):
            rows = ca.load_data( obj_id, data_file,
                                 use_kalman_smoothing=False,
                                 #dynamic_model_name = dynamic_model_name,
                                 #frames_per_second=fps,
                                 #up_dir=up_dir,
                                )
            verts = numpy.array( [rows['x'], rows['y'], rows['z']] ).T
            if len(verts)>=3:
                verts_central_diff = verts[2:,:] - verts[:-2,:]
                dt = 1.0/fps
                vels = verts_central_diff/(2*dt)
                speeds = numpy.sqrt(numpy.sum(vels**2,axis=1))
                # pad end points
                speeds = numpy.array([speeds[0]] + list(speeds) + [speeds[-1]])
            else:
                speeds = numpy.zeros( (verts.shape[0],) )

            if verts.shape[0] != len(speeds):
                raise ValueError('mismatch length of x data and speeds')
            x.append( verts[:,0] )
            y.append( verts[:,1] )
            z.append( verts[:,2] )
            speed.append(speeds)
        data_file.close()
    del h5_filename, use_obj_ids, data_file

    if 0:
        # debug
        if stim_xml is not None:
            v = None
            for child in stim_xml.root:
                if child.tag == 'cubic_arena':
                    info = stim_xml._get_info_for_cubic_arena(child)
                    v=info['verts4x4']
            if v is not None:
                for vi in v:
                    print('adding',vi)
                    x.append( [vi[0]] )
                    y.append( [vi[1]] )
                    z.append( [vi[2]] )
                    speed.append( [100.0] )

    x = np.concatenate(x)
    y = np.concatenate(y)
    z = np.concatenate(z)
    w = np.ones_like(x)
    speed = np.concatenate(speed)

    # homogeneous coords
    verts = np.array([x,y,z,w])

    #######################################################

    # Create the MayaVi engine and start it.
    e = Engine()
    # start does nothing much but useful if someone is listening to
    # your engine.
    e.start()

    # Create a new scene.
    from tvtk.tools import ivtk
    #viewer = ivtk.IVTK(size=(600,600))
    viewer = IVTKWithCalGUI(size=(800,600))
    viewer.open()
    e.new_scene(viewer)

    viewer.cal_align.set_data(verts,speed,R,args.align_json)

    if 0:
        # Do this if you need to see the MayaVi tree view UI.
        ev = EngineView(engine=e)
        ui = ev.edit_traits()

    # view aligned data
    e.add_source(viewer.cal_align.source)

    v = Vectors()
    v.glyph.scale_mode = 'data_scaling_off'
    v.glyph.color_mode = 'color_by_scalar'
    v.glyph.glyph_source.glyph_position='center'
    v.glyph.glyph_source.glyph_source = tvtk.SphereSource(
        radius=options.radius,
        )
    e.add_module(v)

    if stim_xml is not None:
        if 0:
            stim_xml.draw_in_mayavi_scene(e)
        else:
            actors = stim_xml.get_tvtk_actors()
            viewer.scene.add_actors(actors)

    gui = GUI()
    gui.start_event_loop()
Example #21
def load_h5file(filepath):
    if tables.is_hdf5_file(filepath):
        h5file = tables.open_file(filepath, 'r')
        return h5file
    else:
        raise ValueError('{fi} is not an HDF5 file!'.format(fi=filepath))
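
Minimal usage of the helper above (the node path follows Examples #6 and #7; the file name is hypothetical):

h5file = load_h5file('sysmat_calibration.h5')
try:
    sysmat = h5file.root.sysmat[:]   # copy the dataset into memory
finally:
    h5file.close()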
Example #22
def hdf5(path):
    '''Return True if the file at path is HDF5'''

    return tb.is_hdf5_file(path)
Example #23
def is_experiment_computed(results_file):
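    # is_hdf5_file can raise IOError or tb.HDF5ExtError (e.g. for a missing
    # or unreadable file) instead of returning False, hence the try/except.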
    try:
        return tb.is_hdf5_file(results_file)
    except (IOError, tb.HDF5ExtError):
        return False
Example #24
    def __init__(self, args1):
        # sanity check that the file is valid HDF5
        if not tables.is_hdf5_file(args1):
            print("File is not HDF5 format - abort.")
            return

        self.raw_file = tables.open_file(args1, mode='r')
        self.node_bmode_data = self.raw_file.get_node(
            "/MovieGroup1/AcqTissue/RawData")
        self.node_bmode_attributes = self.raw_file.get_node(
            "/MovieGroup1/AcqTissue/AcquisitionTissueCF")
        self.node_viewer_attributes = self.raw_file.get_node(
            "/MovieGroup1/ViewerTissue")

        self.pd_scan_end_cfm = None
        self.pd_scan_end_raw = None
        self.pd_scan_end_volbox = None
        self.pd_scan_end_viewer = None

        self.pd_scan_start_cfm = None
        self.pd_scan_start_raw = None
        self.pd_scan_start_volbox = None
        self.pd_scan_start_viewer = None

        # flags default to False; flipped to True below when the matching
        # nodes exist (has_waveform is read unconditionally at the end)
        self.has_doppler = False
        self.has_waveform = False

        # cannot load data that isn't there!
        # first check if the Doppler data is there
        if self.raw_file.__contains__('/MovieGroup1/ViewerTissueCF'):
            self.node_pd_viewer_attributes = self.raw_file.get_node(
                "/MovieGroup1/ViewerTissueCF")

        if self.raw_file.__contains__("/MovieGroup1/AcqColorFlow/RawData"):
            self.has_doppler = True
            self.node_powerdoppler_data = self.raw_file.get_node(
                "/MovieGroup1/AcqColorFlow/RawData")

        if self.raw_file.__contains__('/MovieGroup1/GraphicViewerTissueCF'):
            self.node_graphicviewercf = self.raw_file.get_node(
                '/MovieGroup1/GraphicViewerTissueCF')

        if self.raw_file.__contains__(
                "/MovieGroup1/AcqColorFlow/AcquisitionTissueCF"):
            self.has_doppler = True
            self.node_powerdoppler_attributes = self.raw_file.get_node(
                "/MovieGroup1/AcqColorFlow/AcquisitionTissueCF")

        if self.raw_file.__contains__("/MovieGroup2/AcqPWCW/RawData/"):
            self.has_waveform = True
            self.node_waveform_data = self.raw_file.get_node(
                "/MovieGroup2/AcqPWCW/RawData/")

        if self.raw_file.__contains__("/MovieGroup2/AcqPWCW/RawData/"):
            self.has_waveform = True
            self.node_waveform_attributes = self.raw_file.get_node(
                "/MovieGroup2/AcqPWCW/AcquisitionPWCW/")

        if self.containsDoppler():
            pass
            #self.getBModeBounds()
            #self.getPowerDopplerBounds()
            #self.createBModeCartesian()
            # self.printBModeAttributes()
            # self.printViewerAttributes()
            # self.printPDAttributes()
            #self.createPDCartesian()

        if self.has_waveform:
            self.getWaveformBounds()
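
Taken together, the examples share one defensive pattern, sketched here as a standalone helper (the function name is illustrative):

import tables

def open_if_hdf5(path):
    """Open path read-only if it is a readable HDF5 file, else return None."""
    try:
        if not tables.is_hdf5_file(path):
            return None                      # exists but is not HDF5
    except (IOError, tables.HDF5ExtError):
        return None                          # missing or unreadable
    return tables.open_file(path, mode='r')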