Example #1
def file_reader(filename, endianess='<', **kwds):
    mapped_parameters = {}
    dtype_list = get_std_dtype_list(endianess) + get_fei_dtype_list(endianess)
    f = open(filename, 'rb')
    std_header = np.fromfile(f, dtype=get_std_dtype_list(endianess),
                             count=1)
    fei_header = None
    if std_header['NEXT'] / 1024 == 128:
        print "It seems to contain an extended FEI header"
        fei_header = np.fromfile(f, dtype=get_fei_dtype_list(endianess),
                                 count=1024)
    if f.tell() == 1024 + std_header['NEXT']:
        print "The FEI header was correctly loaded"
    else:
        print "There was a problem reading the extended header"
        f.seek(1024 + std_header['NEXT'])
        fei_header = None
    NX, NY, NZ = std_header['NX'], std_header['NY'], std_header['NZ']    
    data = np.memmap(f, mode='c', offset=f.tell(),
                     dtype=get_data_type(std_header['MODE'], endianess)
                     ).squeeze().reshape((NX, NY, NZ), order='F').T
                     
    original_parameters = {'std_header': sarray2dict(std_header)}
    if fei_header is not None:
        fei_dict = sarray2dict(fei_header)
        del fei_dict['empty']
        original_parameters['fei_header'] = fei_dict
        
    dim = len(data.shape)
    if fei_header is None:
        # The scale is in Angstroms; convert it to nm
        scales = [10 * float(std_header['Zlen'] / std_header['MZ'])
                  if float(std_header['MZ']) != 0 else 1,
                  10 * float(std_header['Ylen'] / std_header['MY'])
                  if float(std_header['MY']) != 0 else 1,
                  10 * float(std_header['Xlen'] / std_header['MX'])
                  if float(std_header['MX']) != 0 else 1]
        offsets = [10 * float(std_header['ZORIGIN']),
                   10 * float(std_header['YORIGIN']),
                   10 * float(std_header['XORIGIN'])]
        
    else:
        # FEI does not use the standard header to store the scale.
        # Instead, it stores the spatial scale in pixel_size (one value per
        # tilt angle, in meters), which we convert to nm here.
        scales = [1, ] + [fei_header['pixel_size'][0] * 10**9, ] * 2
        offsets = [0, ] * 3
    
    units = ['undefined', 'nm', 'nm']
    names = ['z', 'y', 'x']
    mapped_parameters = {'original_filename': os.path.split(filename)[1],
                         'record_by': 'image',
                         'signal_type': ""}
    # Create the axis objects for each axis
    axes = [{'size': data.shape[i],
             'index_in_array': i,
             'name': names[i + 3 - dim],
             'scale': scales[i + 3 - dim],
             'offset': offsets[i + 3 - dim],
             'units': units[i + 3 - dim]}
            for i in xrange(dim)]

    dictionary = {'data': data,
                  'axes': axes,
                  'mapped_parameters': mapped_parameters,
                  'original_parameters': original_parameters}

    return [dictionary, ]
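
A minimal usage sketch for the reader above ('my_stack.mrc' is a placeholder filename; numpy, os and the module's dtype helpers are assumed to be importable). The reader returns a list containing a single signal dictionary:

# Hedged sketch: 'my_stack.mrc' is a hypothetical file name.
file_dicts = file_reader('my_stack.mrc', endianess='<')
signal_dict = file_dicts[0]
data = signal_dict['data']                    # memmapped array, z/y/x axis order for a 3D stack
axis_names = [ax['name'] for ax in signal_dict['axes']]
axis_scales = [ax['scale'] for ax in signal_dict['axes']]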
Example #2
def ser_reader(filename, objects=None, *args, **kwds):
    """Reads the information from the file and returns it in the format
    required by HyperSpy."""
    # Determine if it is an emi or a ser file.
    
    header, data = load_ser_file(filename)
    record_by = guess_record_by(header['DataTypeID'])
    axes = []
    ndim = int(header['NumberDimensions'])
    if record_by == 'spectrum':
        array_shape = [None,] * int(ndim)
        i_array = range(ndim)
        if len(data['PositionY']) > 1 and \
        (data['PositionY'][0] == data['PositionY'][1]):
            # The spatial dimensions are stored in the reversed order
            # We reverse the shape
            i_array.reverse()
        # Extra dimensions
        for i in xrange(ndim):
            if i_array[i] == ndim - 1:
                name = 'x'
            elif i_array[i] == ndim - 2:
                name = 'y'
            else:
                name = 'undefined_%i' % (i + 1)
            axes.append({
                'name': name,
                'offset': header['Dim-%i_CalibrationOffset' % (i + 1)][0],
                'scale': header['Dim-%i_CalibrationDelta' % (i + 1)][0],
                'units': header['Dim-%i_Units' % (i + 1)][0],
                'size': header['Dim-%i_DimensionSize' % (i + 1)][0],
                'index_in_array': i_array[i]
            })
            array_shape[i_array[i]] = \
                header['Dim-%i_DimensionSize' % (i + 1)][0]
        # FEI seems to use the international system of units (SI) for the 
        # spatial scale. However, we prefer to work in nm
        for axis in axes:
            if axis['units'] == 'meters':
                axis['units'] = 'nm'
                axis['scale'] *= 10**9
        
        # Spectral dimension    
        axes.append({
            'name' : 'undefined',
            'offset' : data['CalibrationOffset'][0],
            'scale' : data['CalibrationDelta'][0],
            'units' : 'undefined',
            'size' : data['ArrayLength'][0],
            'index_in_array' : header['NumberDimensions'][0]
            })
        
        array_shape.append(data['ArrayLength'][0])
        
    elif record_by == 'image':
        array_shape = [None,] * int(ndim - 1)
        # Y axis
        axes.append({
            'name' : 'y',
            'offset' : data['CalibrationOffsetY'][0] - \
            data['CalibrationElementY'][0] * data['CalibrationDeltaY'][0],
            'scale' : data['CalibrationDeltaY'][0],
            'units' : 'Unknown',
            'size' : data['ArraySizeY'][0],
            'index_in_array' : ndim - 1
            })
        array_shape.append(data['ArraySizeY'][0])
        
        # X axis
        axes.append({
            'name' : 'x',
            'offset' : data['CalibrationOffsetX'][0] - \
            data['CalibrationElementX'][0] * data['CalibrationDeltaX'][0],
            'scale' : data['CalibrationDeltaX'][0],
            'units' : 'undefined',
            'size' : data['ArraySizeX'][0],
            'index_in_array' : ndim
            })
        array_shape.append(data['ArraySizeX'][0])
        
        # Extra dimensions
        for i in xrange(ndim - 1):
            axes.append({
                'name': 'undefined%s' % i,
                'offset': header['Dim-%i_CalibrationOffset' % (i + 1)][0],
                'scale': header['Dim-%i_CalibrationDelta' % (i + 1)][0],
                'units': header['Dim-%i_Units' % (i + 1)][0],
                'size': header['Dim-%i_DimensionSize' % (i + 1)][0],
                'index_in_array': ndim - 1 - i
            })
            array_shape.append(header['Dim-%i_DimensionSize' % (i + 1)][0])

    # If the acquisition stops before finishing the job, the stored file will
    # report the requested size even though not all values were recorded.
    # Therefore, if the shape of the retrieved array does not match that of
    # the data dimensions, we must fill the rest with zeros or (better) NaNs
    # if the dtype is float.
    if np.cumprod(array_shape)[-1] != np.cumprod(data['Array'].shape)[-1]:
        dc = np.zeros(np.cumprod(array_shape)[-1], 
                      dtype = data['Array'].dtype)
        if dc.dtype is np.dtype('f') or dc.dtype is np.dtype('f8'):
            dc[:] = np.nan
        dc[:data['Array'].ravel().shape[0]] = data['Array'].ravel()
    else:
        dc = data['Array']
    
    dc = dc.reshape(array_shape)
    if record_by == 'image':
        dc = dc[::-1]
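    # NOTE: 'ordict' is assumed to be a module-level flag set at import time,
    # True when collections.OrderedDict could be imported.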
    if ordict:
        original_parameters = OrderedDict()
    else:
        print("\nWARNING:")
        print("FEI plugin")
        print("OrderedDict is not available, using a standard dictionary.\n")
        original_parameters = {}
    header_parameters = sarray2dict(header)
    sarray2dict(data, header_parameters)
    if objects is not None:
        for i, obj in enumerate(objects):
            original_parameters['emi_xml%i' % i] = xmlreader.readConfig(obj)
            
    
    # We remove the 'Array' key to save memory by avoiding duplication
    del header_parameters['Array']
    original_parameters['ser_header_parameters'] = header_parameters
    dictionary = {
        'data': dc,
        'mapped_parameters': {'original_filename': os.path.split(filename)[1],
                              'record_by': record_by,
                              'signal_type': ""},
        'axes': axes,
        'original_parameters': original_parameters}
    return dictionary
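
The zero/NaN padding above handles acquisitions that were stopped before completion: the header still reports the requested shape, so the recorded values are copied into a full-size buffer and the missing tail is left as NaN for float data. A self-contained sketch of that technique, with invented sizes and only numpy assumed:

import numpy as np

# Hypothetical truncated acquisition: a 10 x 10 spectrum image with 2048
# channels was requested, but only 73 spectra were written to disk.
array_shape = [10, 10, 2048]
recorded = np.random.rand(73, 2048).astype('float32')

dc = np.zeros(np.cumprod(array_shape)[-1], dtype=recorded.dtype)
if dc.dtype.kind == 'f':                  # any float dtype gets NaN padding
    dc[:] = np.nan
dc[:recorded.ravel().shape[0]] = recorded.ravel()
dc = dc.reshape(array_shape)              # shape (10, 10, 2048); the missing tail is NaN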
Example #3
def ser_reader(filename, objects=None, *args, **kwds):
    """Reads the information from the file and returns it in the Hyperspy 
    required format"""
    # Determine if it is an emi or a ser file.

    header, data = load_ser_file(filename)
    record_by = guess_record_by(header["DataTypeID"])
    axes = []
    ndim = int(header["NumberDimensions"])
    if record_by == "spectrum":
        array_shape = [None] * int(ndim)
        i_array = range(ndim)
        if len(data["PositionY"]) > 1 and (data["PositionY"][0] == data["PositionY"][1]):
            # The spatial dimensions are stored in the reversed order
            # We reverse the shape
            i_array.reverse()
        # Extra dimensions
        for i in xrange(ndim):
            if i_array[i] == ndim - 1:
                name = "x"
            elif i_array[i] == ndim - 2:
                name = "y"
            else:
                name = "undefined_%i" % (i + 1)
            axes.append(
                {
                    "name": name,
                    "offset": header["Dim-%i_CalibrationOffset" % (i + 1)][0],
                    "scale": header["Dim-%i_CalibrationDelta" % (i + 1)][0],
                    "units": header["Dim-%i_Units" % (i + 1)][0],
                    "size": header["Dim-%i_DimensionSize" % (i + 1)][0],
                    "index_in_array": i_array[i],
                }
            )
            array_shape[i_array[i]] = header["Dim-%i_DimensionSize" % (i + 1)][0]
        # FEI seems to use the international system of units (SI) for the
        # spatial scale. However, we prefer to work in nm
        for axis in axes:
            if axis["units"] == "meters":
                axis["units"] = "nm"
                axis["scale"] *= 10 ** 9

        # Spectral dimension
        axes.append(
            {
                "name": "undefined",
                "offset": data["CalibrationOffset"][0],
                "scale": data["CalibrationDelta"][0],
                "units": "undefined",
                "size": data["ArrayLength"][0],
                "index_in_array": header["NumberDimensions"][0],
            }
        )

        array_shape.append(data["ArrayLength"][0])

    elif record_by == "image":
        array_shape = [None] * int(ndim - 1)
        # Y axis
        axes.append(
            {
                "name": "y",
                "offset": data["CalibrationOffsetY"][0] - data["CalibrationElementY"][0] * data["CalibrationDeltaY"][0],
                "scale": data["CalibrationDeltaY"][0],
                "units": "Unknown",
                "size": data["ArraySizeY"][0],
                "index_in_array": ndim - 1,
            }
        )
        array_shape.append(data["ArraySizeY"][0])

        # X axis
        axes.append(
            {
                "name": "x",
                "offset": data["CalibrationOffsetX"][0] - data["CalibrationElementX"][0] * data["CalibrationDeltaX"][0],
                "scale": data["CalibrationDeltaX"][0],
                "units": "undefined",
                "size": data["ArraySizeX"][0],
                "index_in_array": ndim,
            }
        )
        array_shape.append(data["ArraySizeX"][0])

        # Extra dimensions
        for i in xrange(ndim - 1):
            axes.append(
                {
                    "name": "undefined%s" % i,
                    "offset": header["Dim-%i_CalibrationOffset" % i + 1][0],
                    "scale": header["Dim-%i_CalibrationDelta" % i + 1][0],
                    "units": header["Dim-%i_Units" % i + 1][0],
                    "size": header["Dim-%i_DimensionSize" % i + 1][0],
                    "index_in_array": ndim - 1 - i,
                }
            )
            array_shape.append(header["Dim-%i_DimensionSize" % (i + 1)][0])

    # If the acquisition stops before finishing the job, the stored file will
    # report the requested size even though not all values were recorded.
    # Therefore, if the shape of the retrieved array does not match that of
    # the data dimensions, we must fill the rest with zeros or (better) NaNs
    # if the dtype is float.
    if np.cumprod(array_shape)[-1] != np.cumprod(data["Array"].shape)[-1]:
        dc = np.zeros(np.cumprod(array_shape)[-1], dtype=data["Array"].dtype)
        if dc.dtype is np.dtype("f") or dc.dtype is np.dtype("f8"):
            dc[:] = np.nan
        dc[: data["Array"].ravel().shape[0]] = data["Array"].ravel()
    else:
        dc = data["Array"]

    dc = dc.reshape(array_shape)
    if record_by == "image":
        dc = dc[::-1]
    if ordict:
        original_parameters = OrderedDict()
    else:
        print ("\nWARNING:")
        print ("FEI plugin")
        print ("OrderedDict is not available, using a standard dictionary.\n")
        original_parameters = {}
    header_parameters = sarray2dict(header)
    sarray2dict(data, header_parameters)
    if objects is not None:
        for i, obj in enumerate(objects):
            original_parameters["emi_xml%i" % i] = xmlreader.readConfig(obj)

    # We remove the 'Array' key to save memory by avoiding duplication
    del header_parameters["Array"]
    original_parameters["ser_header_parameters"] = header_parameters
    dictionary = {
        "data": dc,
        "mapped_parameters": {"original_filename": filename, "record_by": record_by, "signal": None},
        "axes": axes,
        "original_parameters": original_parameters,
    }
    return dictionary
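
The i_array reversal in the spectrum branch only decides which header dimension fills which slot of array_shape. A toy trace of the reversed case, with invented dimension sizes:

ndim = 2
i_array = list(range(ndim))      # [0, 1]
i_array.reverse()                # [1, 0]: Dim-1 now fills slot 1, Dim-2 fills slot 0
dim_sizes = [16, 32]             # hypothetical Dim-1/Dim-2 sizes from the header
array_shape = [None] * ndim
for i in range(ndim):
    array_shape[i_array[i]] = dim_sizes[i]
# array_shape == [32, 16]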
Example #4
def ser_reader(filename, objects=None, *args, **kwds):
    """Reads the information from the file and returns it in the Hyperspy 
    required format"""
    # Determine if it is an emi or a ser file.

    header, data = load_ser_file(filename)
    record_by = guess_record_by(header['DataTypeID'])
    axes = []
    ndim = int(header['NumberDimensions'])
    if record_by == 'spectrum':
        array_shape = [None, ] * int(ndim)
        i_array = range(ndim)
        if len(data['PositionY']) > 1 and \
        (data['PositionY'][0] == data['PositionY'][1]):
            # The spatial dimensions are stored in the reversed order
            # We reverse the shape
            i_array.reverse()
        # Extra dimensions
        for i in xrange(ndim):
            if i_array[i] == ndim - 1:
                name = 'x'
            elif i_array[i] == ndim - 2:
                name = 'y'
            else:
                name = 'undefined_%i' % (i + 1)
            axes.append({
                'name': name,
                'offset': header['Dim-%i_CalibrationOffset' % (i + 1)][0],
                'scale': header['Dim-%i_CalibrationDelta' % (i + 1)][0],
                'units': header['Dim-%i_Units' % (i + 1)][0],
                'size': header['Dim-%i_DimensionSize' % (i + 1)][0],
                'index_in_array': i_array[i]
            })
            array_shape[i_array[i]] = \
            header['Dim-%i_DimensionSize' % (i + 1)][0]
        # FEI seems to use the international system of units (SI) for the
        # spatial scale. However, we prefer to work in nm
        for axis in axes:
            if axis['units'] == 'meters':
                axis['units'] = 'nm'
                axis['scale'] *= 10**9

        # Spectral dimension
        axes.append({
            'name': 'undefined',
            'offset': data['CalibrationOffset'][0],
            'scale': data['CalibrationDelta'][0],
            'units': 'undefined',
            'size': data['ArrayLength'][0],
            'index_in_array': header['NumberDimensions'][0]
        })

        array_shape.append(data['ArrayLength'][0])

    elif record_by == 'image':
        array_shape = [None, ] * int(ndim - 1)
        # Y axis
        axes.append({
            'name' : 'y',
            'offset' : data['CalibrationOffsetY'][0] - \
            data['CalibrationElementY'][0] * data['CalibrationDeltaY'][0],
            'scale' : data['CalibrationDeltaY'][0],
            'units' : 'Unknown',
            'size' : data['ArraySizeY'][0],
            'index_in_array' : ndim - 1
            })
        array_shape.append(data['ArraySizeY'][0])

        # X axis
        axes.append({
            'name' : 'x',
            'offset' : data['CalibrationOffsetX'][0] - \
            data['CalibrationElementX'][0] * data['CalibrationDeltaX'][0],
            'scale' : data['CalibrationDeltaX'][0],
            'units' : 'undefined',
            'size' : data['ArraySizeX'][0],
            'index_in_array' : ndim
            })
        array_shape.append(data['ArraySizeX'][0])

        # Extra dimensions
        for i in xrange(ndim - 1):
            axes.append({
                'name': 'undefined%s' % i,
                'offset': header['Dim-%i_CalibrationOffset' % (i + 1)][0],
                'scale': header['Dim-%i_CalibrationDelta' % (i + 1)][0],
                'units': header['Dim-%i_Units' % (i + 1)][0],
                'size': header['Dim-%i_DimensionSize' % (i + 1)][0],
                'index_in_array': ndim - 1 - i
            })
            array_shape.append(header['Dim-%i_DimensionSize' % (i + 1)][0])

    # If the acquisition stops before finishing the job, the stored file will
    # report the requested size even though not all values were recorded.
    # Therefore, if the shape of the retrieved array does not match that of
    # the data dimensions, we must fill the rest with zeros or (better) NaNs
    # if the dtype is float.
    if np.cumprod(array_shape)[-1] != np.cumprod(data['Array'].shape)[-1]:
        dc = np.zeros(np.cumprod(array_shape)[-1], dtype=data['Array'].dtype)
        if dc.dtype is np.dtype('f') or dc.dtype is np.dtype('f8'):
            dc[:] = np.nan
        dc[:data['Array'].ravel().shape[0]] = data['Array'].ravel()
    else:
        dc = data['Array']

    dc = dc.reshape(array_shape)
    if record_by == 'image':
        dc = dc[::-1]
    if ordict:
        original_parameters = OrderedDict()
    else:
        print("\nWARNING:")
        print("FEI plugin")
        print("OrderedDict is not available, using a standard dictionary.\n")
        original_parameters = {}
    header_parameters = sarray2dict(header)
    sarray2dict(data, header_parameters)
    if objects is not None:
        for i, obj in enumerate(objects):
            original_parameters['emi_xml%i' % i] = xmlreader.readConfig(obj)

    # We remove the 'Array' key to save memory by avoiding duplication
    del header_parameters['Array']
    original_parameters['ser_header_parameters'] = header_parameters
    dictionary = {
        'data': dc,
        'mapped_parameters': {
            'original_filename': filename,
            'record_by': record_by,
            'signal': None
        },
        'axes': axes,
        'original_parameters': original_parameters
    }
    return dictionary
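
All the readers above rely on sarray2dict to turn the numpy record arrays read from the headers into plain dictionaries, optionally updating an existing one as in sarray2dict(data, header_parameters). The following is only a rough, hypothetical illustration of that behaviour, not the library's implementation:

import numpy as np

def sarray2dict_sketch(sarray, dictionary=None):
    # Hypothetical stand-in: copy every named field of a structured array
    # into a dict, unwrapping single-element fields.
    if dictionary is None:
        dictionary = {}
    for name in sarray.dtype.names:
        value = sarray[name]
        dictionary[name] = value[0] if value.shape == (1,) else value
    return dictionary

std_header = np.zeros(1, dtype=[('NX', '<i4'), ('NY', '<i4'), ('MODE', '<i4')])
header_dict = sarray2dict_sketch(std_header)    # {'NX': 0, 'NY': 0, 'MODE': 0}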
Example #5
def file_reader(filename, endianess='<', **kwds):
    mapped_parameters = {}
    dtype_list = get_std_dtype_list(endianess) + get_fei_dtype_list(endianess)
    f = open(filename, 'rb')
    std_header = np.fromfile(f, dtype=get_std_dtype_list(endianess), count=1)
    fei_header = None
    if std_header['NEXT'] / 1024 == 128:
        print "It seems to contain an extended FEI header"
        fei_header = np.fromfile(f,
                                 dtype=get_fei_dtype_list(endianess),
                                 count=1024)
    if f.tell() == 1024 + std_header['NEXT']:
        print "The FEI header was correctly loaded"
    else:
        print "There was a problem reading the extended header"
        f.seek(1024 + std_header['NEXT'])
        fei_header = None
    NX, NY, NZ = std_header['NX'], std_header['NY'], std_header['NZ']
    data = np.memmap(f, mode='c', offset=f.tell(),
                     dtype=get_data_type(std_header['MODE'], endianess)
                     ).squeeze().reshape((NX, NY, NZ), order='F').T

    original_parameters = {'std_header': sarray2dict(std_header)}
    if fei_header is not None:
        fei_dict = sarray2dict(fei_header)
        del fei_dict['empty']
        original_parameters['fei_header'] = fei_dict

    dim = len(data.shape)
    if fei_header is None:
        # The scale is in Angstroms; convert it to nm
        scales = [
            10 * float(std_header['Zlen'] / std_header['MZ'])
            if float(std_header['MZ']) != 0 else 1,
            10 * float(std_header['Ylen'] / std_header['MY'])
            if float(std_header['MY']) != 0 else 1,
            10 * float(std_header['Xlen'] / std_header['MX'])
            if float(std_header['MX']) != 0 else 1,
        ]
        offsets = [
            10 * float(std_header['ZORIGIN']),
            10 * float(std_header['YORIGIN']),
            10 * float(std_header['XORIGIN']),
        ]

    else:
        # FEI does not use the standard header to store the scale.
        # Instead, it stores the spatial scale in pixel_size (one value per
        # tilt angle, in meters), which we convert to nm here.
        scales = [1, ] + [fei_header['pixel_size'][0] * 10**9, ] * 2
        offsets = [0, ] * 3

    units = ['undefined', 'nm', 'nm']
    names = ['z', 'y', 'x']
    mapped_parameters = {
        'original_filename': filename,
        'record_by': 'image',
        'signal': None,
    }
    # Create the axis objects for each axis
    axes = [{
        'size': data.shape[i],
        'index_in_array': i,
        'name': names[i + 3 - dim],
        'scale': scales[i + 3 - dim],
        'offset': offsets[i + 3 - dim],
        'units': units[i + 3 - dim],
    } for i in xrange(dim)]

    dictionary = {
        'data': data,
        'axes': axes,
        'mapped_parameters': mapped_parameters,
        'original_parameters': original_parameters,
    }

    return [dictionary, ]