Example #1
File: ff.py Project: omarjamil/iris
    def _extract_field(self):
        # FF table pointer initialisation based on FF LOOKUP table configuration. 
        table_index, table_entry_depth, table_count = self._ff_header.lookup_table
        table_offset = (table_index - 1) * FF_WORD_DEPTH       # in bytes
        table_entry_depth = table_entry_depth * FF_WORD_DEPTH  # in bytes
        # Open the FF for processing.
        ff_file = open(self._ff_header.ff_filename, 'rb')
        ff_file_seek = ff_file.seek
        # Process each FF LOOKUP table entry.
        while table_count:
            table_count -= 1
            # Move file pointer to the start of the current FF LOOKUP table entry.
            ff_file_seek(table_offset, os.SEEK_SET)
            # Read the current PP header entry from the FF LOOKUP table.
            pp_header_integers = np.fromfile(ff_file, dtype='>i8', count=pp.NUM_LONG_HEADERS)  # 64-bit words.
            pp_header_floats = np.fromfile(ff_file, dtype='>f8', count=pp.NUM_FLOAT_HEADERS)   # 64-bit words.
            pp_header_data = tuple(pp_header_integers) + tuple(pp_header_floats)
            # Check whether the current FF LOOKUP table entry is valid.
            if pp_header_data[0] == _FF_LOOKUP_TABLE_TERMINATE:
                # There are no more FF LOOKUP table entries to read. 
                break
            # Calculate next FF LOOKUP table entry.
            table_offset += table_entry_depth
            # Construct a PPField object and populate using the pp_header_data
            # read from the current FF LOOKUP table.
            # (The PPField sub-class will depend on the header release number.)
            pp_field = pp.make_pp_field(pp_header_data)
            # Calculate the file pointer address of the start of the associated PP field data.
            data_offset = pp_field.lbegin * FF_WORD_DEPTH
            # Determine PP field payload depth.
            pp_data_extra_depth = pp_field.lbext
            if pp_field.lbpack:
                # Convert PP field LBNREC, representing a count in 64-bit words,
                # into its associated count in bytes.
                pp_data_depth = ((pp_field.lbnrec * 2) - 1) * pp.PP_WORD_DEPTH  # in bytes
            else:
                pp_data_depth = (pp_field.lblrec - pp_data_extra_depth) * pp.PP_WORD_DEPTH  # in bytes 
            
            # Determine PP field payload datatype.
            pp_data_type = pp.LBUSER_DTYPE_LOOKUP.get(pp_field.lbuser[0], pp.LBUSER_DTYPE_LOOKUP['default'])

            # Determine PP field data shape.
            pp_data_shape = (pp_field.lbrow, pp_field.lbnpt)
            # Determine whether to read the associated PP field data.
            if self._read_data:
                # Move file pointer to the start of the current PP field data.
                ff_file_seek(data_offset, os.SEEK_SET)
                # Get the PP field data.
                data = pp_field.read_data(ff_file, pp_data_depth, pp_data_shape, pp_data_type)
                pp_field._data = data
                pp_field._data_manager = None
            else:
                proxy = pp.PPDataProxy(self._ff_header.ff_filename,
                                       data_offset, pp_data_depth,
                                       pp_field.lbpack)
                pp_field._data = np.array(proxy)
                pp_field._data_manager = iris.fileformats.manager.DataManager(
                    pp_data_shape, pp_data_type, pp_field.bmdi)
            yield pp_field
        ff_file.close()
        return
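
The lookup-table pointer arithmetic used above is easy to miss: the FF header stores a 1-based start index and an entry depth in words, which must be converted to byte offsets before seeking. A minimal standalone sketch of that conversion, with FF_WORD_DEPTH and the sample values as assumptions:

FF_WORD_DEPTH = 8  # bytes per 64-bit file word (assumed, as in Example #1)

def lookup_entry_offsets(table_index, table_entry_depth, table_count):
    # Convert the 1-based word index into a 0-based byte offset.
    offset = (table_index - 1) * FF_WORD_DEPTH
    entry_depth = table_entry_depth * FF_WORD_DEPTH  # bytes per entry
    for _ in range(table_count):
        yield offset
        offset += entry_depth

# A hypothetical table starting at word 513 with 64-word entries:
# list(lookup_entry_offsets(513, 64, 3)) -> [4096, 4608, 5120]
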
Example #2
File: ff.py Project: ahill818/iris
    def _extract_field(self):
        # FF table pointer initialisation based on FF LOOKUP table configuration. 
        table_index, table_entry_depth, table_count = self._ff_header.lookup_table
        table_offset = (table_index - 1) * self._word_depth       # in bytes
        table_entry_depth = table_entry_depth * self._word_depth  # in bytes
        # Open the FF for processing.
        ff_file = open(self._ff_header.ff_filename, 'rb')
        ff_file_seek = ff_file.seek

        # Check for an instantaneous dump.
        if self._ff_header.dataset_type == 1:
            table_count = self._ff_header.total_prognostic_fields

        # Process each FF LOOKUP table entry.
        while table_count:
            table_count -= 1
            # Move file pointer to the start of the current FF LOOKUP table entry.
            ff_file_seek(table_offset, os.SEEK_SET)
            # Read the current PP header entry from the FF LOOKUP table.
            header_integers = np.fromfile(ff_file, dtype='>i{0}'.format(self._word_depth),
                                          count=pp.NUM_LONG_HEADERS)
            header_floats = np.fromfile(ff_file, dtype='>f{0}'.format(self._word_depth),
                                        count=pp.NUM_FLOAT_HEADERS)
            # Each header word is self._word_depth bytes, big-endian.
            header_data = tuple(header_integers) + tuple(header_floats)
            # Check whether the current FF LOOKUP table entry is valid.
            if header_data[0] == _FF_LOOKUP_TABLE_TERMINATE:
                # There are no more FF LOOKUP table entries to read. 
                break
            # Calculate next FF LOOKUP table entry.
            table_offset += table_entry_depth
            # Construct a PPField object and populate using the header_data
            # read from the current FF LOOKUP table.
            # (The PPField sub-class will depend on the header release number.)
            field = pp.make_pp_field(header_data)
            # Calculate the start address of the associated PP field data.
            data_offset = field.lbegin * self._word_depth
            # Determine PP field payload depth and type.
            data_depth, data_type = self._payload(field)
            # Determine PP field data shape.
            data_shape = (field.lbrow, field.lbnpt)
            # Determine whether to read the associated PP field data.
            if self._read_data:
                # Move file pointer to the start of the current PP field data.
                ff_file_seek(data_offset, os.SEEK_SET)
                # Get the PP field data.
                data = field.read_data(ff_file, data_depth, data_shape, data_type)
                field._data = data
                field._data_manager = None
            else:
                proxy = pp.PPDataProxy(self._filename, data_offset,
                                       data_depth, field.lbpack)
                field._data = np.array(proxy)
                field._data_manager = DataManager(data_shape, data_type, field.bmdi)
            yield field
        ff_file.close()
        return
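
Example #2 replaces Example #1's inline depth calculation with a _payload helper that is not shown in these snippets. A plausible reconstruction, based directly on the inline logic in Example #1 (PP_WORD_DEPTH and the dtype table are assumptions, not the real iris internals):

PP_WORD_DEPTH = 4  # bytes per 32-bit PP word (assumption)
LBUSER_DTYPE_LOOKUP = {1: '>f4', 2: '>i4', 3: '>i4', 'default': '>f4'}

def payload(field):
    """Return (data depth in bytes, numpy dtype string) for a PP field."""
    if field.lbpack:
        # LBNREC counts 64-bit words; convert to 32-bit words, then bytes.
        data_depth = ((field.lbnrec * 2) - 1) * PP_WORD_DEPTH
    else:
        # LBLREC includes the extra data length (LBEXT); exclude it.
        data_depth = (field.lblrec - field.lbext) * PP_WORD_DEPTH
    data_type = LBUSER_DTYPE_LOOKUP.get(field.lbuser[0],
                                        LBUSER_DTYPE_LOOKUP['default'])
    return data_depth, data_type
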
Example #3
File: ff.py Project: ckmo/iris
    def _extract_field(self):
        # FF table pointer initialisation based on FF LOOKUP table
        # configuration.

        lookup_table = self._ff_header.lookup_table
        table_index, table_entry_depth, table_count = lookup_table
        table_offset = (table_index - 1) * self._word_depth       # in bytes
        table_entry_depth = table_entry_depth * self._word_depth  # in bytes
        # Open the FF for processing.
        ff_file = open(self._ff_header.ff_filename, 'rb')
        ff_file_seek = ff_file.seek

        # Check for an instantaneous dump.
        if self._ff_header.dataset_type == 1:
            table_count = self._ff_header.total_prognostic_fields

        is_boundary_packed = self._ff_header.dataset_type == 5

        grid = self._ff_header.grid()

        # Process each FF LOOKUP table entry.
        while table_count:
            table_count -= 1
            # Move file pointer to the start of the current FF LOOKUP
            # table entry.
            ff_file_seek(table_offset, os.SEEK_SET)
            # Read the current PP header entry from the FF LOOKUP table.
            header_longs = np.fromfile(
                ff_file, dtype='>i{0}'.format(self._word_depth),
                count=pp.NUM_LONG_HEADERS)
            # Check whether the current FF LOOKUP table entry is valid.
            if header_longs[0] == _FF_LOOKUP_TABLE_TERMINATE:
                # There are no more FF LOOKUP table entries to read.
                break
            header_floats = np.fromfile(
                ff_file, dtype='>f{0}'.format(self._word_depth),
                count=pp.NUM_FLOAT_HEADERS)
            header = tuple(header_longs) + tuple(header_floats)

            # Calculate next FF LOOKUP table entry.
            table_offset += table_entry_depth
            # Construct a PPField object and populate using the header_data
            # read from the current FF LOOKUP table.
            # (The PPField sub-class will depend on the header release number.)
            field = pp.make_pp_field(header)
            # Calculate the start address of the associated PP field data.
            data_offset = field.lbegin * self._word_depth
            # Determine PP field payload depth and type.
            data_depth, data_type = self._payload(field)

            # Fast stash look-up.
            stash_s = field.lbuser[3] // 1000
            stash_i = field.lbuser[3] % 1000
            stash = 'm{:02}s{:02}i{:03}'.format(field.lbuser[6],
                                                stash_s, stash_i)
            stash_entry = STASH_TRANS.get(stash, None)
            if stash_entry is None:
                subgrid = None
                warnings.warn('The STASH code {0} was not found in the '
                              'STASH to grid type mapping. Picking the P '
                              'position as the cell type'.format(stash))
            else:
                subgrid = stash_entry.grid_code
                if subgrid not in HANDLED_GRIDS:
                    warnings.warn('The stash code {} is on a grid {} which '
                                  'has not been explicitly handled by the '
                                  'fieldsfile loader. Assuming the data is on '
                                  'a P grid.'.format(stash, subgrid))

            field.x, field.y = grid.vectors(subgrid)

            # Use the per-file grid if no per-field metadata is available.
            no_x = field.bzx in (0, field.bmdi) and field.x is None
            no_y = field.bzy in (0, field.bmdi) and field.y is None
            if no_x and no_y:
                field.bzx, field.bdx = grid.regular_x(subgrid)
                field.bzy, field.bdy = grid.regular_y(subgrid)
                field.bplat = grid.pole_lat
                field.bplon = grid.pole_lon
            elif no_x or no_y:
                warnings.warn('Partially missing X or Y coordinate values.')

            if is_boundary_packed:
                name_mapping = dict(rim_width=slice(4, 6), y_halo=slice(2, 4),
                                    x_halo=slice(0, 2))
                b_packing = pp.SplittableInt(field.lbuser[2], name_mapping)
                field.lbpack.boundary_packing = b_packing
                # Fix the lbrow and lbnpt to be the actual size of the data
                # array, since the field is no longer a "boundary" fields file
                # field.
                # Note: The documentation states that lbrow (y) doesn't
                # contain the halo rows, but no such comment exists at UM v8.5
                # for lbnpt (x). Experimentation has shown that lbnpt also
                # excludes the halo size.
                field.lbrow += 2 * field.lbpack.boundary_packing.y_halo
                field.lbnpt += 2 * field.lbpack.boundary_packing.x_halo
                # Update the grid origin (bzx, bzy) for this field. Note: it
                # may be that the x and y coordinate vectors also need
                # updating, but that is yet to be confirmed.
                if (field.bdx in (0, field.bmdi) or
                        field.bdy in (0, field.bmdi)):
                    field.x = self._det_border(field.x, b_packing.x_halo)
                    field.y = self._det_border(field.y, b_packing.y_halo)
                else:
                    if field.bdy < 0:
                        warnings.warn('The LBC has a bdy less than 0. No '
                                      'case has previously been seen of '
                                      'this, and the decompression may be '
                                      'erroneous.')
                    field.bzx -= field.bdx * b_packing.x_halo
                    field.bzy -= field.bdy * b_packing.y_halo

            if self._read_data:
                # Read the actual bytes. This can then be converted to a
                # numpy array at a higher level.
                ff_file_seek(data_offset, os.SEEK_SET)
                field._data = pp.LoadedArrayBytes(ff_file.read(data_depth),
                                                  data_type)
            else:
                # Provide enough context to read the data bytes later on.
                field._data = (self._filename, data_offset,
                               data_depth, data_type)
            yield field
        ff_file.close()
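
The "fast stash look-up" in Example #3 packs the model, section and item numbers into a fixed-width key string. A standalone illustration of the same encoding (the sample LBUSER words are invented):

def stash_key(lbuser):
    # lbuser[3] encodes section * 1000 + item; lbuser[6] is the model.
    # Integer division keeps the components ints under Python 3.
    section, item = lbuser[3] // 1000, lbuser[3] % 1000
    return 'm{:02}s{:02}i{:03}'.format(lbuser[6], section, item)

# Hypothetical LBUSER words: lbuser[3] = 3236 (section 3, item 236),
# lbuser[6] = 1 (model 1).
assert stash_key((0, 0, 0, 3236, 0, 0, 1)) == 'm01s03i236'
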
Example #4
File: ff.py Project: aashish24/iris
    def _extract_field(self):
        # FF table pointer initialisation based on FF LOOKUP table
        # configuration.
        lookup_table = self._ff_header.lookup_table
        table_index, table_entry_depth, table_count = lookup_table
        table_offset = (table_index - 1) * self._word_depth       # in bytes
        table_entry_depth = table_entry_depth * self._word_depth  # in bytes
        # Open the FF for processing.
        ff_file = open(self._ff_header.ff_filename, 'rb')
        ff_file_seek = ff_file.seek

        # Check for an instantaneous dump.
        if self._ff_header.dataset_type == 1:
            table_count = self._ff_header.total_prognostic_fields

        # Define the T, U, and V grid coordinates. The theta values are
        # stored in the first element of the second dimension on the
        # column/row dependent constants, and if it exists the U and V grid
        # coordinates can be found on the second element of the second
        # dimension.
        x_p, y_p, x_u, y_v = (None, None, None, None)
        if self._ff_header.column_dependent_constants is not None:
            x_p = self._ff_header.column_dependent_constants[:, 0]
            if self._ff_header.column_dependent_constants.shape[1] == 2:
                # The UM variable resolution configuration produces n "U" grid
                # values (with the last point on the extreme right hand side
                # of the last cell), whereas there are just n-1 "V" grid
                # values. This has been done for good reason inside the UM.
                x_u = self._ff_header.column_dependent_constants[:, 1]
        if self._ff_header.row_dependent_constants is not None:
            y_p = self._ff_header.row_dependent_constants[:, 0]
            if self._ff_header.row_dependent_constants.shape[1] == 2:
                y_v = self._ff_header.row_dependent_constants[:-1, 1]

        # Process each FF LOOKUP table entry.
        while table_count:
            table_count -= 1
            # Move file pointer to the start of the current FF LOOKUP
            # table entry.
            ff_file_seek(table_offset, os.SEEK_SET)
            # Read the current PP header entry from the FF LOOKUP table.
            header_integers = np.fromfile(
                ff_file, dtype='>i{0}'.format(self._word_depth),
                count=pp.NUM_LONG_HEADERS)
            header_floats = np.fromfile(
                ff_file, dtype='>f{0}'.format(self._word_depth),
                count=pp.NUM_FLOAT_HEADERS)
            # Each header word is self._word_depth bytes, big-endian.
            header_data = tuple(header_integers) + tuple(header_floats)
            # Check whether the current FF LOOKUP table entry is valid.
            if header_data[0] == _FF_LOOKUP_TABLE_TERMINATE:
                # There are no more FF LOOKUP table entries to read.
                break
            # Calculate next FF LOOKUP table entry.
            table_offset += table_entry_depth
            # Construct a PPField object and populate using the header_data
            # read from the current FF LOOKUP table.
            # (The PPField sub-class will depend on the header release number.)
            field = pp.make_pp_field(header_data)
            # Calculate the start address of the associated PP field data.
            data_offset = field.lbegin * self._word_depth
            # Determine PP field payload depth and type.
            data_depth, data_type = self._payload(field)
            # Determine PP field data shape.
            data_shape = (field.lbrow, field.lbnpt)

            grid = STASH_GRID.get(str(field.stash), None)

            if grid is None:
                warnings.warn('The STASH code {0} was not found in the '
                              'STASH to grid type mapping. Picking the P '
                              'position as the cell type'.format(field.stash))
            elif grid not in HANDLED_GRIDS:
                warnings.warn('The stash code {} is on a grid {} which has '
                              'not been explicitly handled by the fieldsfile '
                              'loader. Assuming the data is on a P grid.'
                              ''.format(field.stash, grid))

            field.x = x_p
            field.y = y_p
            if grid in X_COORD_U_GRID:
                field.x = x_u
            if grid in Y_COORD_V_GRID:
                field.y = y_v

            # Determine whether to read the associated PP field data.
            if self._read_data:
                # Move file pointer to the start of the current PP field data.
                ff_file_seek(data_offset, os.SEEK_SET)
                # Get the PP field data.
                data = field.read_data(ff_file, data_depth, data_shape,
                                       data_type)
                field._data = data
                field._data_manager = None
            else:
                proxy = pp.PPDataProxy(self._filename, data_offset,
                                       data_depth, field.lbpack)
                field._data = np.array(proxy)
                field._data_manager = DataManager(data_shape, data_type,
                                                  field.bmdi)
            yield field
        ff_file.close()
        return
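
The coordinate selection in Example #4 defaults every field to the P ("theta") positions and swaps in the staggered U or V vectors only for known grid codes. A reduced sketch of that selection; the grid-code sets here are placeholders for the real X_COORD_U_GRID and Y_COORD_V_GRID:

X_COORD_U_GRID = frozenset([18])  # assumed U-staggered grid codes
Y_COORD_V_GRID = frozenset([19])  # assumed V-staggered grid codes

def select_grid(grid_code, x_p, y_p, x_u, y_v):
    # Default to the P positions, then override each axis independently.
    x, y = x_p, y_p
    if grid_code in X_COORD_U_GRID:
        x = x_u
    if grid_code in Y_COORD_V_GRID:
        y = y_v
    return x, y
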
Example #5
    def _extract_field(self):
        # FF table pointer initialisation based on FF LOOKUP table
        # configuration.
        lookup_table = self._ff_header.lookup_table
        table_index, table_entry_depth, table_count = lookup_table
        table_offset = (table_index - 1) * self._word_depth  # in bytes
        table_entry_depth = table_entry_depth * self._word_depth  # in bytes
        # Open the FF for processing.
        ff_file = open(self._ff_header.ff_filename, 'rb')
        ff_file_seek = ff_file.seek

        # Check for an instantaneous dump.
        if self._ff_header.dataset_type == 1:
            table_count = self._ff_header.total_prognostic_fields

        # Define the T, U, and V grid coordinates. The theta values are
        # stored in the first element of the second dimension on the
        # column/row dependent constants, and if it exists the U and V grid
        # coordinates can be found on the second element of the second
        # dimension.
        x_p, y_p, x_u, y_v = (None, None, None, None)
        if self._ff_header.column_dependent_constants is not None:
            x_p = self._ff_header.column_dependent_constants[:, 0]
            if self._ff_header.column_dependent_constants.shape[1] == 2:
                # The UM variable resolution configuration produces n "U" grid
                # values (with the last point on the extreme right hand side
                # of the last cell), whereas there are just n-1 "V" grid
                # values. This has been done for good reason inside the UM.
                x_u = self._ff_header.column_dependent_constants[:, 1]
        if self._ff_header.row_dependent_constants is not None:
            y_p = self._ff_header.row_dependent_constants[:, 0]
            if self._ff_header.row_dependent_constants.shape[1] == 2:
                y_v = self._ff_header.row_dependent_constants[:-1, 1]

        # Process each FF LOOKUP table entry.
        while table_count:
            table_count -= 1
            # Move file pointer to the start of the current FF LOOKUP
            # table entry.
            ff_file_seek(table_offset, os.SEEK_SET)
            # Read the current PP header entry from the FF LOOKUP table.
            header_integers = np.fromfile(ff_file,
                                          dtype='>i{0}'.format(
                                              self._word_depth),
                                          count=pp.NUM_LONG_HEADERS)
            header_floats = np.fromfile(ff_file,
                                        dtype='>f{0}'.format(self._word_depth),
                                        count=pp.NUM_FLOAT_HEADERS)
            # Each header word is self._word_depth bytes, big-endian.
            header_data = tuple(header_integers) + tuple(header_floats)
            # Check whether the current FF LOOKUP table entry is valid.
            if header_data[0] == _FF_LOOKUP_TABLE_TERMINATE:
                # There are no more FF LOOKUP table entries to read.
                break
            # Calculate next FF LOOKUP table entry.
            table_offset += table_entry_depth
            # Construct a PPField object and populate using the header_data
            # read from the current FF LOOKUP table.
            # (The PPField sub-class will depend on the header release number.)
            field = pp.make_pp_field(header_data)
            # Calculate the start address of the associated PP field data.
            data_offset = field.lbegin * self._word_depth
            # Determine PP field payload depth and type.
            data_depth, data_type = self._payload(field)
            # Determine PP field data shape.
            data_shape = (field.lbrow, field.lbnpt)

            grid = STASH_GRID.get(str(field.stash), None)

            if grid is None:
                warnings.warn('The STASH code {0} was not found in the '
                              'STASH to grid type mapping. Picking the P '
                              'position as the cell type'.format(field.stash))
            elif grid not in HANDLED_GRIDS:
                warnings.warn('The stash code {} is on a grid {} which has '
                              'not been explicitly handled by the fieldsfile '
                              'loader. Assuming the data is on a P grid.'
                              ''.format(field.stash, grid))

            field.x = x_p
            field.y = y_p
            if grid in X_COORD_U_GRID:
                field.x = x_u
            if grid in Y_COORD_V_GRID:
                field.y = y_v

            # Determine whether to read the associated PP field data.
            if self._read_data:
                # Move file pointer to the start of the current PP field data.
                ff_file_seek(data_offset, os.SEEK_SET)
                # Get the PP field data.
                data = field.read_data(ff_file, data_depth, data_shape,
                                       data_type)
                field._data = data
                field._data_manager = None
            else:
                proxy = pp.PPDataProxy(self._filename, data_offset, data_depth,
                                       field.lbpack)
                field._data = np.array(proxy)
                field._data_manager = DataManager(data_shape, data_type,
                                                  field.bmdi)
            yield field
        ff_file.close()
        return
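
All of the header reads above build their big-endian dtype strings from the file's word depth, so the same code handles 32-bit and 64-bit fieldsfiles. A self-contained demonstration of the pattern against an in-memory buffer (the values are arbitrary):

import io
import numpy as np

word_depth = 8  # bytes per word; would be 4 for a 32-bit fieldsfile
int_dtype = '>i{0}'.format(word_depth)  # big-endian integer words
flt_dtype = '>f{0}'.format(word_depth)  # big-endian float words

# Round-trip three big-endian integers through a buffer, mirroring how
# the lookup-table reads interpret raw file bytes.
buf = io.BytesIO(np.array([1, 2, 3], dtype=int_dtype).tobytes())
header_integers = np.frombuffer(buf.read(3 * word_depth), dtype=int_dtype)
# header_integers -> array([1, 2, 3])
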
Example #6
    def _extract_field(self):
        # FF table pointer initialisation based on FF LOOKUP table configuration.
        table_index, table_entry_depth, table_count = self._ff_header.lookup_table
        table_offset = (table_index - 1) * self._word_depth  # in bytes
        table_entry_depth = table_entry_depth * self._word_depth  # in bytes
        # Open the FF for processing.
        ff_file = open(self._ff_header.ff_filename, 'rb')
        ff_file_seek = ff_file.seek

        # Check for an instantaneous dump.
        if self._ff_header.dataset_type == 1:
            table_count = self._ff_header.total_prognostic_fields

        # Process each FF LOOKUP table entry.
        while table_count:
            table_count -= 1
            # Move file pointer to the start of the current FF LOOKUP table entry.
            ff_file_seek(table_offset, os.SEEK_SET)
            # Read the current PP header entry from the FF LOOKUP table.
            header_integers = np.fromfile(ff_file,
                                          dtype='>i{0}'.format(
                                              self._word_depth),
                                          count=pp.NUM_LONG_HEADERS)
            header_floats = np.fromfile(ff_file,
                                        dtype='>f{0}'.format(self._word_depth),
                                        count=pp.NUM_FLOAT_HEADERS)
            # Each header word is self._word_depth bytes, big-endian.
            header_data = tuple(header_integers) + tuple(header_floats)
            # Check whether the current FF LOOKUP table entry is valid.
            if header_data[0] == _FF_LOOKUP_TABLE_TERMINATE:
                # There are no more FF LOOKUP table entries to read.
                break
            # Calculate next FF LOOKUP table entry.
            table_offset += table_entry_depth
            # Construct a PPField object and populate using the header_data
            # read from the current FF LOOKUP table.
            # (The PPField sub-class will depend on the header release number.)
            field = pp.make_pp_field(header_data)
            # Calculate the start address of the associated PP field data.
            data_offset = field.lbegin * self._word_depth
            # Determine PP field payload depth and type.
            data_depth, data_type = self._payload(field)
            # Determine PP field data shape.
            data_shape = (field.lbrow, field.lbnpt)
            # Determine whether to read the associated PP field data.
            if self._read_data:
                # Move file pointer to the start of the current PP field data.
                ff_file_seek(data_offset, os.SEEK_SET)
                # Get the PP field data.
                data = field.read_data(ff_file, data_depth, data_shape,
                                       data_type)
                field._data = data
                field._data_manager = None
            else:
                proxy = pp.PPDataProxy(self._filename, data_offset, data_depth,
                                       field.lbpack)
                field._data = np.array(proxy)
                field._data_manager = DataManager(data_shape, data_type,
                                                  field.bmdi)
            yield field
        ff_file.close()
        return
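
When _read_data is false, Examples #2 and #4 through #6 store a pp.PPDataProxy so the payload is only read when the array is first needed. A minimal stand-in proxy showing the idea (this is not the real PPDataProxy, which also understands LBPACK unpacking):

import numpy as np

class DeferredPayload(object):
    """Minimal deferred-read proxy: keep the file context, load on demand."""

    def __init__(self, filename, offset, depth, dtype):
        self.filename = filename
        self.offset = offset    # byte address of the field data
        self.depth = depth      # payload length in bytes
        self.dtype = dtype      # e.g. '>f8'

    def load(self, shape):
        # Only touch the file when the data is actually wanted.
        with open(self.filename, 'rb') as f:
            f.seek(self.offset)
            raw = f.read(self.depth)
        return np.frombuffer(raw, dtype=self.dtype).reshape(shape)
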
Example #7
File: ff.py Project: andreas-h/iris
    def _extract_field(self):
        # FF table pointer initialisation based on FF LOOKUP table
        # configuration.

        lookup_table = self._ff_header.lookup_table
        table_index, table_entry_depth, table_count = lookup_table
        table_offset = (table_index - 1) * self._word_depth  # in bytes
        table_entry_depth = table_entry_depth * self._word_depth  # in bytes
        # Open the FF for processing.
        ff_file = open(self._ff_header.ff_filename, 'rb')
        ff_file_seek = ff_file.seek

        # Check for an instantaneous dump.
        if self._ff_header.dataset_type == 1:
            table_count = self._ff_header.total_prognostic_fields

        is_boundary_packed = self._ff_header.dataset_type == 5

        grid = self._ff_header.grid()

        # Process each FF LOOKUP table entry.
        while table_count:
            table_count -= 1
            # Move file pointer to the start of the current FF LOOKUP
            # table entry.
            ff_file_seek(table_offset, os.SEEK_SET)
            # Read the current PP header entry from the FF LOOKUP table.
            header_longs = np.fromfile(ff_file,
                                       dtype='>i{0}'.format(self._word_depth),
                                       count=pp.NUM_LONG_HEADERS)
            # Check whether the current FF LOOKUP table entry is valid.
            if header_longs[0] == _FF_LOOKUP_TABLE_TERMINATE:
                # There are no more FF LOOKUP table entries to read.
                break
            header_floats = np.fromfile(ff_file,
                                        dtype='>f{0}'.format(self._word_depth),
                                        count=pp.NUM_FLOAT_HEADERS)
            header = tuple(header_longs) + tuple(header_floats)

            # Calculate next FF LOOKUP table entry.
            table_offset += table_entry_depth
            # Construct a PPField object and populate using the header_data
            # read from the current FF LOOKUP table.
            # (The PPField sub-class will depend on the header release number.)
            field = pp.make_pp_field(header)
            # Calculate the start address of the associated PP field data.
            data_offset = field.lbegin * self._word_depth
            # Determine PP field payload depth and type.
            data_depth, data_type = self._payload(field)

            # Fast stash look-up.
            stash_s = field.lbuser[3] // 1000
            stash_i = field.lbuser[3] % 1000
            stash = 'm{:02}s{:02}i{:03}'.format(field.lbuser[6], stash_s,
                                                stash_i)
            stash_entry = STASH_TRANS.get(stash, None)
            if stash_entry is None:
                subgrid = None
                warnings.warn('The STASH code {0} was not found in the '
                              'STASH to grid type mapping. Picking the P '
                              'position as the cell type'.format(stash))
            else:
                subgrid = stash_entry.grid_code
                if subgrid not in HANDLED_GRIDS:
                    warnings.warn('The stash code {} is on a grid {} which '
                                  'has not been explicitly handled by the '
                                  'fieldsfile loader. Assuming the data is on '
                                  'a P grid.'.format(stash, subgrid))

            field.x, field.y = grid.vectors(subgrid)

            # Use the per-file grid if no per-field metadata is available.
            no_x = field.bzx in (0, field.bmdi) and field.x is None
            no_y = field.bzy in (0, field.bmdi) and field.y is None
            if no_x and no_y:
                field.bzx, field.bdx = grid.regular_x(subgrid)
                field.bzy, field.bdy = grid.regular_y(subgrid)
                field.bplat = grid.pole_lat
                field.bplon = grid.pole_lon
            elif no_x or no_y:
                warnings.warn('Partially missing X or Y coordinate values.')

            if is_boundary_packed:
                name_mapping = dict(rim_width=slice(4, 6),
                                    y_halo=slice(2, 4),
                                    x_halo=slice(0, 2))
                b_packing = pp.SplittableInt(field.lbuser[2], name_mapping)
                field.lbpack.boundary_packing = b_packing
                # Fix the lbrow and lbnpt to be the actual size of the data
                # array, since the field is no longer a "boundary" fields file
                # field.
                # Note: The documentation states that lbrow (y) doesn't
                # contain the halo rows, but no such comment exists at UM v8.5
                # for lbnpt (x). Experimentation has shown that lbnpt also
                # excludes the halo size.
                field.lbrow += 2 * field.lbpack.boundary_packing.y_halo
                field.lbnpt += 2 * field.lbpack.boundary_packing.x_halo
                # Update the grid origin (bzx, bzy) for this field. Note: it
                # may be that the x and y coordinate vectors also need
                # updating, but that is yet to be confirmed.
                if (field.bdx in (0, field.bmdi)
                        or field.bdy in (0, field.bmdi)):
                    field.x = self._det_border(field.x, b_packing.x_halo)
                    field.y = self._det_border(field.y, b_packing.y_halo)
                else:
                    if field.bdy < 0:
                        warnings.warn('The LBC has a bdy less than 0. No '
                                      'case has previously been seen of '
                                      'this, and the decompression may be '
                                      'erroneous.')
                    field.bzx -= field.bdx * b_packing.x_halo
                    field.bzy -= field.bdy * b_packing.y_halo

            if self._read_data:
                # Read the actual bytes. This can then be converted to a
                # numpy array at a higher level.
                ff_file_seek(data_offset, os.SEEK_SET)
                field._data = pp.LoadedArrayBytes(ff_file.read(data_depth),
                                                  data_type)
            else:
                # Provide enough context to read the data bytes later on.
                field._data = (self._filename, data_offset, data_depth,
                               data_type)
            yield field
        ff_file.close()
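
The boundary-packed branch in Examples #3 and #7 decodes LBUSER(3) digit-wise: pp.SplittableInt maps decimal digit slices to names, so x_halo is the two least significant digits, y_halo the next two, and rim_width the two above those. A plain-integer illustration of the same decoding (the sample value is invented):

def decode_boundary_packing(value):
    # Decimal-digit slices, least significant first, mirroring the
    # SplittableInt name_mapping used above.
    x_halo = value % 100                 # digits 0-1
    y_halo = (value // 100) % 100        # digits 2-3
    rim_width = (value // 10000) % 100   # digits 4-5
    return rim_width, y_halo, x_halo

# Invented value 90404: rim width 9, y halo 4, x halo 4.
assert decode_boundary_packing(90404) == (9, 4, 4)
# The grid is then grown by the halo on each side:
#     lbrow += 2 * y_halo; lbnpt += 2 * x_halo
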
Example #8
    def _extract_field(self):
        # FF table pointer initialisation based on FF LOOKUP table
        # configuration.

        # Interpret grid coordinates based on grid staggering type.
        grid_staggering = {3: self._det_typeC_grid_coord,
                           6: self._det_typeC_vpole_grid_coord}

        lookup_table = self._ff_header.lookup_table
        table_index, table_entry_depth, table_count = lookup_table
        table_offset = (table_index - 1) * self._word_depth       # in bytes
        table_entry_depth = table_entry_depth * self._word_depth  # in bytes
        # Open the FF for processing.
        ff_file = open(self._ff_header.ff_filename, 'rb')
        ff_file_seek = ff_file.seek

        # Check for an instantaneous dump.
        if self._ff_header.dataset_type == 1:
            table_count = self._ff_header.total_prognostic_fields

        is_boundary_packed = self._ff_header.dataset_type == 5

        # Interpret grid points
        if self._ff_header.grid_staggering in grid_staggering:
            x_p, y_p, x_u, y_v = grid_staggering[
                self._ff_header.grid_staggering]()
        else:
            warnings.warn(
                'Staggered grid type: {} not currently interpreted, assuming '
                'standard C-grid'.format(self._ff_header.grid_staggering))
            x_p, y_p, x_u, y_v = grid_staggering[3]()

        # Process each FF LOOKUP table entry.
        while table_count:
            table_count -= 1
            # Move file pointer to the start of the current FF LOOKUP
            # table entry.
            ff_file_seek(table_offset, os.SEEK_SET)
            # Read the current PP header entry from the FF LOOKUP table.
            header_integers = np.fromfile(
                ff_file, dtype='>i{0}'.format(self._word_depth),
                count=pp.NUM_LONG_HEADERS)
            header_floats = np.fromfile(
                ff_file, dtype='>f{0}'.format(self._word_depth),
                count=pp.NUM_FLOAT_HEADERS)
            # Each header word is self._word_depth bytes, big-endian.
            header_data = tuple(header_integers) + tuple(header_floats)
            # Check whether the current FF LOOKUP table entry is valid.
            if header_data[0] == _FF_LOOKUP_TABLE_TERMINATE:
                # There are no more FF LOOKUP table entries to read.
                break
            # Calculate next FF LOOKUP table entry.
            table_offset += table_entry_depth
            # Construct a PPField object and populate using the header_data
            # read from the current FF LOOKUP table.
            # (The PPField sub-class will depend on the header release number.)
            field = pp.make_pp_field(header_data)
            # Calculate the start address of the associated PP field data.
            data_offset = field.lbegin * self._word_depth
            # Determine PP field payload depth and type.
            data_depth, data_type = self._payload(field)

            grid = STASH_GRID.get(str(field.stash), None)

            if grid is None:
                warnings.warn('The STASH code {0} was not found in the '
                              'STASH to grid type mapping. Picking the P '
                              'position as the cell type'.format(field.stash))
            elif grid not in HANDLED_GRIDS:
                warnings.warn('The stash code {} is on a grid {} which has '
                              'not been explicitly handled by the fieldsfile '
                              'loader. Assuming the data is on a P grid.'
                              ''.format(field.stash, grid))

            field.x, field.y = self._select_grid(grid, x_p, x_u, y_p, y_v)

            if is_boundary_packed:
                name_mapping = dict(rim_width=slice(4, 6), y_halo=slice(2, 4),
                                    x_halo=slice(0, 2))
                b_packing = pp.SplittableInt(field.lbuser[2], name_mapping)
                field.lbpack.boundary_packing = b_packing
                # Fix the lbrow and lbnpt to be the actual size of the data
                # array, since the field is no longer a "boundary" fields file
                # field.
                # Note: The documentation states that lbrow (y) doesn't
                # contain the halo rows, but no such comment exists at UM v8.5
                # for lbnpt (x). Experimentation has shown that lbnpt also
                # excludes the halo size.
                field.lbrow += 2 * field.lbpack.boundary_packing.y_halo
                field.lbnpt += 2 * field.lbpack.boundary_packing.x_halo
                # Update the grid origin (bzx, bzy) for this field. Note: it
                # may be that the x and y coordinate vectors also need
                # updating, but that is yet to be confirmed.
                if (field.bdx in (0, field.bmdi) or
                        field.bdy in (0, field.bmdi)):
                    warnings.warn('The LBC field has non-trivial x or y '
                                  'coordinates which have not been updated '
                                  'to take the boundary size into account. '
                                  'If you see this message, the x and y '
                                  'coordinates of the boundary condition '
                                  'fields may be incorrect.')
                else:
                    if field.bdy < 0:
                        warnings.warn('The LBC has a bdy less than 0. No '
                                      'case has previously been seen of '
                                      'this, and the decompression may be '
                                      'erroneous.')
                    field.bzx -= field.bdx * b_packing.x_halo
                    field.bzy -= field.bdy * b_packing.y_halo

            if self._read_data:
                # Read the actual bytes. This can then be converted to a
                # numpy array at a higher level.
                ff_file_seek(data_offset, os.SEEK_SET)
                field._data = pp.LoadedArrayBytes(ff_file.read(data_depth),
                                                  data_type)
            else:
                # Provide enough context to read the data bytes later on.
                field._data = pp.DeferredArrayBytes(self._filename,
                                                    data_offset, data_depth,
                                                    data_type)
            yield field
        ff_file.close()
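
Since _extract_field is a generator, fields stream out one lookup entry at a time and a caller can stop early without reading the whole file. A hypothetical usage sketch (FFReader stands in for whichever class hosts these methods):

reader = FFReader('atmos.ff')  # assumed constructor, for illustration only
for field in reader._extract_field():
    # Each yielded object is a populated PPField; its payload is loaded
    # eagerly or deferred depending on the reader's _read_data flag.
    print(field.stash, field.lbrow, field.lbnpt)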