Code example #1
File: _ff.py Project: lauradomar/iris
    def _extract_field(self):
        # FF table pointer initialisation based on FF LOOKUP table
        # configuration.

        lookup_table = self._ff_header.lookup_table
        table_index, table_entry_depth, table_count = lookup_table
        table_offset = (table_index - 1) * self._word_depth       # in bytes
        table_entry_depth = table_entry_depth * self._word_depth  # in bytes
        # Open the FF for processing.
        with open(self._ff_header.ff_filename, 'rb') as ff_file:
            ff_file_seek = ff_file.seek

            is_boundary_packed = self._ff_header.dataset_type == 5

            grid = self._ff_header.grid()

            # Process each FF LOOKUP table entry.
            while table_count:
                table_count -= 1
                # Move file pointer to the start of the current FF LOOKUP
                # table entry.
                ff_file_seek(table_offset, os.SEEK_SET)

                # Read the current PP header entry from the FF LOOKUP table.
                header_longs = np.fromfile(
                    ff_file, dtype='>i{0}'.format(self._word_depth),
                    count=pp.NUM_LONG_HEADERS)
                # Check whether the current FF LOOKUP table entry is valid.
                if header_longs[0] == _FF_LOOKUP_TABLE_TERMINATE:
                    # There are no more FF LOOKUP table entries to read.
                    break
                header_floats = np.fromfile(
                    ff_file, dtype='>f{0}'.format(self._word_depth),
                    count=pp.NUM_FLOAT_HEADERS)
                header = tuple(header_longs) + tuple(header_floats)

                # Calculate next FF LOOKUP table entry.
                table_offset += table_entry_depth

                # Construct a PPField object and populate using the header_data
                # read from the current FF LOOKUP table.
                # (The PPField sub-class will depend on the header release
                # number.)
                # (Some FieldsFile fields are UM-specific scratch spaces
                # with no header release number; these raise an exception
                # from the PP module and are skipped so that the rest of
                # the file can still be read.)
                try:
                    field = pp.make_pp_field(header)

                    # Fast stash look-up.
                    stash_s = field.lbuser[3] // 1000
                    stash_i = field.lbuser[3] % 1000
                    stash = 'm{:02}s{:02}i{:03}'.format(field.lbuser[6],
                                                        stash_s, stash_i)
                    stash_entry = STASH_TRANS.get(stash, None)
                    if stash_entry is None:
                        subgrid = None
                    else:
                        subgrid = stash_entry.grid_code
                        if subgrid not in HANDLED_GRIDS:
                            warnings.warn('The stash code {} is on a grid {} '
                                          'which has not been explicitly '
                                          'handled by the fieldsfile loader.'
                                          ' Assuming the data is on a P grid'
                                          '.'.format(stash, subgrid))

                    field.x, field.y = grid.vectors(subgrid)

                    # Use the per-file grid if no per-field metadata is
                    # available.
                    no_x = field.bzx in (0, field.bmdi) and field.x is None
                    no_y = field.bzy in (0, field.bmdi) and field.y is None
                    if no_x and no_y:
                        if subgrid is None:
                            msg = ('The STASH code {0} was not found in the '
                                   'STASH to grid type mapping. Picking the P '
                                   'position as the cell type'.format(stash))
                            warnings.warn(msg)
                        field.bzx, field.bdx = grid.regular_x(subgrid)
                        field.bzy, field.bdy = grid.regular_y(subgrid)
                        field.bplat = grid.pole_lat
                        field.bplon = grid.pole_lon
                    elif no_x or no_y:
                        warnings.warn(
                            'Partially missing X or Y coordinate values.')

                    # Check for LBC fields.
                    is_boundary_packed = self._ff_header.dataset_type == 5
                    if is_boundary_packed:
                        # Apply adjustments specific to LBC data.
                        self._adjust_field_for_lbc(field)

                    # Calculate the start address of the associated PP field data.
                    data_offset = field.lbegin * self._word_depth
                    # Determine PP field payload depth and type.
                    data_depth, data_type = self._payload(field)

                    # Produce (yield) output fields.
                    if is_boundary_packed:
                        fields = self._fields_over_all_levels(field)
                    else:
                        fields = [field]
                    for result_field in fields:
                        # Add a field data element.
                        if self._read_data:
                            # Read the actual bytes. This can then be converted
                            # to a numpy array at a higher level.
                            ff_file_seek(data_offset, os.SEEK_SET)
                            result_field.data = pp.LoadedArrayBytes(
                                ff_file.read(data_depth), data_type)
                        else:
                            # Provide enough context to read the data bytes
                            # later.
                            result_field.data = (self._filename, data_offset,
                                                 data_depth, data_type)

                        data_offset += data_depth

                        yield result_field
                except ValueError as valerr:
                    msg = ('Input field skipped as PPField creation failed:'
                           ' error = {!r}')
                    warnings.warn(msg.format(str(valerr)))
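The method above is a generator: each valid LOOKUP entry yields a field whose payload is either read eagerly as pp.LoadedArrayBytes or deferred as a (filename, data_offset, data_depth, data_type) tuple. A minimal sketch of how a caller might force those deferred payloads, assuming only the behaviour shown above (the reader object ff and the helper name iter_deferred_payloads are hypothetical):

import os

def iter_deferred_payloads(ff):
    # Walk the generator; when the reader was created with _read_data
    # False, each field carries a (filename, offset, depth, type) tuple
    # matching the "else" branch in the example above.
    for field in ff._extract_field():
        if not ff._read_data:
            filename, data_offset, data_depth, data_type = field.data
            with open(filename, 'rb') as f:
                f.seek(data_offset, os.SEEK_SET)
                raw_bytes = f.read(data_depth)
            # raw_bytes can now be converted to a numpy array at a
            # higher level, just as the eager branch intends.
        yield field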
Code example #2
File: ff.py Project: ckmo/iris
    def _extract_field(self):
        # FF table pointer initialisation based on FF LOOKUP table
        # configuration.

        lookup_table = self._ff_header.lookup_table
        table_index, table_entry_depth, table_count = lookup_table
        table_offset = (table_index - 1) * self._word_depth       # in bytes
        table_entry_depth = table_entry_depth * self._word_depth  # in bytes
        # Open the FF for processing.
        ff_file = open(self._ff_header.ff_filename, 'rb')
        ff_file_seek = ff_file.seek

        # Check for an instantaneous dump.
        if self._ff_header.dataset_type == 1:
            table_count = self._ff_header.total_prognostic_fields

        is_boundary_packed = self._ff_header.dataset_type == 5

        grid = self._ff_header.grid()

        # Process each FF LOOKUP table entry.
        while table_count:
            table_count -= 1
            # Move file pointer to the start of the current FF LOOKUP
            # table entry.
            ff_file_seek(table_offset, os.SEEK_SET)
            # Read the current PP header entry from the FF LOOKUP table.
            header_longs = np.fromfile(
                ff_file, dtype='>i{0}'.format(self._word_depth),
                count=pp.NUM_LONG_HEADERS)
            # Check whether the current FF LOOKUP table entry is valid.
            if header_longs[0] == _FF_LOOKUP_TABLE_TERMINATE:
                # There are no more FF LOOKUP table entries to read.
                break
            header_floats = np.fromfile(
                ff_file, dtype='>f{0}'.format(self._word_depth),
                count=pp.NUM_FLOAT_HEADERS)
            header = tuple(header_longs) + tuple(header_floats)

            # Calculate next FF LOOKUP table entry.
            table_offset += table_entry_depth
            # Construct a PPField object and populate using the header_data
            # read from the current FF LOOKUP table.
            # (The PPField sub-class will depend on the header release number.)
            field = pp.make_pp_field(header)
            # Calculate the start address of the associated PP field data.
            data_offset = field.lbegin * self._word_depth
            # Determine PP field payload depth and type.
            data_depth, data_type = self._payload(field)

            # Fast stash look-up.
            stash_s = field.lbuser[3] // 1000  # floor division (Python 2 and 3)
            stash_i = field.lbuser[3] % 1000
            stash = 'm{:02}s{:02}i{:03}'.format(field.lbuser[6],
                                                stash_s, stash_i)
            stash_entry = STASH_TRANS.get(stash, None)
            if stash_entry is None:
                subgrid = None
                warnings.warn('The STASH code {0} was not found in the '
                              'STASH to grid type mapping. Picking the P '
                              'position as the cell type'.format(stash))
            else:
                subgrid = stash_entry.grid_code
                if subgrid not in HANDLED_GRIDS:
                    warnings.warn('The stash code {} is on a grid {} which '
                                  'has not been explicitly handled by the '
                                  'fieldsfile loader. Assuming the data is on '
                                  'a P grid.'.format(stash, subgrid))

            field.x, field.y = grid.vectors(subgrid)

            # Use the per-file grid if no per-field metadata is available.
            no_x = field.bzx in (0, field.bmdi) and field.x is None
            no_y = field.bzy in (0, field.bmdi) and field.y is None
            if no_x and no_y:
                field.bzx, field.bdx = grid.regular_x(subgrid)
                field.bzy, field.bdy = grid.regular_y(subgrid)
                field.bplat = grid.pole_lat
                field.bplon = grid.pole_lon
            elif no_x or no_y:
                warnings.warn('Partially missing X or Y coordinate values.')

            if is_boundary_packed:
                name_mapping = dict(rim_width=slice(4, 6), y_halo=slice(2, 4),
                                    x_halo=slice(0, 2))
                b_packing = pp.SplittableInt(field.lbuser[2], name_mapping)
                field.lbpack.boundary_packing = b_packing
                # Fix the lbrow and lbnpt to be the actual size of the data
                # array, since the field is no longer a "boundary" fields file
                # field.
                # Note: The documentation states that lbrow (y) doesn't
                # contain the halo rows, but no such comment exists at UM v8.5
                # for lbnpt (x). Experimentation has shown that lbnpt also
                # excludes the halo size.
                field.lbrow += 2 * field.lbpack.boundary_packing.y_halo
                field.lbnpt += 2 * field.lbpack.boundary_packing.x_halo
                # Update the x and y coordinates for this field. Note: the
                # coordinate vectors may need further updating too, but
                # that is yet to be confirmed.
                if (field.bdx in (0, field.bmdi) or
                        field.bdy in (0, field.bmdi)):
                    field.x = self._det_border(field.x, b_packing.x_halo)
                    field.y = self._det_border(field.y, b_packing.y_halo)
                else:
                    if field.bdy < 0:
                        warnings.warn('The LBC has a bdy less than 0. No '
                                      'case has previously been seen of '
                                      'this, and the decompression may be '
                                      'erroneous.')
                    field.bzx -= field.bdx * b_packing.x_halo
                    field.bzy -= field.bdy * b_packing.y_halo

            if self._read_data:
                # Read the actual bytes. This can then be converted to a
                # numpy array at a higher level.
                ff_file_seek(data_offset, os.SEEK_SET)
                field._data = pp.LoadedArrayBytes(ff_file.read(data_depth),
                                                  data_type)
            else:
                # Provide enough context to read the data bytes later on.
                field._data = (self._filename, data_offset,
                               data_depth, data_type)
            yield field
        ff_file.close()
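Each variant builds the STASH lookup key by decoding LBUSER4 (field.lbuser[3]) as section * 1000 + item and combining it with the model number in field.lbuser[6]. A standalone sketch of that arithmetic, with made-up header values (msi_string is a hypothetical helper, not part of iris):

def msi_string(model, lbuser4):
    # LBUSER4 packs the STASH section and item as section * 1000 + item;
    # floor division and the modulus recover the two parts.
    section = lbuser4 // 1000
    item = lbuser4 % 1000
    return 'm{:02}s{:02}i{:03}'.format(model, section, item)

# Made-up values: model 1, section 3, item 236.
assert msi_string(1, 3236) == 'm01s03i236'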
Code example #3
File: ff.py Project: andreas-h/iris
    def _extract_field(self):
        # FF table pointer initialisation based on FF LOOKUP table
        # configuration.

        lookup_table = self._ff_header.lookup_table
        table_index, table_entry_depth, table_count = lookup_table
        table_offset = (table_index - 1) * self._word_depth  # in bytes
        table_entry_depth = table_entry_depth * self._word_depth  # in bytes
        # Open the FF for processing.
        ff_file = open(self._ff_header.ff_filename, 'rb')
        ff_file_seek = ff_file.seek

        # Check for an instantaneous dump.
        if self._ff_header.dataset_type == 1:
            table_count = self._ff_header.total_prognostic_fields

        is_boundary_packed = self._ff_header.dataset_type == 5

        grid = self._ff_header.grid()

        # Process each FF LOOKUP table entry.
        while table_count:
            table_count -= 1
            # Move file pointer to the start of the current FF LOOKUP
            # table entry.
            ff_file_seek(table_offset, os.SEEK_SET)
            # Read the current PP header entry from the FF LOOKUP table.
            header_longs = np.fromfile(ff_file,
                                       dtype='>i{0}'.format(self._word_depth),
                                       count=pp.NUM_LONG_HEADERS)
            # Check whether the current FF LOOKUP table entry is valid.
            if header_longs[0] == _FF_LOOKUP_TABLE_TERMINATE:
                # There are no more FF LOOKUP table entries to read.
                break
            header_floats = np.fromfile(ff_file,
                                        dtype='>f{0}'.format(self._word_depth),
                                        count=pp.NUM_FLOAT_HEADERS)
            header = tuple(header_longs) + tuple(header_floats)

            # Calculate next FF LOOKUP table entry.
            table_offset += table_entry_depth
            # Construct a PPField object and populate using the header_data
            # read from the current FF LOOKUP table.
            # (The PPField sub-class will depend on the header release number.)
            field = pp.make_pp_field(header)
            # Calculate the start address of the associated PP field data.
            data_offset = field.lbegin * self._word_depth
            # Determine PP field payload depth and type.
            data_depth, data_type = self._payload(field)

            # Fast stash look-up.
            stash_s = field.lbuser[3] // 1000  # floor division (Python 2 and 3)
            stash_i = field.lbuser[3] % 1000
            stash = 'm{:02}s{:02}i{:03}'.format(field.lbuser[6], stash_s,
                                                stash_i)
            stash_entry = STASH_TRANS.get(stash, None)
            if stash_entry is None:
                subgrid = None
                warnings.warn('The STASH code {0} was not found in the '
                              'STASH to grid type mapping. Picking the P '
                              'position as the cell type'.format(stash))
            else:
                subgrid = stash_entry.grid_code
                if subgrid not in HANDLED_GRIDS:
                    warnings.warn('The stash code {} is on a grid {} which '
                                  'has not been explicitly handled by the '
                                  'fieldsfile loader. Assuming the data is on '
                                  'a P grid.'.format(stash, subgrid))

            field.x, field.y = grid.vectors(subgrid)

            # Use the per-file grid if no per-field metadata is available.
            no_x = field.bzx in (0, field.bmdi) and field.x is None
            no_y = field.bzy in (0, field.bmdi) and field.y is None
            if no_x and no_y:
                field.bzx, field.bdx = grid.regular_x(subgrid)
                field.bzy, field.bdy = grid.regular_y(subgrid)
                field.bplat = grid.pole_lat
                field.bplon = grid.pole_lon
            elif no_x or no_y:
                warnings.warn('Partially missing X or Y coordinate values.')

            if is_boundary_packed:
                name_mapping = dict(rim_width=slice(4, 6),
                                    y_halo=slice(2, 4),
                                    x_halo=slice(0, 2))
                b_packing = pp.SplittableInt(field.lbuser[2], name_mapping)
                field.lbpack.boundary_packing = b_packing
                # Fix the lbrow and lbnpt to be the actual size of the data
                # array, since the field is no longer a "boundary" fields file
                # field.
                # Note: The documentation states that lbrow (y) doesn't
                # contain the halo rows, but no such comment exists at UM v8.5
                # for lbnpt (x). Experimentation has shown that lbnpt also
                # excludes the halo size.
                field.lbrow += 2 * field.lbpack.boundary_packing.y_halo
                field.lbnpt += 2 * field.lbpack.boundary_packing.x_halo
                # Update the x and y coordinates for this field. Note: the
                # coordinate vectors may need further updating too, but
                # that is yet to be confirmed.
                if (field.bdx in (0, field.bmdi)
                        or field.bdy in (0, field.bmdi)):
                    field.x = self._det_border(field.x, b_packing.x_halo)
                    field.y = self._det_border(field.y, b_packing.y_halo)
                else:
                    if field.bdy < 0:
                        warnings.warn('The LBC has a bdy less than 0. No '
                                      'case has previously been seen of '
                                      'this, and the decompression may be '
                                      'erroneous.')
                    field.bzx -= field.bdx * b_packing.x_halo
                    field.bzy -= field.bdy * b_packing.y_halo

            if self._read_data:
                # Read the actual bytes. This can then be converted to a
                # numpy array at a higher level.
                ff_file_seek(data_offset, os.SEEK_SET)
                field._data = pp.LoadedArrayBytes(ff_file.read(data_depth),
                                                  data_type)
            else:
                # Provide enough context to read the data bytes later on.
                field._data = (self._filename, data_offset, data_depth,
                               data_type)
            yield field
        ff_file.close()
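For the boundary-packed (LBC) branch, the rim and halo sizes are packed into decimal digit groups of field.lbuser[2], which pp.SplittableInt exposes by slicing digits with index 0 as the least significant digit. A minimal stand-in for that slicing, assuming the same digit convention (split_decimal_digits and the packed value 160404 are made up for illustration):

def split_decimal_digits(value, name_mapping):
    # Digit index 0 is the least significant decimal digit, mirroring
    # the slice convention used with pp.SplittableInt above.
    digits = str(value)[::-1]
    parts = {}
    for name, index in name_mapping.items():
        chunk = digits[index][::-1]
        parts[name] = int(chunk) if chunk else 0
    return parts

# Made-up packed value: rim_width 16, y_halo 4, x_halo 4.
parts = split_decimal_digits(160404, dict(rim_width=slice(4, 6),
                                          y_halo=slice(2, 4),
                                          x_halo=slice(0, 2)))
assert parts == dict(rim_width=16, y_halo=4, x_halo=4)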