def offset_column(input_file, output_file, columns, offset, method, pressure_range=None):

    # Load existing file
    matrix_obj = OCO_Matrix(input_file)
   
    # Add ability to specify cols individually or using a * to go to the end
    cols = index_range_list(columns)

    if offset.isdigit():
        offset = float(offset)
    else:
        offset = eval(offset)

    if pressure_range != None:
        pres_col = matrix_obj.labels_lower.index("pressure")

        pres_range_arr = pressure_range.split(',')
        pres_val_beg = float(pres_range_arr[0])
        pres_val_end = float(pres_range_arr[1])

        pres_idx_beg = 0
        pres_idx_end = matrix_obj.dims[0]

        pres_column = [float(val[pres_col]) for val in matrix_obj.data]

        pres_idx_curr = 0
        beg_found = False
        for pres_val in pres_column:            
            if pres_val >= pres_val_beg and not beg_found:
                pres_idx_beg = pres_idx_curr
                beg_found = True
        
            if pres_val <= pres_val_end:
                pres_idx_end = pres_idx_curr + 1

            pres_idx_curr += 1

        target_rows = range(pres_idx_beg, pres_idx_end)

    else:
        target_rows = range(matrix_obj.dims[0])

    for rowIdx in target_rows:
        for colIdx in cols:

            #print 'old_val[%d][%d] = %f' % (rowIdx, colIdx, matrix_obj.data[rowIdx][colIdx])
            
            if method == '/':
                matrix_obj.data[rowIdx][colIdx] = matrix_obj.data[rowIdx][colIdx] / offset
            elif method == '-':
                matrix_obj.data[rowIdx][colIdx] = matrix_obj.data[rowIdx][colIdx] - offset
            elif method == '*':
                matrix_obj.data[rowIdx][colIdx] = matrix_obj.data[rowIdx][colIdx] * offset
            else:
                matrix_obj.data[rowIdx][colIdx] = matrix_obj.data[rowIdx][colIdx] + offset

            #print 'new_val[%d][%d] = %f' % (rowIdx, colIdx, matrix_obj.data[rowIdx][colIdx])

    matrix_obj.write(output_file)
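
# Usage sketch (illustrative only, not part of the original source): file
# names, the column spec, and the pressure range below are hypothetical.
# 'columns' and 'offset' are passed as strings because offset_column()
# parses them itself; 'method' selects '-', '*' or '/' against the offset,
# and anything else falls back to addition.
def _usage_sketch_offset_column():
    offset_column('profile_in.dat', 'profile_out.dat',
                  columns='2', offset='1.5', method='*',
                  pressure_range='100.0,900.0')
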
def noisify_spectra_obj(matrix_obj,
                        row_range_spec=None,
                        noise_cut_off=None,
                        save_perturb=False):
    logger = logging.getLogger(os.path.basename(__file__))

    radiance_col = matrix_obj.labels_lower.index("radiance")
    noise_col = matrix_obj.labels_lower.index("noise")

    if row_range_spec == None or len(row_range_spec) == 0:
        row_range = range(matrix_obj.dims[0])
    else:
        row_range = index_range_list(row_range_spec)

    if matrix_obj.dims[1] <= 2 and save_perturb:
        new_data = numpy.zeros((matrix_obj.dims[0], matrix_obj.dims[1] + 1),
                               dtype=float)
        new_data[:, 0:2] = matrix_obj.data[:, :]

        matrix_obj.labels.append('True-Perturb')
        matrix_obj.units.append('W sr-1 m-2')
    else:
        new_data = matrix_obj.data

    if save_perturb:
        perturb_col = new_data.shape[1] - 1

    if noise_cut_off != None:
        mean_noise = 0
        for row_idx in row_range:
            mean_noise += new_data[row_idx, noise_col]
        mean_noise /= len(row_range)

    for row_idx in row_range:
        radiance_val = new_data[row_idx, radiance_col]
        noise_val = new_data[row_idx, noise_col]

        new_radiance = random.gauss(radiance_val, noise_val)

        if (noise_cut_off != None
                and noise_val > float(noise_cut_off) * mean_noise):
            logger.error('Bad pixel detected for pixel row %d' % row_idx)
            noised_radiance_val = radiance_val
        else:
            noised_radiance_val = new_radiance

        new_data[row_idx, radiance_col] = noised_radiance_val

        if save_perturb:
            new_data[row_idx, perturb_col] = radiance_val - noised_radiance_val

    matrix_obj.data = new_data
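
# Usage sketch (illustrative only, not part of the original source): the
# file names are hypothetical and OCO_Matrix is the same class used above.
# noisify_spectra_obj() perturbs each radiance value in place with Gaussian
# noise drawn from the matching noise column.
def _usage_sketch_noisify_spectra():
    spectra = OCO_Matrix('spectrum_in.dat')
    noisify_spectra_obj(spectra, noise_cut_off='3.0')
    spectra.write('spectrum_noised.dat')
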
def Process_File(source, destination, fileKeywords, moduleSections, valuesDict, mapDict):
    logger = logging.getLogger(os.path.basename(__file__))

    for currSection in moduleSections:
        if str(source) == str(destination):
            raise IOError("source and destination must be different. will not overwrite source file")

    rows = Apply_Template(moduleSections[0].Get_Keyword_Value("rows"), valuesDict, mapDict=mapDict)
    columns = Apply_Template(moduleSections[0].Get_Keyword_Value("columns"), valuesDict, mapDict=mapDict)
    identifier = Apply_Template(moduleSections[0].Get_Keyword_Value("identifier"), valuesDict, mapDict=mapDict)
    initial_value = Apply_Template(moduleSections[0].Get_Keyword_Value("initial_value"), valuesDict, mapDict=mapDict)
    map_filename = Apply_Template(moduleSections[0].Get_Keyword_Value("map_filename"), valuesDict, mapDict=mapDict)
    modify = moduleSections[0].Get_Keyword_Value("modify")

    # Load ranges from RANGES section of module
    max_range_val = None
    range_values = {}
    for range_sect in moduleSections[0].Get_Section("->RANGES"):
        for range_spec in range_sect.Get_Matrix_Data():
            (range_name, range_str) = range_spec

            if range_str.find(",") > 0:
                curr_range = [float(val) for val in range_str.split(",")]
            else:
                curr_range = [float(val) for val in range_str.split()]

            if max_range_val == None:
                max_range_val = max(curr_range)
            else:
                max_range_val = max(max_range_val, max(curr_range))

            range_values[range_name] = curr_range

    if len(range_values) == 0:
        logger.error("No index range list supplied for operating on source: %s" % source)
        return

    # Load source for data to map against
    data_obj = OCO_Matrix(source)

    # Set columns to all if argument not supplied,
    # Otherwise try parsing as an index range list failing that try
    # using the specified columns as label names
    if columns == None:
        columns = range(data_obj.dims[1])
    else:
        try:
            columns = index_range_list(columns)
        except (ValueError, TypeError):
            columns = data_obj.find_labels(columns, match_case=False, indexes=True)
def random_column(input_file, output_file, columns, mean, std_dev):

    # Load existing file
    matrix_obj = OCO_Matrix(input_file)
   
    # Add ability to specify cols individually or using a * to go to the end
    cols = index_range_list(columns)
    
    mean = float(mean)
    std_dev = float(std_dev)

    target_rows = range(matrix_obj.dims[0])

    for rowIdx in target_rows:
        for colIdx in cols:
            matrix_obj.data[rowIdx][colIdx] = random.gauss(mean, std_dev)

    matrix_obj.write(output_file)
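
# Usage sketch (illustrative only, not part of the original source;
# hypothetical file names): overwrite column 3 of the input file with
# values drawn from a Gaussian with the given mean and standard deviation,
# both passed as strings and converted inside random_column().
def _usage_sketch_random_column():
    random_column('matrix_in.dat', 'matrix_out.dat',
                  columns='3', mean='0.0', std_dev='1.0e-3')
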
def create_simple_cov(input_file, output_file, scaling, columns):
    logger = logging.getLogger(os.path.basename(__file__))

    # Load source file
    src_obj = OCO_Matrix(input_file)

    scaling = [float(v) for v in scaling.split(',')]
    if len(scaling) < src_obj.dims[0]:
        last_val = scaling[-1]
        scaling.extend([last_val] * (src_obj.dims[0] - len(scaling)))

    try:
        columns = index_range_list(columns, max_value=src_obj.dims[1])
    except:
        if not type(columns) is ListType:
            col_name_list = [columns]
        else:
            col_name_list = columns

        columns = []
        for curr_name in col_name_list:
            if curr_name.lower() not in src_obj.labels_lower:
                raise IOError('Column named %s not found in file: %s' % (curr_name, input_file))
            columns.append( src_obj.labels_lower.index(curr_name.lower()) )
                                    
    logger.info('cols = %s' % columns)

    num_diags = len(columns) * src_obj.dims[0]
    logger.info('num_diags = %d' % num_diags)
    dst_data = numpy.zeros((num_diags, num_diags), dtype=float)

    diag_index = 0
    for col_index in columns:
        for row_index in range(src_obj.dims[0]):
            #print '%d, %d => %d, %d' % (row_index, col_index, diag_index, diag_index)
            dst_data[diag_index, diag_index] = (src_obj.data[row_index, col_index] * scaling[row_index])**2
            diag_index += 1

    logger.info('Writing: %s' % output_file)
    src_obj.file_id = 'Simple Covariance Created from "%s", scaling: "%s"' % (input_file, ', '.join([str(sc) for sc in scaling]))
    src_obj.labels = []
    src_obj.data = dst_data
    src_obj.units = []
    src_obj.write(output_file, auto_size_cols=False, verbose=True)
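
# Usage sketch (illustrative only, not part of the original source; file
# names, scaling values, and the column name are hypothetical): build a
# diagonal covariance whose entries are the squared, per-row-scaled values
# of the selected column. 'scaling' is comma separated; if it is shorter
# than the number of rows, the last value is repeated.
def _usage_sketch_create_simple_cov():
    create_simple_cov('apriori_in.dat', 'covariance_out.dat',
                      scaling='0.1,0.05', columns='CO2')
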
def scale_uncertainty(input_radiance_file, output_radiance_file, scale_factor, row_range_spec=None):
       
    # Load existing file
    matrix_obj = OCO_Matrix(input_radiance_file)

    radiance_col = matrix_obj.labels_lower.index("radiance")
    noise_col    = matrix_obj.labels_lower.index("noise")

    if row_range_spec == None or len(row_range_spec) == 0:
        row_range = range(matrix_obj.dims[0])
    else:
        row_range = index_range_list(row_range_spec)

    for row_idx in row_range:
        radiance_val = matrix_obj.data[row_idx, radiance_col]
   
        new_uncert = radiance_val * float(scale_factor)
        
        matrix_obj.data[row_idx, noise_col] = new_uncert

    matrix_obj.write(output_radiance_file)
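
# Usage sketch (illustrative only, not part of the original source;
# hypothetical file names): replace the noise column with
# radiance * scale_factor for every row when row_range_spec is omitted.
def _usage_sketch_scale_uncertainty():
    scale_uncertainty('spectrum_in.dat', 'spectrum_scaled.dat',
                      scale_factor='0.01')
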
def Process_File(source, destination, fileKeywords, moduleSections, valuesDict, mapDict):
    logger = logging.getLogger(os.path.basename(__file__))

    # Load existing file
    matrix_obj = OCO_Matrix(source)

    for modifySect in moduleSections:

        # Add ability to specify cols individually or using a * to go to the end
        columns = Apply_Template(modifySect.Get_Keyword_Value('columns'), valuesDict, mapDict=mapDict)
        rows    = Apply_Template(modifySect.Get_Keyword_Value('rows'), valuesDict, mapDict=mapDict)
        modify  = modifySect.Get_Keyword_Value('modify')
        delete  = evaluate_bool_str( modifySect.Get_Keyword_Value('delete') )
        add_column = evaluate_bool_str( modifySect.Get_Keyword_Value('add_column') )

        if columns != None:
            try:
                columns = index_range_list(columns, max_value=matrix_obj.dims[1])
            except:
                if not type(columns) is ListType:
                    col_name_list = [columns]
                else:
                    col_name_list = columns

                columns = []
                for curr_name in col_name_list:
                    if curr_name.lower() not in matrix_obj.labels_lower:
                        if add_column:
                            matrix_obj.add_column(curr_name)
                            columns.append(  matrix_obj.dims[1] - 1 )
                        else:
                            raise IOError('Column named %s not found in file: %s' % (curr_name, source))
                            
                    columns.append( matrix_obj.labels_lower.index(curr_name.lower()) )
        else:
            columns = range(matrix_obj.dims[1])

        if rows != None:
            rows = index_range_list(rows, max_value=matrix_obj.dims[0])
        else:
            rows = range(matrix_obj.dims[0])

        if delete and modify != None:
            raise ValueError('delete and modify keywords can not be specified together')

        if delete:
            if len(columns) > matrix_obj.dims[1]:
                raise IOError('More columns to be deleted %d than exist %d in input file %s' % (len(columns), matrix_obj.dims[1], source))
            
            new_data = numpy.zeros((matrix_obj.dims[0], matrix_obj.dims[1]-len(columns)), dtype=numpy.double)
            new_labels = []
            new_units  = []

            new_col_idx = 0
            for old_col_idx in range(matrix_obj.dims[1]):
                if old_col_idx not in columns:
                    new_labels.append(matrix_obj.labels[old_col_idx])
                    new_units.append(matrix_obj.units[old_col_idx])

                    new_data[:,new_col_idx] = matrix_obj.data[:,old_col_idx]

                    new_col_idx += 1

            matrix_obj.data = new_data
            matrix_obj.labels = new_labels
            matrix_obj.units  = new_units

        if modify != None and len(modify) > 0:
            
            modifyDict = copy_module.copy(valuesDict)

            Get_Constant_Values(modifySect.Get_Section('->CONSTANTS'), modifyDict)

            for row_idx in rows:
                for col_idx in columns:
                    modifyDict['original'] = str(matrix_obj.data[row_idx][col_idx])

                    modify_str = Apply_Template(modify, modifyDict, mapDict=mapDict)

                    try:
                        matrix_obj.data[row_idx][col_idx] = eval(modify_str)
                    except:
                        raise RuntimeError('Error evaluating modify string: "%s"' % modify_str)

    matrix_obj.write(destination, auto_size_cols=False)
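
# Illustrative sketch (not from the original source) of the 'modify'
# mechanism used above: the current cell value is exposed to the template
# as 'original', the template is expanded, and the result is eval()'d to
# produce the new cell value. Apply_Template is approximated here by a
# plain string replacement purely for illustration; the angle-bracket
# placeholder syntax is assumed from the templates used elsewhere.
def _sketch_apply_modify(original_value, modify_template):
    modify_str = modify_template.replace('<original>', str(original_value))
    return eval(modify_str)

# Example: _sketch_apply_modify(1.25, '<original> * 2.0') returns 2.5
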
def Process_File(source, destination, fileKeywords, moduleSections, valuesDict,
                 mapDict):
    logger = logging.getLogger(os.path.basename(__file__))

    for currSection in moduleSections:
        if str(source) == str(destination):
            raise IOError(
                'source and destination must be different. will not overwrite source file'
            )

    rows = Apply_Template(moduleSections[0].Get_Keyword_Value('rows'),
                          valuesDict,
                          mapDict=mapDict)
    columns = Apply_Template(moduleSections[0].Get_Keyword_Value('columns'),
                             valuesDict,
                             mapDict=mapDict)
    identifier = Apply_Template(
        moduleSections[0].Get_Keyword_Value('identifier'),
        valuesDict,
        mapDict=mapDict)
    initial_value = Apply_Template(
        moduleSections[0].Get_Keyword_Value('initial_value'),
        valuesDict,
        mapDict=mapDict)
    map_filename = Apply_Template(
        moduleSections[0].Get_Keyword_Value('map_filename'),
        valuesDict,
        mapDict=mapDict)
    modify = moduleSections[0].Get_Keyword_Value('modify')

    # Load ranges from RANGES section of module
    max_range_val = None
    range_values = {}
    for range_sect in moduleSections[0].Get_Section('->RANGES'):
        for range_spec in range_sect.Get_Matrix_Data():
            (range_name, range_str) = range_spec

            if range_str.find(',') > 0:
                curr_range = [float(val) for val in range_str.split(',')]
            else:
                curr_range = [float(val) for val in range_str.split()]

            if max_range_val == None:
                max_range_val = max(curr_range)
            else:
                max_range_val = max(max_range_val, max(curr_range))

            range_values[range_name] = curr_range

    if len(range_values) == 0:
        logger.error(
            'No index range list supplied for operating on source: %s' %
            source)
        return

    # Load source for data to map against
    data_obj = OCO_Matrix(source)

    # Set columns to all if argument not supplied,
    # Otherwise try parsing as an index range list failing that try
    # using the specified columns as label names
    if columns == None:
        columns = range(data_obj.dims[1])
    else:
        try:
            columns = index_range_list(columns)
        except (ValueError, TypeError):
            columns = data_obj.find_labels(columns,
                                           match_case=False,
                                           indexes=True)

    # Set rows to all if no argument supplied, otherwise use the specified
    # index range list
    if rows == None:
        rows = range(data_obj.dims[0])
    else:
        rows = index_range_list(rows)

    # Load the modifer string for specifying how to operate on rows and col values
    # By default sum all values encountered
    if modify == None:
        modify = '<current> + <data_result>'

    # Set initial value for before we look at source data file
    if initial_value == None:
        data_result = 0
    else:
        # Use eval so string is evaluated to the type expected
        data_result = eval(initial_value)

    # Load value to check against ranges
    modify_dict = copy.copy(valuesDict)
def Process_File(source, destination, fileKeywords, moduleSections, valuesDict, mapDict):
    logger = logging.getLogger(os.path.basename(__file__))
    
    logger.debug('')
    logger.debug('Reading source file: %s' % source)
    modFileObj = L2_Input.Input_File(source)
    
    for pick in moduleSections:
        section  = Apply_Template(pick.Get_Keyword_Value('section'), valuesDict, mapDict=mapDict)
        keyword  = Apply_Template(pick.Get_Keyword_Value('keyword'), valuesDict, mapDict=mapDict)

        # Resolve template later when can add to the values dictionary
        template = pick.Get_Keyword_Value('template', addWhiteSpace=True)

        which_line    = Apply_Template(pick.Get_Keyword_Value('which_line'), valuesDict, mapDict=mapDict)
        which_section = Apply_Template(pick.Get_Keyword_Value('which_section'), valuesDict, mapDict=mapDict)
        which_keyword = Apply_Template(pick.Get_Keyword_Value('which_keyword'), valuesDict, mapDict=mapDict)

        ignore_missing = Apply_Template(pick.Get_Keyword_Value('ignore_missing'), valuesDict, mapDict=mapDict)
        index_format   = Apply_Template(pick.Get_Keyword_Value('index_format'), valuesDict, mapDict=mapDict)

        delete   = Apply_Template(pick.Get_Keyword_Value('delete'), valuesDict, mapDict=mapDict)
        indent   = Apply_Template(pick.Get_Keyword_Value('indent'), valuesDict, mapDict=mapDict)
        unique   = evaluate_bool_str(Apply_Template(pick.Get_Keyword_Value('unique'), valuesDict, mapDict=mapDict), False)

        if indent == None:
            indent = 0

        if keyword != None and type(keyword) is not ListType:
            keyword = [keyword]

        if which_line != None:
            if type(which_line) is ListType or not which_line.isdigit():
                raise ValueError('which_line must be a scalar integer')
            which_line = int(which_line)

        if (template == None or len(template) == 0) and (delete == None or len(delete) == 0):
            raise ValueError('template must be defined for PICK')

        if section != None:
            if keyword != None:
                if delete != None and len(delete) > 0:
                    logger.debug('Deleting keyword %s->%s' % (section, keyword))
                else:
                    logger.debug('Modifying keyword %s->%s' % (section, keyword))
            elif delete != None and len(delete) > 0:
                logger.debug('Deleting section %s' % section)
        else:
            if keyword != None:
                if delete != None and len(delete) > 0:
                    logger.debug('Deleting keyword %s' % keyword)
                else:
                    logger.debug('Modifying keyword %s' % keyword)

            elif delete != None and len(delete) > 0:
                logger.debug('Deleting lines from root file section: %s' % delete)

        # Find the section to modify
        if section == None:
            modSect = [ modFileObj.rootNode ]
        else:
            if type(section) is ListType:
                modSect = []
                for curr_sect_name in section:
                    for found_sect in modFileObj.rootNode.Get_Section(curr_sect_name):
                        modSect.append(found_sect)
            else:
                modSect = modFileObj.rootNode.Get_Section(section)

        if len(modSect) == 0:
            modSect = [L2_Input.Section(leaf=section)]

            if which_line != None:
                modFileObj.children.insert(which_line, modSect[0])
            else:
                modFileObj.children.append(modSect[0])

        # If which is defined then filter sections to modify
        if keyword != None and which_section != None:

            try:
                section_indexes = index_range_list(which_section)
            except:
                section_indexes = []
                curr_index = 0
                for testSect in modSect:
                    sectName = testSect.Get_Keyword_Value('name')
                    if sectName == which_section:
                        section_indexes.append(curr_index)
                    curr_index += 1
                if len(section_indexes) == 0 and not ignore_missing:
                    raise IOError('Could not find section named: %s with name keyword: %s in file %s' % (section, which_section, source))
                
            sectChoices = []
            for w_idx in section_indexes:
                try:
                    sectChoices.append( modSect[w_idx] )
                except:
                    raise IOError("Section index: %d not found for keyword: %s, section: %s in file %s" % (w_idx, keyword, section, source))
            modSect = sectChoices

        # Finally modify all chosen sections and chosen keywords
        for pickSect in modSect:
            pickValDict = copy.copy(valuesDict)

            if keyword != None:
                for curr_keyname in keyword:
                    keyword_val = pickSect.Get_Keyword_Value(curr_keyname)
                    if type(keyword_val) is ListType:
                        pickValDict[curr_keyname] = ' '.join(keyword_val)
                    elif keyword_val != None:
                        pickValDict[curr_keyname] = keyword_val

            newValue = Apply_Template(template, pickValDict, mapDict=mapDict)

            if delete != None and len(delete) > 0:

                try:
                    delete = index_range_list(delete)
                except:
                    delete = [0]

                if keyword != None and len(keyword) > 0:
                    for curr_keyname in keyword:
                        pickSect.Delete_Keyword(curr_keyname, which=delete)
                elif section != None and len(section) > 0:
                    modFileObj.Delete_Section(pickSect) # which=delete
                else:
                    # Remove from a location a certain # of times
                    for dlist_idx in range(len(delete)-1, -1, -1):
                        del_idx = delete[dlist_idx]
                        x = pickSect.children.pop(del_idx)

            elif keyword != None:

                if which_keyword != None:
                    which_keyword = index_range_list(which_keyword)

                for curr_keyname in keyword:
                    if index_format != None and hasattr(newValue, '__iter__'):
                        for curr_index, curr_value in enumerate(newValue):
                            index_keyname = index_format.format(keyword=curr_keyname, index=curr_index+1)
                            pickSect.Set_Keyword_Value(index_keyname, curr_value, which=which_keyword, indent=indent)
                    else:
                        pickSect.Set_Keyword_Value(curr_keyname, newValue, which=which_keyword, indent=indent)
            else:
                if not type(newValue) is ListType:
                    newValue = [ newValue ]

                for currValue in newValue:
                    if unique and currValue in modFileObj.Get_Matrix_Data():
                        continue
                    
                    newNode = L2_Input.Node('value', currValue + '\n')

                    if which_line != None:
                        pickSect.children.insert(which_line, newNode)
                    else:
                        pickSect.children.append(newNode)

    # Write newly modified file
    logger.debug('Writing destination file: %s' % destination)
    modFileObj.Write(destination)
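
# Illustrative sketch (not from the original source) of the 'index_format'
# handling above: when the templated value expands to a list, each element
# is written under a numbered keyword name built with str.format(). The
# format string below is hypothetical.
def _sketch_indexed_keynames(keyword, values, index_format='{keyword}_{index}'):
    # Mirrors index_format.format(keyword=..., index=...) as used in the
    # PICK section handling; e.g. returns ['band_1', 'band_2'] for two values.
    return [index_format.format(keyword=keyword, index=idx + 1)
            for idx, _ in enumerate(values)]
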