def round_cols(self, df):
    lg.warning('-- ROUND COLS')
    all_cols = self.env.f_handler.get('columns', PROJ_SETTINGS)
    for c in df.columns:
        if all_cols[c]['precision'] is not False:
            df[c] = df[c].round(all_cols[c]['precision'])
    return df
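
A minimal standalone sketch of the rounding step above, assuming the settings structure the loop implies: `get('columns', PROJ_SETTINGS)` returns a dict mapping each column name to a dict whose `precision` key is either `False` or an integer. The function name and sample data below are illustrative only.

import pandas as pd

# hypothetical stand-in for the settings returned by f_handler.get('columns', ...)
all_cols = {'NITRAT': {'precision': 2}, 'STNNBR': {'precision': False}}

def round_cols_sketch(df, all_cols):
    for c in df.columns:
        if all_cols[c]['precision'] is not False:  # False disables rounding
            df[c] = df[c].round(all_cols[c]['precision'])
    return df

df = pd.DataFrame({'NITRAT': [1.23456, 2.34567], 'STNNBR': [1, 2]})
print(round_cols_sketch(df, all_cols))  # NITRAT rounded to 2 decimals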
Example #2
    def column_combined(self, msg, col1, col2):
        ''' @msg  - the beginning of the message that is shown in the actions history
            @col1 - the first column name to combine, more precise than the second
            @col2 - the second column name to combine
        '''
        df = self.env.cruise_data.df
        COL1 = True
        if col1 not in df or (col1 in df and df[col1].isnull().all()):
            COL1 = False

        COL2 = True
        if col2 not in df or (col2 in df and df[col2].isnull().all()):
            COL2 = False

        if COL1 and not COL2:
            ret = df[col1].to_numpy()
            ret[(df[f'{col1}{FLAG_END}'] > 2) & (df[f'{col1}{FLAG_END}'] != 6)] = np.nan
            msg += f' {col1} was taken because {col2} is empty or does not exist.'
            msg += ' Values with flags 3, 4 and 5 were set to NaN.'
        elif COL2 and not COL1:
            ret = df[col2].to_numpy()
            ret[(df[f'{col2}{FLAG_END}'] > 2) & (df[f'{col2}{FLAG_END}'] != 6)] = np.nan
            msg += f' {col2} was taken because {col1} is empty or does not exist.'
            msg += ' Values with flags 3, 4 and 5 were set to NaN.'
        elif not COL2 and not COL1:
            ret = pd.Series([np.nan] * len(df.index))
            msg += f' {col1} and {col2} do not exist'
        else:
            col1_arr = df[col1].to_numpy()
            # TODO: inform if there is some change here to the user
            col1_arr[(df[f'{col1}{FLAG_END}'] > 2) & (df[f'{col1}{FLAG_END}'] != 6)] = np.nan
            col2_arr = df[col2].to_numpy()
            col2_arr[(df[f'{col2}{FLAG_END}'] > 2) & (df[f'{col2}{FLAG_END}'] != 6)] = np.nan
            msg += f' Values from {col1} and {col2} columns with flags 3, 4 and 5 were set to NaN.'

            dev = np.nanmean(np.abs(col1_arr - col2_arr))
            col2_nonnans = np.sum(~np.isnan(col2_arr)) / np.size(col2_arr)

            if col2_nonnans > 0.8:
                msg += f' Using {col2} as {col2_nonnans * 100:.1f}% of the data has it.'
                ret = col2_arr
            if dev < 0.003:
                msg += f' Gaps filled with {col1} as mean deviation is {dev:.4f}'
                ret = np.where(~np.isnan(col2_arr), col2_arr, col1_arr)
            else:
                mask = ~np.isnan(col2_arr) & ~np.isnan(col1_arr)
                slope, intercept, r_value, p_value, std_err = stats.linregress(col1_arr[mask], col2_arr[mask])
                rsq = r_value * r_value
                if rsq > 0.99:
                    msg += f' Calibrating {col1} (R^2={rsq:.3f}) to fill gaps as mean deviation is {dev:.4f}'
                    calibrated_ctd = slope * col1_arr + intercept
                    ret = np.where(~np.isnan(col2_arr), col2_arr, calibrated_ctd)
                else:
                    msg += f' Not filling gaps with {col1} as mean deviation is {dev:.4f} and the calibration attempt got an R^2={rsq:.3f}'
                    ret = col2_arr

        self.env.cruise_data.add_moves_element('column_combined', msg)
        lg.warning(f'>> {msg}')
        return ret
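
The gap-filling strategy in the final branch can be exercised on its own. This is a toy illustration (not project code) with made-up values, using the same deviation and R² thresholds as the method:

import numpy as np
from scipy import stats

col1 = np.array([1.0, 2.0, 3.0, 4.0, 5.0])        # e.g. the denser, less precise column
col2 = np.array([2.1, 4.0, np.nan, 8.1, np.nan])  # e.g. the preferred column, with gaps

dev = np.nanmean(np.abs(col1 - col2))
if dev < 0.003:
    # columns agree closely: fill col2 gaps with raw col1 values
    ret = np.where(~np.isnan(col2), col2, col1)
else:
    # columns disagree: try a linear calibration of col1 onto col2
    mask = ~np.isnan(col1) & ~np.isnan(col2)
    slope, intercept, r_value, p_value, std_err = stats.linregress(col1[mask], col2[mask])
    if r_value ** 2 > 0.99:
        ret = np.where(~np.isnan(col2), col2, slope * col1 + intercept)
    else:
        ret = col2  # calibration not trustworthy, keep the gaps
print(ret)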
Example #3
 def _nearby_prof_select_remove_on_change(self):
     lg.info('-- NEARBY PROF SELECT REMOVE ON CHANGE')
     try:
         self.nearby_prof_select.remove_on_change(
             'value', self._on_change_nearby_prof_select)
     except Exception as e:
         lg.warning(f'Select callback could not be removed: {e}')
Example #4
 def _cruise_data_update_rollback(self):
     lg.warning('-- CRUISE DATA UPDATE ROLLBACK')
     self.env.cd_aux = None
     self.env.cd_update = None
     if os.path.isdir(UPD):
         shutil.rmtree(UPD)
     self.env.bk_bridge.show_default_cursor()
 def _rmv_empty_columns(self):
     lg.info('-- REMOVE EMPTY COLUMNS (all values with -999)')
     cols_to_rmv = []
     flags_to_rmv = []
     basic_params = self.env.f_handler.get_custom_cols_by_attr('basic')
     for col in self.df:
         if col not in basic_params:  # empty basic param columns are needed for some calculated params
             if self.df[col].str.contains(NA_REGEX).all():
                 cols_to_rmv.append(col)
                 if f'{col}_FLAG_W' in self.df:
                     flags_to_rmv.append(f'{col}_FLAG_W')
     if len(cols_to_rmv) > 0:
         lg.warning(f'>> THE FOLLOWING COLUMNS WERE REMOVED DUE TO -999: {",".join(cols_to_rmv)}')
         self.add_moves_element(
             'cols_removed',
             f'{",".join(cols_to_rmv)} param columns were removed'
         )
     if len(flags_to_rmv):
         lg.warning(f'>> THE FOLLOWING COLUMNS FLAGS WERE REMOVED DUE TO -999: {",".join(flags_to_rmv)}')
         self.add_moves_element(
             'flags_cols_removed',
             f'{",".join(flags_to_rmv)} flag columns were removed'
         )
     cols_to_rmv.extend(flags_to_rmv)
     self.df = self.df.drop(columns=cols_to_rmv)
    def update_flag_values(self, column, new_flag_value, row_indices):
        """ This method is executed mainly when a flag is pressed to update the values
                * column: it is the column to update, only one column
                * new_flag_value: it is the flag value
        """
        lg.info('-- UPDATE DATA --')

        lg.info('>> COLUMN: %s | VALUE: %s | ROWS: %s' % (column, new_flag_value, row_indices))
        # lg.info('\n\nData previous changed: \n\n%s' % self.df[[ column ]].iloc[row_indices])

        empty_column = False
        if 'empty' in self.cols[column]['attrs'] and (self.df[column] == 9).all():
            empty_column = True

        hash_index_list = self.df.index[row_indices]
        self.df.loc[hash_index_list, column] = new_flag_value

        if new_flag_value != 9 and empty_column:
            lg.warning(f'>> REMOVING EMPTY ATTR FROM COLUMN {column}')
            self.cols[column]['attrs'].remove('empty')
            self.cols[column]['export'] = True
            self.env.f_handler.set('columns', self.cols, path.join(TMP,'settings.json'))
        elif new_flag_value == 9:
            if 'empty' not in self.cols[column]['attrs'] and (self.df[column] == 9).all():
                lg.warning(f'>> ADDING EMPTY ATTR TO COLUMN {column}')
                self.cols[column]['attrs'].append('empty')
                self.cols[column]['export'] = False
                self.env.f_handler.set('columns', self.cols, path.join(TMP,'settings.json'))

        # lg.info('\n\nData after changed: \n\n%s' % self.df[[ column ]].iloc[row_indices])

        # Update the action log
        date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        action = 'QC Update'

        for row in row_indices:
            stnnbr = self.df['STNNBR'].iloc[row]
            castno = self.df['CASTNO'].iloc[row]
            btlnbr = self.df['BTLNBR'].iloc[row]
            latitude = self.df['LATITUDE'].iloc[row]
            longitude = self.df['LONGITUDE'].iloc[row]
            description = '{COLUMN} flag was updated to {FLAG}, in [station {STNNBR}, cast number {CASTNO}, bottle {BTLNBR}, latitude {LATITUDE}, longitude {LONGITUDE}]'.format(
                COLUMN=column, FLAG=new_flag_value, STNNBR=stnnbr, CASTNO=castno,
                BTLNBR=btlnbr, LATITUDE=latitude, LONGITUDE=longitude,
            )
            lg.info('>> MOVES LOG: {}, {}, {}'.format(date, action, description))

            fields = [date, action, stnnbr, castno, btlnbr, latitude, longitude, column, new_flag_value, description]
            if not self.moves.empty:
                last_pos = self.moves.tail(1).index[0]
                self.moves.loc[last_pos + 1] = fields  # fastest way to add a row at the end
            else:
                self.moves.loc[0] = fields

        self.save_tmp_data()
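
The `.loc[last_pos + 1]` idiom used for the action log appends a row in place instead of rebuilding the frame. A self-contained sketch with invented log fields:

import pandas as pd

moves = pd.DataFrame(columns=['date', 'action', 'description'])
fields = ['2024-01-01 00:00:00', 'QC Update', 'NITRAT flag was updated to 3']
if not moves.empty:
    last_pos = moves.tail(1).index[0]
    moves.loc[last_pos + 1] = fields  # appends without copying the whole frame
else:
    moves.loc[0] = fields
print(moves)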
 def import_octave_equations(self):
     lg.info('>> OCTAVE PATH: {}'.format(self.env.oct_eq.oct_exe_path))
     oc_output = sbp.getstatusoutput('{} --eval "OCTAVE_VERSION"'.format(self.env.oct_eq.oct_exe_path))
     if oc_output[0] == 0:
         lg.info('>> OCTAVE DETECTED FROM PYTHON, VERSION: {}'.format(
             oc_output[1].split('=')[1].strip())
         )
         self.equations = self.env.oct_eq  # non-equation methods are filtered out later, in _get_sandbox_funcs()
     else:
         lg.warning('>> OCTAVE UNDETECTED')
         self.equations = None
 def _sanitize_flags(self):
     lg.info('-- SANITIZING FLAGS --')
     column_list = self.df.columns.tolist()
     for column in column_list:
         flag = column + FLAG_END
         if flag in column_list:
             try:
                 self.df.loc[pd.isnull(self.df[column]), flag] = 9
             except Exception:
                 lg.warning('Unable to sanitize flag %s for column %s',
                            flag, column)
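
A toy check (not project code) of the `.loc` form used above: chained indexing like `df[flag][mask] = 9` can assign into a temporary copy, while `df.loc[mask, flag] = 9` always writes through to the frame itself.

import numpy as np
import pandas as pd

df = pd.DataFrame({'NITRAT': [1.0, np.nan, 3.0],
                   'NITRAT_FLAG_W': [2, 2, 2]})
df.loc[pd.isnull(df['NITRAT']), 'NITRAT_FLAG_W'] = 9  # flag missing values as 9
print(df)  # the flag of the NaN row is now 9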
    def _manage_empty_cols(self):
        lg.info('-- SET EMPTY COLS')
        cols = self.get_cols_by_attrs(['param', 'non_qc', 'computed'])
        for c in cols:
            if self.df[c].isnull().all():
                attrs = ','.join(self.cols[c]['attrs'])
                del self.cols[c]
                del self.df[c]
                lg.warning(f'>> COLUMN: {c} REMOVED BECAUSE IT WAS EMPTY | {attrs}')

                fc = f'{c}{FLAG_END}'
                if fc in self.df:
                    del self.cols[fc]
                    del self.df[fc]
                    lg.warning(f'>> FLAG COLUMN: {fc} REMOVED BECAUSE THE RELATED PARAM WAS EMPTY')

        for c in self.get_cols_by_attrs('flag'):
            if (self.df[c] == 9).all():
                self.cols[c]['attrs'].append('empty')
                lg.warning(f'>> FLAG: {c} IS MARKED AS EMPTY')

                # NOTE: if the flag is 9 in all the rows, the param is NaN in all the rows,
                #       so the param should have been removed earlier. This should be fixed in a more appropriate way

        # required columns can be nan in order to create the hash_id ??
        for c in self.get_cols_by_attrs(['required']):
            if self.df[c].isnull().all():
                self.cols[c]['attrs'].append('empty')
                lg.warning(f'>> COLUMN: {c} MARKED AS EMPTY')
 def set(self, attr, value, f_path):
     ''' Store attribute data in a JSON file
         * attr: attribute to set
         * value: new value to set
         * f_path: file path where the file is located
     '''
     with open(f_path, 'r') as f:
         json_content = json.load(f)
     if attr in json_content:
         json_content[attr] = value
         with open(f_path, 'w') as fp:
             json.dump(json_content, fp, indent=4, sort_keys=True)
     else:
         lg.warning(
             f'>> The attribute {attr} is not in the JSON file: {f_path}')
    def get(self, attr, f_path):
        """ Gets data from json files
            * attr: attribute to get
            * f_path: file path where the file is located

            NOTE: This method should be avoided because access to hard disk is very costly
        """
        # lg.info('-- GET ATTR: {} | FROM FILE: {}'.format(attr, f_path))
        with open(f_path, 'r') as f:
            json_content = json.load(f)
        if attr in json_content:
            return json_content[attr]
        else:
            lg.warning(
                f'>> The attribute {attr} is not in the JSON file: {f_path}')
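
A round-trip usage sketch for the `set()`/`get()` pair, with the two methods inlined as plain functions so the example runs on its own; the file location and attribute values are illustrative.

import json
import tempfile
from os import path

def set_attr(attr, value, f_path):
    with open(f_path) as f:
        content = json.load(f)
    if attr in content:  # only existing attributes are updated, as in set()
        content[attr] = value
        with open(f_path, 'w') as f:
            json.dump(content, f, indent=4, sort_keys=True)

def get_attr(attr, f_path):
    with open(f_path) as f:
        return json.load(f).get(attr)

f_path = path.join(tempfile.mkdtemp(), 'settings.json')
with open(f_path, 'w') as f:
    json.dump({'columns': {}}, f)
set_attr('columns', {'NITRAT': {'precision': 2}}, f_path)
print(get_attr('columns', f_path))  # {'NITRAT': {'precision': 2}}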
 def _prep_directory(self):
     if not path.exists(EXPORT):
         mkdir(EXPORT)
     else:
         lg.warning(
             'Directory {} already exists. Cleaning...'.format(EXPORT))
         for the_file in listdir(EXPORT):
             file_path = path.join(EXPORT, the_file)
             try:
                 if path.isfile(file_path):
                     unlink(file_path)
                 #elif path.isdir(file_path): shutil.rmtree(file_path)
             except Exception as e:
                 lg.warning(
                     'File {} could not be removed: {}'.format(file_path, e))
Example #13
    def _set_cols_from_scratch(self):
        """ The main attributes of the object are filled:

                "cols": {
                    "ALKALI": {
                        "orig_name": "alkali",
                        "types": ["param"],
                        "required": False,
                        "unit": "UMOL/KG",
                    },
                    "ALKALI_FLAG_W": {
                        "types": ["param_flag", "qc_param_flag"],
                        "required": False,
                        "unit": NaN,  # >> False
                    }
                }

            TODO: to create less noise in the JSON structure:
                  unit and orig_name should not exist if they do not have any value
        """
        lg.info('-- SET ATTRIBUTES FROM SCRATCH --')
        if self.original_type == 'whp':
            units_list = self.df.iloc[0].values.tolist()
            # TODO: how to detect if there are units or not?
            #       how to fill the units fields then?
        else:
            units_list = []
        column_list = self.df.columns.tolist()
        lg.warning('>> SELF ORIG COLS: {}'.format(self.orig_cols))
        for pos, column in enumerate(column_list):
            self._add_column(column=column)
            if units_list != []:
                if str(units_list[pos]) == 'nan':
                    self.cols[column]['unit'] = False
                else:
                    self.cols[column]['unit'] = units_list[pos]

        # lg.info(json.dumps(self.cols, sort_keys=True, indent=4))

        if self.original_type == 'whp':
            # reset the index and drop the units row (first) and the trailer row (last)
            self.df = self.df[1:-1].reset_index(drop=True)
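
A toy illustration of the WHP layout this method assumes: row 0 carries the units and the last row is trailer text, so `df[1:-1]` keeps only the data rows. The values below are invented.

import pandas as pd

df = pd.DataFrame({'CTDPRS': ['DBAR', '2.0', '4.0', 'END_DATA'],
                   'SALNTY': ['PSS-78', '34.1', '34.2', '']})
units_list = df.iloc[0].values.tolist()   # ['DBAR', 'PSS-78']
df = df[1:-1].reset_index(drop=True)      # drop units row and trailer, reindex
print(units_list, len(df))                # ['DBAR', 'PSS-78'] 2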
    def add_computed_parameter(self, arg):
        ''' Adds the computed parameter to cols and to the project.
            Before calling this method, the dependencies must have been checked
            and all the needed columns must be present in the current dataframe.
        '''
        val = arg.get('value', False)
        prevent_save = arg.get('prevent_save', False)
        if val is False:
            return {
                'success': False,
                'msg': 'value is mandatory',
            }

        for cp in self.proj_settings_cps:  # NOTE: list of dicts, I need to iterate over all the items to get the cp to add
            if cp['param_name'] == val:
                prec = int(cp['precision'])
                new_cp = {
                    'eq': cp['equation'],
                    'computed_param_name': cp['param_name'],
                    'precision': prec,
                }
                result = self.compute_equation(new_cp)
                if result.get('success', False):
                    self.cruise_data.cols[val] = {
                        'external_name': [],
                        'data_type': 'integer' if prec == 0 else 'float',
                        'attrs': ['computed'],
                        'unit': cp.get('units', False),
                        'precision': prec,
                        'export': False
                    }
                    if prevent_save is False:
                        self.cruise_data.save_col_attribs()
                    lg.info('>> CP <<{}>> ADDED'.format(val))
                else:
                    msg = ''
                    if 'error' in result:
                        msg = result.get('error', '')  # TODO: remove "\n" from here?
                    elif 'msg' in result:
                        msg = result.get('msg', '')
                    lg.warning('>> CP <<{}>> COULD NOT BE COMPUTED: {}'.format(
                        cp['param_name'], msg
                    ))
                return result
    def prep_bigger_plots(self):
        lg.warning('-- PREP BIGGER PLOTS')
        self._store_default_values()

        # TODO: update this code when the following issue is solved
        # https://github.com/bokeh/bokeh/issues/9245
        for bp in self.env.bk_plots:
            bp.plot.background_fill_color = 'white'
            bp.plot.border_fill_color = 'white'

            big_width = 4
            if bp.plot.title:
                bp.plot.title.text_font_size = '30pt'

            bp.plot.xaxis.axis_line_width = big_width
            bp.plot.yaxis.axis_line_width = big_width
            bp.plot.xaxis.axis_label_text_font_size = '25pt'
            bp.plot.yaxis.axis_label_text_font_size = '25pt'

            bp.plot.xaxis.major_tick_line_width = big_width
            bp.plot.yaxis.major_tick_line_width = big_width
            bp.plot.xaxis.minor_tick_line_width = big_width
            bp.plot.yaxis.minor_tick_line_width = big_width

            bp.plot.xaxis.major_label_text_font_size = '20pt'
            bp.plot.yaxis.major_label_text_font_size = '20pt'

            bp.plot.width = 1200
            bp.plot.height = 1200

            for c in bp.circles:
                c.glyph.size = 20  # original 4

            bp.ml_prof_line.glyph.line_width = 10  # original 2

            for p in bp.prof_line_circles:
                p.glyph.size = 20  # 4

            bp.asterisk.glyph.size = 100  # 20
            bp.aux_asterisk.glyph.size = 85  # 17
            bp.aux_asterisk_circle.glyph.size = 15  # 3
        return {'success': True}
 def run_js_code(self, signal, params={}):
     """ General method to run JavaScript inside the iframe
         The signal is sent to the bokeh_renderer.js file
         TODO: they are developing a better way to run JavaScript directly
     """
     if params != {}:
         params = json.dumps(params, sort_keys=True)
     if len(params) < 5000:
         lg.info('>> RUN JS CODE, PARAMS: {}'.format(params))
     else:
         lg.warning('>> Very long string in params, skipping print')
     lg.info('>> SIGNAL: {}'.format(signal))
     js_code = """
         window.top.postMessage({{
             'signal': '{}',
             'params': {}
         }}, '*');                        // to main_renderer.js
     """.format(signal, params)
     # lg.info('>> JS CODE: {}'.format(js_code))
     self.bridge_plot_callback.code = js_code
     self.bridge_trigger.glyph.size += 1  # triggers the callback
Example #17
    def nitrate_combined(self):
        ''' NO2_NO3 is the sum of NITRAT and NITRIT; sometimes both are reported separately.
            Other times we need to get the NITRATE from the difference NO2_NO3 - NITRIT.
            NO2_NO3 exists because some devices take both measurements together.
            The values of NITRIT are always tiny, so if that column does not exist we can do NITRATE = NO2_NO3.
        '''
        msg = ''
        ret = None
        df = self.env.cruise_data.df
        NITRAT = True
        if 'NITRAT' not in df or ('NITRAT' in df and df['NITRAT'].isnull().all()):
            NITRAT = False

        NITRIT = True
        if 'NITRIT' not in df or ('NITRIT' in df and df['NITRIT'].isnull().all()):
            NITRIT = False

        NO2_NO3 = True
        if 'NO2_NO3' not in df or ('NO2_NO3' in df and df['NO2_NO3'].isnull().all()):
            NO2_NO3 = False

        if NO2_NO3 and not NITRAT:
            if NITRIT:
                ret = df['NO2_NO3'] - df['NITRIT'].fillna(0)  # treat missing NITRIT values as zero
                msg = '_NITRATE created from the calculation NO2_NO3 - NITRIT'
            else:
                ret = df['NO2_NO3']
                msg = '_NITRATE created from the NO2_NO3, NITRITE and NITRATE columns are missing'
        elif NITRAT:
            ret = df['NITRAT']
            msg = '_NITRATE was copied from the NITRAT column'
        else:  # not NITRAT and not NO2_NO3
            ret = pd.Series([np.nan] * len(df.index))
            msg = '_NITRATE is an empty column because NITRAT and NO2_NO3 columns do not exist or they are all NaN'

        self.env.cruise_data.add_moves_element('column_combined', msg)
        lg.warning(f'>> {msg}')
        return ret
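
A quick check of the combined-nitrate arithmetic with made-up values; where NITRIT is missing it is treated as zero, since its values are always tiny:

import numpy as np
import pandas as pd

df = pd.DataFrame({'NO2_NO3': [10.0, 12.0, 11.5],
                   'NITRIT': [0.1, np.nan, 0.2]})
nitrate = df['NO2_NO3'] - df['NITRIT'].fillna(0)
print(nitrate.tolist())  # [9.9, 12.0, 11.3]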
Example #18
    def _add_column(self, column='', units=False, export=True):
        ''' Adds a column to the self.cols dictionary
            This dictionary is useful to select some columns by type
                * required      - required columns
                * param         - parameter columns
                * flag          - flag columns
                * non_qc        - parameters without flag columns associated
                * computed      - computed parameters
                * created       - if the column was created by the app

            TODO: add all arguments or add a param as a dictionary with all the attributes
                  this method also should work if something should be modified or removed?
        '''
        if column not in self.get_cols_by_attrs('all'):
            self.cols[column] = {
                'external_name': [],
                'attrs': [],
                'unit': units,
                'precision': False,
                'export': export
            }
            non_qc_params = self.env.f_handler.get_custom_cols_by_attr('non_qc')
            if column.endswith(FLAG_END):
                self.cols[column]['attrs'] += ['flag']
            else:
                basic_params = self.env.f_handler.get_custom_cols_by_attr('basic')
                if column in basic_params:
                    self.cols[column]['attrs'] += ['basic']
                required_cols = self.env.f_handler.get_custom_cols_by_attr('required')
                if column in required_cols:
                    self.cols[column]['attrs'] += ['required']
                elif column in non_qc_params:
                    self.cols[column]['attrs'] += ['non_qc']
                else:
                    self.cols[column]['attrs'] += ['param']
                self.create_missing_flag_col(column)
        else:
            lg.warning('>> THE COLUMN ALREADY EXISTS AND IT CANNOT BE CREATED AGAIN')
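
The attribute classification above can be condensed into a standalone sketch; `FLAG_END` and the basic/required/non_qc lists stand in for the project settings and are invented here:

FLAG_END = '_FLAG_W'
basic, required, non_qc = ['NITRAT'], ['STNNBR'], ['DEPTH']

def classify(column):
    if column.endswith(FLAG_END):
        return ['flag']
    attrs = ['basic'] if column in basic else []
    if column in required:
        attrs += ['required']
    elif column in non_qc:
        attrs += ['non_qc']
    else:
        attrs += ['param']
    return attrs

for c in ('NITRAT', 'NITRAT_FLAG_W', 'STNNBR', 'DEPTH'):
    print(c, classify(c))
# NITRAT: ['basic', 'param'] | NITRAT_FLAG_W: ['flag'] | STNNBR: ['required'] | DEPTH: ['non_qc']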
    def _init_ranges(self):
        lg.warning('-- INIT RANGES')
        # TODO: do not create axis for some parameters (if not needed)
        # lg.warning('>> TAB LIST: {}'.format(self.env.f_handler.tab_list))
        # for tab in self.env.f_handler.tab_list:

        for col in self.env.cur_plotted_cols:
            # gmax = self.env.cruise_data.df[col].max()
            # gmin = self.env.cruise_data.df[col].min()
            # d = gmax - gmin

            range_padding = 0.25
            x_range = DataRange1d(range_padding=range_padding, renderers=[])
            y_range = DataRange1d(range_padding=range_padding, renderers=[])

            # x_range = Range1d(
            #     start=gmin,                       # bounds automatically detected with DataRange1d
            #     end=gmax,
            #     # max_interval=gmax + d * p,        # zoom out limit >> useful if hovers are used
            #     # min_interval                      # zoom in limit
            # )
            # y_range = Range1d(
            #     start=gmin,                       # bounds automatically detected with DataRange1d
            #     end=gmax,
            #     # max_interval=gmax + d * p,        # zoom out limit >> useful if hovers are used
            #     # min_interval                      # zoom in limit
            # )

            # lg.info('>> COLUMN: {} | X START: {} | X END: {} | Y START: {} | Y END: {}'.format(
            #     col, gmin - d * p, gmax + d *p, gmin - d * p, gmax + d * p
            # ))

            if col not in self.env.ranges:
                self.env.ranges[col] = {
                    'x_range': x_range,
                    'y_range': y_range,
                }
Example #20
 def _set_hash_ids(self):
     """ Create a column id for the whp-exchange files
         this new column is a hash of these fields combined:
             * STNNBR     station number
             * CASTNO     cast number (it may exist or not)
             * BTLNBR     bottle number (it may exist or not)
             * LATITUDE   latitude
             * LONGITUDE  longitude
     """
     self.df['HASH_ID'] = pd.util.hash_pandas_object(  # faster, but it may create duplicates
         self.df[['STNNBR', 'CASTNO', 'BTLNBR', 'LATITUDE', 'LONGITUDE']],
         index=False
     )
     if self.df['HASH_ID'].duplicated().any():
         # TODO: if the second file (the file to update) uses this other method
         #       to create the hashes, there will be many changes in the hashes.
         #       I could control this with some flag (though it is unlikely to happen)
         lg.warning('>> HASH ID is being created with hashlib sha256')
         self.df['HASH_ID'] = self.df[[
             'STNNBR', 'CASTNO', 'BTLNBR', 'LATITUDE', 'LONGITUDE'   # if BTLNBR is NaN the hash is made correctly as well
         ]].astype(str).apply(                                       # astype is 4x slower than apply
             lambda x: hashlib.sha256(str.encode(str(tuple(x)))).hexdigest(), axis=1
         )
     self.df = self.df.set_index(['HASH_ID'])
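
Both hashing strategies in miniature, on an invented frame: the fast vectorised hash is tried first, and the sha256 fallback only runs if it produced duplicate ids.

import hashlib
import pandas as pd

df = pd.DataFrame({'STNNBR': [1, 1], 'CASTNO': [1, 2],
                   'BTLNBR': [3, 3], 'LATITUDE': [10.0, 10.0],
                   'LONGITUDE': [-20.0, -20.0]})
hash_id = pd.util.hash_pandas_object(df, index=False)  # fast, may collide
if hash_id.duplicated().any():
    hash_id = df.astype(str).apply(
        lambda x: hashlib.sha256(str.encode(str(tuple(x)))).hexdigest(), axis=1)
print(hash_id.tolist())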
    def _get_sandbox_funcs(self, loc_dict={}):
        local_dict = loc_dict.copy()  # a shallow copy is enough; all values are reset below

        for elem in local_dict:  # resets all the values
            local_dict[elem] = None

        # math functions
        local_dict.update({
            'acos': acos,
            'asin': asin,
            'atan': atan,
            'atan2': atan2,
            'ceil': ceil,
            'cos': cos,
            'cosh': cosh,
            'degrees': degrees,
            'exp': exp,
            'fabs': fabs,
            'floor': floor,
            'fmod': fmod,
            'frexp': frexp,
            'hypot': hypot,
            'ldexp': ldexp,
            'log': log,
            'log10': log10,
            'modf': modf,
            'pow': pow,
            'radians': radians,
            'sin': sin,
            'sinh': sinh,
            'sqrt': sqrt,
            'tan': tan,
            'tanh': tanh,
        })

        # seawater functions
        local_dict.update({
            'cndr': sw.library.cndr,
            'salds': sw.library.salrp,
            'salrt': sw.library.salrt,
            'seck': sw.library.seck,
            'sals': sw.library.sals,
            'smow': sw.library.smow,
            'T68conv': sw.library.T68conv,
            'T90conv': sw.library.T90conv,
            'adtg': sw.eos80.adtg,
            'alpha': sw.eos80.alpha,
            'aonb': sw.eos80.aonb,
            'beta': sw.eos80.beta,
            'dpth': sw.eos80.dpth,
            'g': sw.eos80.g,
            'salt': sw.eos80.salt,
            'fp': sw.eos80.fp,
            'svel': sw.eos80.svel,
            'pres': sw.eos80.pres,
            'dens0': sw.eos80.dens0,
            'dens': sw.eos80.dens,
            'pden': sw.eos80.pden,
            'cp': sw.eos80.cp,
            'ptmp': sw.eos80.ptmp,
            'temp': sw.eos80.temp,
            'bfrq': sw.geostrophic.bfrq,
            'svan': sw.geostrophic.svan,
            'gpan': sw.geostrophic.gpan,
            'gvel': sw.geostrophic.gvel,
            'dist': sw.extras.dist,
            'f': sw.extras.f,
            'satAr': sw.extras.satAr,
            'satN2': sw.extras.satN2,
            'satO2': sw.extras.satO2,
            'swvel': sw.extras.swvel,
        })
        lg.warning('>> EQUATIONS LIBRARY OBJECT: {}'.format(self.equations))
        if self.equations is not None:
            for elem_str in dir(self.equations):
                if elem_str[0] != '_' and elem_str not in [
                        'guess_oct_exe_path', 'set_oct_exe_path'
                ]:
                    elem_obj = getattr(self.equations, elem_str)
                    if isinstance(elem_obj, (
                            types.FunctionType, types.BuiltinFunctionType,
                            types.MethodType, types.BuiltinMethodType)):
                        lg.warning('>> ACCEPTED METHOD: {}'.format(elem_str))
                        local_dict.update({elem_str: elem_obj})
        return local_dict
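
A sketch of how a whitelist dict like this one is commonly consumed: evaluating a user-supplied equation with eval(), empty builtins, and only the whitelisted names visible. The equation string and values are illustrative, not the project's actual evaluation code.

from math import log10, sqrt

local_dict = {'sqrt': sqrt, 'log10': log10, 'CTDPRS': 250.0}
# no builtins, so only the names in local_dict can be referenced
result = eval('sqrt(CTDPRS) + log10(CTDPRS)', {'__builtins__': {}}, local_dict)
print(result)  # ~18.2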
    def restore_plot_sizes(self):
        lg.info('-- RESTORE PLOT SIZES')

        for bp in self.env.bk_plots:
            bp.plot.background_fill_color = self.dflt_plot_attrs[
                'background_fill_color']
            bp.plot.border_fill_color = self.dflt_plot_attrs[
                'border_fill_color']

            if bp.plot.title:
                bp.plot.title.text_font_size = self.dflt_plot_attrs[
                    'title_font_size']

            bp.plot.xaxis[0].axis_line_width = self.dflt_plot_attrs[
                'xaxis_line_width']
            bp.plot.yaxis[0].axis_line_width = self.dflt_plot_attrs[
                'yaxis_line_width']
            bp.plot.xaxis[0].axis_label_text_font_size = self.dflt_plot_attrs[
                'xaxis_label_text_font_size']
            bp.plot.yaxis[0].axis_label_text_font_size = self.dflt_plot_attrs[
                'yaxis_label_text_font_size']

            bp.plot.xaxis[0].major_tick_line_width = self.dflt_plot_attrs[
                'xmajor_tick_line_width']
            bp.plot.yaxis[0].major_tick_line_width = self.dflt_plot_attrs[
                'ymajor_tick_line_width']
            bp.plot.xaxis[0].minor_tick_line_width = self.dflt_plot_attrs[
                'xminor_tick_line_width']
            bp.plot.yaxis[0].minor_tick_line_width = self.dflt_plot_attrs[
                'yminor_tick_line_width']

            bp.plot.xaxis[0].major_label_text_font_size = self.dflt_plot_attrs[
                'xmajor_label_text_font_size']
            bp.plot.yaxis[0].major_label_text_font_size = self.dflt_plot_attrs[
                'ymajor_label_text_font_size']

            bp.plot.width = self.dflt_plot_attrs['plot_width']
            bp.plot.height = self.dflt_plot_attrs['plot_height']

            # TODO: create constants for these attributes
            for c in bp.circles:
                c.glyph.size = 4  # original 4

            bp.ml_prof_line.glyph.line_width = 2  # original 2

            for p in bp.prof_line_circles:
                p.glyph.size = 4  # 4

            bp.asterisk.glyph.size = 20  # 20
            bp.aux_asterisk.glyph.size = 17  # 17
            bp.aux_asterisk_circle.glyph.size = 3  # 3

        self.tab_img = {}
        self.table_list = []
        self.drawing_list = []
        self.margin = None
        self.cell_padding = None
        self.col_width = None
        self.col_height = None
        self.dflt_plot_attrs = {}

        try:
            if path.exists(EXPORT):
                rmtree(EXPORT)
        except Exception as e:
            lg.warning(
                'Temp "export" directory could not be cleaned: {}'.format(e))
        return {'success': True}
 def remove_tmp_folder(self):
     lg.warning('-- REMOVE TMP FOLDER')
     shutil.rmtree(TMP)
import re
from math import *
import seawater as sw
import types
import subprocess as sbp

# NOTE: check Octave availability again here because if we check shared_data
#       the value may not be updated yet due to asynchronous timing
oc_output = sbp.getstatusoutput('%s --eval "OCTAVE_VERSION"' %
                                (OCTAVE_EXECUTABLE))
if oc_output[0] == 0:
    lg.info('>> OCTAVE DETECTED FROM PYTHON, VERSION: {}'.format(
        oc_output[1].split('=')[1].strip()))
    import ocean_data_qc.equations as equations
else:
    lg.warning('>> OCTAVE UNDETECTED')
    equations = None


class ComputedParameter(Environment):
    env = Environment

    def __init__(self, cruise_data=False):
        lg.info('-- INIT COMPUTED PARAMETER')
        self.sandbox_vars = None
        self.sandbox_funcs = None
        if cruise_data is not False:
            self.cruise_data = cruise_data
        else:
            self.cruise_data = self.env.cruise_data
Example #25
 def _cruise_data_rollback(self):
     lg.warning('-- CRUISE DATA ROLLBACK')
     self.env.cruise_data = None
     self.env.f_handler.remove_tmp_folder()
     self.env.bk_bridge.show_default_cursor()
Example #26
 def reset_env_cruise_data(self):
     lg.warning('-- RESET ENV + CRUISE DATA')
     self.reset_bokeh()
     self.reset_env(reset=['cruise_data'])