Example #1
    def __init__(self):
        """ the original __init__ is overriden here
            (the super method is not called) """
        lg.info('-- CRUISE DATA UPDATE INIT')
        self.env.cd_update = self

        self.cols_to_compare = [
            'param', 'param_flag', 'qc_param_flag', 'non_qc_param', 'required'
        ]
        self.modified = False

        # COLUMNS
        self.add_cols = []
        self.rmv_cols = []
        self.rmv_plot_cols = []
        self.rmv_plot_cps = []

        # TODO: do not break couples (param, associated flags)?
        # self.fg_to_rmv = []         # if a param is removed but its associated flag column is kept
        # self.fg_to_reset = []       # if a flag column is removed but its param is kept

        # self.fg_to_create = []    # TODO: a param is added without associated flag >> this flag is created automatically in cd_aux
        # self.fg_to_ignore = []    # TODO: create flag column with 'ignore' type in self.cols

        # ROWS
        self.add_rows = 0
        self.rmv_rows = 0

        # VALUES
        self.diff_val_qty = 0
        self.diff_val_pairs = []
        self._compute_comparison()
Example #2
    def _init_sample_buttons(self):
        lg.info('-- SET SAMPLE BUTTONS')

        def next_sample():
            lg.info('>> NEXT SAMPLE')
            if self.s < self.ns:
                self.env.doc.hold('collect')
                self.s += 1
                self.sample_div.text = ' {} / {}'.format(self.s, self.ns)
                self.env.cur_nearby_prof = None  # to reset the extra stt profile to plot
                self.env.dt_next_sample = True
                self._update_dt_sample()
                self.env.doc.unhold()

        def previous_sample():
            lg.info('>> PREVIOUS SAMPLE')
            if self.s > 1:
                self.env.doc.hold('collect')
                self.s -= 1
                self.sample_div.text = ' {} / {}'.format(self.s, self.ns)
                self.env.cur_nearby_prof = None
                self.env.dt_previous_sample = True
                self._update_dt_sample()
                self.env.doc.unhold()

        self.next_bt = Button(label=">", button_type="success", width=30)
        self.sample_div = Div(
            text='0 / 0',
            width=100,
            height=30,
            css_classes=['sample_div'],
        )
        self.previous_bt = Button(label="<", button_type="success", width=30)
        self.next_bt.on_click(next_sample)
        self.previous_bt.on_click(previous_sample)
Example #3
    def __init__(self):
        lg.info('-- INIT OCTAVE EXECUTABLE')
        self.env.oct_eq = self

        self.oc = None
        self.oct_exe_path = False
        self.set_oct_exe_path()
Example #4
    def _sync_with_full_df(self):
        lg.info('-- SYNC WITH FULL DF')
        start = time.time()
        astk_df = self._upd_astk_src()
        self.env.astk_src.data = self.env.astk_src.from_df(astk_df)
        p1 = time.time()
        ml_df, df_fs, stt_order = self._get_ml_df()

        if ml_df.index.size >= 1:
            ml_df = ml_df.reindex(stt_order)
            stt_colors = self.env.profile_colors[-len(stt_order):]
            ml_df['colors'] = stt_colors  # [light blue, normal blue, darker blue, red]

        p2 = time.time()
        prof_df = self._upd_pc_srcs(df_fs, stt_order)
        p3 = time.time()
        self.env.ml_src.data = self.env.ml_src.from_df(ml_df)
        self.env.pc_src.data = self.env.ml_src.from_df(prof_df)

        # NOTE: this translates the selection indices into positional indices.
        #       Bokeh gives each ColumnDataSource a new index of consecutive integers [0, 1, 2, 3, ...];
        #       it doesn't matter if the DF used to create the CDS has a different index.

        prof_sel = []
        for i in self.env.selection:  # TODO: only selected points within profiles
            if i in prof_df.index:
                prof_sel.append(prof_df.index.get_loc(i))
        self.env.pc_src.selected.indices = prof_sel

        p4 = time.time()
        lg.info('>> TIME: ML: {} | PC: {} | SYNC: {} >> FULL ALGORITHM TIME: {}'.format(
            round(p2 - p1, 2), round(p3 - p2, 2), round(p4 - p3, 2), round(p4 - start, 2)))
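
The NOTE above is the subtle part: Bokeh re-indexes every ColumnDataSource with consecutive integers, so DataFrame index labels have to be converted to positions before being assigned to selected.indices. A minimal standalone sketch of that translation (the labels are hypothetical, not the app's real hash ids):

import pandas as pd

# DF whose index uses arbitrary labels instead of 0..n-1
prof_df = pd.DataFrame({'NITRAT': [1.0, 2.0, 3.0]}, index=['a7f3', '09bc', 'e512'])

selection = ['09bc', 'e512', 'ffff']   # selected labels; 'ffff' is not in the profile DF

# same translation as in _sync_with_full_df: label -> positional index
prof_sel = [prof_df.index.get_loc(i) for i in selection if i in prof_df.index]
print(prof_sel)                        # [1, 2] -> what pc_src.selected.indices expects
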
Example #5
    def _get_empty_prof_df(self):
        ''' DF initialization with empty values '''

        lg.info('-- GET EMPTY PROF DF')
        compound_cols = []
        for tab in self.env.f_handler.tab_list:
            plot_indices = self.env.tabs_flags_plots[tab]['plots']
            aux_cols = []
            for pi in plot_indices:
                aux_cols.append(self.env.bk_plots[pi].x)
                aux_cols.append(self.env.bk_plots[pi].y)
            aux_cols = list(set(aux_cols))  # removes duplicates
            # lg.info('>> AUX COLS: {}'.format(aux_cols))
            for col in aux_cols:  # TODO: not all of them
                for n in range(NPROF):
                    compound_cols.append('{}_{}_{}'.format(tab, col, n))
        compound_cols.sort()

        d = {}
        if compound_cols != []:
            d = dict.fromkeys(compound_cols,
                              np.array([np.nan] * self.env.cds_df.index.size))
        prof_df = pd.DataFrame(d)  # init empty columns
        prof_df['INDEX'] = self.env.cds_df.index
        prof_df = prof_df.set_index(['INDEX'])

        return prof_df
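
A small sketch of the '{tab}_{col}_{n}' naming scheme and the all-NaN initialization used above; NPROF and the tab layout are assumptions here, only to make the snippet runnable:

import numpy as np
import pandas as pd

NPROF = 2                                    # assumed value of the project constant
tab_cols = {'NITRAT': ['CTDPRS', 'NITRAT']}  # hypothetical tab -> columns plotted in it

compound_cols = sorted(
    '{}_{}_{}'.format(tab, col, n)
    for tab, cols in tab_cols.items()
    for col in cols
    for n in range(NPROF)
)
# ['NITRAT_CTDPRS_0', 'NITRAT_CTDPRS_1', 'NITRAT_NITRAT_0', 'NITRAT_NITRAT_1']

index = pd.RangeIndex(3, name='INDEX')       # stands in for self.env.cds_df.index
prof_df = pd.DataFrame(dict.fromkeys(compound_cols, np.full(index.size, np.nan)), index=index)
print(prof_df.shape)                         # (3, 4), every cell NaN
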
Example #6
    def recompute_cps(self):
        ''' Compute all the calculated parameters again, mainly after a cruise data update.

            NOTE: what should happen if some column cannot be computed?
                  - Check whether it is plotted in order to remove the plots?
                  - Show an error message (currently only a warning appears)
        '''
        lg.info('-- RECOMPUTE CP PARAMETERS')
        cp_params = self.env.cruise_data.get_cols_by_attrs('computed')
        for c in cp_params:
            del self.cols[c]
        cps_to_rmv = []
        for c in self.cp_param.proj_settings_cps:
            if c['param_name'] not in self.cols:  # exclude the computed parameters
                res = self.cp_param.add_computed_parameter({
                    'value': c['param_name'],
                    'prevent_save': True
                })
                if res.get('success', False) is False:
                    if c['param_name'] in self.env.cur_plotted_cols:
                        cps_to_rmv.append(c['param_name'])
        if cps_to_rmv != []:
            self.env.f_handler.remove_cols_from_qc_plot_tabs(cps_to_rmv)
        self._manage_empty_cols()
        self.env.cruise_data.save_col_attribs()
Example #7
    def _init_tabs(self):
        lg.info('-- INIT TABS')
        panel_list = []
        # lg.info('>> self.env.TABS_FLAGS_PLOTS: {}'.format(self.env.tabs_flags_plots))
        SORT_TABS = False
        if SORT_TABS:
            ordered_tab_list = sorted(self.env.tabs_flags_plots)
        else:
            ordered_tab_list = list(self.env.tabs_flags_plots.keys())
        self.env.cur_tab = ordered_tab_list[0]           # self.env.cur_tab initialization
        self.env.cur_flag = self.env.cur_tab + FLAG_END  # self.env.cur_flag initialization

        ly_settings = self.env.f_handler.get_layout_settings()
        for tab in ordered_tab_list:
            indices = self.env.tabs_flags_plots[tab]['plots']
            children = [
                x.plot for x in self.env.bk_plots if x.n_plot in indices
            ]
            # lg.info('>> CHILDREN: {}'.format(children))
            gp = gridplot(
                children=children,
                ncols=ly_settings['ncols'],
                plot_width=ly_settings['plot_width'],    # if 350 then the points are blurred
                plot_height=ly_settings['plot_height'],
                toolbar_location='left',  # TODO: separate the toolbars to set some tools active by default;
                                          #       that way the hover icon can be shown as well
            )
            name = 'panel_{}'.format(tab.lower())
            panel_list.append(Panel(
                name=name,
                child=gp,
                title=tab,
            ))  # TODO: closable=True

        lg.info('>> TABS WIDGET: {}'.format(self.env.tabs_widget))
        if self.env.tabs_widget is None:
            self.env.tabs_widget = Tabs(
                name='tabs_widget',
                tabs=panel_list,
                width=1250,
            )
        else:
            self.env.tabs_widget.tabs.clear()
            self.env.tabs_widget.tabs = panel_list

        def update_active_tab(attr, old, new):
            lg.info('-- UPDATE ACTIVE TAB | OLD: {} | NEW: {}'.format(
                old, new))
            self.env.cur_tab = self.env.tabs_widget.tabs[new].title
            lg.info('>> CUR TAB: {}'.format(self.env.cur_tab))
            flag = self.env.tabs_flags_plots[self.env.cur_tab]['flag']
            if self.env.flagger_select.value != flag:
                self.env.tab_change = True
                self.env.flagger_select.value = flag  # if they coincide the update of the select is not triggered

        self.env.tabs_widget.on_change('active', update_active_tab)
Example #8
    def _update_visible_flags(self, to_visible_flags=[]):
        ''' Makes the flags passed as argument visible, and the rest invisible
                @to_visible_flags: indices of all the visible (or to-make-visible) flags
        '''
        lg.info('-- UPDATE VISIBLE FLAGS')

        to_visible = []
        to_invisible = []
        for flag_index, flag_str in self.env.all_flags.items():
            if flag_index in self.env.visible_flags and flag_index not in to_visible_flags:
                to_invisible.append('GR_FLAG_{}'.format(flag_index))
            if flag_index not in self.env.visible_flags and flag_index in to_visible_flags:
                to_visible.append('GR_FLAG_{}'.format(flag_index))

        # lg.info('>> TO VISIBLE FLAGS: {}'.format(to_visible_flags))
        # lg.info('>> TO VISIBLE: {}'.format(to_visible))
        # lg.info('>> TO INVISIBLE: {}'.format(to_invisible))

        self.env.doc.hold('collect')
        if to_visible != []:
            self.env.doc.set_select(selector=dict(tags=to_visible),
                                    updates=dict(visible=True))
        if to_invisible != []:
            self.env.doc.set_select(selector=dict(tags=to_invisible),
                                    updates=dict(visible=False))

        all_flags_bt = self.env.doc.select_one(dict(name='all_flags_bt'))
        if to_visible_flags == []:
            all_flags_bt.css_classes = ['eye_slash_bt']
        else:
            all_flags_bt.css_classes = ['eye_bt']

        self.env.visible_flags = to_visible_flags.copy()
        self.env.bk_sources._upd_prof_srcs(force_selection=True)
        self.env.doc.unhold()
Example #9
        def update_select_flag_value(attr, old, new):
            lg.info('-- SELECT VALUE | OLD: {} | NEW: {}'.format(old, new))
            self.env.cur_flag = new
            if self.env.tab_change:
                self.env.tab_change = False
            else:
                lg.info('-- SELECT CHANGE')
                self.env.bk_bridge.call_js({
                    'object': 'tools',
                    'function': 'show_wait_cursor',
                })
                self.env.tabs_flags_plots[self.env.cur_tab]['flag'] = new

                # TODO: replot of all the colors of the tab
                #       some of the glyphs could be invisible
                #       only an indices update is needed

                cur_plot_list = self.env.tabs_flags_plots[self.env.cur_tab]['plots']
                self.env.doc.hold('collect')
                self.env.bk_plots_handler.replot_color_circles(only_cur_tab=True)
                # TODO: keep the selection as it is >> keep_selection = True
                #       This is done here because some point could be invisible for another tab
                self.env.bk_sources._upd_prof_srcs()
                self.env.doc.unhold()
                self.env.bk_bridge.call_js({
                    'object': 'tools',
                    'function': 'show_default_cursor',
                })
Example #10
    def _validate_original_data(self):
        ''' Checks that all the rows have the same number of elements

            In this case there is no need to check the data because
            it was already checked when the file was opened the first time
        '''
        lg.info('-- CHECK DATA FORMAT')
Example #11
 def load_file(self):
     lg.info('-- LOAD FILE AQC >> LOAD FROM FILES')
     self._set_cols_from_json_file()
     self._replace_nan_values()         # '-999' >> NaN
     self._convert_data_to_number()
     self._set_hash_ids()
     self._set_cps()
Example #12
    def _update_moves(self):
        """ The log of actions is updated with the new operations """
        lg.info('-- Updating moves')
        date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        if len(self.add_cols) > 0:
            action = '[ADD] Columns'
            description = 'Added columns: {}'.format(self.add_cols)
            self.env.cruise_data.add_moves_element(action, description)

        if len(self.rmv_cols) > 0:
            action = '[DEL] Columns'
            description = 'Deleted columns: {}'.format(self.rmv_cols)
            self.env.cruise_data.add_moves_element(action, description)

        if self.add_rows > 0:
            action = '[ADD] Rows'
            description = 'Added rows: {}'.format(self.add_rows)
            self.env.cruise_data.add_moves_element(action, description)

        if self.rmv_rows > 0:
            action = '[DEL] Rows'
            description = 'Deleted rows: {}'.format(self.rmv_rows)
            self.env.cruise_data.add_moves_element(action, description)

        if self.diff_val_qty > 0:
            action = '[UPD] Values'
            description = 'Updated values: {}'.format(self.diff_val_qty)
            self.env.cruise_data.add_moves_element(action, description)
Example #13
 def _update_values(self, diff_val_qty=0, diff_values={}):
     """ update the values in the self.env.cruise_data object with the new ones
         the flag associated to the columns has to be reset """
     lg.info('-- UPDATING VALUES')
     if diff_val_qty is True:  # update all the values
         for hash_id, column in self.diff_val_pairs:
             self.env.cruise_data.df.loc[
                 hash_id, column] = self.env.cd_aux.df.loc[hash_id, column]
     else:
         if diff_values != {} and diff_values is not False:
             for param in diff_values:
                 for stt in diff_values[param]:
                     for elem in diff_values[param][stt]:
                         lg.info('>> STT ELEM: {}'.format(elem))
                         if elem['param_checked'] is True:
                             self.env.cruise_data.df.loc[
                                 elem['hash_id'],
                                 param] = self.env.cd_aux.df.loc[
                                     elem['hash_id'], param]
                         if elem['flag_checked'] is True:
                             flag = param + '_FLAG_W'
                             self.env.cruise_data.df.loc[
                                 elem['hash_id'],
                                 flag] = self.env.cd_aux.df.loc[
                                     elem['hash_id'], flag]
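
The nested loops above imply a specific shape for diff_values; the sketch below spells out that assumed structure with hypothetical values (parameter -> station -> list of per-row decisions):

# Assumed structure, inferred from the loops in _update_values (hypothetical values):
diff_values = {
    'NITRAT': {                          # parameter name
        '14': [                          # station (stt)
            {
                'hash_id': 'a7f3...',    # row identifier shared by cruise_data.df and cd_aux.df
                'param_checked': True,   # accept the new NITRAT value for this row
                'flag_checked': False,   # keep the current NITRAT_FLAG_W value
            },
        ],
    },
}
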
Example #14
    def update_data_from_csv(self, params={}):
        """ Update and save columns, rows and values from the new data object self.env.cd_aux created
            from an updated WHP csv file. The changes accepted by the user are updated to the old_data object

            The parameters sent (params dictionary) are boolean values, for example if everything should be saved:
            params = {
                'add_cols': true,
                'rmv_cols': true,
                'add_rows': true,
                'rmv_rows': true,
                'diff_val_qty': true,
                'diff_values': {}     # dict with the accepted values
            }
        """
        lg.info('-- UPDATE DATA FROM CSV --')

        if params != {}:
            self._update_rows(add_rows_checked=params['add_rows'],
                              rmv_rows_checked=params['rmv_rows'])
            self._update_columns(add_cols_checked=params['add_cols'],
                                 rmv_cols_checked=params['rmv_cols'])
            diff_values = params.get('diff_values', {})
            self._update_values(diff_val_qty=params['diff_val_qty'],
                                diff_values=diff_values)

        self.env.cruise_data._replace_nan_values()  # -999 >> NaN

        self._update_moves()
        self.env.cruise_data.save_tmp_data()
        self._reset_update_env()
Example #15
    def _manage_date_time(self):
        # TODO: check what happens with these columns in the cd_update and self.env.cols
        if 'DATE' not in self.df:
            lg.info('-- CREATE DATE COLUMN')
            if 'YEAR' in self.df and 'MONTH' in self.df and 'DAY' in self.df:
                try:
                    self.df = self.df.assign(
                        DATE=pd.to_datetime(self.df[['YEAR', 'MONTH', 'DAY']]).dt.strftime('%Y%m%d')
                    )
                except Exception as e:
                    raise ValidationError(
                        'DATE column, which is a required field, does not exist. Also, it could not be created'
                        ' from the YEAR, MONTH and DAY columns, possibly because some of the rows do not have any value.',
                        rollback=self.rollback
                    )
                self.add_moves_element(
                    'required_column_added',
                    'DATE column was automatically generated from the columns YEAR, MONTH and DAY'
                )
            else:
                raise ValidationError(
                    'DATE column, which is a required field, does not exist. Also, it could not be built'
                    ' with other columns (usually year, month and day).',
                    rollback=self.rollback
                )

        if 'TIME' in self.df:  # fill with zeros on the left: 132 >> 0132
            self.df['TIME'] = self.df[self.df['TIME'].notnull()]['TIME'].astype(float).apply(lambda x: f'{x:04.0f}')
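
The TIME normalisation relies on the '04.0f' format spec, which rounds to an integer and left-pads with zeros to four digits; a quick illustration:

for t in [132.0, 5.0, 2359.0]:
    print(f'{t:04.0f}')   # -> 0132, 0005, 2359
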
Example #16
        def all_flags_vb_bt_callback():
            lg.info('-- ALL FLAGS VISIBLE CALLBACK')
            self.env.bk_bridge.call_js({
                'object': 'tools',
                'function': 'show_wait_cursor',
            })
            all_flags_bt = self.env.doc.select_one(dict(name='all_flags_bt'))
            eye_slash_bt = 'eye_slash_bt' in all_flags_bt.css_classes
            if eye_slash_bt:
                self.env.doc.set_select(selector=dict(tags=['vb_bt']),
                                        updates=dict(css_classes=['eye_bt']))
            else:
                self.env.doc.set_select(
                    selector=dict(tags=['vb_bt']),
                    updates=dict(css_classes=['eye_slash_bt']))

            new_visible_flags = []
            if 'eye_bt' in all_flags_bt.css_classes:
                all_flags_bt.css_classes = ['eye_slash_bt']
            else:
                new_visible_flags = self.all_flags_list
                all_flags_bt.css_classes = ['eye_bt']
            lg.info('>> NEW VISIBLE FLAGS: {}'.format(new_visible_flags))
            self._update_visible_flags(new_visible_flags)
            self.env.bk_bridge.call_js({
                'object': 'tools',
                'function': 'show_default_cursor',
            })
Example #17
 def _rmv_empty_columns(self):
     lg.info('-- REMOVE EMPTY COLUMNS (all values with -999)')
     cols_to_rmv = []
     flags_to_rmv = []
     basic_params = self.env.f_handler.get_custom_cols_by_attr('basic')
     for col in self.df:
         if col not in basic_params:  # empty basic param columns are needed for some calculated params
             if self.df[col].str.contains(NA_REGEX).all():
                 cols_to_rmv.append(col)
                 if f'{col}_FLAG_W' in self.df:
                     flags_to_rmv.append(f'{col}_FLAG_W')
     if len(cols_to_rmv) > 0:
         lg.warning(f'>> THE FOLLOWING COLUMNS WERE REMOVED DUE TO -999: {",".join(cols_to_rmv)}')
         self.add_moves_element(
             'cols_removed',
             f'{",".join(cols_to_rmv)} param columns were removed'
         )
     if len(flags_to_rmv):
          lg.warning(f'>> THE FOLLOWING FLAG COLUMNS WERE REMOVED DUE TO -999: {",".join(flags_to_rmv)}')
         self.add_moves_element(
             'flags_cols_removed',
             f'{",".join(flags_to_rmv)} flag columns were removed'
         )
     cols_to_rmv.extend(flags_to_rmv)
     self.df = self.df.drop(columns=cols_to_rmv)
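
A minimal sketch of the emptiness test used above. NA_REGEX is a project constant, so the pattern here is only an assumption that matches -999 style fill values:

import pandas as pd

NA_REGEX = r'^-999(?:\.0+)?$'   # assumption: matches the -999 fill values

df = pd.DataFrame({
    'NITRAT': ['-999', '-999.0', '-999'],   # only fill values -> candidate for removal
    'PHSPHT': ['1.2', '-999', '0.8'],       # has real data    -> kept
})

cols_to_rmv = [c for c in df if df[c].str.contains(NA_REGEX).all()]
print(cols_to_rmv)              # ['NITRAT']
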
Example #18
 def export_csv(self):
     """ Create an export_data.csv file to export it with node
         It will export the latest saved data
     """
     lg.info('-- EXPORT CSV')
     if path.isfile(path.join(TMP, 'export_data.csv')):
         os.remove(path.join(TMP, 'export_data.csv'))
     aux_df = self.df.copy()
      aux_df = aux_df.replace(np.nan, -999.0)  # float64 fields value will be -999.0
     cols = self.get_cols_by_type([
         'required', 'param', 'non_qc_param', 'qc_param_flag', 'param_flag'
     ])
     aux_df = aux_df.filter(cols)
     orig_col_names = []
      for c in cols:
          if 'orig_name' in self.cols[c]:
              orig_col_names.append(self.cols[c]['orig_name'])  # computed params do not have orig_name
          else:
              orig_col_names.append(c)
     aux_df.to_csv(
         path_or_buf=os.path.join(TMP, 'export_data.csv'),
         header=orig_col_names,
         index=False,
     )
     return True
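
The header= argument of to_csv is what maps the internal column names back to their original names on export; a small sketch with hypothetical column names:

import numpy as np
import pandas as pd

df = pd.DataFrame({'NITRAT': [1.2, np.nan], 'NITRAT_FLAG_W': [2, 9]})
df = df.replace(np.nan, -999.0)

# header= takes a list of aliases, one per exported column, in order
print(df.to_csv(header=['NITRATE', 'NITRATE_FLAG_W'], index=False))
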
Example #19
    def _manage_empty_cols(self):
        lg.info('-- SET EMPTY COLS')
        cols = self.get_cols_by_attrs(['param', 'non_qc', 'computed'])
        for c in cols:
            if self.df[c].isnull().all():
                attrs = ','.join(self.cols[c]['attrs'])
                del self.cols[c]
                del self.df[c]
                lg.warning(f'>> COLUMN: {c} REMOVED BECAUSE IT WAS EMPTY | {attrs}')

                fc = f'{c}{FLAG_END}'
                if fc in self.df:
                    del self.cols[fc]
                    del self.df[fc]
                    lg.warning(f'>> FLAG COLUMN: {fc} REMOVED BECAUSE THE RELATED PARAM WAS EMPTY')

        for c in self.get_cols_by_attrs('flag'):
            if self.df[self.df[c] == 9][c].index.size == self.df.index.size:
                self.cols[c]['attrs'].append('empty')
                lg.warning(f'>> FLAG: {c} IS MARKED AS EMPTY')

                # NOTE: if the flag has 9 in all the rows, it means the param has NaN in all the rows,
                #       so the param should have been removed before. This should be fixed in a more appropriate way

        # required columns can be NaN in order to create the hash_id ??
        for c in self.get_cols_by_attrs(['required']):
            if self.df[c].isnull().all():
                self.cols[c]['attrs'].append('empty')
                lg.warning(f'>> COLUMN: {c} MARKED AS EMPTY')
Example #20
 def save_moves(self):
     lg.info('-- SAVE MOVES')
     if not self.moves.empty:
         self.moves.to_csv(
             os.path.join(TMP, 'moves.csv'),
             index_label='index',
         )
Example #21
    def _init_all_flag_values(self):
        ''' Set all the possible flag values. Generates a dictionary like this:
            self.env.all_flags = {
                2: 'FLAG 2',
                3: 'FLAG 3',
                ...
            }
        '''
        cols = self.env.cruise_data.get_cols_by_attrs(['flag'])
        flag_vals = self.env.cds_df[cols].values.ravel('K')  # ravel('K') flattens the multidimensional array
        flag_vals = flag_vals[~np.isnan(flag_vals)]  # remove nan
        flag_vals = np.unique(flag_vals)  # select the unique values
        flag_vals = flag_vals.astype(np.int64)  # convert to integer
        flag_vals = flag_vals.tolist()  # convert to python list

        # forcing the basic values
        # TODO: Create a flag form in order to set the flag values by hand
        if 2 not in flag_vals:
            flag_vals.append(2)
        if 3 not in flag_vals:
            flag_vals.append(3)
        if 4 not in flag_vals:
            flag_vals.append(4)
        flag_vals = sorted(flag_vals)

        for f in flag_vals:
            self.env.all_flags[f] = 'Flag {}'.format(f)
        lg.info('>> INIT ALL FLAG VALUES: {}'.format(self.env.all_flags))
        if len(flag_vals) > len(CIRCLE_COLORS):
            for f in flag_vals:
                if f > 9:
                    CIRCLE_COLORS.update({f: CIRCLE_COLORS[9]})
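
A standalone sketch of the ravel/unique pipeline above, using a couple of hypothetical flag columns:

import numpy as np
import pandas as pd

cds_df = pd.DataFrame({
    'NITRAT_FLAG_W': [2.0, np.nan, 4.0],
    'PHSPHT_FLAG_W': [3.0, 2.0, np.nan],
})

flag_vals = cds_df.values.ravel('K')           # flatten the 2-D array
flag_vals = flag_vals[~np.isnan(flag_vals)]    # remove NaN
flag_vals = np.unique(flag_vals).astype(np.int64).tolist()
flag_vals = sorted(set(flag_vals) | {2, 3, 4}) # force the basic values

all_flags = {f: 'Flag {}'.format(f) for f in flag_vals}
print(all_flags)                               # {2: 'Flag 2', 3: 'Flag 3', 4: 'Flag 4'}
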
Example #22
 def export_csv(self):
     """ Create an export_data.csv file to export it with node
         It will export the latest saved data
     """
     lg.info('-- EXPORT CSV')
     if path.isfile(path.join(TMP, 'export_data.csv')):
         os.remove(path.join(TMP, 'export_data.csv'))
     aux_df = self.df.copy(deep=True)
      aux_df = aux_df.replace(np.nan, -999)  # float64 fields value will be -999.0
     cols = self.get_cols_to_export()
     aux_df = aux_df.filter(cols)
     aux_df = self.round_cols(aux_df)
     external_names = []
      for c in cols:
          if len(self.cols[c]['external_name']) > 0:
              external_names.append(self.cols[c]['external_name'][0])  # computed params do not have external_name
          else:
              external_names.append(c)
     aux_df.to_csv(
         path_or_buf=os.path.join(TMP, 'export_data.csv'),
         header=external_names,
         index=False,
     )
     return True
Example #23
    def _upd_pc_srcs(self, df_fs=None, stt_order=[]):
        ''' Update profile circle sources. The self.env.pc_src is updated
            in order to mark the selected samples profiles over all the plots.

            @df_fs: DF with data only for the current stations to show
            @stt_order: selected stations, red color at the end of the list
        '''
        lg.info('-- UPDATE PROFILE CIRCLE SOURCES')
        start = time.time()
        prof_df = self._get_empty_prof_df()
        tabs = self.env.f_handler.tab_list
        if df_fs is not None:
            stt_order_reversed = list(reversed(stt_order))
            d_temp = {}
            df_cur = df_fs.filter(self.env.cur_plotted_cols + [STNNBR])
            for tab in tabs:
                cur_cols_in_tab = self.env.f_handler.get_cols_in_tab(tab)
                if self.env.plot_prof_invsbl_points is False:
                    flag = self.env.tabs_flags_plots[tab]['flag']
                    df_cur = df_fs[df_fs[flag].isin(self.env.visible_flags)]

                i = NPROF - 1
                for stt in stt_order_reversed:
                    df_stt = df_cur[df_cur[STNNBR] == stt]
                    for col in cur_cols_in_tab:  # TODO: only for cols that appear in the current processed tab
                        df_aux = df_stt[col]
                        d_temp['{}_{}_{}'.format(tab, col, i)] = df_aux
                    i -= 1
            prof_df = prof_df.assign(**d_temp)
            prof_df.dropna(how='all', inplace=True)  # just in case there are some NaN row leftovers
        return prof_df
Example #24
 def _set_cols_from_json_file(self):
     """ The columns are set directly from the columns.json file """
     lg.info('-- SET ATTRIBUTES FROM JSON FILE --')
     if path.isfile(path.join(TMP, 'columns.json')):
         with open(path.join(TMP, 'columns.json'), 'r') as f:
             attr = json.load(f)
         self.cols = attr
Example #25
 def delete_computed_parameter(self, args):
     ''' Delete the computed parameter whose name is passed in the argument:
         args = {
             'value': 'example_column',
         }
     '''
     lg.info('-- DELETE COMPUTED PARAMETER')
     value = args.get('value', False)
     current_columns = self.cruise_data.get_cols_by_attrs(['all'])
     if value in current_columns:
         try:
             if value in self.cruise_data.df.columns:
                 del self.cruise_data.df[value]
             del self.cruise_data.cols[value]
             return {
                 'success': True,
             }
         except Exception:
             return {
                 'success': False,
             }
     else:
         return {
             'success': False,
         }
Example #26
 def compare_data(self):
     lg.info('-- COMPARE DATA')
     self._init_cruise_data(update=True)  # self.env.cd_aux is set here
      CruiseDataUpdate()  # self.env.cd_update uses cd_aux to make comparisons
     compared_data = self.env.cd_update.get_compared_data()
     return compared_data
Example #27
 def rollback(self, changes):
     lg.info('-- DATATABLE ROLLBACK')
     if self.env.selection != []:
         for t in changes:
             patch = {
                 'flag': [
                     (t[1], t[2]),
                 ]
             }  # rollback to the old value
             self.env.dt_manual_update = False
             self.data_table.source.patch(patch)
             self.env.dt_manual_update = True
     else:
         if changes != []:
             for t in changes:
                 patch = {
                     'flag': [
                         (t[1], ''),
                     ]
                 }  # rollback to the initial value
                 self.env.dt_manual_update = False
                 self.data_table.source.patch(patch)
                 self.env.dt_manual_update = True
         else:
             self.env.dt_manual_update = False
             self.table_df = pd.DataFrame(
                 dict(
                     parameter=self.params,  # remove flags from here
                     value=[''] * len(self.params),
                     flag=[''] * len(self.params),
                 ))
             self.data_table.source.data = self.table_df.to_dict('list')
             self.env.dt_manual_update = True
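
The rollback above leans on ColumnDataSource.patch, which takes {column: [(row_index, new_value), ...]}; a minimal Bokeh sketch of that call with hypothetical data:

from bokeh.models import ColumnDataSource

source = ColumnDataSource(data=dict(
    parameter=['NITRAT', 'PHSPHT'],
    value=['1.2', '0.8'],
    flag=['2', '3'],
))

# same patch shape the rollback builds: one (row_index, new_value) pair per change
source.patch({'flag': [(1, '4')]})
print(source.data['flag'])   # ['2', '4']
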
Example #28
    def _set_units(self):
        ''' Checks whether the file has a unit row or not.
            The df should contain only strings and NaN values:
                * if there is at least one NaN in the row               > unit row
                * if all the cells are strings                          > unit row
                * if there is at least one number (stored as string)    > no unit row
        '''
        lg.info('-- CHECK IF THE UNIT ROW EXISTS')
        exp = re.compile("^-?\d+?(\.\d+)?$")
        def is_number(s):
            ''' Returns True if the string is a number:
                    float or integer
            '''
            if exp.match(s) is None:
                return s.isdigit()  # to check if all are digits
            return True

        units_raw = self.df.iloc[0].values.tolist()
        no_unit_row = False
        for u in units_raw:  # the loop only continues while the value is a string and not a number
            if not isinstance(u, str) and np.isnan(u):
                break
            if is_number(u):
                no_unit_row = True
                break
        if no_unit_row is False:
            self.df = self.df[1:-1].reset_index(drop=True)  # rewrite index column and remove the units row
            for u in units_raw:
                if isinstance(u, str):
                    self.unit_list.append(u.strip())
                else:
                    self.unit_list.append('nan')
Example #29
    def set_oct_exe_path(self, path=None):
        ''' This method is run when
                * The shared.json file already has a path set >> path in argument
                * The octave path is set manually >> path in argument as well
        '''
        lg.info('-- SET OCT EXE PATH')
        # lg.warning('>> MANUAL PATH: {}'.format(path))

        if path is not None:
            if sys.platform == 'win32':
                if os.path.basename(path) != 'octave-cli.exe':
                    self.oct_exe_path = os.path.join(path, 'octave-cli.exe')
                else:
                    self.oct_exe_path = path
            else:
                if os.path.basename(path) != 'octave-cli':
                    self.oct_exe_path = os.path.join(path, 'octave-cli')
                else:
                    self.oct_exe_path = path

        if self.oct_exe_path is not False:
            os.environ['OCTAVE_EXECUTABLE'] = self.oct_exe_path
            try:
                oct2py_lib = importlib.import_module('oct2py')
                self.oc = oct2py_lib.octave
                self.oc.addpath(os.path.join(OCEAN_DATA_QC_PY, 'octave'))
                self.oc.addpath(os.path.join(OCEAN_DATA_QC_PY, 'octave', 'CANYON-B'))
                return {'octave_path': self.oct_exe_path }
            except Exception as e:
                lg.error('>> oct2py LIBRARY COULD NOT BE IMPORTED, OCTAVE PATH WAS NOT SET CORRECTLY')
        return {'octave_path': False }
Example #30
 def remove_cols_from_qc_plot_tabs(self, cols=[]):
     ''' Checks whether the given columns are in the plot layout
         and removes them if needed
     '''
     lg.info('-- REMOVE COLS FROM QC PLOT TABS. REMOVING: {}'.format(cols))
     if cols != [] and path.isfile(path.join(TMP, 'settings.json')):
         config = {}
         tabs = {}
         with open(path.join(TMP, 'settings.json'), 'r') as f:
             config = json.load(f, object_pairs_hook=OrderedDict)
             if 'qc_plot_tabs' in config:
                 tabs = config.get('qc_plot_tabs', False)
                 tabs_to_rmv = []
                 for tab in tabs:
                     graphs_to_rmv = []
                     for graph in tabs[tab]:
                          if graph.get('x', '') in cols or graph.get('y', '') in cols:
                             graphs_to_rmv.append(graph)
                     for g in graphs_to_rmv:
                         tabs[tab].remove(g)
                      if tabs[tab] == []:  # if all the plots of a tab were removed
                         tabs_to_rmv.append(tab)
                  for t in tabs_to_rmv:
                      del tabs[t]  # >> take into account that config is also updated here
         with open(path.join(TMP, 'settings.json'), 'w') as f:
             json.dump(config, f, indent=4, sort_keys=True)
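
A sketch of the assumed 'qc_plot_tabs' structure in settings.json and of the pruning logic above; the tab and column names are hypothetical:

# Assumed shape of config['qc_plot_tabs'] (hypothetical values)
qc_plot_tabs = {
    'NITRAT': [
        {'x': 'CTDPRS', 'y': 'NITRAT'},
        {'x': 'NITRAT', 'y': 'PHSPHT'},
    ],
}

cols = ['PHSPHT']                      # columns being removed
for tab in list(qc_plot_tabs):
    qc_plot_tabs[tab] = [g for g in qc_plot_tabs[tab]
                         if g.get('x') not in cols and g.get('y') not in cols]
    if qc_plot_tabs[tab] == []:        # all the plots of the tab were removed
        del qc_plot_tabs[tab]
print(qc_plot_tabs)                    # {'NITRAT': [{'x': 'CTDPRS', 'y': 'NITRAT'}]}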