Example #1
def get_column_info():
    # Look up the column's metadata row by the id passed in the query string.
    column_id = request.args.get('column_id')
    found_row = db.session.query(ColumnMetadata).filter(
        ColumnMetadata.id == column_id).first()

    # Profile the underlying column in its source table.
    db_extractor = db_structure.DBExtractor(found_row.dataset_name)
    col_info = db_extractor.analyze_column(table=found_row.table_name,
                                           column=found_row.column_source_name)

    return jsonify(col_info)
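The same column profile can be produced outside the request handler by calling the extractor directly. A minimal sketch, assuming a dataset named 'sample2' and placeholder table/column names; only the DBExtractor constructor and analyze_column call shown above are relied on:

import db_structure

# 'sample2', 'table1', and 'col1' are placeholder names for illustration only.
db_extractor = db_structure.DBExtractor(dataset_name='sample2')
col_info = db_extractor.analyze_column(table='table1', column='col1')
print(col_info)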
Example #2
 @classmethod
 def setUpClass(cls):
     # Build the sample2 database from its source directory, then register the
     # columns that act as global foreign keys before constructing the extractor.
     cls.db_maker = db_structure.DBMaker(dataset_name='sample2', directory_path=os.path.join('datasets', 'sample2'))
     cls.db_maker.create_db()
     cls.db_linker = db_structure.DBLinker(dataset_name='sample2')
     for col in ['col1', 'col2', 'col3', 'col4', 'col5', 'col6', 'col7', 'col8']:
         cls.db_linker.add_global_fk(col)
     cls.db_extractor = db_structure.DBExtractor(dataset_name='sample2')
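A test method in the same TestCase could then exercise the extractor built in setUpClass. A minimal sketch, assuming hypothetical table names 'table1' and 'table2' exist in the sample2 fixture; find_paths_multi_tables is the call shown in Example #5:

 def test_find_paths_between_tables(self):
     # 'table1' and 'table2' are placeholder table names from the sample2 fixture.
     paths = self.db_extractor.find_paths_multi_tables(['table1', 'table2'])
     self.assertIsNotNone(paths)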
Example #3
def get_accessible_tables():
    return_data = {}
    chosen_dataset = request.args.get('chosen_dataset')
    chosen_ind_column_ids = request.args.getlist('chosen_ind_column_ids[]')
    chosen_outcome_column_id = request.args.get('chosen_outcome_column_id',
                                                None)
    all_tables = [
        x[0] for x in db.session.query(TableMetadata.table_name).filter(
            TableMetadata.dataset_name == chosen_dataset).all()
    ]

    if not chosen_ind_column_ids and chosen_outcome_column_id in (None, ''):
        # The user hasn't chosen anything yet (this happens when the dataset has just
        # changed), so the independent and outcome variables share the same options.
        for table in all_tables:
            return_data[table] = True
    else:
        if chosen_outcome_column_id in (None, ''):
            all_chosen_column_ids = chosen_ind_column_ids
        else:
            all_chosen_column_ids = [chosen_outcome_column_id] + chosen_ind_column_ids

        include_tables = list(
            set([
                x[0]
                for x in db.session.query(ColumnMetadata.table_name).filter(
                    ColumnMetadata.id.in_(all_chosen_column_ids))
            ]))

        db_extractor = db_structure.DBExtractor(dataset_name=chosen_dataset)
        accessible_tables = db_extractor.find_multi_tables_still_accessible_tables(
            include_tables=include_tables)
        for table in all_tables:
            return_data[table] = table in include_tables or table in accessible_tables

    return jsonify(return_data)
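Stripped of the Flask plumbing, the accessibility check reduces to a single extractor call. A minimal sketch, assuming a dataset named 'sample2' and placeholder table names:

import db_structure

# Placeholder names; the real include_tables come from the user's chosen columns.
db_extractor = db_structure.DBExtractor(dataset_name='sample2')
include_tables = ['table1', 'table2']
accessible_tables = db_extractor.find_multi_tables_still_accessible_tables(
    include_tables=include_tables)
# A table remains selectable if it is already included or still reachable.
print('table3' in include_tables or 'table3' in accessible_tables)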
Example #4
 @classmethod
 def setUpClass(cls):
     # Assume that all the links have already been set up for the TOPICC dataset.
     cls.db_extractor = db_structure.DBExtractor(dataset_name='TOPICC')
Example #5
def get_graph_data():
    return_data = {}
    chosen_dataset = request.args.get('chosen_dataset')
    chosen_ind_column_ids = request.args.getlist('chosen_ind_column_ids[]')
    chosen_ind_column_ids = [int(x) for x in chosen_ind_column_ids]
    if len(chosen_ind_column_ids) == 0:
        return jsonify({})

    chosen_outcome_column_id = request.args.get('chosen_outcome_column_id',
                                                None)
    if chosen_outcome_column_id in (None, ''):
        chosen_outcome_column_id = None
    else:
        chosen_outcome_column_id = int(chosen_outcome_column_id)

    aggregate_fxn = request.args.get('aggregate_fxn')

    column_metadata = db.session.query(ColumnMetadata).filter(
        ColumnMetadata.id.in_(chosen_ind_column_ids +
                              [chosen_outcome_column_id])).all()

    db_extractor = db_structure.DBExtractor(dataset_name=chosen_dataset)

    tables = list(set(x.table_name for x in column_metadata))
    table_columns_of_interest = [(x.table_name, x.column_source_name)
                                 for x in column_metadata]
    groupby_columns = [
        f'{x.table_name}_{x.column_source_name}' for x in column_metadata
        if x.id in chosen_ind_column_ids
    ]

    aggregate_column = None
    aggregate_column_display_name = None
    for x in column_metadata:
        if x.id == chosen_outcome_column_id:
            aggregate_column = f'{x.table_name}_{x.column_source_name}'
            aggregate_column_display_name = x.column_custom_name

    paths = db_extractor.find_paths_multi_tables(tables)
    df = db_extractor.get_biggest_df_from_paths(paths,
                                                table_columns_of_interest)

    # Filters arrive keyed by column id: {column_id: filter data}
    filters_with_id_keys = json.loads(request.args.get('filters', '{}'))
    # Rewrite the keys to {table_columnsource: filter data}
    filters_with_name_keys = {}
    for column_id_str, filter_data in filters_with_id_keys.items():
        column_id = int(column_id_str)
        for x in column_metadata:
            if x.id == column_id:
                filters_with_name_keys[
                    f'{x.table_name}_{x.column_source_name}'] = filter_data
                break

    aggregated_df = db_extractor.aggregate_df(df, groupby_columns,
                                              filters_with_name_keys,
                                              aggregate_column, aggregate_fxn)

    labels = list(aggregated_df['groupby_labels'])
    outcome_possibilities = [
        x for x in aggregated_df.columns if x != 'groupby_labels'
    ]
    datasets = []
    for outcome_possibility in outcome_possibilities:
        datasets.append({
            'label': outcome_possibility,
            'data': list(aggregated_df[outcome_possibility])
        })

    groupby_col_names = [
        x.column_custom_name for x in column_metadata
        if x.id in chosen_ind_column_ids
    ]
    groupby_axis_label = '_'.join(groupby_col_names)
    if aggregate_column_display_name is None:
        title = f'{aggregate_fxn} broken down by {groupby_axis_label}'
    else:
        title = f'{aggregate_fxn} of {aggregate_column_display_name} broken down by {groupby_axis_label}'

    return_data = {
        'labels': labels,
        'datasets': datasets,
        'title': title,
        'xaxis_label': groupby_axis_label,
        'yaxis_label': aggregate_fxn
    }

    return jsonify(return_data)
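Outside the request handler, the same pipeline is three extractor calls: find the join paths, build the widest DataFrame from them, then aggregate it. A minimal sketch with placeholder dataset, table, column, and aggregate-function names; only the call signatures shown above are relied on:

import db_structure

# All names below are placeholders; real values come from ColumnMetadata rows.
db_extractor = db_structure.DBExtractor(dataset_name='sample2')
paths = db_extractor.find_paths_multi_tables(['table1', 'table2'])
df = db_extractor.get_biggest_df_from_paths(paths,
                                            [('table1', 'col1'), ('table2', 'col2')])
aggregated_df = db_extractor.aggregate_df(df,
                                          ['table1_col1'],  # groupby columns
                                          {},               # no filters
                                          'table2_col2',    # aggregate column
                                          'Count')          # aggregate_fxn (assumed value)
labels = list(aggregated_df['groupby_labels'])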