Example #1
import numpy as np


def find_and_replace(df_data, data):
    """Searches for a value and replaces if indicated"""
    id = data['file_id']
    # Heading of the column where find and replace is carried out
    heading = data['filter_column_heading']
    # Might be better to just search throughout df_data

    if data['find_value'] == 'nan':
        filter_applied = df_data[heading].isnull()
    else:
        temp = df_data[heading]
        temp = temp.astype('str').str.lower()
        filter_applied = np.array(temp == str(data['find_value']).lower())

    if data['replace_pressed']:
        # Use .loc to avoid pandas chained-assignment pitfalls
        df_data.loc[filter_applied, heading] = data['replace_value']
        column_values = df_data.loc[filter_applied, heading]

        if len(column_values) > 0:
            error_data, dtypes_dict = get_dtype_data(id)
            dicts = get_geolocation_dictionary()
            temp_prob_list, temp_error_counter = identify_col_dtype(
                column_values, heading, dicts)
            dtypes_dict[heading][filter_applied] = temp_error_counter
            error_data[heading] = get_prob_list(dtypes_dict[heading])
            save_validation_data(error_data, id, dtypes_dict)
            update_data(File.objects.get(id=id).file, df_data)
        df_data = df_data[df_data[heading] == data['replace_value']]
    else:
        df_data = df_data[filter_applied]
    return df_data
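A minimal usage sketch for the filter-only path (replace_pressed False), so none of the persistence helpers are needed; the frame and request payload are illustrative:

import pandas as pd

df = pd.DataFrame({'country': ['Kenya', 'kenya', None]})
request_data = {
    'file_id': 1,                        # hypothetical file id
    'filter_column_heading': 'country',  # column to search
    'find_value': 'kenya',               # matched case-insensitively
    'replace_pressed': False,            # filter only, no replacement
}
print(find_and_replace(df, request_data))  # rows 0 and 1 match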
Example #2
def update(id, data):
    '''Updates cell that user edits.'''
    df_data = get_file_data(id)
    error_data, dtypes_dict = get_dtype_data(id)

    if data['changeHeader'] != '':
        count = 2
        tmp = data['header_value']
        while tmp in df_data.columns:
            tmp = data['header_value'] + str(count)
            count += 1
        data['header_value'] = tmp
        df_data = df_data.rename(columns={data['header_tobe_changed']: data['header_value']})
        dtypes_dict[data['header_value']] = dtypes_dict[data['header_tobe_changed']]
        dtypes_dict.pop(data['header_tobe_changed'], None)
        error_data[data['header_value']] = error_data[data['header_tobe_changed']]
        error_data.pop(data['header_tobe_changed'], None)
    else:
        heading = data['column']
        line_no = data['line_no']
        df_data.loc[line_no, heading] = data['cell_value']

        prob_list, error_count = update_cell_type(
            df_data.loc[line_no, heading], dtypes_dict[heading], line_no,
            heading)
        dtypes_dict[heading] = error_count
        error_data[heading] = prob_list

    save_validation_data(error_data, id, dtypes_dict)
    update_data(File.objects.get(id=id).file, df_data)

    return {'success': 1}
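The header-renaming branch relies on a small uniquifying loop; a standalone sketch of just that step (the helper name dedupe_header is hypothetical):

def dedupe_header(name, existing_columns):
    # Appends 2, 3, ... until the candidate is not already a column name
    count, candidate = 2, name
    while candidate in existing_columns:
        candidate = name + str(count)
        count += 1
    return candidate

print(dedupe_header('value', ['value', 'value2']))  # -> value3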
Example #3
import numpy as np


def get_errors(data):
    '''Gets data that does not match the most probable data type found for each column.'''
    
    temp_error_message = {}
    id = data['file_id']
    start_pos = data['start_pos']
    end_pos = data['end_pos']
    df_data = get_file_data(id)
    column_headings = df_data.columns
    error_data, dtypes_dict = get_dtype_data(id)
    errors, line_nos = check_dtypes(error_data, dtypes_dict, column_headings)
    selection = np.arange(len(error_data[column_headings[0]]))
    amount = end_pos - start_pos  # unused while the start/end slicing below is disabled
    
    for i in errors:

        if len(errors[i]) > 0:
            line_no_selection = selection[line_nos[i]]  # [start_pos:end_pos]
            errors_selection = errors[i]  # [start_pos:end_pos]

            for counter, j in enumerate(errors_selection):  # minus one for line no
                message = ('Found a ' + j +
                           ' value instead of the most populous value ' +
                           dtypes_dict[i][0][0] + '.')
                line_no = str(line_no_selection[counter])
                temp_error_message[''.join([line_no, '|', i])] = message

    context = {'error_messages': temp_error_message}
    return context
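The returned map is keyed '<line_no>|<column>'; a minimal sketch of consuming it, with an illustrative entry:

context = {'error_messages': {
    '4|population': 'Found a str value instead of the most populous value int.'}}
for key, message in context['error_messages'].items():
    line_no, column = key.split('|')
    print(line_no, column, message)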
Example #4
def delete_data(id, data):
    """Deletes data based on data"""
    df_data = get_file_data(id)
    row_keys = list(map(int, data['row_keys']))
    df_data = df_data.drop(df_data.index[row_keys])
    df_data = df_data.reset_index(drop=True)
    error_data, dtypes_dict = get_dtype_data(id)
    error_data, dtypes_dict = remove_entries(error_data, dtypes_dict, row_keys)
    save_validation_data(error_data, id, dtypes_dict)
    update_data(File.objects.get(id=id).file, df_data)
    return {'success': 1}
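A minimal sketch of the positional drop-and-reindex pattern delete_data uses, with illustrative data:

import pandas as pd

df = pd.DataFrame({'a': [10, 20, 30, 40]})
row_keys = [1, 3]                 # positions, not index labels
df = df.drop(df.index[row_keys])  # translate positions to labels, then drop
df = df.reset_index(drop=True)    # renumber 0..n-1 so positions stay valid
print(df['a'].tolist())           # [10, 30]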
Example #5
def update(id, data):
    """Updates cell that user edits."""
    df_data = get_file_data(id)
    error_data, dtypes_dict = get_dtype_data(id)

    if data['changeHeader'] != '':
        count = 2
        tmp = data['header_value']
        while tmp in df_data.columns:
            tmp = data['header_value'] + str(count)
            count += 1
        data['header_value'] = tmp
        df_data = df_data.rename(
            columns={data['header_tobe_changed']: data['header_value']})
        dtypes_dict[data['header_value']] = dtypes_dict[
            data['header_tobe_changed']]
        dtypes_dict.pop(data['header_tobe_changed'], None)
        error_data[data['header_value']] = error_data[
            data['header_tobe_changed']]
        error_data.pop(data['header_tobe_changed'], None)
    else:
        heading = data['column']
        line_no = data['line_no']
        df_data.loc[line_no, heading] = data['cell_value']

        # TODO: Fix the underlying logic bug: error_data and dtypes_dict
        # arrive swapped depending on the caller. Calls from the test code
        # take the if branch; calls from GraphQL take the else branch. The
        # first symptom shows up in save_validation_data.
        if isinstance(error_data[next(iter(error_data))], list):
            prob_list, error_count = update_cell_type(
                df_data.loc[line_no, heading], dtypes_dict[heading], line_no,
                heading)
        else:
            prob_list, error_count = update_cell_type(
                df_data.loc[line_no, heading], error_data[heading], line_no,
                heading)

        dtypes_dict[heading] = error_count
        error_data[heading] = prob_list

    save_validation_data(error_data, id, dtypes_dict)
    update_data(File.objects.get(id=id).file, df_data)

    return {'success': 1}
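Both this function and the second get_errors below work around the same swapped-argument issue; a hypothetical guard that normalises the order in one place (order_validation_payload is not part of the codebase):

def order_validation_payload(error_data, dtypes_dict):
    # error_data canonically maps columns to probability lists, so if its
    # first value is a list the arguments are already in the right order
    if isinstance(next(iter(error_data.values())), list):
        return error_data, dtypes_dict
    return dtypes_dict, error_data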
Example #6
import pandas as pd


def begin_mapping(data):
    """Performs the manual mapping process."""
    if 'mapping_dict' in data:
        final_file_headings = {}
        id = data['metadata_id']

        # Get relevant data
        # save_mapping(id, instance)

        df_data = get_file_data(id)

        data_model_dict, filter_headings_dict, empty_entries_dict, \
            multi_entry_dict, point_base_dict = split_mapping_data(data)
        error_data, dtypes_dict = get_dtype_data(id)

        # Check if any new Geolocation information to save
        if 'coord' in point_base_dict and \
                point_base_dict['coord']['lat'] != '':
            # should save here and get later
            lat = point_base_dict['coord']['lat']
            lon = point_base_dict['coord']['lon']
            df_data['geolocation'] = 'pointbased'
            dtypes_dict['geolocation'] = df_data['geolocation'].copy()
            # Combine the two coordinate columns into a single 'lon,lat' string
            df_data['geolocation'] = df_data[lon].astype(
                str) + "," + df_data[lat].astype(str)
            data_model_dict['geolocation'] = ['geolocation']
            # Drop the now-redundant coordinate columns
            dtypes_dict.pop(lat, None)
            dtypes_dict.pop(lon, None)
            df_data.drop([lon, lat], inplace=True, axis=1)

            point_based = True
        else:
            point_based = False

        # Reformat dataframe for dates or categories
        if len(data_model_dict['value']) > 1:
            df_data, dtypes_dict, data_model_dict = convert_df(
                df_data, multi_entry_dict, data_model_dict,
                empty_entries_dict.pop('empty_value_format'), dtypes_dict)

        # Apply missing values
        df_data, data_model_dict, dtypes_dict = apply_missing_values(
            df_data, data_model_dict, dtypes_dict, empty_entries_dict)

        # Check that the datatypes of the data are correct
        result, correction_mappings, context = check_mapping_dtypes(
            data_model_dict, dtypes_dict)

        if not result:
            print(context)
            # return context  # Bad mapping
            raise Exception(context)

        # TODO: check whether new_dtypes_dict is necessary.
        # Why can't this be moved in front of check_mapping_dtypes?
        # Could be important if the user wants to see which parts of the data
        # did not get mapped and why
        error_lines, new_dtypes_dict = generate_error_data(df_data)
        save_validation_data(error_lines, id, dtypes_dict)

        # TODO: check if this is needed
        for key in new_dtypes_dict:
            dtypes_dict[key] = new_dtypes_dict[key]

        # Get the reverse mapping of data_model_dict
        # TODO: check if this is needed
        final_file_headings = {}
        for key in data_model_dict:
            if data_model_dict[key]:
                if key == 'filters':
                    final_file_headings[key] = data_model_dict[key]
                else:
                    final_file_headings[key] = data_model_dict[key][0]

        # Normalise data
        df_data = correct_data(df_data, correction_mappings, error_lines,
                               final_file_headings, point_based)

        # Filter out bad data from the dataframe
        filter_applied = (
            df_data[final_file_headings['indicator']].notnull()
            # The sections of data model
            # are not allowed to be empty
            & df_data[final_file_headings['date']].notnull()
            & df_data[final_file_headings['value']].notnull()
            & df_data[final_file_headings['geolocation']].notnull())

        df_data = df_data[filter_applied].reset_index()  # Remove empty values
        df_data[final_file_headings['date']] = pd.to_numeric(
            # Convert all dates to numbers
            # TODO: check if this is needed; normalise should do this
            df_data[final_file_headings['date']]).astype(int)

        # Create missing entries that are used in Datapoints datamodel
        instance, created = DateFormat.objects.get_or_create(
            type='YYYY')  # TODO: make dynamic
        if created:
            instance.save()

        final_file_headings['date_format'] = 'date_format'
        final_file_headings['metadata'] = 'metadata'

        metadata = File.objects.get(id=id)
        df_data['metadata'] = metadata
        df_data['date_format'] = instance

        # Save and get foreign key data for datapoints model
        ind_dict, headings_dict, geolocation_dict, value_format_dict, \
            filters_dict = get_save_unique_datapoints(
                df_data, final_file_headings, metadata, instance,
                filter_headings_dict, point_based)

        # Save Datapoints
        dicts = [
            ind_dict, headings_dict, geolocation_dict, value_format_dict,
            filters_dict
        ]

        save_datapoints(df_data, final_file_headings, filter_headings_dict,
                        dicts)

        context = {'success': 1}
        return context
    else:
        context = {
            'error_messages': 'No data in dictionary sent',
            'success': 0
        }
        # Raising a bare dict is a TypeError; wrap it in an Exception as above
        raise Exception(context)
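A minimal sketch of the point-based geolocation step in isolation, assuming pandas and illustrative column names:

import pandas as pd

df = pd.DataFrame({'latitude': [52.1, 4.9], 'longitude': [0.1, 52.3]})
df['geolocation'] = df['longitude'].astype(str) + ',' + df['latitude'].astype(str)
df = df.drop(['longitude', 'latitude'], axis=1)
print(df['geolocation'].tolist())  # ['0.1,52.1', '52.3,4.9']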
Example #7
import numpy as np


def get_errors(data):
    """
    Gets data that does not match
    the most probable data type found for each column
    """

    error_messages = []
    id = data['file_id']
    start_pos = data['start_pos']
    end_pos = data['end_pos']
    df_data = get_file_data(id)
    column_headings = df_data.columns
    error_data, dtypes_dict = get_dtype_data(id)
    # TODO: Fix the underlying logic bug: error_data and dtypes_dict arrive
    # swapped depending on the caller. The test-code path takes the try
    # branch; the GraphQL path takes the except branch. The first symptom
    # shows up in save_validation_data.
    try:
        errors, line_nos = check_dtypes(error_data, dtypes_dict,
                                        column_headings)
        selection = np.arange(len(error_data[column_headings[0]]))
        two_index = True
    except Exception:
        errors, line_nos = check_dtypes(dtypes_dict, error_data,
                                        column_headings)
        selection = np.arange(len(dtypes_dict[column_headings[0]]))
        two_index = False

    amount = end_pos - start_pos  # unused while the start/end slicing below is disabled
    error_rows = []

    for i in errors:

        if len(errors[i]) > 0:
            counter = 0
            try:
                line_no_selection = selection[line_nos[i]]
                # [start_pos:end_pos]
            except Exception:
                line_no_selection = None

            errors_selection = errors[i]  # [start_pos:end_pos]

            # TODO: related to the swapped-argument bug noted above
            for j in errors_selection:  # minus one for line no
                try:
                    dtype = dtypes_dict[i][0][0] if two_index \
                        else dtypes_dict[i][0]

                    row = line_no_selection[counter]
                    if row not in error_rows:
                        error_rows.append(row)

                    message = ('Found a ' + j +
                               ' value instead of the most populous value ' +
                               dtype + '.')
                    line_no = str(row)
                    error_message_row = {''.join([line_no, '|', i]): message}
                    error_messages.append(error_message_row)
                    counter += 1
                except Exception:
                    pass

    context = {'error_messages': error_messages}
    return context, error_rows
Example #8
def filter_for_errors(df_data, data):
    """Filters df_data down to the rows flagged as errors in the chosen column."""
    filter_column = data['error_filter_value']
    error_data, dtypes_dict = get_dtype_data(data['id'])
    errors, line_nos = check_dtypes(error_data, dtypes_dict, [filter_column],
                                    data['start_pos'], data['end_pos'])
    return df_data[line_nos[filter_column]]
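The return value is plain boolean-mask row selection; a minimal illustrative sketch:

import pandas as pd

df = pd.DataFrame({'amount': [1, 2, 3]})
mask = [True, False, True]  # stands in for line_nos[filter_column]
print(df[mask])             # keeps rows 0 and 2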