Example #1
    existing_subcategories_list = {
        item['name']
        for item in existing_subcategories
    }

    # NOTE: the snippet is truncated here in the original; this `if` branch is
    # an assumed reconstruction, mirroring the same pattern in Example #2.
    if subcategory_name not in existing_subcategories_list:
        the_subcategory = DatasetSubcategory(
            name=subcategory_name, categoryId=the_category)
        the_subcategory.save()
        existing_subcategories_list.add(subcategory_name)
    else:
        the_subcategory = DatasetSubcategory.objects.get(
            name=subcategory_name, categoryId=the_category)

    if subcategory_name not in dataset_name_to_object:
        newdataset = Dataset(
            name=subcategory_name,
            description='This is a dataset imported by the automated fetcher',
            namespace='unaids',
            categoryId=the_category,
            subcategoryId=the_subcategory)
        newdataset.save()
        dataset_name_to_object[subcategory_name] = newdataset
        new_datasets_list.append(newdataset)
    else:
        newdataset = Dataset.objects.get(name=subcategory_name,
                                         categoryId=the_category)

    source_name = 'UNAIDS'
    if source_name not in source_name_to_object:
        newsource = Source(name=source_name,
                           description=json.dumps(source_description),
                           datasetId=newdataset.pk)
        newsource.save()
        source_name_to_object[source_name] = newsource
    else:
        newsource = Source.objects.get(
            name=source_name,
            # truncated in the original; datasetId is an assumed second lookup
            # key, mirroring the Source created above
            datasetId=newdataset.pk)
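
Both branches above hand-roll a get-or-create. Django's built-in QuerySet.get_or_create condenses the same pattern; a minimal sketch, assuming the field names and surrounding variables from the snippet:

# Sketch only (not part of the original importer): the surrounding names
# (subcategory_name, the_category, the_subcategory, dataset_name_to_object,
# new_datasets_list) are assumed to be defined as in the example above.
newdataset, created = Dataset.objects.get_or_create(
    name=subcategory_name,
    categoryId=the_category,
    defaults={
        'description': 'This is a dataset imported by the automated fetcher',
        'namespace': 'unaids',
        'subcategoryId': the_subcategory,
    })
if created:
    dataset_name_to_object[subcategory_name] = newdataset
    new_datasets_list.append(newdataset)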
Example #2
def process_csv_file_insert(filename_to_process: str, original_filename: str):
    print('Processing: %s' % original_filename)

    global unique_data_tracker
    global datasets_list

    # keeps track of variable+country pairs seen in the current file
    current_file_vars_countries = set()
    current_file_var_codes = set()
    current_file_var_names = set()
    previous_row = tuple()

    # inserting a subcategory
    subcategory_name = file_to_category_dict[original_filename]
    if subcategory_name not in existing_subcategories_list:
        the_subcategory = DatasetSubcategory(
            name=subcategory_name, fk_dst_cat_id=the_category)
        the_subcategory.save()
        existing_subcategories_list.add(subcategory_name)
    else:
        the_subcategory = DatasetSubcategory.objects.get(
            name=subcategory_name)

    # used to build the query for mass-inserting into the data_values table
    insert_string = ('INSERT INTO data_values (value, year, fk_ent_id, fk_var_id) '
                     'VALUES (%s, %s, %s, %s)')
    data_values_tuple_list = []

    # inserting a dataset
    newdataset = Dataset(
        name='%s: %s' % (file_to_category_dict[original_filename],
                         file_dataset_names[original_filename]),
        description='This is a dataset imported by the automated fetcher',
        namespace='faostat',
        fk_dst_cat_id=the_category,
        fk_dst_subcat_id=the_subcategory)
    newdataset.save()
    datasets_list.append(newdataset)

    # reading source information from a csv file in metadata_dir
    metadata_file_path = os.path.join(
        metadata_dir,
        os.path.splitext(original_filename)[0] + ".csv")
    data_published_by = 'Food and Agriculture Organization of the United Nations (FAO)'
    data_publishers_source = ''
    additional_information = ''
    variable_description = ''
    if os.path.isfile(metadata_file_path):
        with open(metadata_file_path, encoding='latin-1') as metadatacsv:
            metadatareader = csv.DictReader(metadatacsv)
            metadatacolumns = tuple(metadatareader.fieldnames)
            for row in metadatareader:
                if row['Subsection Code'] == '1.1':
                    data_published_by = row['Metadata']
                if row['Subsection Code'] == '3.1':
                    variable_description = row['Metadata']
                if row['Subsection Code'] == '3.4':
                    additional_information = row['Metadata']
                if row['Subsection Code'] == '20.1':
                    data_publishers_source = row['Metadata']

    # inserting a dataset source
    newsource = Source(
        name=file_dataset_names[original_filename],
        description=source_template % (
            file_dataset_names[original_filename], data_published_by,
            data_publishers_source, additional_information),
        datasetId=newdataset.pk)
    newsource.save()

    existing_fao_variables = Variable.objects.filter(
        fk_dst_id__in=Dataset.objects.filter(namespace='faostat'))
    existing_fao_variables_dict = {
        variable.name: variable for variable in existing_fao_variables
    }

    with open(filename_to_process, encoding='latin-1') as currentfile:
        currentreader = csv.DictReader(currentfile)
        filecolumns = tuple(currentreader.fieldnames)

        # these column types are very similar
        if filecolumns in column_types[:5]:

            for row in currentreader:
                if filecolumns == column_types[0]:
                    countryname = row['Area']
                    variablename = row['Item']
                    variablecode = row['Item Code']
                elif filecolumns == column_types[1]:
                    countryname = row['Country']
                    variablename = '%s - %s' % (row['Item'], row['Element'])
                    variablecode = '%s - %s' % (row['ItemCode'],
                                                row['ElementCode'])
                elif filecolumns == column_types[2]:
                    countryname = row['Area']
                    variablename = '%s - %s' % (row['Item'], row['Element'])
                    variablecode = '%s - %s' % (row['Item Code'],
                                                row['Element Code'])
                elif filecolumns == column_types[3]:
                    countryname = row['Country']
                    variablename = '%s - %s' % (row['Item'], row['Element'])
                    variablecode = '%s - %s' % (row['Item Code'],
                                                row['Element Code'])
                elif filecolumns == column_types[4]:
                    countryname = row['Country']
                    variablename = '%s - %s' % (row['Indicator'],
                                                row['Source'])
                    variablecode = '%s - %s' % (row['Indicator Code'],
                                                row['Source Code'])

                # for these files, the unit is appended to the variable name
                if original_filename in (
                        'Emissions_Agriculture_Energy_E_All_Data_(Norm).zip',
                        'Production_LivestockPrimary_E_All_Data_(Normalized).zip',
                        'Trade_LiveAnimals_E_All_Data_(Normalized).zip'):
                    variablename += ' - %s' % row['Unit']

                # avoiding duplicate rows: skip a row if exactly one of its
                # item code / item name has been seen before in this file
                if original_filename == 'Inputs_Pesticides_Use_E_All_Data_(Normalized).zip':
                    code_seen = row['Item Code'] in current_file_var_codes
                    name_seen = row['Item'] in current_file_var_names
                    if not code_seen and not name_seen:
                        current_file_var_codes.add(row['Item Code'])
                        current_file_var_names.add(row['Item'])
                    elif code_seen != name_seen:  # inconsistent pair: skip
                        continue

                # avoiding duplicate rows: skip a row identical to the previous
                # one (compare items; tuple(row) alone would compare only the
                # column names, which never differ between rows)
                if original_filename == 'FoodBalanceSheets_E_All_Data_(Normalized).csv':
                    current_row = tuple(row.items())
                    if current_row == previous_row:
                        continue
                    previous_row = current_row

                try:
                    year = int(row['Year'])
                    value = float(row['Value'])
                except ValueError:
                    year = False
                    value = False

                variablename = '%s: %s' % (file_dataset_names[original_filename],
                                           variablename)

                current_file_vars_countries.add((countryname, variablecode))

                process_one_row(year, value, countryname, variablecode,
                                variablename, existing_fao_variables_dict,
                                row['Unit'], newsource, newdataset,
                                variable_description, data_values_tuple_list)

            unique_data_tracker.update(current_file_vars_countries)

        # these are the files that require several iterations over all rows
        if filecolumns in column_types[5:8]:
            if filecolumns == column_types[5]:
                iterations = [
                    {'country_field': 'Donor Country',
                     'varname_format': '%s - Donors'},
                    {'country_field': 'Recipient Country',
                     'varname_format': '%s - Recipients'},
                ]
            elif filecolumns == column_types[6]:
                iterations = [
                    {'country_field': 'Reporter Countries',
                     'varname_format': '%s - %s - Reporters'},
                    {'country_field': 'Partner Countries',
                     'varname_format': '%s - %s - Partners'},
                ]
            elif filecolumns == column_types[7]:
                iterations = [
                    {'country_field': 'Donor',
                     'varname_format': '%s - %s - Donors'},
                    {'country_field': 'Recipient Country',
                     'varname_format': '%s - %s - Recipients'},
                ]
            for oneiteration in iterations:
                # the large file will be broken down into smaller per-variable files
                file_stream_holder = {}
                dict_writer_holder = {}
                separate_files_names = {}  # maps generated filename -> variable name
                unique_vars = []
                # first we collect all variable names
                currentfile.seek(0)
                row_counter = 0
                for row in currentreader:
                    if row['Year'] == 'Year':  # skip the header row after seek(0)
                        continue
                    row_counter += 1
                    if row_counter % 300 == 0:
                        time.sleep(0.001)  # yield briefly so the CPU isn't kept busy
                    if filecolumns == column_types[5]:
                        variablename = oneiteration['varname_format'] % row['Item']
                    elif filecolumns == column_types[6]:
                        variablename = oneiteration['varname_format'] % (
                            row['Item'], row['Element'])
                    elif filecolumns == column_types[7]:
                        variablename = oneiteration['varname_format'] % (
                            row['Item'], row['Purpose'])
                    if variablename not in unique_vars:
                        unique_vars.append(variablename)
                # then we break the dataset into files named after the variable names
                for varname in unique_vars:
                    separate_filename = varname.replace('/', '+') + '.csv'
                    separate_files_names[separate_filename] = varname
                    file_stream_holder[varname] = open(
                        os.path.join('/tmp', separate_filename),
                        'w+', encoding='latin-1')
                    dict_writer_holder[varname] = csv.DictWriter(
                        file_stream_holder[varname],
                        fieldnames=['Country', 'Variable', 'Varcode',
                                    'Year', 'Unit', 'Value'])
                    dict_writer_holder[varname].writeheader()
                # go back to the beginning of the file
                currentfile.seek(0)
                row_counter = 0
                for row in currentreader:
                    if row['Year'] == 'Year':  # skip the header row after seek(0)
                        continue
                    row_counter += 1
                    if row_counter % 300 == 0:
                        time.sleep(0.001)  # yield briefly so the CPU isn't kept busy
                    if filecolumns == column_types[5]:
                        variablename = oneiteration['varname_format'] % row['Item']
                        variablecode = row['Item Code']
                    elif filecolumns == column_types[6]:
                        variablename = oneiteration['varname_format'] % (
                            row['Item'], row['Element'])
                        variablecode = '%s - %s' % (row['Item Code'],
                                                    row['Element Code'])
                    elif filecolumns == column_types[7]:
                        variablename = oneiteration['varname_format'] % (
                            row['Item'], row['Purpose'])
                        variablecode = '%s - %s' % (row['Item Code'],
                                                    row['Purpose Code'])
                    # all three layouts share the same output row format
                    dict_writer_holder[variablename].writerow({
                        'Country': row[oneiteration['country_field']],
                        'Variable': variablename,
                        'Varcode': variablecode,
                        'Unit': row['Unit'],
                        'Year': row['Year'],
                        'Value': row['Value'],
                    })
                    if row_counter % 100000 == 0:
                        # flush periodically so long runs don't buffer everything
                        for actual_file in file_stream_holder.values():
                            actual_file.flush()
                            os.fsync(actual_file.fileno())
                for actual_file in file_stream_holder.values():
                    actual_file.close()

                # now parsing and importing each file individually

                for each_separate_file, file_variable_name in separate_files_names.items():
                    unique_records_holder = {}
                    with open('/tmp/%s' % each_separate_file,
                              encoding='latin-1') as separate_file:
                        separate_file_reader = csv.DictReader(separate_file)
                        row_counter = 0
                        for row in separate_file_reader:
                            row_counter += 1
                            if row_counter % 300 == 0:
                                time.sleep(0.001)  # yield briefly so the CPU isn't kept busy
                            countryname = row['Country']
                            variablecode = row['Varcode']
                            variableunit = row['Unit']
                            year = row['Year']
                            value = row['Value']

                            try:
                                year = int(year)
                                value = float(value)
                            except ValueError:
                                year = False
                                value = False
                            if year is not False and value is not False:
                                # sum values for duplicate (country, year) records
                                unique_record = (countryname, year)
                                if unique_record not in unique_records_holder:
                                    unique_records_holder[unique_record] = value
                                else:
                                    unique_records_holder[unique_record] += value
                    variablename = '%s: %s' % (file_dataset_names[original_filename],
                                               file_variable_name)
                    for (countryname, year), value in unique_records_holder.items():
                        process_one_row(year, str(value), countryname,
                                        variablecode, variablename,
                                        existing_fao_variables_dict, variableunit,
                                        newsource, newdataset,
                                        variable_description,
                                        data_values_tuple_list)

                    os.remove('/tmp/%s' % each_separate_file)

        if data_values_tuple_list:  # insert any leftover data_values
            with connection.cursor() as c:
                c.executemany(insert_string, data_values_tuple_list)
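
For reference, a minimal sketch of the batched-insert pattern that insert_string and data_values_tuple_list support. The flush_data_values helper and the 10,000-row threshold are assumptions for illustration; in the example above, the intermediate flushes happen inside process_one_row, which is not shown.

from django.db import connection

def flush_data_values(sql, buffer):
    # write all buffered (value, year, fk_ent_id, fk_var_id) tuples in a
    # single round-trip, then clear the buffer for the next batch
    if buffer:
        with connection.cursor() as c:
            c.executemany(sql, buffer)
        buffer.clear()

# hypothetical usage while parsing rows:
#     data_values_tuple_list.append((value, year, entity_id, variable_id))
#     if len(data_values_tuple_list) >= 10000:
#         flush_data_values(insert_string, data_values_tuple_list)
# flush_data_values(insert_string, data_values_tuple_list)  # leftovers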