Example #1
        existing_categories = DatasetCategory.objects.values('name')
        existing_categories_list = {item['name'] for item in existing_categories}

        if un_wpp_category_name_in_db not in existing_categories_list:
            the_category = DatasetCategory(name=un_wpp_category_name_in_db, fetcher_autocreated=True)
            the_category.save()

        else:
            the_category = DatasetCategory.objects.get(name=un_wpp_category_name_in_db)

        existing_subcategories = DatasetSubcategory.objects.filter(fk_dst_cat_id=the_category.pk).values('name')
        existing_subcategories_list = {item['name'] for item in existing_subcategories}

        the_subcategory_name = dataset_info['category']
        if the_subcategory_name not in existing_subcategories_list:
            the_subcategory = DatasetSubcategory(name=the_subcategory_name, fk_dst_cat_id=the_category)
            the_subcategory.save()
        else:
            the_subcategory = DatasetSubcategory.objects.get(name=the_subcategory_name, fk_dst_cat_id=the_category)

        wb = load_workbook(os.path.join(wpp_downloads_save_location, file_to_parse), read_only=True)
        sheets = wb.sheetnames  # get_sheet_names() is deprecated in newer openpyxl releases
        sheets.remove('NOTES')  # we don't need this sheet

        if dataset_info['structure'] == 6:
            dataset_saved = False
            for sheet in sheets:
                variables_saved = False
                column_number = 0
                row_number = 0
                var_to_add_dict = {}
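A note on the pattern above: the membership check against existing_categories_list (and existing_subcategories_list) followed by either a save() or a get() is a hand-rolled version of Django's get_or_create(). A minimal sketch, assuming the same DatasetCategory and DatasetSubcategory models and field names used in this snippet:

    # Sketch only: get_or_create() returns (object, created) and collapses the
    # lookup-then-create branching above into a single call per model.
    the_category, _ = DatasetCategory.objects.get_or_create(
        name=un_wpp_category_name_in_db,
        defaults={'fetcher_autocreated': True})
    the_subcategory, _ = DatasetSubcategory.objects.get_or_create(
        name=dataset_info['category'], fk_dst_cat_id=the_category)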
Example #2
        country_tool_names_dict[
            each_country.country_name.lower()] = each_country.owid_country

    c_name_entity_ref = {
    }  # this dict will hold the country names from excel and the appropriate entity object (this is used when saving the variables and their values)

    insert_string = 'INSERT into data_values (value, year, entityId, variableId) VALUES (%s, %s, %s, %s)'  # this is used for constructing the query for mass inserting to the data_values table

    data_values_tuple_list = []

    row_number = 0

    subcategory_name = 'UNAIDS'

    if subcategory_name not in existing_subcategories_list:
        the_subcategory = DatasetSubcategory(name=subcategory_name,
                                             categoryId=the_category)
        the_subcategory.save()

        existing_subcategories = DatasetSubcategory.objects.filter(
            categoryId=the_category.pk).values('name')
        existing_subcategories_list = {
            item['name']
            for item in existing_subcategories
        }
    else:
        the_subcategory = DatasetSubcategory.objects.get(
            name=subcategory_name, categoryId=the_category)

    if subcategory_name not in dataset_name_to_object:
        newdataset = Dataset(
            name=subcategory_name,
Example #3
                    for cell in row:

                        column_number += 1

                        if row_number == 1 and column_number == 1:
                            varname = cell.value

                        if row_number == 2 and column_number == 1:
                            varunit = cell.value

                        if row_number == 3 and column_number == 1:
                            # inserting a subcategory and dataset
                            if dataset_to_category[
                                    varname] not in existing_subcategories_list:
                                the_subcategory = DatasetSubcategory(
                                    name=dataset_to_category[varname],
                                    categoryId=the_category)
                                the_subcategory.save()
                                newdataset = Dataset(
                                    name='Clio-Infra - %s' %
                                    the_subcategory.name,
                                    description=
                                    'This is a dataset imported by the automated fetcher',
                                    namespace='clioinfra',
                                    categoryId=the_category,
                                    subcategoryId=the_subcategory)
                                newdataset.save()
                                new_datasets_list.append(newdataset)
                                existing_subcategories_list.add(
                                    dataset_to_category[varname])
                            else:
Example #4
    for file in glob.glob(ghdx_downloads_save_location + "/*.zip"):
        z = zipfile.ZipFile(file)
        for each in z.namelist():
            if '.csv' in each:
                csv_filename = ghdx_downloads_save_location + each
        z.extractall(ghdx_downloads_save_location)
        with open(csv_filename, 'r', encoding='utf8') as f:
            print('Processing: %s' % file)
            reader = csv.DictReader(f)
            for row in reader:
                row_number += 1
                if row['sex_name'] in sex_names and row['age_name'] in age_names \
                        and row['metric_name'] in metric_names and row['measure_name'] in measure_names \
                        and row['cause_name'] == 'All causes':
                    if row['rei_name'] not in existing_subcategories_list:
                        the_subcategory = DatasetSubcategory(name=row['rei_name'], fk_dst_cat_id=the_category)
                        the_subcategory.save()
                        newdataset = Dataset(name=row['rei_name'],
                                             description='This is a dataset imported by the automated fetcher',
                                             namespace='gbd_risk', fk_dst_cat_id=the_category,
                                             fk_dst_subcat_id=the_subcategory)
                        newdataset.save()
                        dataset_name_to_object[row['rei_name']] = newdataset
                        new_datasets_list.append(newdataset)
                        newsource = Source(name=row['rei_name'],
                                           description=json.dumps(source_description),
                                           datasetId=newdataset.pk)
                        newsource.save()
                        source_name_to_object[row['rei_name']] = newsource
                        existing_subcategories = DatasetSubcategory.objects.filter(
                            fk_dst_cat_id=the_category.pk).values(
Example #5
    country_tool_names_dict = {}

    for each_country in country_tool_names:
        country_tool_names_dict[
            each_country.country_name.lower()] = each_country.owid_country

    c_name_entity_ref = {
    }  # this dict will hold the country names from excel and the appropriate entity object (this is used when saving the variables and their values)

    insert_string = 'INSERT into data_values (value, year, entityId, variableId) VALUES (%s, %s, %s, %s)'  # this is used for constructing the query for mass inserting to the data_values table

    data_values_tuple_list = []

    for section in sections:
        if section not in existing_subcategories_list:
            the_subcategory = DatasetSubcategory(name=section,
                                                 categoryId=the_category)
            the_subcategory.save()

            existing_subcategories = DatasetSubcategory.objects.filter(
                categoryId=the_category.pk).values('name')
            existing_subcategories_list = {
                item['name']
                for item in existing_subcategories
            }
        else:
            the_subcategory = DatasetSubcategory.objects.get(
                name=section, categoryId=the_category)

        if section not in dataset_name_to_object:
            newdataset = Dataset(
                name=section,
Example #6
    data_values_tuple_list = []

    for file in glob.glob(ghdx_downloads_save_location + "/*.zip"):
        z = zipfile.ZipFile(file)
        for each in z.namelist():
            if '.csv' in each:
                csv_filename = ghdx_downloads_save_location + each
        z.extractall(ghdx_downloads_save_location)
        with open(csv_filename, 'r', encoding='utf8') as f:
            print('Processing: %s' % file)
            reader = csv.DictReader(f)
            for row in reader:
                row_number += 1
                if row['sex_name'] in sex_names and row['age_name'] in age_names and row['metric_name'] in metric_names and row['measure_name'] in measure_names:
                    if row['cause_name'] not in existing_subcategories_list:
                        the_subcategory = DatasetSubcategory(name=row['cause_name'], categoryId=the_category)
                        the_subcategory.save()
                        newdataset = Dataset(name=row['cause_name'],
                                             description='This is a dataset imported by the automated fetcher',
                                             namespace='gbd_prevalence_by_gender', categoryId=the_category,
                                             subcategoryId=the_subcategory)
                        newdataset.save()
                        dataset_name_to_object[row['cause_name']] = newdataset
                        new_datasets_list.append(newdataset)
                        newsource = Source(name=row['cause_name'],
                                           description=json.dumps(source_description),
                                           datasetId=newdataset.pk)
                        newsource.save()
                        source_name_to_object[row['cause_name']] = newsource
                        existing_subcategories = DatasetSubcategory.objects.filter(categoryId=the_category.pk).values(
                            'name')
Example #7
                        columns_to_process.append(onec)

            filename = metadata_dict[file_name]['dataset']

            if file_name not in [
                    'DIOC_CITIZEN_AGE', 'DIOC_DURATION_STAY',
                    'DIOC_FIELD_STUDY', 'DIOC_LFS', 'DIOC_SECTOR',
                    'DIOC_SEX_AGE', 'MIG', 'REF_TOTALOFFICIAL',
                    'REF_TOTALRECPTS', 'TABLE3A', 'EDU_ENRL_MOBILE',
                    'EDU_GRAD_MOBILE', 'IO_GHG_2015'
            ]:

                if metadata_dict[file_name][
                        'category'] not in existing_subcategories_list:
                    the_subcategory = DatasetSubcategory(
                        name=metadata_dict[file_name]['category'],
                        fk_dst_cat_id=the_category)
                    the_subcategory.save()
                    newdataset = Dataset(
                        name=metadata_dict[file_name]['category'],
                        description=
                        'This is a dataset imported by the automated fetcher',
                        namespace='oecd_stat',
                        fk_dst_cat_id=the_category,
                        fk_dst_subcat_id=the_subcategory)
                    newdataset.save()
                    dataset_name_to_object[metadata_dict[file_name]
                                           ['category']] = newdataset
                    new_datasets_list.append(newdataset)

                    existing_subcategories = DatasetSubcategory.objects.filter(
Example #8
                    for cell in row:

                        column_number += 1

                        if row_number == 1 and column_number == 1:
                            varname = cell.value

                        if row_number == 2 and column_number == 1:
                            varunit = cell.value

                        if row_number == 3 and column_number == 1:
                            # inserting a subcategory and dataset
                            if dataset_to_category[
                                    varname] not in existing_subcategories_list:
                                the_subcategory = DatasetSubcategory(
                                    name=dataset_to_category[varname],
                                    fk_dst_cat_id=the_category)
                                the_subcategory.save()
                                newdataset = Dataset(
                                    name='Clio-Infra - %s' %
                                    the_subcategory.name,
                                    description=
                                    'This is a dataset imported by the automated fetcher',
                                    namespace='clioinfra',
                                    fk_dst_cat_id=the_category,
                                    fk_dst_subcat_id=the_subcategory)
                                newdataset.save()
                                new_datasets_list.append(newdataset)
                                existing_subcategories_list.add(
                                    dataset_to_category[varname])
                            else:
Example #9
def process_csv_file_insert(filename_to_process: str, original_filename: str):
    print('Processing: %s' % original_filename)

    global unique_data_tracker
    global datasets_list

    current_file_vars_countries = set(
    )  # keeps track of variables+countries we saw in the current file
    current_file_var_codes = set()
    current_file_var_names = set()
    previous_row = tuple()

    # inserting a subcategory
    if file_to_category_dict[
            original_filename] not in existing_subcategories_list:
        the_subcategory = DatasetSubcategory(
            name=file_to_category_dict[original_filename],
            fk_dst_cat_id=the_category)
        the_subcategory.save()
        existing_subcategories_list.add(
            file_to_category_dict[original_filename])
    else:
        the_subcategory = DatasetSubcategory.objects.get(
            name=file_to_category_dict[original_filename])

    insert_string = 'INSERT into data_values (value, year, fk_ent_id, fk_var_id) VALUES (%s, %s, %s, %s)'  # this is used for constructing the query for mass inserting to the data_values table
    data_values_tuple_list = []

    # inserting a dataset
    newdataset = Dataset(
        name='%s: %s' % (file_to_category_dict[original_filename],
                         file_dataset_names[original_filename]),
        description='This is a dataset imported by the automated fetcher',
        namespace='faostat',
        fk_dst_cat_id=the_category,
        fk_dst_subcat_id=the_subcategory)
    newdataset.save()
    datasets_list.append(newdataset)

    # reading source information from a csv file in metadata_dir
    metadata_file_path = os.path.join(
        metadata_dir,
        os.path.splitext(original_filename)[0] + ".csv")
    data_published_by = 'Food and Agriculture Organization of the United Nations (FAO)'
    data_publishers_source = ''
    additional_information = ''
    variable_description = ''
    if os.path.isfile(metadata_file_path):
        with open(metadata_file_path, encoding='latin-1') as metadatacsv:
            metadatareader = csv.DictReader(metadatacsv)
            metadatacolumns = tuple(metadatareader.fieldnames)
            for row in metadatareader:
                if row['Subsection Code'] == '1.1':
                    data_published_by = row['Metadata']
                if row['Subsection Code'] == '3.1':
                    variable_description = row['Metadata']
                if row['Subsection Code'] == '3.4':
                    additional_information = row['Metadata']
                if row['Subsection Code'] == '20.1':
                    data_publishers_source = row['Metadata']

    # inserting a dataset source
    newsource = Source(
        name=file_dataset_names[original_filename],
        description=source_template %
        (file_dataset_names[original_filename], data_published_by,
         data_publishers_source, additional_information),
        datasetId=newdataset.pk)
    newsource.save()

    existing_fao_variables = Variable.objects.filter(
        fk_dst_id__in=Dataset.objects.filter(namespace='faostat'))
    existing_fao_variables_dict = {}
    for each in existing_fao_variables:
        existing_fao_variables_dict[each.name] = each

    with open(filename_to_process, encoding='latin-1') as currentfile:
        currentreader = csv.DictReader(currentfile)
        filecolumns = tuple(currentreader.fieldnames)

        # these column types are very similar
        if filecolumns == column_types[0] or filecolumns == column_types[1] \
           or filecolumns == column_types[2] or filecolumns == column_types[3] \
           or filecolumns == column_types[4]:

            for row in currentreader:
                if filecolumns == column_types[0]:
                    countryname = row['Area']
                    variablename = row['Item']
                    variablecode = row['Item Code']
                if filecolumns == column_types[1]:
                    countryname = row['Country']
                    variablename = '%s - %s' % (row['Item'], row['Element'])
                    variablecode = '%s - %s' % (row['ItemCode'],
                                                row['ElementCode'])
                if filecolumns == column_types[2]:
                    countryname = row['Area']
                    variablename = '%s - %s' % (row['Item'], row['Element'])
                    variablecode = '%s - %s' % (row['Item Code'],
                                                row['Element Code'])
                if filecolumns == column_types[3]:
                    countryname = row['Country']
                    variablename = '%s - %s' % (row['Item'], row['Element'])
                    variablecode = '%s - %s' % (row['Item Code'],
                                                row['Element Code'])
                if filecolumns == column_types[4]:
                    countryname = row['Country']
                    variablename = '%s - %s' % (row['Indicator'],
                                                row['Source'])
                    variablecode = '%s - %s' % (row['Indicator Code'],
                                                row['Source Code'])

                if original_filename == 'Emissions_Agriculture_Energy_E_All_Data_(Norm).zip':
                    variablename += ' - %s' % row['Unit']

                if original_filename == 'Production_LivestockPrimary_E_All_Data_(Normalized).zip':
                    variablename += ' - %s' % row['Unit']

                if original_filename == 'Trade_LiveAnimals_E_All_Data_(Normalized).zip':
                    variablename += ' - %s' % row['Unit']

                # avoiding duplicate rows
                if original_filename == 'Inputs_Pesticides_Use_E_All_Data_(Normalized).zip':
                    if row['Item Code'] not in current_file_var_codes and row[
                            'Item'] not in current_file_var_names:
                        current_file_var_codes.add(row['Item Code'])
                        current_file_var_names.add(row['Item'])
                    elif row['Item Code'] in current_file_var_codes and row[
                            'Item'] in current_file_var_names:
                        pass
                    else:
                        continue

                # avoiding duplicate rows
                if original_filename == 'FoodBalanceSheets_E_All_Data_(Normalized).csv':
                    # compare the row's contents, not just its keys: tuple(row) on a
                    # DictReader row yields only the field names, so every row after
                    # the first would always be skipped
                    if tuple(row.items()) == previous_row:
                        continue
                    previous_row = tuple(row.items())

                try:
                    year = int(row['Year'])
                    value = float(row['Value'])
                except ValueError:
                    year = False
                    value = False

                variablename = file_dataset_names[
                    original_filename] + ': ' + variablename

                current_file_vars_countries.add(
                    tuple([countryname, variablecode]))

                process_one_row(year, value, countryname, variablecode,
                                variablename, existing_fao_variables_dict,
                                row['Unit'], newsource, newdataset,
                                variable_description, data_values_tuple_list)

            unique_data_tracker.update(current_file_vars_countries)

        # these are the files that require several iterations over all rows
        if filecolumns == column_types[5] or filecolumns == column_types[6] \
           or filecolumns == column_types[7]:
            if filecolumns == column_types[5]:
                iterations = [{
                    'country_field': 'Donor Country',
                    'varname_format': '%s - Donors'
                }, {
                    'country_field': 'Recipient Country',
                    'varname_format': '%s - Recipients'
                }]
            if filecolumns == column_types[6]:
                iterations = [{
                    'country_field': 'Reporter Countries',
                    'varname_format': '%s - %s - Reporters'
                }, {
                    'country_field': 'Partner Countries',
                    'varname_format': '%s - %s - Partners'
                }]
            if filecolumns == column_types[7]:
                iterations = [{
                    'country_field': 'Donor',
                    'varname_format': '%s - %s - Donors'
                }, {
                    'country_field': 'Recipient Country',
                    'varname_format': '%s - %s - Recipients'
                }]
            for oneiteration in iterations:
                file_stream_holder = {
                }  # we will break down these files into smaller files
                dict_writer_holder = {}
                separate_files_names = {
                }  # we will keep the filenames in this dict
                unique_vars = []
                # first we collect all variable names
                currentfile.seek(0)
                row_counter = 0
                for row in currentreader:
                    if row['Year'] == 'Year':
                        continue
                    row_counter += 1
                    if row_counter % 300 == 0:
                        time.sleep(
                            0.001
                        )  # this is done in order to not keep the CPU busy all the time
                    if filecolumns == column_types[5]:
                        variablename = oneiteration['varname_format'] % row[
                            'Item']
                    if filecolumns == column_types[6]:
                        variablename = oneiteration['varname_format'] % (
                            row['Item'], row['Element'])
                    if filecolumns == column_types[7]:
                        variablename = oneiteration['varname_format'] % (
                            row['Item'], row['Purpose'])
                    if variablename not in unique_vars:
                        unique_vars.append(variablename)
                # then we break the dataset into files named after the variable names
                for varname in unique_vars:
                    separate_files_names[varname.replace('/', '+') +
                                         '.csv'] = varname
                    file_stream_holder[varname] = open(os.path.join(
                        '/tmp',
                        varname.replace('/', '+') + '.csv'),
                                                       'w+',
                                                       encoding='latin-1')
                    dict_writer_holder[varname] = csv.DictWriter(
                        file_stream_holder[varname],
                        fieldnames=[
                            'Country', 'Variable', 'Varcode', 'Year', 'Unit',
                            'Value'
                        ])
                    dict_writer_holder[varname].writeheader()
                # go back to the beginning of the file
                currentfile.seek(0)
                row_counter = 0
                for row in currentreader:
                    if row['Year'] == 'Year':
                        continue
                    row_counter += 1
                    if row_counter % 300 == 0:
                        time.sleep(
                            0.001
                        )  # this is done in order to not keep the CPU busy all the time
                    if filecolumns == column_types[5]:
                        variablename = oneiteration['varname_format'] % row[
                            'Item']
                        variablecode = row['Item Code']
                        dict_writer_holder[variablename].writerow({
                            'Country':
                            row[oneiteration['country_field']],
                            'Variable':
                            variablename,
                            'Varcode':
                            variablecode,
                            'Unit':
                            row['Unit'],
                            'Year':
                            row['Year'],
                            'Value':
                            row['Value']
                        })
                    if filecolumns == column_types[6]:
                        variablename = oneiteration['varname_format'] % (
                            row['Item'], row['Element'])
                        variablecode = '%s - %s' % (row['Item Code'],
                                                    row['Element Code'])
                        dict_writer_holder[variablename].writerow({
                            'Country':
                            row[oneiteration['country_field']],
                            'Variable':
                            variablename,
                            'Varcode':
                            variablecode,
                            'Unit':
                            row['Unit'],
                            'Year':
                            row['Year'],
                            'Value':
                            row['Value']
                        })
                    if filecolumns == column_types[7]:
                        variablename = oneiteration['varname_format'] % (
                            row['Item'], row['Purpose'])
                        variablecode = '%s - %s' % (row['Item Code'],
                                                    row['Purpose Code'])
                        dict_writer_holder[variablename].writerow({
                            'Country':
                            row[oneiteration['country_field']],
                            'Variable':
                            variablename,
                            'Varcode':
                            variablecode,
                            'Unit':
                            row['Unit'],
                            'Year':
                            row['Year'],
                            'Value':
                            row['Value']
                        })
                    if row_counter % 100000 == 0:
                        for fileholder, actual_file in file_stream_holder.items(
                        ):
                            actual_file.flush()
                            os.fsync(actual_file.fileno())
                for fileholder, actual_file in file_stream_holder.items():
                    actual_file.close()

                # now parsing and importing each file individually

                for each_separate_file, file_variable_name in separate_files_names.items(
                ):
                    unique_records_holder = {}
                    with open('/tmp/%s' % each_separate_file,
                              encoding='latin-1') as separate_file:
                        separate_file_reader = csv.DictReader(separate_file)
                        row_counter = 0
                        for row in separate_file_reader:
                            row_counter += 1
                            if row_counter % 300 == 0:
                                time.sleep(
                                    0.001
                                )  # this is done in order to not keep the CPU busy all the time
                            countryname = row['Country']
                            variablecode = row['Varcode']
                            variableunit = row['Unit']
                            year = row['Year']
                            value = row['Value']

                            try:
                                year = int(year)
                                value = float(value)
                            except ValueError:
                                year = False
                                value = False
                            if year is not False and value is not False:
                                unique_record = tuple([countryname, year])
                                if unique_record not in unique_records_holder:
                                    unique_records_holder[
                                        unique_record] = value
                                else:
                                    unique_records_holder[
                                        unique_record] += value
                    for key, value in unique_records_holder.items():
                        variablename = file_dataset_names[
                            original_filename] + ': ' + file_variable_name
                        process_one_row(
                            list(key)[1], str(value),
                            list(key)[0], variablecode, variablename,
                            existing_fao_variables_dict, variableunit,
                            newsource, newdataset, variable_description,
                            data_values_tuple_list)

                    os.remove('/tmp/%s' % each_separate_file)

        if len(data_values_tuple_list):  # insert any leftover data_values
            with connection.cursor() as c:
                c.executemany(insert_string, data_values_tuple_list)
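A closing note on the insert_string / data_values_tuple_list pattern used throughout these snippets: the accumulated tuples are flushed with a single executemany() at the end. A minimal sketch of flushing in fixed-size chunks inside the row loop instead, reusing the names from the snippets (the 10,000-row threshold is an arbitrary assumption), could look like:

    # Sketch only: flush accumulated data_values rows in chunks so memory stays bounded.
    if len(data_values_tuple_list) > 10000:
        with connection.cursor() as c:
            c.executemany(insert_string, data_values_tuple_list)
        data_values_tuple_list = []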