Example #1
def validate_csv(data):
    if not data:
        raise ValueError(" CSV not valid (empty?)")

    if not data[0].get('orgunit', None):
        raise ValueError(u"+++ CSV not valid: CSV must have 'orgunit' header")

    if len(data[0]) <= 1:
        raise ValueError(u"+++ No programs found in CSV")

    orgunit_uids = [ou['orgunit'] for ou in data]
    if len(orgunit_uids) != len(set(orgunit_uids)):
        raise ValueError(u"Duplicate Orgunits (rows) found in the CSV")

    for ou in orgunit_uids:
        if not is_valid_uid(ou):
            raise ValueError(
                u"OrgUnit {} is not a valid UID in the CSV".format(ou))

    for row in data:
        for p in row.keys():
            if not is_valid_uid(p) and p != 'orgunit':
                raise ValueError(
                    u"Program {} is not a valid UID in the CSV".format(p))
    return True
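
A minimal driver for this validator (an illustration, not part of the original snippet), assuming the rows come from csv.DictReader and that validate_csv and is_valid_uid live in the same module; the filename is hypothetical:

import csv

# Hypothetical usage: the file has one 'orgunit' column plus one column per
# program UID, which is the layout the checks above imply
with open('orgunit_programs.csv') as f:
    data = list(csv.DictReader(f))
validate_csv(data)  # raises ValueError on the first problem found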
Example #2
def get_org_units(selection_type, value, random_size=None):

    global api_source

    org_units = list()
    if selection_type == 'uid':
        # Hardcoded list of OU UIDs separated by commas
        org_units = value.split(',')
        for ou_uid in org_units:
            if not is_valid_uid(ou_uid):
                logger.error('OU uid provided ' + ou_uid + ' is not valid')
                exit(1)
    else:
        ou_filter = ""
        if selection_type == 'uid_children':
            if not is_valid_uid(value):
                logger.error('OU uid provided for parent ' + value +
                             ' is not valid')
                exit(1)
            ou_filter = "parent.id:in:[" + value + "]"  # To verify
        elif selection_type == 'name':
            ou_filter = "name:in:[" + value + "]"  # To verify
        elif selection_type == 'ilike':
            ou_filter = "name:ilike:" + value  # To verify
        elif selection_type == 'code':
            ou_filter = "code:in:[" + value + "]"
        elif selection_type == 'level':
            if value.isnumeric() and int(value) > 0:
                ou_filter = "level:in:[" + value + "]"
            else:
                logger.error('OU level must be a positive integer, ' +
                             value + ' is not valid')
                exit(1)
        else:
            logger.error("Unknown parameter for OU selection: " +
                         selection_type)
            exit(1)

        OUs = api_source.get('organisationUnits',
                             params={
                                 "paging": "false",
                                 "fields": "id,name",
                                 "filter": ou_filter
                             }).json()['organisationUnits']

        logger.warning("Found " + str(len(OUs)) + " OUs")
        org_units = extract_json_element_as_list(OUs, 'id')
        if random_size is not None and len(org_units) > random_size:
            logger.warning("Extracting random sample of " + str(random_size) +
                           " size")
            org_units = sample(org_units, random_size)

    return org_units
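
Illustrative calls for each selection type (hypothetical UIDs; assumes api_source and logger are already initialized as in the snippet):

ous = get_org_units('uid', 'Rp268JB6Ne4,cDw53Ej8rju')  # hardcoded UID list
ous = get_org_units('uid_children', 'O6uvpzGd5pu')     # children of a parent OU
ous = get_org_units('level', '3', random_size=50)      # random sample of 50 level-3 OUs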
Example #3
def validate_csv(data):
    if not data:
        raise ValueError("CSV not valid: no rows found (empty file?)")
    if not data[0].get('key', None) or not data[0].get('value', None):
        raise ValueError("CSV not valid: CSV must have 'key' and 'value' as headers")

    object_uids = [obj['key'] for obj in data]
    for uid in object_uids:
        if not is_valid_uid(uid):
            raise ValueError("Object {} is not a valid UID in the CSV".format(uid))
    if len(object_uids) != len(set(object_uids)):
        raise ValueError("Duplicate Objects (rows) found in the CSV")
    return True
Example #4
def validate_csv(data):
    if not data:
        raise PKClientException("CSV not valid: no rows found (empty file?)")
    if not data[0].get('uid', None) or not data[0].get('attributeValue', None):
        raise PKClientException(
            "CSV not valid: CSV must have 'uid' and 'attributeValue' as headers"
        )

    object_uids = [obj['uid'] for obj in data]
    for uid in object_uids:
        if not is_valid_uid(uid):
            raise PKClientException(
                "Object '{}' is not a valid UID in the CSV".format(uid))
    if len(object_uids) != len(set(object_uids)):
        raise PKClientException("Duplicate Objects (rows) found in the CSV.")
    return True
Example #5
def main():
    args = parse_args()
    setup_logger()

    api = Api(server=args.server, username=args.username, password=args.password)

    if not is_valid_uid(args.attribute_uid):
        logger.error("Attribute {} is not a valid UID".format(args.attribute_uid))
        sys.exit(1)

    data = list(load_csv(args.source_csv))
    validate_csv(data)

    attr_get = {'fields': 'id,name,{}Attribute'.format(args.object_type[:-1])}
    attr = api.get('attributes/{}'.format(args.attribute_uid), params=attr_get).json()
    if attr['{}Attribute'.format(args.object_type[:-1])] is False:
        logger.error("Attribute {} is not assigned to type {}".format(args.attribute_uid, args.object_type[:-1]))
        sys.exit(1)

    logger.info(
        "[{}] - Updating Attribute Values for Attribute \033[1m{}\033[0m for \033[1m{}\033[0m \033[1m{}\033[0m...".format(
            args.server, args.attribute_uid, len(data), args.object_type))
    # Grace period so the user can abort with Ctrl-C before any update is sent
    try:
        time.sleep(3)
    except KeyboardInterrupt:
        logger.warning("\033[1m{}\033[0m".format("Aborted!"))
        sys.exit(1)

    for i, obj in enumerate(data, 1):
        obj_uid = obj.get('key')
        attribute_value = obj.get('value')
        params_get = {'fields': ':owner'}
        obj_old = api.get('{}/{}'.format(args.object_type, obj_uid), params=params_get).json()
        obj_updated = create_or_update_attributevalues(obj=obj_old, attribute_uid=args.attribute_uid,
                                                       attribute_value=attribute_value)
        api.put('{}/{}'.format(args.object_type, obj_uid), params=None, data=obj_updated)
        logger.info(u"{}/{} - Updated AttributeValue: {} - {}: {}".format(i, len(data), attribute_value,
                                                                                        args.object_type[:-1], obj_uid))
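
The helper create_or_update_attributevalues is not shown above. A plausible sketch, inferred from its call site (an assumption, not the tool's actual implementation):

def create_or_update_attributevalues(obj, attribute_uid, attribute_value):
    # Drop any existing entry for this attribute, append the new value, and
    # leave all other attributeValues on the object untouched
    values = [av for av in obj.get('attributeValues', [])
              if av['attribute']['id'] != attribute_uid]
    values.append({'attribute': {'id': attribute_uid},
                   'value': attribute_value})
    obj['attributeValues'] = values
    return obj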
Example #6
def main():
    import argparse
    global api_source

    my_parser = argparse.ArgumentParser(
        prog='dummy_data_agg',
        description='Create dummy data for aggregated datasets',
        epilog="example1"
        "\nexample2",
        formatter_class=argparse.RawDescriptionHelpFormatter)
    my_parser.add_argument(
        'Dataset',
        metavar='dataset_param',
        type=str,
        help='the uid of the dataset to use or a string to filter datasets')
    my_parser.add_argument(
        '-sd',
        '--start_date',
        action="store",
        dest="start_date",
        type=str,
        help=
        'start date for the period to use to generate data (default is today - 1 year)'
    )
    my_parser.add_argument(
        '-ptf',
        '--period_type_filter',
        action="store",
        dest="period_type_filter",
        type=str,
        help='only applicable when multiple datasets match: daily, weekly, monthly, quarterly, yearly')
    my_parser.add_argument(
        '-ed',
        '--end_date',
        action="store",
        dest="end_date",
        type=str,
        help=
        'end date for the period to use to generate data (default is today)')
    my_parser.add_argument(
        '-ous',
        '--org_unit_selection',
        action="store",
        metavar=('type', 'value'),
        nargs=2,
        help=
        'Provide a type of org unit selection from [uid,uid_children,name,code,level] and the value to use. '
        'Eg: --ous uid QXtjg5dh34A')
    # Parameters should be 0 or 1
    my_parser.add_argument('-cf',
                           '--create_flat_file',
                           action="store",
                           metavar='file_name',
                           const='xxx',
                           nargs='?',
                           help='Create spreadsheet for min/max values. '
                           'Eg: --create_flat_file=my_file.csv')
    my_parser.add_argument('-uf',
                           '--use_flat_file',
                           action="store",
                           metavar='file_name',
                           nargs=1,
                           help='Use spreadsheet for min/max values. '
                           'Eg: --use_flat_file=my_file.csv')
    my_parser.add_argument(
        '-i',
        '--instance',
        action="store",
        dest="instance",
        type=str,
        help=
        'instance to use for dummy data injection (robot account is required!) - default is the URL in auth.json'
    )
    my_parser.add_argument(
        '-ours',
        '--ous_random_size',
        action="store",
        dest="ous_random_size",
        type=str,
        help=
        'From all OUs selected from ous command, takes a random sample of ous_random_size'
    )

    args = my_parser.parse_args()

    credentials_file = 'auth.json'

    try:
        with open(credentials_file, 'r') as json_file:
            credentials = json.load(json_file)
    except IOError:
        print(
            "Please provide file auth.json with credentials for DHIS2 server")
        exit(1)

    if args.instance is not None:
        api_source = Api(args.instance, credentials['dhis']['username'],
                         credentials['dhis']['password'])
    else:
        api_source = Api.from_auth_file(credentials_file)

    logger.warning("Server source running DHIS2 version {} revision {}".format(
        api_source.version, api_source.revision))

    # WHAT
    dsParam = args.Dataset
    # WHERE
    ouUIDs = list()
    # WHEN
    start_date = ""
    end_date = ""
    periods = list()

    # Assign values from parameters provided if applicable
    if args.create_flat_file is None:  # If we are creating a flat file it does not matter if not provided
        if args.org_unit_selection is None:
            print(
                'Please provide a value for org_unit_selection to create the dummy data'
            )
            exit(1)
        else:
            if len(args.org_unit_selection) >= 1:
                # ous_random_size is optional; only convert it when provided
                random_size = int(
                    args.ous_random_size) if args.ous_random_size else None
                ouUIDs = get_org_units(args.org_unit_selection[0],
                                       args.org_unit_selection[1],
                                       random_size)
                if len(ouUIDs) == 0:
                    print('The OU selection ' + args.org_unit_selection[0] +
                          ' ' + args.org_unit_selection[1] +
                          ' returned no result')
                    exit(1)

        if args.start_date is None:
            start_date = (date.today() -
                          timedelta(days=365)).strftime("%Y-%m-%d")
        else:
            start_date = args.start_date
            if not isDateFormat(start_date):
                print('Start date provided ' + start_date +
                      ' has a wrong format')
                exit(1)
        if args.end_date is None:
            end_date = (date.today()).strftime("%Y-%m-%d")
        else:
            end_date = args.end_date
            if not isDateFormat(end_date):
                print('End date provided ' + end_date + ' has a wrong format')
                exit(1)

    if args.create_flat_file is not None:
        df_min_max = pd.DataFrame({},
                                  columns=[
                                      'DE UID', 'COC UID', 'DE Name',
                                      'COC Name', 'valueType', 'min', 'max'
                                  ])
    else:
        df_min_max = None

    if args.use_flat_file is not None:
        filename = args.use_flat_file[0]  # nargs=1 yields a one-element list
        logger.info("Reading " + filename + " for min/max value")
        df_min_max = pd.read_csv(filename, sep=None, engine='python')

    CC = api_source.get('categoryCombos',
                        params={
                            "paging": "false",
                            "fields": "id,name,categoryOptionCombos"
                        }).json()['categoryCombos']
    CC = reindex(CC, 'id')
    defaultCC = ''
    for catcomboUID in CC:
        if CC[catcomboUID]['name'] == 'default':
            defaultCC = catcomboUID
            break
    if defaultCC == '':
        logger.warning('Could not find default Category Combo')

    COC = api_source.get('categoryOptionCombos',
                         params={
                             "paging": "false",
                             "fields": "id,name"
                         }).json()['categoryOptionCombos']
    COC = reindex(COC, 'id')

    DE = api_source.get(
        'dataElements',
        params={
            "paging": "false",
            "fields":
            "id,name,categoryCombo,aggregationType,valueType,optionSet"
        }).json()['dataElements']
    DE = reindex(DE, 'id')

    # Check for optionSets in the DEs (DE is reindexed by uid, so iterate keys)
    optionSetUIDs = list()
    for de_uid in DE:
        if 'optionSet' in DE[de_uid]:
            optionSetUIDs.append(DE[de_uid]['optionSet']['id'])
    if len(optionSetUIDs) > 0:
        options = api_source.get('options',
                                 params={
                                     "paging": "false",
                                     "fields": "id,name,code",
                                     "filter": "optionSet.id:in:[" +
                                     ','.join(optionSetUIDs) + "]"
                                 }).json()['options']

    de_numeric_types = [
        'INTEGER_POSITIVE', 'INTEGER', 'INTEGER_ZERO_OR_POSITIVE', 'NUMBER',
        'PERCENTAGE', 'INTEGER_ZERO_OR_NEGATIVE'
    ]

    # Get the datasets
    if is_valid_uid(dsParam):
        dataset_filter = "id:eq:" + dsParam
    else:
        dataset_filter = "name:like:" + dsParam

    dataSets = api_source.get(
        'dataSets',
        params={
            "paging": "false",
            "fields": "id,name,dataSetElements,periodType,"
            "formType,dataEntryForm,sections,organisationUnits",
            "filter": dataset_filter
        }).json()['dataSets']
    # Validate the dataset(s) returned
    if len(dataSets) == 0:
        logger.error("Could not find any dataset")
        exit(1)
    else:
        if len(dataSets) > 1 and args.period_type_filter is not None:
            periodTypeFilter = args.period_type_filter
            if periodTypeFilter.lower() not in [
                    'daily', 'weekly', 'monthly', 'quarterly', 'yearly'
            ]:
                logger.error('Period type to filter not supported: ' +
                             periodTypeFilter)
            else:
                filteredDatasets = list()
                for ds in dataSets:
                    if ds['periodType'].lower() == periodTypeFilter.lower():
                        filteredDatasets.append(ds)
                dataSets = filteredDatasets

        # Create workbook
        if args.create_flat_file is not None:
            output_file_name = args.create_flat_file + '.xlsx'
            writer = pd.ExcelWriter(output_file_name)
        for ds in dataSets:
            logger.info("Processing dataset " + ds['name'])
            if start_date != "" and end_date != "":
                logger.info("Period type is " + ds['periodType'] +
                            " - Generating periods from " + start_date +
                            " to " + end_date)
                periods = get_periods(ds['periodType'], start_date, end_date)
            if len(ouUIDs) > 0:
                logger.info("Verifying org unit selection")
                # Iterate over a copy: removing elements from the list being
                # iterated would silently skip entries
                for ou_uid in list(ouUIDs):
                    if not is_ou_assigned_to_ds(ou_uid, ds):
                        ouUIDs.remove(ou_uid)
                        logger.warning("Org unit " + ou_uid +
                                       " is not assigned to dataset " +
                                       ds['id'])

            dsDataElements = dict()
            greyedFields = list()

            # Analyse the sections of the dataSet looking for greyedFields
            if 'sections' in ds:
                sectionUIDs = [section['id'] for section in ds['sections']]
                logger.info("Found " + str(len(sectionUIDs)) +
                            " sections in dataset")
                # Get sections
                sections = api_source.get(
                    'sections',
                    params={
                        "paging": "false",
                        "fields":
                        "id,name,greyedFields[dataElement,categoryOptionCombo]",
                        "filter": "id:in:[" + ','.join(sectionUIDs) + "]"
                    }).json()['sections']
                for section in sections:
                    if len(section['greyedFields']) > 0:
                        for element in section['greyedFields']:
                            greyedFields.append(
                                element['dataElement']['id'] + '.' +
                                element['categoryOptionCombo']['id'])

            # Reset the min/max sheet once per dataset (one worksheet per
            # dataset); leave df_min_max untouched when it was loaded via
            # --use_flat_file, otherwise the flat-file values would be wiped
            if args.create_flat_file is not None:
                df_min_max = pd.DataFrame({},
                                          columns=[
                                              'DE UID', 'COC UID', 'DE Name',
                                              'COC Name', 'valueType', 'min',
                                              'max'
                                          ])

            # Get dataElements
            for DSE in ds['dataSetElements']:
                de = ''
                if 'dataElement' in DSE:
                    deUID = DSE['dataElement']['id']
                    dsDataElements[deUID] = dict()
                    de = DE[deUID]  # Get all dataElement information
                    dsDataElements[deUID]['valueType'] = de['valueType']

                    # Add options to the dataelement dict if pertinent
                    if 'optionSet' in de:
                        options = api_source.get('options',
                                                 params={
                                                     "paging":
                                                     "false",
                                                     "fields":
                                                     "id,name,code",
                                                     "filter":
                                                     "optionSet.id:eq:" +
                                                     de['optionSet']['id']
                                                 }).json()['options']
                        dsDataElements[deUID]['options'] = list()
                        for option in options:
                            dsDataElements[deUID]['options'].append(
                                option['code'])

                    # Check if the Category Combo is specified in the dataElement definition
                    COCs = list()
                    if 'categoryCombo' in de and de['categoryCombo'][
                            'id'] != defaultCC:
                        COCs = CC[de['categoryCombo']
                                  ['id']]['categoryOptionCombos']

                    # Check if Category Combo is specified for the dataElement in the dataSet
                    elif 'categoryCombo' in DSE and DSE['categoryCombo'][
                            'id'] != defaultCC:
                        COCs = CC[DSE['categoryCombo']
                                  ['id']]['categoryOptionCombos']

                    # Add COCs to the dataElement dictionary
                    if len(COCs) > 0:
                        dsDataElements[deUID]['COCs'] = list()
                        for coc in COCs:
                            dsDataElements[deUID]['COCs'].append(coc['id'])

            logger.info("Found " + str(len(dsDataElements)) +
                        " dataElements in dataset")

            if args.create_flat_file is not None:
                for de in dsDataElements:
                    if 'COCs' in dsDataElements[de]:
                        for coc in dsDataElements[de]['COCs']:
                            str_pair = de + "." + coc
                            if str_pair not in greyedFields:
                                df_min_max = df_min_max.append(
                                    {
                                        "DE UID": de,
                                        "COC UID": coc,
                                        "DE Name": DE[de]['name'],
                                        "COC Name": COC[coc]['name'],
                                        "valueType": dsDataElements[de]['valueType'],
                                        "min": "",
                                        "max": ""
                                    },
                                    ignore_index=True)
                    else:
                        df_min_max = df_min_max.append(
                            {
                                "DE UID": de,
                                "COC UID": "",
                                "DE Name": DE[de]['name'],
                                "COC Name": "",
                                "valueType": dsDataElements[de]['valueType'],
                                "min": "",
                                "max": ""
                            },
                            ignore_index=True)

                # Save csv file
                # export_csv = df_min_max.to_csv(r'./ds_' + ds['name'].replace(' ', '_') + '_min_max.csv', index=None,
                #                               header=True)
                df_min_max.to_excel(writer, ds['id'], index=False)

            else:
                dataValueSets = list()
                ouCount = 1
                for ouUID in ouUIDs:
                    logger.info("Processing org unit " + ouUID + " - " +
                                str(ouCount) + "/" + str(len(ouUIDs)))
                    for period in periods:
                        #logger.info("Processing period " + period)
                        for de in dsDataElements:
                            value_type = dsDataElements[de]['valueType']
                            min_value = max_value = None
                            options = None
                            if 'options' in dsDataElements[de]:
                                options = dsDataElements[de]['options']
                            if 'COCs' in dsDataElements[de]:
                                for coc in dsDataElements[de]['COCs']:
                                    str_pair = de + "." + coc
                                    if str_pair not in greyedFields:
                                        if df_min_max is not None:
                                            min_value, max_value = get_min_max_from_df(
                                                df_min_max, value_type, de,
                                                coc)
                                        # logger.info(
                                        #     "Generating value for DE (" + value_type + "): " + DE[de]['name'] + " with COC")
                                        value = generate_dummy_value({
                                            'value_type': value_type,
                                            'min_value': min_value,
                                            'max_value': max_value,
                                            'options': options
                                        })
                                        if value is not None:  # Skip if it is None
                                            dataValueSets.append({
                                                "dataElement": de,
                                                "categoryOptionCombo": coc,
                                                "value": value,
                                                "orgUnit": ouUID,
                                                "period": period
                                            })
                                    # else:
                                    #     logger.warning('Skipping ' + str_pair + ' because is greyed in section')
                            else:
                                if df_min_max is not None:
                                    min_value, max_value = get_min_max_from_df(
                                        df_min_max, value_type, de)
                                # logger.info("Generating value for DE (" + value_type + "): " + DE[de]['name'])
                                value = generate_dummy_value({
                                    'value_type': value_type,
                                    'min_value': min_value,
                                    'max_value': max_value,
                                    'options': options
                                })
                                if value is not None:  # Skip if it is None
                                    dataValueSets.append({
                                        "dataElement": de,
                                        "value": value,
                                        "orgUnit": ouUID,
                                        "period": period
                                    })

                    post_to_server({'dataValues': dataValueSets},
                                   'dataValueSets')
                    dataValueSets = list()
                    ouCount += 1

        if args.create_flat_file is not None:
            writer.save()
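
Two helpers this example leans on are not shown. Plausible reconstructions, inferred purely from their call sites (assumptions, not the script's actual code):

from random import choice, randint, uniform

def reindex(objects, key):
    # Turn the API's list of objects into a dict keyed by one field, so that
    # reindex(DE, 'id')[some_uid] returns that dataElement's full dict
    return {obj[key]: obj for obj in objects}

def generate_dummy_value(spec):
    # Prefer a random option code when the DE carries an optionSet; otherwise
    # draw a number within [min_value, max_value], defaulting to 0..100
    if spec.get('options'):
        return choice(spec['options'])
    lo = spec['min_value'] if spec['min_value'] is not None else 0
    hi = spec['max_value'] if spec['max_value'] is not None else 100
    if spec['value_type'] in ('NUMBER', 'PERCENTAGE'):
        return round(uniform(float(lo), float(hi)), 1)
    if spec['value_type'].startswith('INTEGER'):
        return randint(int(lo), int(hi))
    return None  # caller skips None values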
Example #7
def main():
    pd.set_option('display.max_columns', None)

    import argparse

    my_parser = argparse.ArgumentParser(prog='create_flat_file',
                                        description='Create dummy data flat file in Google Spreadsheets',
                                        epilog="python create_flat_file Lt6P15ps7f6 --with_teis_from=GZ5Ty90HtW [email protected]"
                                               "\npython create_flat_file Lt6P15ps7f6 --repeat_stage Hj38Uhfo012 5 --repeat_stage 77Ujkfoi9kG 3 [email protected] [email protected]",
                                        formatter_class=argparse.RawDescriptionHelpFormatter)
    my_parser.add_argument('Program_UID', metavar='program_uid', type=str,
                           help='the uid of the program to use')
    my_parser.add_argument('-wtf', '--with_teis_from', action="store", dest="OrgUnit", type=str,
                           help='Pulls TEIs from specified org unit and adds them to flat file. '
                                'Eg: --with_teis_from_ou=Q7RbNZcHrQ9')
    my_parser.add_argument('-rs', '--repeat_stage', action="append", metavar=('stage_uid', 'number_repeats'), nargs=2,
                           help='provide a stage uid which is REPEATABLE and specify how many times you are planning to enter it. '
                                'Eg: --repeat_stage QXtjg5dh34A 3')
    my_parser.add_argument('-sw', '--share_with', action="append", metavar='email', nargs=1,
                           help='email address to share the generated spreadsheet with as OWNER. '
                                'Eg: [email protected]')
    args = my_parser.parse_args()

    program_uid = args.Program_UID
    if not is_valid_uid(program_uid):
        print('The program uid specified is not valid')
        sys.exit()
    if args.OrgUnit is not None and not is_valid_uid(args.OrgUnit):
        print('The orgunit uid specified is not valid')
        sys.exit()
    if args.repeat_stage is not None and len(args.repeat_stage) > 0:
        for param in args.repeat_stage:
            if not is_valid_uid(param[0]):
                print('The program stage uid specified ' + param[0] + ' is not valid')
                sys.exit()
            try:
                int(param[1])
            except ValueError:
                print('The repetition value ' + param[1] + ' is not an integer')
                sys.exit()
    if args.share_with is not None and len(args.share_with) > 0:
        for param in args.share_with:
            if not re.search(r'^[a-z0-9]+[._]?[a-z0-9]+@\w+\.\w{2,3}$', param[0]):
                print("The email address " + param[0] + " is not valid")

    # Print DHIS2 Info
    logger.warning("Server source running DHIS2 version {} revision {}"
                   .format(api_source.version, api_source.revision))

    df = pd.DataFrame({}, columns=["Stage", "Section", "TEA / DE / eventDate", "UID", "valueType", "optionSet",
                                   "mandatory"])

    try:
        program = api_source.get('programs/' + program_uid,
                                 params={"paging": "false",
                                         "fields": "id,name,enrollmentDateLabel,programTrackedEntityAttributes,programStages,programRuleVariables,organisationUnits,trackedEntityType,version"}).json()
    except RequestException as e:
        if e.code == 404:
            logger.error('Program ' + program_uid + ' specified does not exist')
            sys.exit()
        raise  # surface unexpected API errors instead of continuing unbound

    if isinstance(program, dict):
        # If the program has many org units assigned, this can take a long time to run!!!
        # orgunits_uid = json_extract_nested_ids(program, 'organisationUnits')
        # if args.OrgUnit is not None and args.OrgUnit not in orgunits_uid:
        #     logger.error('The organisation unit ' + args.OrgUnit + ' is not assigned to program ' + program_uid)
        # print('Number of OrgUnits:' + str(len(orgunits_uid)))

        programStages_uid = json_extract_nested_ids(program, 'programStages')
        if args.repeat_stage is not None:
            for param in args.repeat_stage:
                if param[0] not in programStages_uid:
                    logger.error(param[0] + ' specified is not a valid stage for program ' + program_uid)
                    sys.exit()

        teas_uid = json_extract_nested_ids(program, 'trackedEntityAttribute')
        programRuleVariables_uid = json_extract_nested_ids(program, 'programRuleVariables')

        print('Program:' + program['name'])

        print('Number of TEAs:' + str(len(teas_uid)))
        TEAs = api_source.get('trackedEntityAttributes',
                              params={"paging": "false", "fields": "id,name,aggregationType,valueType,optionSet",
                                      "filter": "id:in:[" + ','.join(teas_uid) + "]"}).json()[
            'trackedEntityAttributes']
        TEAs = reindex(TEAs, 'id')

        # Add the first row with eventDate and Enrollment label
        enrollmentDateLabel = "Enrollment date"
        if 'enrollmentDateLabel' in program:
            enrollmentDateLabel = program['enrollmentDateLabel']
        # Add the program UID as UID for enrollmentDate
        df = df.append({"Stage": "Enrollment", "Section": "", "TEA / DE / eventDate": enrollmentDateLabel,
                        "UID": program_uid, "valueType": "DATE", "optionSet": "", "mandatory": 'True'},
                       ignore_index=True)
        optionSetDict = dict()
        for TEA in program['programTrackedEntityAttributes']:
            tea_uid = TEA['trackedEntityAttribute']['id']
            optionSet_def = ""
            if 'optionSet' in TEAs[tea_uid]:
                optionSet = TEAs[tea_uid]['optionSet']['id']
                if optionSet not in optionSetDict:
                    options = api_source.get('options', params={"paging": "false",
                                                                "order": "sortOrder:asc",
                                                                "fields": "id,code",
                                                                "filter": "optionSet.id:eq:" + optionSet}).json()[
                        'options']
                    optionsList = json_extract(options, 'code')
                    optionSetDict[optionSet] = optionsList
                optionSet_def = '\n'.join(optionSetDict[optionSet])
            df = df.append({"Stage": "", "Section": "", "TEA / DE / eventDate": TEA['name'],
                            "UID": tea_uid,
                            "valueType": TEA['valueType'], "optionSet": optionSet_def,
                            "mandatory": TEA['mandatory']}, ignore_index=True)

            # print("TEA: " + TEA['name'] + " (" + TEA['valueType'] + ")")

        print('Number of Program Rule Variables:' + str(len(programRuleVariables_uid)))
        programRuleVariables = api_source.get('programRuleVariables',
                                              params={"paging": "false",
                                                      "filter": "id:in:[" + ','.join(programRuleVariables_uid) + "]",
                                                      "fields": "id,name,programRuleVariableSourceType,dataElement,trackedEntityAttribute"
                                                      }).json()['programRuleVariables']
        programRules = api_source.get('programRules',
                                      params={"paging": "false",
                                              "filter": "program.id:eq:" + program_uid,
                                              "fields": "id,name,condition"}).json()['programRules']

        programRules_uid = json_extract(programRules, 'id')
        programRules = reindex(programRules, 'id')
        print('Number of Program Rules:' + str(len(programRules_uid)))
        # for uid in programRules:
        #     print('Program Rule: ' + programRules[uid]['name'])

        programRuleActions = api_source.get('programRuleActions',
                                            params={"paging": "false",
                                                    "filter": "programRule.id:in:[" + ','.join(programRules_uid) + "]",
                                                    "fields": "id,name,programRuleActionType,data,content"}).json()[
            'programRuleActions']
        programRuleActions_uid = json_extract(programRuleActions, 'id')
        print('Number of Program Rule Actions:' + str(len(programRuleActions_uid)))

        print('Number of Program Stages:' + str(len(programStages_uid)))
        programStages = api_source.get('programStages',
                                       params={"paging": "false", "order": "sortOrder:asc",
                                               "filter": "id:in:[" + ','.join(programStages_uid) + "]",
                                               "fields": "id,name,executionDateLabel,programStageSections,programStageDataElements"}).json()[
            'programStages']

        for programStage in programStages:
            print('Stage:' + programStage['name'] + " (" + programStage['id'] + ")")
            # Add header to dataframe
            event_date_label = 'Event Date'
            if 'executionDateLabel' in programStage:
                event_date_label = programStage['executionDateLabel']
            df = df.append({"Stage": programStage['name'], "Section": "",
                            "TEA / DE / eventDate": event_date_label,
                            "UID": programStage['id'], "valueType": "DATE", "optionSet": "", "mandatory": 'True'},
                           ignore_index=True)
            des_uid = json_extract_nested_ids(programStage, 'dataElement')

            dataElements = api_source.get('dataElements',
                                          params={"paging": "false",
                                                  "fields": "id,name,categoryCombo,aggregationType,valueType,optionSet",
                                                  "filter": "id:in:[" + ','.join(des_uid) + "]"}).json()[
                'dataElements']
            dataElements = reindex(dataElements, 'id')
            # dataElements = reindex(dataElements, 'id')

            print('Number of DEs:' + str(len(des_uid)))
            if 'programStageSections' in programStage and len(programStage['programStageSections']) > 0:
                programStageSections_uid = json_extract_nested_ids(programStage, 'programStageSections')
                programStageSections = api_source.get('programStageSections',
                                                      params={"paging": "false", "order": "sortOrder:asc",
                                                              "fields": "id,name,dataElements",
                                                              "filter": "id:in:[" + ','.join(
                                                                  programStageSections_uid) + "]"}).json()[
                    'programStageSections']
                dataElements_programStage = dict()
                for elem in programStage['programStageDataElements']:
                    key_value = elem['dataElement']['id']
                    dataElements_programStage[key_value] = elem

                for programStageSection in programStageSections:
                    print("Program Stage Section:" + programStageSection['name'])
                    section_label = programStageSection['name']

                    for dataElement in programStageSection['dataElements']:
                        dataElement_id = dataElement['id']
                        # This will fail if the DE is present in the PSSection but not in the PS, so we check first
                        # if the key exists. If not, we warn the user and skip this
                        if dataElement_id not in dataElements:
                            logger.warning("Data Element with UID " + dataElement_id +
                                           " is present in program stage section but not assigned to the program stage")
                            logger.warning("SKIPPING")
                        else:
                            dataElement_def = dataElements[dataElement_id]
                            dataElement_PS = dataElements_programStage[dataElement_id]
                            print('DE: ' + dataElement_def['name'] + " (" + dataElement_def['valueType'] + ")")
                            optionSet_def = ""

                            if 'optionSet' in dataElement_def:
                                optionSet = dataElement_def['optionSet']['id']
                                if optionSet not in optionSetDict:
                                    options = api_source.get('options', params={"paging": "false",
                                                                                "order": "sortOrder:asc",
                                                                                "fields": "id,code",
                                                                                "filter": "optionSet.id:eq:" + optionSet}).json()[
                                        'options']
                                    optionsList = json_extract(options, 'code')
                                    optionSetDict[optionSet] = optionsList

                                optionSet_def = '\n'.join(optionSetDict[optionSet])

                            df = df.append({"Stage": "", "Section": section_label,
                                            "TEA / DE / eventDate": dataElement_def['name'],
                                            "UID": dataElement_id, "valueType": dataElement_def['valueType'],
                                            "optionSet": optionSet_def, "mandatory": dataElement_PS['compulsory']},
                                           ignore_index=True)
                        section_label = ""  # only label the first row of each section

            else:  # Assume BASIC todo: create CUSTOM
                for dataElement in programStage['programStageDataElements']:
                    dataElement_id = dataElement['dataElement']['id']
                    dataElement_def = dataElements[dataElement_id]
                    print('DE: ' + dataElement_def['name'] + " (" + dataElement_def['valueType'] + ")")
                    optionSet_def = ""
                    if 'optionSet' in dataElement_def:
                        optionSet = dataElement_def['optionSet']['id']
                        if optionSet not in optionSetDict:
                            options = api_source.get('options', params={"paging": "false",
                                                                        "order": "sortOrder:asc",
                                                                        "fields": "id,code",
                                                                        "filter": "optionSet.id:eq:" + optionSet}).json()[
                                'options']
                            optionsList = json_extract(options, 'code')
                            optionSetDict[optionSet] = optionsList

                        optionSet_def = '\n'.join(optionSetDict[optionSet])

                        # print('    with optionSet = ' + dataElement['optionSet']['id'])
                    df = df.append({"Stage": "", "Section": "", "TEA / DE / eventDate": dataElement_def['name'],
                                    "UID": dataElement_id, "valueType": dataElement_def['valueType'],
                                    "optionSet": optionSet_def, "mandatory": dataElement['compulsory']},
                                   ignore_index=True)

                # Find out if it is used in programRuleVariable
                # for PRV in programRuleVariables:
                #     if 'dataElement' in PRV and PRV['dataElement']['id'] == dataElement['id']:
                #         print('Used in PRV:' + PRV['name'] + " (" + PRV['id'] + ")")
                # # Find out if used in ProgramRuleAction
                # for PRA in programRuleActions:
                #     if 'dataElement' in PRA and PRA['dataElement']['id'] == dataElement['id']:
                #         print('Used in PRA:' + PRA['name'] + " (" + PRA['id'] + ")")
                #         print('Program Rule:' + programRules[PRA['programRule']['id']]['name'])
        # stages_counter = { 'ang4CLldbIu':25 }
        # df = add_repeatable_stages(df, stages_counter)
        # for tei in list_teis:
        #     if len(tei["enrollments"][0]) > 0:  # and tei["enrollments"][0]["storedBy"] == user:
        #         result = add_json_tei_to_metadata_df(tei, df)
        #
        # export_csv = df.to_csv(r'./program-Case_Based_Surveillance-Dummy_data.csv', index=None, header=True)

        # get TEIs from OU
        if args.OrgUnit is not None:
            params = {
                'ou': args.OrgUnit,
                'ouMode': 'DESCENDANTS',
                'program': program_uid,
                'skipPaging': 'true',
                # 'lastUpdatedDuration': '4d',
                'fields': '*',
                'includeAllAttributes': 'true'
            }

            list_teis = api_source.get('trackedEntityInstances', params=params).json()['trackedEntityInstances']

            logger.info("Found " + str(len(list_teis)) + " TEIs")

            stages_counter = dict()
            for tei in list_teis:
                counter = dict()
                if "enrollments" in tei and len(
                        tei["enrollments"][0]) > 0:  # and tei["enrollments"][0]["storedBy"] == user:
                    if len(tei['enrollments']) == 1:
                        if tei['enrollments'][0]['program'] == program_uid:
                            if 'events' in tei['enrollments'][0]:
                                events = tei['enrollments'][0]['events']
                                for event in events:
                                    if event["programStage"] in counter:
                                        counter[event["programStage"]] += 1
                                    else:
                                        counter[event["programStage"]] = 1
                        else:
                            logger.error(
                                "TEI enrolled in program " + tei['enrollments'][0]['program'] + " not supported")
                    else:
                        logger.error('error, multi-enrollment not supported')
                for key in counter:
                    if key not in stages_counter or stages_counter[key] < counter[key]:
                        stages_counter[key] = counter[key]
                        # logger.info('Found ' + str(stages_counter[key]) + ' instances of ' + key)

            df = add_repeatable_stages(df, stages_counter)
            for tei in list_teis:
                if "enrollments" in tei and len(tei["enrollments"][0]) > 0:  # and tei["enrollments"][0]["storedBy"] == user:
                    result = add_json_tei_to_metadata_df(tei, df)

        # Check if there are repeatable stages (only if TEIs were not provided)
        elif args.repeat_stage is not None and len(args.repeat_stage) > 0:
            stages_counter = dict()
            for param in args.repeat_stage:
                stages_counter[param[0]] = int(param[1])
            df = add_repeatable_stages(df, stages_counter)

        # Create the spreadsheet
        url = create_google_spreadsheet(program, df, args.share_with)
        if url != "":
            logger.info('Spreadsheet created here: ' + url)
        else:
            logger.error("Something went wrong")
Example #8
            metadataWithCustomForms = metadataWithCustomForms + programStages

        logger.info("Found " + str(len(metadataWithCustomForms)) + " custom forms in server")

        # Check for custom reports
        # htmlReports = api.get("") # api/reports/UpFWLROhLW7?fields=designContent
        # if len(htmlReports) > 0:
        #     metadataWithCustomForms = metadataWithCustomForms + htmlReports
        # logger.info("Found " + str(len(htmlReports)) + " custom reports in server")

        for element in metadataWithCustomForms:
            if update:
                element_found = False
                for sheet in xls_read.sheet_names:
                    # Only process the worksheets which have a DHIS2 UID
                    if is_valid_uid(sheet) and sheet == element['id']:
                        # we found the dataSet/programStage in the excel -> get it as df
                        element_found = True
                        df_old = pd.read_excel(xls_read, sheet_name=sheet)
                        df_old.fillna('', inplace=True)

                        if 'key' not in df_old or 'en' not in df_old:
                            logger.error("Worksheet for " + sheet + " is missing the 'key' and/or 'en' column")
                            exit(1)
                        df = pd.DataFrame({}, columns=df_old.columns)
                        break
                if not element_found:
                    df_old = None
                    df = pd.DataFrame({}, columns=['key', 'en'])
            else:
                df = pd.DataFrame({}, columns=['key', 'en'])
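
This fragment presupposes that the workbook was opened up-front, something like the following (an assumption based on the xls_read.sheet_names and pd.read_excel calls above; the filename is hypothetical):

import pandas as pd

# pandas.ExcelFile exposes .sheet_names and can be passed directly to
# pd.read_excel(..., sheet_name=...), which is exactly how xls_read is used
xls_read = pd.ExcelFile('translations.xlsx')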
Example #9
def parse_args():
    description = "{}Set Attribute Values sourced from CSV file.{}".format(
        Style.BRIGHT, Style.RESET_ALL)

    usage = """
{}Example:{} dhis2-pk-attribute-setter -s play.dhis2.org/dev -u admin -p district -c file.csv -t organisationUnits -a pt5Ll9bb2oP

{}CSV file structure:{}
uid   | attributeValue
------|---------------
UID   | myValue
""".format(Style.BRIGHT, Style.RESET_ALL, Style.BRIGHT, Style.RESET_ALL)

    parser = argparse.ArgumentParser(
        usage=usage,
        description=description,
        formatter_class=argparse.RawTextHelpFormatter)
    parser._action_groups.pop()
    required = parser.add_argument_group('required arguments')
    required.add_argument(
        '-t',
        dest='object_type',
        action='store',
        required=True,
        help=
        "Object type to set attributeValues to: {organisationUnits, dataElements, ...}"
    )
    required.add_argument('-c',
                          dest='source_csv',
                          action='store',
                          required=True,
                          help="Path to CSV file with Attribute Values")
    required.add_argument('-a',
                          dest='attribute_uid',
                          action='store',
                          help='Attribute UID',
                          required=True)

    optional = parser.add_argument_group('optional arguments')
    optional.add_argument('-s',
                          dest='server',
                          action='store',
                          help="DHIS2 server URL")
    optional.add_argument('-u',
                          dest='username',
                          action='store',
                          help="DHIS2 username")
    optional.add_argument('-p',
                          dest='password',
                          action='store',
                          help="DHIS2 password")

    args = parser.parse_args()
    if args.object_type not in OBJ_TYPES:
        raise PKClientException(
            "argument -t must be a valid object_type - one of:\n{}".format(
                ', '.join(sorted(OBJ_TYPES))))
    if not is_valid_uid(args.attribute_uid):
        raise PKClientException("Attribute {} is not a valid UID".format(
            args.attribute_uid))

    if not args.password:
        if not args.username:
            raise PKClientException(
                "ArgumentError: Must provide a username via argument -u")
        password = getpass.getpass(
            prompt="Password for {} @ {}: ".format(args.username, args.server))
    else:
        password = args.password
    return args, password
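
Since parse_args() returns the namespace and the (possibly prompted) password as a pair, a caller must unpack both. A minimal sketch, assuming the same Api wrapper as in the other examples:

args, password = parse_args()
api = Api(server=args.server, username=args.username, password=password)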
Example #10
def main():

    my_parser = argparse.ArgumentParser(description='dashboard_checker')
    my_parser.add_argument('-i',
                           '--instance',
                           action="store",
                           dest="instance",
                           type=str,
                           help='URL of the instance to process')
    my_parser.add_argument(
        '-df',
        '--dashboard_filter',
        action="store",
        dest="dashboard_filter",
        type=str,
        help='Either a prefix or a list of comma separated UIDs')
    my_parser.add_argument('--no_data_warning',
                           dest='no_data_warning',
                           action='store_true')
    my_parser.add_argument('--omit-no_data_warning',
                           dest='no_data_warning',
                           action='store_false')
    my_parser.add_argument('-v',
                           '--verbose',
                           dest='verbose',
                           action='store_true')
    my_parser.set_defaults(no_data_warning=True)
    my_parser.set_defaults(verbose=False)
    args = my_parser.parse_args()

    if args.instance is not None:
        instances = [{
            'name': args.instance.split('/')[-1].replace(':', '_'),
            'url': args.instance
        }]
    else:
        instances = [
            #{'name':'newdemos', 'url':'https://who-demos.dhis2.org/newdemos', 'SQL_view_TRK':'xfemQFHUTUV', 'SQL_view_AGG':'lg8lFbDMw2Z'}
            #{'name':'tracker_dev', 'url': 'https://who-dev.dhis2.org/tracker_dev', 'SQL_view_TRK': 'xfemQFHUTUV', 'SQL_view_AGG': 'lg8lFbDMw2Z'}
            {
                'name': 'covid-19',
                'url': 'https://demos.dhis2.org/covid-19',
                'SQL_view_TRK': 'xfemQFHUTUV',
                'SQL_view_AGG': 'lg8lFbDMw2Z'
            }
        ]

    log_file = "./dashboard_checker.log"
    setup_logger(log_file)

    credentials_file = './auth.json'

    df = pd.DataFrame({},
                      columns=[
                          'dashboard_name', 'type', 'uid', 'name', 'issue',
                          'api_link', 'app_link'
                      ])

    errors_found = 0

    for instance in instances:
        try:
            with open(credentials_file, 'r') as json_file:
                credentials = json.load(json_file)
        except IOError:
            print(
                "Please provide file auth.json with credentials for DHIS2 server"
            )
            exit(1)
        api_source = Api(instance['url'], credentials['dhis']['username'],
                         credentials['dhis']['password'])

        # Get dashboards
        params = {"fields": "*", "paging": "false"}
        if args.dashboard_filter is not None:
            item_list = args.dashboard_filter.split(',')
            if len(item_list) == 1 and not is_valid_uid(item_list[0]):
                params["filter"] = "name:$like:" + args.dashboard_filter
            # Let's consider it as a list of uids
            else:
                # Validate the list
                for item in item_list:
                    if not is_valid_uid(item):
                        logger.error("UID " + item +
                                     " is not a valid DHIS2 UID")
                        exit(1)
                params["filter"] = "id:in:[" + args.dashboard_filter + "]"

        dashboards = api_source.get('dashboards',
                                    params=params).json()['dashboards']

        dashboard_item_with_issues_row = dict()

        for dashboard in dashboards:
            logger.info('Processing dashboard ' + dashboard['name'])
            dashboard_item_with_issues_row['dashboard_name'] = dashboard[
                'name']
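            # From 2.34 on, charts and report tables are merged into
            # 'visualization'; this check treats anything that is not 2.33
            # as a newer instance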
            if '2.33' not in api_source.version:
                dashboard_items = [
                    'visualization', 'eventReport', 'eventChart', 'map'
                ]
            else:
                dashboard_items = [
                    'chart', 'reportTable', 'eventReport', 'eventChart', 'map'
                ]
            for dashboardItem in dashboard['dashboardItems']:
                # The dashboard item could be of type TEXT, for example,
                # in which case there is nothing to check
                dashboard_item_type_found = False
                for dashboard_item in dashboard_items:
                    if dashboard_item in dashboardItem:
                        dashboard_item_type_found = True
                        dashboard_item_with_issues_row['issue'] = ""
                        dashboard_item_with_issues_row['type'] = dashboard_item
                        dashboard_item_with_issues_row['uid'] = dashboardItem[
                            dashboard_item]['id']
                        dashboard_item_with_issues_row['name'] = ""
                        if args.verbose:
                            logger.info('Trying ' + dashboard_item + ' ' +
                                        dashboardItem[dashboard_item]['id'])
                        try:
                            api_endpoint = dashboard_item + 's/' + dashboardItem[
                                dashboard_item]['id']
                            dashboard_item_with_issues_row[
                                'api_link'] = instance[
                                    'url'] + '/api/' + api_endpoint
                            item = api_source.get(api_endpoint,
                                                  params={
                                                      "fields": "*"
                                                  }).json()
                        except RequestException as e:
                            logger.error(dashboard_item + ' ' +
                                         dashboardItem[dashboard_item]['id'] +
                                         " BROKEN with error " + str(e))
                            dashboard_item_with_issues_row['issue'] = str(e)
                            errors_found += 1
                        else:
                            dashboard_item_with_issues_row['name'] = item[
                                'name']
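                            # Data checks are skipped for event reports and
                            # event charts; their analytics queries differ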
                            if dashboard_item in ['eventReport', 'eventChart']:
                                continue
                            # Try to get the data
                            try:
                                if dashboard_item == 'map':
                                    for map_view in item['mapViews']:
                                        params = build_analytics_payload(
                                            map_view, args.verbose)
                                        if params != {}:
                                            if 'layer' in map_view and map_view[
                                                    'layer'] == 'event' and 'program' in map_view:
                                                data = api_source.get(
                                                    'analytics/events/query/' +
                                                    map_view['program']['id'],
                                                    params=params).json()
                                            else:
                                                data = api_source.get(
                                                    'analytics',
                                                    params=params).json()
                                else:
                                    data = api_source.get(
                                        'analytics',
                                        params=build_analytics_payload(
                                            item, args.verbose)).json()
                            except RequestException as e:
                                logger.error(
                                    dashboard_item + ' ' +
                                    dashboardItem[dashboard_item]['id'] +
                                    " data cannot be retrieved with error " +
                                    str(e))
                                dashboard_item_with_issues_row['issue'] = str(
                                    e)
                                errors_found += 1
                            else:
                                if args.no_data_warning and (
                                        'rows' not in data
                                        or len(data['rows']) == 0):
                                    dashboard_item_with_issues_row[
                                        'issue'] = 'NO DATA'
                                    logger.warning(
                                        dashboardItem[dashboard_item]['id'] +
                                        ': NO DATA!!!')

                if dashboard_item_type_found and dashboard_item_with_issues_row[
                        'issue'] != "":
                    if dashboard_item_with_issues_row[
                            'type'] == 'visualization':
                        dashboard_item_with_issues_row['app_link'] = instance['url'] + \
                                                                     '/dhis-web-data-visualizer/index.html#/' + \
                                                                     dashboard_item_with_issues_row['uid']
                    elif dashboard_item_with_issues_row['type'] == 'map':
                        dashboard_item_with_issues_row['app_link'] = instance['url'] + \
                                                                     '/dhis-web-maps/index.html'
                    elif dashboard_item_with_issues_row[
                            'type'] == 'eventReport':
                        dashboard_item_with_issues_row['app_link'] = instance['url'] + \
                                                                     '/dhis-web-event-reports/index.html?id=' + \
                                                                     dashboard_item_with_issues_row['uid']
                    elif dashboard_item_with_issues_row[
                            'type'] == 'eventChart':
                        dashboard_item_with_issues_row['app_link'] = instance['url'] + \
                                                                     '/dhis-web-event-visualizer/index.html?id=' + \
                                                                     dashboard_item_with_issues_row['uid']
                    df = pd.concat(
                        [df, pd.DataFrame([dashboard_item_with_issues_row])],
                        ignore_index=True)

    # Note: this exports results for the last processed instance only
    df.to_csv(instance['name'] + '.csv', index=False, header=True)

    # Release log handlers
    handlers = logger.handlers[:]
    for handler in handlers:
        handler.close()
        logger.removeHandler(handler)

    return errors_found
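
The is_valid_uid() helper used throughout these examples is never defined in
them. A minimal sketch, assuming the standard DHIS2 UID format (11 characters:
a letter followed by 10 letters or digits):

import re

def is_valid_uid(uid):
    """Return True if uid looks like a valid DHIS2 UID."""
    return bool(re.match(r'^[A-Za-z][A-Za-z0-9]{10}$', str(uid)))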
Example #11
0
def main():

    logger.warning("Server source running DHIS2 version {} revision {}".format(
        api.version, api.revision))

    import argparse

    my_parser = argparse.ArgumentParser(
        prog='delete_TEIs',
        description='Delete all TEIs created by robot',
        epilog="",
        formatter_class=argparse.RawDescriptionHelpFormatter)
    my_parser.add_argument('Program_UID',
                           metavar='program_uid',
                           type=str,
                           help='the uid of the program to use')
    my_parser.add_argument(
        '-ou',
        '--org_unit',
        action="store",
        dest="OrgUnit",
        type=str,
        help=
        'Rather than deleting from the root of the tree, deletes from a '
        'specific orgUnit, including descendants. Eg: --ou=Q7RbNZcHrQ9')

    args = my_parser.parse_args()
    program_uid = args.Program_UID
    if not is_valid_uid(program_uid):
        logger.error('The program uid specified is not a valid DHIS2 uid')
        exit(1)
    else:
        try:
            program = api.get('programs/' + program_uid).json()
        except RequestException as e:
            if e.code == 404:
                logger.error('Program ' + program_uid +
                             ' specified does not exist')
            else:
                logger.error(str(e))
            exit(1)

    ou = 'GD7TowwI46c'  # Trainingland
    if args.OrgUnit is not None:
        if not is_valid_uid(args.OrgUnit):
            logger.error('The orgunit uid specified is not a valid DHIS2 uid')
            exit(1)
        else:
            try:
                orgunit = api.get('organisationUnits/' + args.OrgUnit).json()
            except RequestException as e:
                if e.code == 404:
                    logger.error('Org Unit ' + args.OrgUnit +
                                 ' specified does not exist')
                else:
                    logger.error(str(e))
                exit(1)
            else:
                ou = orgunit['id']

    # Pull all TEIs for the program under the selected org unit and its
    # descendants; skipPaging returns the full result set in one request
    params = {
        'ou': ou,
        'ouMode': 'DESCENDANTS',
        'program': program_uid,
        'skipPaging': 'true',
        #'lastUpdatedDuration': '4d',
        #'fields': '*'
        'fields': 'trackedEntityInstance,enrollments'
    }

    data = api.get('trackedEntityInstances',
                   params=params).json()['trackedEntityInstances']

    logger.info("Found " + str(len(data)) + " TEIs")

    user = '******'
    for tei in data:
        # #### Uncomment this to filter by user
        if 'enrollments' not in tei:
            import json
            logger.info(json.dumps(tei, indent=4))
            continue  # no enrollments to inspect, skip this TEI
        if tei["enrollments"][0]["storedBy"] != user:
            logger.warning("Skipping tei stored by " +
                           tei["enrollments"][0]["storedBy"])
            continue
        # ####
        tei_uid = tei['trackedEntityInstance']
        try:
            response = api.delete('trackedEntityInstances/' + tei_uid)
        except RequestException as e:
            logger.error(e)
        else:
            logger.info("TEI " + tei_uid + " removed")
Example #12
0
    my_parser.add_argument('Program_UID',
                           metavar='program_uid',
                           type=str,
                           help='the uid of the program to use')
    my_parser.add_argument('--with_teis_from_ou',
                           action="store",
                           dest="OrgUnit",
                           type=str)
    my_parser.add_argument('--stage_repeat',
                           action="append",
                           metavar=('stage_uid', 'number_repeats'),
                           nargs=2)
    args = my_parser.parse_args()

    program_uid = args.Program_UID
    if not is_valid_uid(program_uid):
        print('The program uid specified is not valid')
        sys.exit()
    if args.OrgUnit is not None and not is_valid_uid(args.OrgUnit):
        print('The orgunit uid specified is not valid')
        sys.exit()
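    # Each --stage_repeat argument carries a stage UID and a repeat count;
    # validate both before proceeding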
    if args.stage_repeat is not None and len(args.stage_repeat) > 0:
        for param in args.stage_repeat:
            if not is_valid_uid(param[0]):
                print('The program stage uid specified ' + param[0] +
                      ' is not valid')
                sys.exit()
            try:
                int(param[1])
            except ValueError:
                print('The repetition value ' + param[1] +