Example #1
def main(argv):

    global g

    parser = argparse.ArgumentParser()
    parser.add_argument('--output-filename', required=False,
        help='Output CSV filename. Defaults to ./tmp/pledges_[datetime_stamp].csv')
    parser.add_argument('--message-output-filename', required=False, help='Filename of message output file. If ' +
        'unspecified, defaults to stderr')
    g.args = parser.parse_args()

    message_level = util.get_ini_setting('logging', 'level')
    util.set_logger(message_level, g.args.message_output_filename, os.path.basename(__file__))

    ccb_app_username = util.get_ini_setting('ccb', 'app_username', False)
    ccb_app_password = util.get_ini_setting('ccb', 'app_password', False)
    ccb_subdomain = util.get_ini_setting('ccb', 'subdomain', False)

    curr_date_str = datetime.datetime.now().strftime('%m/%d/%Y')

    pledge_summary_report_info = {
        "id":"",
        "type":"pledge_giving_summary",
        "pledge_type":"family",
        "date_range":"",
        "ignore_static_range":"static",
        "start_date":"01/01/1990",
        "end_date":curr_date_str,
        "campus_ids":["1"],
        "output":"csv"
    }
    
    pledge_summary_request = {
        'request': json.dumps(pledge_summary_report_info),
        'output': 'export'
    }

    pledge_detail_dialog_report_info = {
        "type":"pledge_giving_detail",
        "id":""
    }

    pledge_detail_dialog_request = {
        'aj': 1,
        'ax': 'create_modal',
        'request': json.dumps(pledge_detail_dialog_report_info),
    }

    pledge_detail_report_info = {
        'id':'',
        'type': 'pledge_giving_detail',
        'transaction_detail_type_id': '{coa_id}', # {coa_id} is substituted at run-time
        'print_type': 'family',
        'split_child_records': '1',
        'show': 'all',
        'date_range': '',
        'ignore_static_range': 'static',
        'start_date': '01/01/1990',
        'end_date': curr_date_str,
        'campus_ids': ['1'],
        'output': 'csv'
    }

    pledge_detail_request = {
        'request': json.dumps(pledge_detail_report_info), # This is also replaced at run-time
        'output': 'export'
    }

    with requests.Session() as http_session:
        util.login(http_session, ccb_subdomain, ccb_app_username, ccb_app_password)

        # Get list of pledged categories
        pledge_summary_response = http_session.post('https://' + ccb_subdomain + '.ccbchurch.com/report.php',
            data=pledge_summary_request)
        pledge_summary_succeeded = False
        if pledge_summary_response.status_code == 200:
            match_pledge_summary_info = re.search('COA Category', pledge_summary_response.text)
            if match_pledge_summary_info is not None:
                pledge_summary_succeeded = True
        if not pledge_summary_succeeded:
            logging.error('Pledge Summary retrieval failure. Aborting!')
            util.sys_exit(1)
        csv_reader = csv.reader(StringIO.StringIO(pledge_summary_response.text.encode('ascii', 'ignore')))
        header_row = True
        list_pledge_categories = []
        for row in csv_reader:
            if header_row:
                assert row[0] == 'COA Category'
                header_row = False
            else:
                list_pledge_categories.append(unicode(row[0]))

        # Get dictionary of category option IDs
        report_page = http_session.get('https://' + ccb_subdomain + '.ccbchurch.com/service/report_settings.php',
            params=pledge_detail_dialog_request)
        if report_page.status_code == 200:
            match_report_options = re.search(
                '<select\s+name=\\\\"transaction_detail_type_id\\\\"\s+id=\\\\"\\\\"\s*>(.*?)<\\\/select>',
                report_page.text)
            pledge_categories_str = match_report_options.group(1)
        else:
            logging.error('Error retrieving report settings page. Aborting!')
            util.sys_exit(1)
        dict_pledge_categories = {}
        root_str = ''
        for option_match in re.finditer(r'<option\s+value=\\"([0-9]+)\\"\s*>([^<]*)<\\/option>',
            pledge_categories_str):
            if re.match(r'&emsp;', option_match.group(2)):
                dict_pledge_categories[root_str + ' : ' + option_match.group(2)[6:]] = int(option_match.group(1))
            else:
                root_str = option_match.group(2)
                dict_pledge_categories[root_str] = int(option_match.group(1))

        # Loop over each category with pledges and pull back CSV list of pledges for that category
        output_csv_header = None
        if g.args.output_filename is not None:
            output_filename = g.args.output_filename
        else:
            output_filename = './tmp/pledges_' + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '.csv'
        util.test_write(output_filename)
        with open(output_filename, 'wb') as csv_output_file:
            csv_writer = csv.writer(csv_output_file)
            for pledge_category in list_pledge_categories:
                logging.info('Retrieving pledges for ' + pledge_category)
                if pledge_category in dict_pledge_categories:
                    pledge_detail_report_info['transaction_detail_type_id'] = \
                        str(dict_pledge_categories[pledge_category])
                    pledge_detail_request['request'] = json.dumps(pledge_detail_report_info)
                    pledge_detail_response = http_session.post('https://' + ccb_subdomain + \
                        '.ccbchurch.com/report.php', data=pledge_detail_request)
                    pledge_detail_succeeded = False
                    if pledge_detail_response.status_code == 200 and pledge_detail_response.text[:8] == 'Name(s),':
                        pledge_detail_succeeded = True
                        csv_reader = csv.reader(StringIO.StringIO(pledge_detail_response.text.encode('ascii',
                            'ignore')))
                        header_row = True
                        for row in csv_reader:
                            if header_row:
                                header_row = False
                                if output_csv_header is None:
                                    output_csv_header = ['COA ID', 'COA Category'] + row
                                    amount_column_index = output_csv_header.index('Total Pledged')
                                    csv_writer.writerow(output_csv_header)
                            else:
                                row = [dict_pledge_categories[pledge_category], pledge_category] + row
                                if row[amount_column_index] != '0': # Ignore non-pledge (contrib-only) rows
                                    csv_writer.writerow(row)
                    if not pledge_detail_succeeded:
                        logging.warning('Pledge Detail retrieval failure for category ' + pledge_category)
                else:
                    logging.warning('Unknown pledge category: ' + pledge_category)

    logging.info('Pledge details retrieved successfully and written to ' + output_filename)

    util.sys_exit(0)
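
All four examples lean on a project-specific util module and a shared module-level globals object g. The call sites above fix the signatures; the stand-ins below are a minimal sketch under stated assumptions (the INI filename, the meaning of get_ini_setting()'s third positional argument, and the logger setup are guesses), not the project's real helpers. util.login(), which drives CCB's site-specific UI login form, is deliberately omitted.

# Hedged stand-ins for the helpers these examples call. Signatures are taken
# from the call sites above; everything else is an assumption.
import ConfigParser   # 'configparser' on Python 3
import logging
import sys

class Globals(object):
    """Bare namespace; the scripts attach parsed args and settings (g.args, g.ccb_*)."""
    pass

g = Globals()

def get_ini_setting(section, option, required=True):
    # The third positional argument is treated as a 'required' flag (an assumption),
    # and 'ccb_backup.ini' is a placeholder settings filename.
    config = ConfigParser.SafeConfigParser()
    config.read('ccb_backup.ini')
    if config.has_option(section, option):
        return config.get(section, option)
    if required:
        logging.error('Missing INI setting [%s] %s', section, option)
        sys.exit(1)
    return None

def set_logger(level_str, output_filename, program_name):
    # Log to the given file if specified, else stderr, at the configured level.
    logging.basicConfig(filename=output_filename,
                        level=getattr(logging, level_str.upper(), logging.INFO),
                        format=program_name + ': %(levelname)s %(message)s')

def test_write(filename):
    # Fail fast if the output path is not writable.
    with open(filename, 'ab'):
        pass

def sys_exit(code):
    logging.shutdown()
    sys.exit(code)
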
Example #2
def main(argv):

    global g

    parser = argparse.ArgumentParser()
    parser.add_argument('--output-filename', required=False,
        help='Output CSV filename. Defaults to ./tmp/individuals_[datetime_stamp].csv')
    parser.add_argument('--message-output-filename', required=False, help='Filename of message output file. If ' +
        'unspecified, defaults to stderr')
    g.args = parser.parse_args()

    message_level = util.get_ini_setting('logging', 'level')
    util.set_logger(message_level, g.args.message_output_filename, os.path.basename(__file__))

    ccb_app_username = util.get_ini_setting('ccb', 'app_username', False)
    ccb_app_password = util.get_ini_setting('ccb', 'app_password', False)
    ccb_subdomain = util.get_ini_setting('ccb', 'subdomain', False)

    curr_date_str = datetime.datetime.now().strftime('%m/%d/%Y')

    individual_detail_report_info = {
        'id':'',
        'type': 'export_individuals_change_log',
        'print_type': 'export_individuals',
        'query_id': '',
        'campus_ids': ['1']
    }

    individual_detail_request = {
        'request': json.dumps(individual_detail_report_info),
        'output': 'export'
    }

    with requests.Session() as http_session:
        util.login(http_session, ccb_subdomain, ccb_app_username, ccb_app_password)

        # Pull back complete CSV containing detail info for every individual in CCB database
        output_csv_header = None
        if g.args.output_filename is not None:
            output_filename = g.args.output_filename
        else:
            output_filename = './tmp/individuals_' + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '.csv'
        util.test_write(output_filename)
        with open(output_filename, 'wb') as csv_output_file:
            csv_writer = csv.writer(csv_output_file)
            logging.info('Note that it takes CCB a minute or two to retrieve all individual information')
            individual_detail_response = http_session.post('https://' + ccb_subdomain + '.ccbchurch.com/report.php',
                data=individual_detail_request)
            individual_detail_succeeded = False
            if individual_detail_response.status_code == 200 and \
                individual_detail_response.text[:16] == '"Individual ID",':
                individual_detail_succeeded = True
                csv_reader = csv.reader(StringIO.StringIO(individual_detail_response.text.encode('ascii', 'ignore')))
                for row in csv_reader:
                    csv_writer.writerow(row)
            if not individual_detail_succeeded:
                logging.error('Individual Detail retrieval failed')
                util.sys_exit(1)
            else:
                logging.info('Individual info successfully retrieved into file ' + output_filename)

    util.sys_exit(0)
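
Examples #1 and #2 drive CCB's report.php the same way: the report descriptor is JSON-encoded into a 'request' form field and posted with output='export', which returns CSV text. A small wrapper for that shared pattern, grounded only in the calls above (the helper name is hypothetical and error handling is simplified), might look like this:

# Sketch of the shared report.php CSV-export pattern used by these examples.
import json
import logging

def run_ccb_csv_report(http_session, subdomain, report_info, expected_prefix):
    """POST a JSON report descriptor to report.php and return the CSV text,
    or None if the response does not start with the expected CSV header."""
    response = http_session.post(
        'https://' + subdomain + '.ccbchurch.com/report.php',
        data={'request': json.dumps(report_info), 'output': 'export'})
    if response.status_code == 200 and response.text.startswith(expected_prefix):
        return response.text
    logging.warning('Unexpected report.php response (status %d)', response.status_code)
    return None

Example #2 above corresponds to expected_prefix='"Individual ID",' and Example #1's per-category detail loop to expected_prefix='Name(s),'.
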
Example #3
def main(argv):

    global g

    parser = argparse.ArgumentParser()
    parser.add_argument('--input-filename', required=False, help='Name of input XML file from previous ' +
        'group_profiles XML retrieval. If not specified, groups XML data is retrieved from the CCB REST API.')
    parser.add_argument('--output-groups-filename', required=False, help='Name of CSV output file listing group ' +
        'information. Defaults to ./tmp/groups_[datetime_stamp].csv')
    parser.add_argument('--output-participants-filename', required=False, help='Name of CSV output file listing ' +
        'group participant information. Defaults to ./tmp/group_participants_[datetime_stamp].csv')
    parser.add_argument('--message-output-filename', required=False, help='Filename of message output file. If ' +
        'unspecified, defaults to stderr')
    parser.add_argument('--keep-temp-file', action='store_true', help='If specified, temp file created with XML ' +
        'from REST API call is not deleted')
    g.args = parser.parse_args()

    message_level = util.get_ini_setting('logging', 'level')
    util.set_logger(message_level, g.args.message_output_filename, os.path.basename(__file__))

    ccb_subdomain = util.get_ini_setting('ccb', 'subdomain', False)
    ccb_api_username = util.get_ini_setting('ccb', 'api_username', False)
    ccb_api_password = util.get_ini_setting('ccb', 'api_password', False)

    # Set groups and participant filenames and test validity
    if g.args.output_groups_filename is not None:
        output_groups_filename = g.args.output_groups_filename
    else:
        output_groups_filename = './tmp/groups_' + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '.csv'
    util.test_write(output_groups_filename)
    if g.args.output_participants_filename is not None:
        output_participants_filename = g.args.output_participants_filename
    else:
        output_participants_filename = './tmp/group_participants_' + \
            datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '.csv'
    util.test_write(output_participants_filename)

    if g.args.input_filename is not None:
        # Pull groups XML from input file specified by user
        input_filename = g.args.input_filename
    else:
        input_filename = util.ccb_rest_xml_to_temp_file(ccb_subdomain, 'group_profiles', ccb_api_username,
            ccb_api_password)
        if input_filename is None:
            logging.error('Could not retrieve group_profiles, so aborting!')
            util.sys_exit(1)

    # Properties to peel off each 'group' node in XML
    list_group_props = [
        'name',
        'description',
        'campus',
        'group_type',
        'department',
        'area',
        'group_capacity',
        'meeting_day',
        'meeting_time',
        'childcare_provided',
        'interaction_type',
        'membership_type',
        'notification',
        'listed',
        'public_search_listed',
        'inactive'
    ]

    participant_nodes = [
        'ccb_api/response/groups/group/director', 'ccb_api/response/groups/group/coach',
        'ccb_api/response/groups/group/main_leader', 'ccb_api/response/groups/group/leaders/leader',
        'ccb_api/response/groups/group/participants/participant'
    ]

    path = []
    dict_path_ids = {}
    group_id = None
    logging.info('Creating groups and group participants output files.')
    with open(output_groups_filename, 'wb') as csv_output_groups_file:
        csv_writer_groups = csv.writer(csv_output_groups_file)
        csv_writer_groups.writerow(['id'] + list_group_props)
        with open(output_participants_filename, 'wb') as csv_output_participants_file:
            csv_writer_participants = csv.writer(csv_output_participants_file)
            csv_writer_participants.writerow(['group_id', 'participant_id', 'participant_type'])
            for event, elem in ElementTree.iterparse(input_filename, events=('start', 'end')):
                if event == 'start':
                    path.append(elem.tag)
                    full_path = '/'.join(path)
                    if full_path == 'ccb_api/response/groups/group':
                        current_group_id = elem.attrib['id']
                elif event == 'end':
                    if full_path == 'ccb_api/response/groups/group':
                        # Emit 'groups' row
                        props_csv = util.get_elem_id_and_props(elem, list_group_props)
                        csv_writer_groups.writerow(props_csv)
                        elem.clear() # Throw away 'group' node from memory when done processing it
                    elif full_path in participant_nodes:
                        # Emit 'group_participants' row
                        props_csv = [ current_group_id, elem.attrib['id'], elem.tag ]
                        csv_writer_participants.writerow(props_csv)
                    path.pop()
                    full_path = '/'.join(path)

    logging.info('Groups written to ' + output_groups_filename)
    logging.info('Group Participants written to ' + output_participants_filename)

    # If caller didn't specify input filename, then delete the temporary file we retrieved into
    if g.args.input_filename is None:
        if g.args.keep_temp_file:
            logging.info('Temporary downloaded XML retained in file: ' + input_filename)
        else:
            os.remove(input_filename)

    util.sys_exit(0)
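
Examples #3 and #4 also depend on util.ccb_rest_xml_to_temp_file() and util.get_elem_id_and_props(), which are not shown in these excerpts. The stand-ins below are inferred from the call sites and inline comments above; the api.php endpoint, the 'srv' parameter name, and the lack of paging are assumptions, so treat them as sketches rather than the project's real code.

# Hedged stand-ins for the XML helpers used by the groups and attendance examples.
import logging
import tempfile
import requests

def ccb_rest_xml_to_temp_file(subdomain, service, api_username, api_password):
    """Stream one CCB REST API service response into a temp file and return its
    filename, or None on failure."""
    response = requests.get('https://' + subdomain + '.ccbchurch.com/api.php',
                            params={'srv': service},
                            auth=(api_username, api_password), stream=True)
    if response.status_code != 200:
        logging.error('CCB REST call for %s failed with status %d', service, response.status_code)
        return None
    with tempfile.NamedTemporaryFile(delete=False, suffix='.xml') as temp:
        for chunk in response.iter_content(chunk_size=8192):
            if chunk:
                temp.write(chunk)
        return temp.name

def get_elem_id_and_props(elem, prop_names):
    """Return [id, prop1, prop2, ...] for an ElementTree element; the 'id'
    attribute sits at index 0, as the examples above rely on."""
    row = [elem.attrib.get('id', '')]
    for prop in prop_names:
        child = elem.find(prop)
        row.append(child.text if child is not None and child.text else '')
    return row
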
Example #4
def main(argv):
    global g

    parser = argparse.ArgumentParser()
    parser.add_argument('--input-events-filename', required=False, help='Name of input CSV file from previous ' +
        'event occurrences retrieval. If not specified, event list CSV data is retrieved from CCB UI.')
    parser.add_argument('--output-events-filename', required=False, help='Name of CSV output file listing event ' +
        'information. Defaults to ./tmp/events_[datetime_stamp].csv')
    parser.add_argument('--output-attendance-filename', required=False, help='Name of CSV output file listing ' +
        'attendance information. Defaults to ./tmp/attendance_[datetime_stamp].csv')
    parser.add_argument('--message-output-filename', required=False, help='Filename of message output file. If ' +
        'unspecified, defaults to stderr')
    parser.add_argument('--keep-temp-file', action='store_true', help='If specified, the temp event occurrences ' + \
        'CSV file created from data pulled from the CCB UI (event list report) is not deleted, so it can be ' + \
        'reused in subsequent runs')
    parser.add_argument('--all-time', action='store_true', help='Normally, attendance data is only archived for ' + \
        'the current year (on the assumption that earlier backups covered earlier years). Setting this flag ' \
        'collects attendance data not just for this year but across all years')
    g.args = parser.parse_args()

    message_level = util.get_ini_setting('logging', 'level')
    util.set_logger(message_level, g.args.message_output_filename, os.path.basename(__file__))

    g.ccb_subdomain = util.get_ini_setting('ccb', 'subdomain', False)
    ccb_app_username = util.get_ini_setting('ccb', 'app_username', False)
    ccb_app_password = util.get_ini_setting('ccb', 'app_password', False)
    g.ccb_api_username = util.get_ini_setting('ccb', 'api_username', False)
    g.ccb_api_password = util.get_ini_setting('ccb', 'api_password', False)

    datetime_now = datetime.datetime.now()
    curr_date_str = datetime_now.strftime('%m/%d/%Y')

    if g.args.all_time:
        start_date_str = '01/01/1990'
    else:
        start_date_str = '01/01/' + datetime_now.strftime('%Y')

    logging.info('Gathering attendance data between ' + start_date_str + ' and ' + curr_date_str)

    event_list_info = {
        "id":"",
        "type":"event_list",
        "date_range":"",
        "ignore_static_range":"static",
        "start_date":start_date_str,
        "end_date":curr_date_str,
        "additional_event_types":["","non_church_wide_events","filter_off"],
        "campus_ids":["1"],
        "output":"csv"
    }

    event_list_request = {
        'request': json.dumps(event_list_info),
        'output': 'export'
    }

    # Set events and attendance filenames and test validity
    if g.args.output_events_filename is not None:
        output_events_filename = g.args.output_events_filename
    else:
        output_events_filename = './tmp/events_' + datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '.csv'
    util.test_write(output_events_filename)
    if g.args.output_attendance_filename is not None:
        output_attendance_filename = g.args.output_attendance_filename
    else:
        output_attendance_filename = './tmp/attendance_' + \
            datetime.datetime.now().strftime('%Y%m%d%H%M%S') + '.csv'
    util.test_write(output_attendance_filename)

    input_filename = util.ccb_rest_xml_to_temp_file(g.ccb_subdomain, 'event_profiles', g.ccb_api_username,
        g.ccb_api_password)
    if input_filename is None:
        logging.error('CCB REST API call for event_profiles failed. Aborting!')
        util.sys_exit(1)

    # Properties to peel off each 'event' node in XML
    list_event_props = [
        'name',
        'description',
        'leader_notes',
        'start_datetime',
        'end_datetime',
        'timezone',
        'recurrence_description',
        'approval_status',
        'listed',
        'public_calendar_listed'
    ] # Also collect event_id, group_id, organizer_id

    path = []
    dict_list_event_names = defaultdict(list)
    with open(output_events_filename, 'wb') as csv_output_events_file:
        csv_writer_events = csv.writer(csv_output_events_file)
        csv_writer_events.writerow(['event_id'] + list_event_props + ['group_id', 'organizer_id']) # Write header row
        for event, elem in ElementTree.iterparse(input_filename, events=('start', 'end')):
            if event == 'start':
                path.append(elem.tag)
                full_path = '/'.join(path)
                if full_path == 'ccb_api/response/events/event':
                    current_event_id = elem.attrib['id']
                    # Reset per-event child IDs so an event without a <group> or <organizer>
                    # child does not inherit values from the previous event
                    current_group_id = None
                    current_organizer_id = None
            elif event == 'end':
                if full_path == 'ccb_api/response/events/event':
                    # Emit 'events' row
                    props_csv = util.get_elem_id_and_props(elem, list_event_props)
                    event_id = props_csv[0] # get_elem_id_and_props() puts 'id' prop at index 0
                    name = props_csv[1] # Cheating here...we know 'name' prop is index 1
                    dict_list_event_names[name].append(event_id)
                    props_csv.append(current_group_id)
                    props_csv.append(current_organizer_id)
                    csv_writer_events.writerow(props_csv)
                    elem.clear() # Throw away 'event' node from memory when done processing it
                elif full_path == 'ccb_api/response/events/event/group':
                    current_group_id = elem.attrib['id']
                elif full_path == 'ccb_api/response/events/event/organizer':
                    current_organizer_id = elem.attrib['id']
                path.pop()
                full_path = '/'.join(path)

    if g.args.input_events_filename is not None:
        # Pull calendared events CSV from file
        input_filename = g.args.input_events_filename
    else:
        # Create UI user session to pull list of calendared events
        logging.info('Logging in to UI session')
        with requests.Session() as http_session:
            util.login(http_session, g.ccb_subdomain, ccb_app_username, ccb_app_password)

            # Get list of all scheduled events
            logging.info('Retrieving list of all scheduled events.  This might take a couple minutes!')
            event_list_response = http_session.post('https://' + g.ccb_subdomain + '.ccbchurch.com/report.php',
                data=event_list_request)
            event_list_succeeded = False
            if event_list_response.status_code == 200:
                event_list_response.raw.decode_content = True
                with tempfile.NamedTemporaryFile(delete=False) as temp:
                    input_filename = temp.name
                    first_chunk = True
                    for chunk in event_list_response.iter_content(chunk_size=1024):
                        if chunk: # filter out keep-alive new chunks
                            if first_chunk:
                                if chunk[:13] != '"Event Name",':
                                    logging.error('Mis-formed calendared events CSV returned. Aborting!')
                                    util.sys_exit(1)
                                first_chunk = False
                            temp.write(chunk)
                    temp.flush()

    with open(input_filename, 'rb') as csvfile:
        csv_reader = csv.reader(csvfile)
        with open(output_attendance_filename, 'wb') as csv_output_file:
            csv_writer = csv.writer(csv_output_file)
            csv_writer.writerow(['event_id', 'event_occurrence', 'individual_id', 'count'])
            header_row = True
            for row in csv_reader:
                if header_row:
                    header_row = False
                    output_csv_header = row
                    event_name_column_index = row.index('Event Name')
                    attendance_column_index = row.index('Actual Attendance')
                    date_column_index = row.index('Date')
                    start_time_column_index = row.index('Start Time')
                else:
                    # Retrieve attendees for events which have non-zero number of attendees
                    if row[attendance_column_index] != '0':
                        if row[event_name_column_index] in dict_list_event_names:
                            retrieve_attendance(csv_writer, dict_list_event_names[row[event_name_column_index]],
                                row[date_column_index], row[start_time_column_index],
                                row[attendance_column_index])
                        else:
                            logging.warning("Unrecognized event name '" + row[event_name_column_index] + "'")

    # If caller didn't specify input filename, then delete the temporary file we retrieved into
    if g.args.input_events_filename is None:
        if g.args.keep_temp_file:
            logging.info('Temporary downloaded calendared events CSV retained in file: ' + input_filename)
        else:
            os.remove(input_filename)

    logging.info('Event profile data written to ' + output_events_filename)
    logging.info('Attendance data written to ' + output_attendance_filename)

    util.sys_exit(0)
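
Example #4 calls retrieve_attendance(), which is not included in this excerpt. Grounded only in its call site and the attendance CSV header written above, the following is a hypothetical sketch: the attendance_profile service name, its parameters, the response XML shape, and the occurrence format are all assumptions, and g refers to the same module-level globals the examples use.

# Hypothetical sketch of retrieve_attendance(); not part of the excerpt above.
import logging
import requests
from xml.etree import ElementTree

def retrieve_attendance(csv_writer, event_ids, date_str, start_time_str, attendance_count):
    occurrence = date_str  # conversion to CCB's expected occurrence format is omitted here
    for event_id in event_ids:
        response = requests.get(
            'https://' + g.ccb_subdomain + '.ccbchurch.com/api.php',
            params={'srv': 'attendance_profile', 'id': event_id, 'occurrence': occurrence},
            auth=(g.ccb_api_username, g.ccb_api_password))
        if response.status_code != 200:
            continue
        root = ElementTree.fromstring(response.text.encode('utf-8'))
        attendees = root.findall('.//attendee')
        if not attendees:
            continue  # no attendance under this event_id for the occurrence; try the next candidate
        for attendee in attendees:
            csv_writer.writerow([event_id, occurrence, attendee.attrib.get('id', ''), 1])
        # attendance_count (from the event list CSV) could be cross-checked against len(attendees)
        return
    logging.warning('No attendance data found for %s on %s at %s', str(event_ids), date_str, start_time_str)
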