Example #1
    def print_results(engine_dictionary, search_dictionaries, length_engines):
        # Executes the search class and retrieves the link,
        # then writes the results to an Excel file.
        # Splits search_dictionaries depending on begin_from_index.
        search_dict_length = (len(search_dictionaries) // length_engines)
        begin_from_index = engine_dictionary['index'] * search_dict_length

        search_class = engine_dictionary['search_engine']

        search_restricted_dict = search_dictionaries[begin_from_index:(
            begin_from_index + search_dict_length)]

        links = []
        for search_dict in search_restricted_dict:
            # Dynamic polymorphism: each engine class implements get_link()
            links.append(search_class(search_dict).get_link())

        class_name = search_class.__name__.upper()
        ExcelWriter.write_excel(class_name, search_restricted_dict, links)
Example #2
import sys
from optparse import OptionParser


def main():
    usage = "usage: %prog [options]"
    parser = OptionParser(usage)
    parser.add_option("-i",
                      "--input",
                      dest="report",
                      help="NUnit XML report file name")
    parser.add_option("-o",
                      "--output",
                      dest="filename",
                      help="Generated Excel file name without extension")
    parser.add_option(
        "-l",
        "--light",
        dest="light",
        default=False,
        action="store_true",
        help="Activate light report generation "
             "(CSV files instead of Excel workbook)")

    (options, args) = parser.parse_args()

    # Exit on missing or invalid options
    if not check_options(options):
        parser.print_help()
        sys.exit(1)

    # Read NUnitTestAssembly from input report
    assemblies = read_assemblies(options.report)

    # Create the necessary generators
    generators = create_generators(assemblies)

    # Generate the appropriate report
    writer = CSVWriter(generators) if options.light else ExcelWriter(generators)
    clean_filename(options)
    writer.generate_report(options.filename)

    print("[NUnit-Stats]: Bye")
Example #3
from constants import *
from gmail_parser import GmailParser
from docx_reader import DocxReader
from excel_writer import ExcelWriter
import os
import shutil

if not os.path.exists(ATTACHMENTS_FOLDER):
    print(f"Creating '{ATTACHMENTS_FOLDER}' directory to store attachments...")
# exist_ok avoids a crash when only some of the folders already exist
os.makedirs(ATTACHMENTS_FOLDER, exist_ok=True)
os.makedirs(DOCS_FOLDER, exist_ok=True)
os.makedirs(EXCEL_FOLDER, exist_ok=True)
os.makedirs(IMAGES_FOLDER, exist_ok=True)

print("\n*** [PARSING EMAILS] ***")
email_parser = GmailParser("*****@*****.**")
email_parser.parse_emails()

print(f"\n\n*** [READING DOCUMENTS AND STORING THEM IN {DOCS_FOLDER} ***")
docx_reader = DocxReader()
dictionaries = docx_reader.parse_docx_files()
print(f"{len(dictionaries)} document{'s' if len(dictionaries) != 1 else ''} found.")

print(f"\n\n*** [WRITING EXCEL FILES IN {EXCEL_FOLDER}] ***")
excel_writer = ExcelWriter(dictionaries)
excel_writer.write_excel_files()

shutil.rmtree(TMP_FOLDER, ignore_errors=True)
Example #4
# json is stdlib; Font/Border/Side come from openpyxl.styles (ExcelWriter is
# assumed to wrap an openpyxl workbook, given the ws.cell/merge_cells calls)
import json

from openpyxl.styles import Border, Font, Side


def export_xls(title):

    # Create a spreadsheet and get active workbook
    ew = ExcelWriter()
    ws = ew.get_active()
    ws.title = "Weekly Report"

    # Create title row
    titles = [
        "Crisis",
        "Country",
        "Week Covered By Report",
        "Day Covered By Report",
        "Type Of Disaster",
        "Status",
    ]

    hpf = HumanProfileField.objects.filter(parent__isnull=True)
    pinf = PeopleInNeedField.objects.filter(parent__isnull=True)
    haf = HumanAccessField.objects.all()
    hapf = HumanAccessPinField.objects.all()
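    # hpf/pinf hold only the top-level (parent-less) fields; child fields
    # are walked via their *_set managers below. haf/hapf cover humanitarian
    # access and its people-in-need (PIN) counterpart.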

    # HUMANITARIAN PROFILE
    for field in hpf:
        titles.append(field.name + '__Number__Source__Comment')
        for ch_field in field.humanprofilefield_set.all():
            titles.append(ch_field.name + '__Number__Source__Comment')

    # TODO: Replace Source/Date with Source
    # PEOPLE IN NEED
    for field in pinf:
        titles.append(field.name + '__'
                      'Total__Source/Date__Comment__'
                      'At Risk__Source/Date__Comment__'
                      'Moderate__Source/Date__Comment__'
                      'Severe__Source/Date__Comment__'
                      'Planned__Source/Date__Comment')
        for ch_field in field.peopleinneedfield_set.all():
            titles.append(ch_field.name + '__'
                          'Total__Source/Date__Comment__'
                          'At Risk__Source/Date__Comment__'
                          'Moderate__Source/Date__Comment__'
                          'Severe__Source/Date__Comment__'
                          'Planned__Source/Date__Comment')

    # ipc
    titles.append('ipc__None/Minimal__Stressed__Crisis__Emergency__Famine'
                  '__Source__Comment')

    # HUMANITARIAN ACCESS
    for field in haf:
        titles.append(field.name + '__Yes/No__Source/Date__Comment')

    # HUMANITARIAN ACCESS PIN
    for field in hapf:
        titles.append(field.name + '__Number__Source/Date__Comment')

    # Create Columns
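    # Each title may pack sub-columns as 'Main__Sub1__Sub2__...': the first
    # segment becomes a merged header in row 1 and the remaining segments
    # become sub-headers in row 2.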
    col = 0
    for t in titles:
        i = col
        splits = t.split('__')

        ws.cell(row=1, column=i + 1).value = splits[0]
        ws.cell(row=1, column=i + 1).font = Font(bold=True)

        if len(splits) > 1:
            for j, split in enumerate(splits[1:]):
                ws.cell(row=2, column=i + j + 1).value = split
                ws.cell(row=2, column=i + j + 1).font = Font(bold=True)

            col = col + len(splits) - 2
            ws.merge_cells(start_row=1,
                           end_row=1,
                           start_column=i + 1,
                           end_column=i + len(splits) - 1)

        col = col + 1

    ew.auto_fit_cells_in_row(1, ws)
    ew.auto_fit_cells_in_row(2, ws)

    # Dotted/thick bottom borders under the header rows for readability
    for cell in list(ws.rows)[0]:
        cell.border = Border(bottom=Side(border_style='dotted'))
    for cell in list(ws.rows)[1]:
        cell.border = Border(bottom=Side(border_style='thick'))

    # Create Rows

    for event in Event.objects.all():
        for report in event.weeklyreport_set.all():
            rows = RowCollection(1)
            data = json.loads(report.data)

            # Report Info
            isoreport = report.start_date.isocalendar()

            row = [
                event.name,
                report.country.name,
                'Week ' + str(isoreport[1]) + ' ' + str(isoreport[0]),
                map_day(data.get('day-select')),
                DisasterType.objects.get(
                    pk=get_dict(data, 'disaster_type')).name if get_dict(
                        data, 'disaster_type') else '',
                ReportStatus.objects.get(pk=get_dict(data, 'status'))
                if get_dict(data, 'status') else '',
            ]

            # HUMANITARIAN PROFILE
            for field in hpf:
                row.extend([
                    get_dict(data, 'human.number.' + str(field.pk)),
                    get_source(data, 'human.source.' + str(field.pk)),
                    get_dict(data, 'human.comment.' + str(field.pk))
                ])

                for ch_field in field.humanprofilefield_set.all():
                    row.extend([
                        get_dict(data, 'human.number.' + str(ch_field.pk)),
                        get_source(data, 'human.source.' + str(ch_field.pk)),
                        get_dict(data, 'human.comment.' + str(ch_field.pk))
                    ])

            # PEOPLE IN NEED
            for field in pinf:
                _fields = ['total', 'at-risk', 'moderate', 'severe', 'planned']
                _data = []
                for _field in _fields:
                    _data.extend([
                        get_dict(data,
                                 'people.' + _field + '.' + str(field.pk)),
                        get_source(
                            data,
                            'people.' + _field + '-source.' + str(field.pk)),
                        get_dict(
                            data,
                            'people.' + _field + '-comment.' + str(field.pk)),
                    ])
                row.extend(_data)
                for ch_field in field.peopleinneedfield_set.all():
                    _data = []
                    for _field in _fields:
                        _data.extend([
                            get_dict(
                                data,
                                'people.' + _field + '.' + str(ch_field.pk)),
                            get_source(
                                data, 'people.' + _field + '-source.' +
                                str(ch_field.pk)),
                            get_dict(
                                data, 'people.' + _field + '-comment.' +
                                str(ch_field.pk)),
                        ])
                    row.extend(_data)

            # ipc
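            # ipc values live under single-letter keys: 'a'-'e' are the five
            # phases (None/Minimal ... Famine), 'f' the source, 'g' the comment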
            row.extend([data['ipc'][ipc_field] for ipc_field in 'abcde'])
            row.extend([get_source(data, 'ipc.f')])
            row.extend([data['ipc']['g']])

            # HUMANITARIAN ACCESS
            for field in haf:
                row.extend([
                    get_dict(data, 'access.' + str(field.pk)),
                    get_source(data, 'access-extra.source.' + str(field.pk)),
                    get_dict(data, 'access-extra.comment.' + str(field.pk)),
                ])

            # HUMANITARIAN ACCESS PIN
            for field in hapf:
                row.extend([
                    get_dict(data, 'access-pin.number.' + str(field.pk)),
                    get_source(data, 'access-pin.source.' + str(field.pk)),
                    get_dict(data, 'access-pin.comment.' + str(field.pk)),
                ])
            # Add to row collection
            rows.add_values(row)

            # Add to worksheet
            ew.append(rows.rows, ws)

    # ew.wb.save('balances.xlsx')
    return ew.get_http_response(title)
Example #5
    def write(self, file_path):
        data = self._create_data()
        writer = ExcelWriter(file_path)
        writer.write(data)
Example #6
import os
import sys
from datetime import datetime


def main():
    root_path = os.path.dirname(os.path.abspath(__file__))
    os.chdir(root_path)
    result = do_input_validation()
    result.verbose = True
    setup_logging(logfile="bt_report.log", scrnlog=result.verbose)
    logger.info("Running Script with -> Vertica Server: %s, Vertica User: %s, Customer ID: %s, DeviceType ID: %s, "
                "Date: %s, CSV File: %s, Verbose: %s" % (result.server, result.username, result.customer,
                                                         result.device_type_id, result.start_date, result.csv_file,
                                                         result.verbose))
    date_list, end_date = get_dates(start_date=result.start_date)
    date_formatter = datetime.strptime(end_date, TIME_FORMAT_MDY)
    year_month_format = "%s%02d" % (date_formatter.year, date_formatter.month)
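    # e.g. "201803" for March 2018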
    report_name = "50_Network Intrusion Prevention_Detection Service Report_%s" % year_month_format
    report_output_file_path = get_output_file_path(base_path=root_path, template_file=report_name)
    logger.info("Report Generation Started. Result file: %s" % report_output_file_path)
    # print report_output_file_path
    try:
        vertica_db_instance = VerticaDatabase(
            server=result.server,
            user=result.username,
            password=result.password
        )
        final_data_dict, severity_records, top_5_alarms = fetch_records(
            db_instance=vertica_db_instance,
            customer_id=result.customer,
            device_type_ids=result.device_type_id,
            start_date=result.start_date,
            end_date=end_date,
            date_range=date_list,
            csv_file_path=result.csv_file
        )
        # print top_5_alarms
        # print final_data_dict
        workbook = ExcelWriter(report_output_file_path)
        sheet_name = "50_NIDS_IPS_Report_%s" % year_month_format
        workbook.write_to_document_file(sheet_name=sheet_name, date_str=end_date)
        workbook.write_data_worksheet(sheet_name="DATA", data=final_data_dict, top_alarms=top_5_alarms)
        workbook.draw_top5_charts(sheet_name="TOP 5")
        workbook.write_main_worksheet(sheet_name="MAIN", data=severity_records, start_date=result.start_date,
                                      end_date=end_date)
        workbook.close()
        logger.info("Report Generation Completed. Result file: %s" % report_output_file_path)
        print("Report Generation Completed. Result file: %s" % report_output_file_path)
    except Exception as ex:
        logger.exception(ex)
        sys.exit(1)
Example #7
# Font comes from openpyxl.styles (assumed backend of ExcelWriter)
from openpyxl.styles import Font


def export_and_save(event_pk, filename):
    # Create a spreadsheet and get active workbook
    ew = ExcelWriter()
    ws = ew.get_active()
    ws.title = "Split Entries"
    wsg = ew.wb.create_sheet("Grouped Entries")

    # Create title row
    titles = [
        "Country",
        "Date of Lead Publication",
        "Date of Information",
        "Created By",
        "Date Imported",
        "Lead Title",
        "Source",
        "Excerpt",
        "Reliability",
        "Severity",
        "Demographic Groups",
        "Specific Needs Groups",
        "Affected Groups",
        "Pillar",
        "Subpillar",
        "Sector",
        "Subsector",
    ]

    countries = entry_models.Event.objects.get(pk=event_pk).countries\
                            .all().distinct()

    for country in countries:
        admin_levels = country.adminlevel_set.all()
        for i, admin_level in enumerate(admin_levels):
            titles.append('Admin {}'.format(i))

    for i, t in enumerate(titles):
        ws.cell(row=1, column=i + 1).value = t
        ws.cell(row=1, column=i + 1).font = Font(bold=True)

        wsg.cell(row=1, column=i + 1).value = t
        wsg.cell(row=1, column=i + 1).font = Font(bold=True)

    ew.auto_fit_cells_in_row(1, ws)
    ew.auto_fit_cells_in_row(1, wsg)

    # Add each information in each entry belonging to this event
    informations = entry_models.EntryInformation.objects.filter(
        entry__lead__event__pk=event_pk, entry__template=None).distinct()

    grouped_rows = []
    for i, info in enumerate(informations):
        try:
            rows = RowCollection(1)
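            # permute_and_add appears to expand the collection into one row
            # per value combination (inferred from the Split/Grouped sheets)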

            rows.permute_and_add(info.entry.lead.event.countries.all())

            rows.add_values([
                format_date(info.entry.lead.published_at),
                format_date(info.date), info.entry.created_by,
                format_date(info.entry.created_at.date()),
                info.entry.lead.name, info.entry.lead.source_name,
                xstr(info.excerpt), info.reliability.name, info.severity.name
            ])

            # Column Name `Demographic Groups` Renamed to
            # `Vulnerable Group` as specified in Issue #280
            rows.permute_and_add(info.vulnerable_groups.all())
            rows.permute_and_add(info.specific_needs_groups.all())
            rows.permute_and_add(info.affected_groups.all())

            attributes = []
            if info.informationattribute_set.count() > 0:
                for attr in info.informationattribute_set.all():
                    attr_data = [
                        attr.subpillar.pillar.name, attr.subpillar.name
                    ]

                    if attr.sector:
                        attr_data.append(attr.sector.name)
                        if attr.subsectors.count() > 0:
                            for ss in attr.subsectors.all():
                                attributes.append(attr_data + [ss.name])
                        else:
                            attributes.append(attr_data + [''])
                    else:
                        attributes.append(attr_data + ['', ''])
            else:
                attributes.append(['', '', '', ''])

            rows.permute_and_add_list(attributes)

            for country in countries:
                admin_levels = country.adminlevel_set.all()
                for admin_level in admin_levels:
                    selections = []
                    for map_selection in info.map_selections.all():
                        if admin_level == map_selection.admin_level:
                            selections.append(map_selection.name)
                    rows.permute_and_add(selections)

            ew.append(rows.rows, ws)
            grouped_rows.append(rows.group_rows)
        except Exception:
            # Skip entries that fail to export instead of aborting the file
            pass

    ew.append(grouped_rows, wsg)
    ew.save_to(filename)
Example #8
# Q is Django's query object; Font comes from openpyxl.styles (assumed
# backend of ExcelWriter)
import json

from django.db.models import Q
from openpyxl.styles import Font


def export_analysis_xls(title,
                        event_pk=None,
                        information_pks=None,
                        request_data=None):

    # Create a spreadsheet and get active workbook
    ew = ExcelWriter()
    ws = ew.get_active()
    ws.title = "Split Entries"
    wsg = ew.wb.create_sheet("Grouped Entries")

    # Create title row
    titles = [
        "Date of Lead Publication", "Imported By", "Date Imported", "URL",
        "Lead Title", "Source", "Excerpt"
    ]

    event = entry_models.Event.objects.get(pk=event_pk)
    elements = json.loads(event.entry_template.elements)
    sTypes = [
        'date-input', 'scale', 'number-input', 'multiselect', 'organigram'
    ]
    element_ids = []
    geo_elements = []

    for element in elements:
        eType = element['type']
        if eType in sTypes:
            titles.append(element['label'])
        elif eType == 'matrix1d':
            titles.append([element['title'], 'Dimension', 'Sub-Dimension'])
        elif eType == 'matrix2d':
            titles.append([
                element['title'], 'Dimension', 'Sub-Dimension', 'Sector',
                'Subsector'
            ])
        elif eType == 'number2d':
            for row in element['rows']:
                for column in element['columns']:
                    titles.append('{} - {}'.format(row['title'],
                                                   column['title']))
            for row in element['rows']:
                titles.append('{} matches'.format(row['title']))
        elif eType == 'geolocations':
            geo_elements.append(element['id'])
        else:
            continue
        element_ids.append([element['id'], eType])

    if event_pk:
        countries = entry_models.Event.objects.get(pk=event_pk).countries.\
                                 all().distinct()
    else:
        countries = entry_models.Country.objects.all().distinct()

    for country in countries:
        admin_levels = country.adminlevel_set.all()
        for admin_level in admin_levels:
            titles.append(admin_level.name)
            titles.append('{} P-Code'.format(admin_level.name))

    index = 0
    for t in titles:
        if isinstance(t, list):
            for wswsg in [ws, wsg]:
                wswsg.cell(row=1, column=index + 1).value = t[0]
                wswsg.cell(row=1, column=index + 1).font = Font(bold=True)
                wswsg.merge_cells(start_row=1,
                                  end_row=1,
                                  start_column=index + 1,
                                  end_column=index + len(t) - 1)
            for ele in t[1:]:
                for wswsg in [ws, wsg]:
                    wswsg.cell(row=2, column=index + 1).value = ele
                    wswsg.cell(row=2, column=index + 1).font = Font(bold=True)
                index = index + 1
        else:
            for wswsg in [ws, wsg]:
                wswsg.cell(row=1, column=index + 1).value = t
                wswsg.cell(row=1, column=index + 1).font = Font(bold=True)
            index = index + 1

    ew.auto_fit_cells_in_row(1, ws)
    ew.auto_fit_cells_in_row(1, wsg)

    # Add each information in each entry belonging to this event
    informations = entry_models.EntryInformation.objects.filter(
        ~Q(entry__template=None), entry__lead__event__pk=event_pk).distinct()

    if information_pks:
        informations = informations.filter(pk__in=information_pks)
    informations = analysis_filter(informations, request_data, elements)

    grouped_rows = []
    for i, info in enumerate(informations):
        try:
            rows = RowCollection(1)

            lead_url = info.entry.lead.url
            if Attachment.objects.filter(lead=info.entry.lead).count() > 0:
                lead_url = info.entry.lead.attachment.upload.url

            rows.add_values([
                format_date(info.entry.lead.published_at),
                info.entry.created_by,
                format_date(info.entry.created_at.date()), lead_url,
                info.entry.lead.name, info.entry.lead.source_name,
                xstr(info.excerpt)
            ])

            infoE = json.loads(info.elements)
            for element_id, element_type in element_ids:
                element = list_filter(infoE, 'id', element_id)
                get_analysis_data(elements, element_id, element, element_type,
                                  rows)

            for country in countries:
                admin_levels = country.adminlevel_set.all()
                for admin_level in admin_levels:
                    selections = []
                    for map_selections in [
                            geoE for geoE in infoE
                            if geoE.get('id') in geo_elements
                    ]:
                        for map_selection in map_selections.get('value', []):
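                            # Selections are encoded as 'iso3:admin_level:name'
                            # with an optional trailing ':pcode'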
                            map_selection_list = map_selection.split(':')

                            if len(map_selection_list) == 3:
                                map_selection_list.append('')

                            if len(map_selection_list) == 4:
                                m_iso3, m_admin, m_name, pcode = \
                                    map_selection_list
                                if admin_level.level == int(m_admin):
                                    selections.append([m_name, pcode])

                    if len(selections) == 0:
                        selections = [['', '']]
                    rows.permute_and_add_list(selections)

            ew.append(rows.rows, ws)
            grouped_rows.append(rows.group_rows)
        except Exception:
            # Skip entries that fail to export instead of aborting the file
            pass

    ew.append(grouped_rows, wsg)

    # ew.save_to('/tmp/text.xls')  # REMOVE THIS
    return ew.get_http_response(title)