Example #1
from operator import methodcaller
from typing import Dict, List

# MoraHelper, ExportUser and create_mapping come from project-specific modules.


def main(mora_base: str, use_ad: bool, output_file_path: str) -> None:
    mh = MoraHelper(hostname=mora_base, export_ansi=True)

    employees: List[ExportUser] = create_mapping(mh, use_ad)
    # Serialise every ExportUser to a plain dict so it can be written as a CSV row.
    employee_dicts: List[Dict] = list(map(methodcaller("dict"), employees))

    fields = ["cpr", "mo_uuid", "ad_guid", "sam_account_name"]
    mh._write_csv(fields, employee_dicts, output_file_path)
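
The snippet above turns a list of export objects into plain dicts before writing the CSV. A minimal, self-contained sketch of that data flow, using a hypothetical stand-in for ExportUser (the real class and create_mapping live in the project and are not shown here):

from dataclasses import asdict, dataclass
from operator import methodcaller
from typing import Dict, List


@dataclass
class FakeExportUser:
    # Hypothetical stand-in for ExportUser, for illustration only.
    cpr: str
    mo_uuid: str
    ad_guid: str
    sam_account_name: str

    def dict(self) -> Dict[str, str]:
        return asdict(self)


employees: List[FakeExportUser] = [
    FakeExportUser("0101700000", "mo-uuid-1", "ad-guid-1", "jdoe"),
]
# methodcaller("dict") calls .dict() on every element, yielding CSV-ready rows.
employee_dicts: List[Dict[str, str]] = list(map(methodcaller("dict"), employees))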
Example #2
    def export_engagement(self, mh: MoraHelper, filename, lc, lc_historic):
        """Export engagement rows for every employee, reading from the
        LoraCache when available and from MO via MoraHelper otherwise."""
        rows = []

        logger.info("Reading users")
        if lc:
            employees = list(map(lambda x: x[0], lc.users.values()))
        else:
            employees = mh.read_all_users()

        logger.info("Reading engagements")
        # Todo: This is O(#employees x #engagements); pre-sorting the engagements
        # would make it O(#employees + #engagements). Consider whether that is
        # worth the effort (see the sketch after this example).
        for employee in employees:
            logger.info("employee: %r", employee)
            if lc:
                rows.extend(self._gen_from_loracache(employee, lc, lc_historic))
            else:
                rows.extend(self._gen_from_mo(employee, mh))

        mh._write_csv(self.fieldnames, rows, filename)
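
The Todo comment points at a pre-sorting optimisation: group the engagements by employee once, then look them up per employee. A sketch of that idea, assuming (purely for illustration) that engagements are dicts carrying a "user_uuid" key and employees are dicts carrying a "uuid" key:

from collections import defaultdict
from typing import Dict, List


def group_engagements_by_employee(engagements: List[Dict]) -> Dict[str, List[Dict]]:
    # One pass over the engagements builds the index ...
    by_employee: Dict[str, List[Dict]] = defaultdict(list)
    for engagement in engagements:
        by_employee[engagement["user_uuid"]].append(engagement)
    return by_employee


def engagement_rows(employees: List[Dict], engagements: List[Dict]) -> List[Dict]:
    # ... and one pass over the employees consumes it, so the total cost is
    # O(#employees + #engagements) instead of O(#employees x #engagements).
    by_employee = group_engagements_by_employee(engagements)
    rows: List[Dict] = []
    for employee in employees:
        rows.extend(by_employee.get(employee["uuid"], []))
    return rows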
Example #3
import pathlib
import time

# MoraHelper, LoraCache, SETTINGS, logger and the export_* helpers come from
# project-specific modules that are not shown in this excerpt.


def main(speedup, dry_run=None):
    t = time.time()

    mh = MoraHelper(hostname=SETTINGS['mora.base'], export_ansi=False)

    dest_folder = pathlib.Path(SETTINGS['mora.folder.query_export'])
    root_unit = SETTINGS['exporters.plan2learn.root_unit']

    if speedup:
        # Here we should activate read-only mode: the actual-state and
        # full-history dumps need to be in sync.

        # The full-history cache does not calculate derived data, so we
        # must fetch both kinds.
        lc = LoraCache(resolve_dar=True, full_history=False)
        lc.populate_cache(dry_run=dry_run, skip_associations=True)
        lc.calculate_derived_unit_data()
        lc.calculate_primary_engagements()

        lc_historic = LoraCache(resolve_dar=False,
                                full_history=True,
                                skip_past=True)
        lc_historic.populate_cache(dry_run=dry_run, skip_associations=True)
        # Here we should de-activate read-only mode
    else:
        lc = None
        lc_historic = None

    # Todo: We need the nodes structure to keep the output consistent;
    # consider whether saving the ~70 seconds is worth the implementation
    # time of reading this from the cache.
    nodes = mh.read_ou_tree(root_unit)

    brugere_rows = export_bruger(mh, nodes, lc, lc_historic)
    print('Bruger: {}s'.format(time.time() - t))
    logger.info('Bruger: {}s'.format(time.time() - t))

    filename = str(dest_folder / 'plan2learn_organisation.csv')
    eksporterede_afdelinger = export_organisation(mh, nodes, filename, lc)
    print('Organisation: {}s'.format(time.time() - t))
    logger.info('Organisation: {}s'.format(time.time() - t))

    filename = str(dest_folder / 'plan2learn_engagement.csv')
    brugere_rows = export_engagement(mh, filename, eksporterede_afdelinger,
                                     brugere_rows, lc, lc_historic)
    print('Engagement: {}s'.format(time.time() - t))
    logger.info('Engagement: {}s'.format(time.time() - t))

    filename = str(dest_folder / 'plan2learn_stillingskode.csv')
    export_stillingskode(mh, nodes, filename)
    print('Stillingskode: {}s'.format(time.time() - t))
    logger.info('Stillingskode: {}s'.format(time.time() - t))

    filename = str(dest_folder / 'plan2learn_leder.csv')
    export_leder(mh, nodes, filename, eksporterede_afdelinger)
    print('Leder: {}s'.format(time.time() - t))
    logger.info('Leder: {}s'.format(time.time() - t))

    # Now export the fully populated bruger rows to CSV.
    filename = str(dest_folder / 'plan2learn_bruger.csv')
    brugere_fieldnames = [
        'BrugerId', 'CPR', 'Navn', 'E-mail', 'Mobil', 'Stilling'
    ]
    mh._write_csv(brugere_fieldnames, brugere_rows, filename)

    print('Export completed')
    logger.info('Export completed')
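
For reference, main only needs the speedup flag and an optional dry_run argument; a minimal invocation sketch (the argument values are illustrative, not taken from the project's runner):

if __name__ == '__main__':
    # speedup=True takes the LoraCache path above; dry_run is forwarded
    # unchanged to populate_cache.
    main(speedup=True, dry_run=True)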