Example #1
0
def import_opus(
    ad_reader=None,
    import_all: bool = False,
    import_last=False,
    opus_id=None,
    rundb_write: bool = True,
) -> None:
    """Import one or all files from opus even if no previous files have been imported.

    By default only the earliest available dump is imported; `import_last`
    selects only the newest dump and takes precedence over `import_all`,
    which imports every dump in chronological order.
    """
    settings = load_settings()
    unit_filter_ids = settings.get("integrations.opus.units.filter_ids", [])
    skip_employees = settings.get("integrations.opus.skip_employees", False)
    dumps = opus_helpers.read_available_dumps()

    available_dates = dumps.keys()
    if import_last:
        selected_dates = [max(available_dates)]
    elif import_all:
        selected_dates = sorted(available_dates)
    else:
        # Default: read the first (earliest) file only.
        selected_dates = [min(available_dates)]

    # Walk (previous, current) date pairs; the leading None marks that the
    # first import has no predecessor.
    for previous_date, current_date in pairwise(prepend(None, selected_dates)):
        import_one(
            ad_reader,
            current_date,
            previous_date,
            dumps,
            unit_filter_ids,
            opus_id=opus_id,
            rundb_write=rundb_write,
        )
Example #2
0
def list_org_units(session, org_name: str) -> list:
    """Return all org units as tuples, with the column headers as the first element."""
    rows = session.query(Enhed.bvn, Enhed.organisatorisk_sti).all()
    frame = pd.DataFrame(rows, columns=["Enhedsnr", "Sti"])
    frame = expand_org_path(frame, "Sti")
    # Headers first, then one record tuple per unit.
    return list(prepend(frame.columns, frame.to_records(index=False)))
Example #3
0
    def _print_res_row(cls, row_key: str, res_row: Iterable[object]) -> None:
        """Write one simulation row, prefixed with its key, to the output.

        `cls` is the writer class.
        `row_key` is the row key.
        `res_row` is the simulation row.

        """
        full_row = prepend(row_key, res_row)
        cls._writer.writerow(full_row)
Example #4
0
def _fill_cp_util(processor: ProcessorDesc, program: Sequence[HwInstruction],
                  util_info: BagValDict[ICaseString, InstrState],
                  issue_rec: _IssueInfo) -> None:
    """Calculate the utilization of a new clock pulse.

    `processor` is the processor to fill the utilization of whose units
                at the current clock pulse.
    `program` is the program to execute.
    `util_info` is the unit utilization information to fill.
    `issue_rec` is the issue record.

    """
    # Units that can accept newly issued instructions this clock pulse.
    in_units = chain(processor.in_out_ports, processor.in_ports)
    # Destination sinks for in-flight instructions: a single OutSink for
    # instructions leaving through the output ports, prepended to one
    # UnitSink per output port and internal unit.
    dst_units = more_itertools.prepend(
        _instr_sinks.OutSink(_get_out_ports(processor)),
        map(lambda dst: _instr_sinks.UnitSink(dst, program),
            chain(processor.out_ports, processor.internal_units)))
    # In-flight instructions are moved (via _mov_flights, evaluated as an
    # argument) before _fill_inputs issues new instructions into the
    # capability map built from the sorted input unit models.
    _fill_inputs(_build_cap_map(processor_utils.units.sorted_models(in_units)),
                 program, util_info, _mov_flights(dst_units,
                                                  util_info), issue_rec)
Example #5
0
    def test_stalled_outputs_are_not_flushed(self, extra_instr_lst):
        """Test data hazards at output ports.

        `self` is this test case.
        `extra_instr_lst` is the extra instructions to execute after the
                          ones causing the hazard.

        """
        # Instruction 1 sources R1, which instruction 0 writes, creating
        # the data hazard; the extra instructions follow.
        program = starmap(HwInstruction, chain(
            [[[], "R1", "ALU"], [["R1"], "R2", "ALU"]], extra_instr_lst))
        extra_instr_len = len(extra_instr_lst)
        # core 2 is sized so the stalled instruction and all extra
        # instructions fit in the same clock pulse.
        cores = starmap(lambda name, width: UnitModel(
            ICaseString(name), width, ["ALU"], LockInfo(True, True), []),
                        [("core 1", 1), ("core 2", 1 + extra_instr_len)])
        extra_instr_seq = range(2, 2 + extra_instr_len)
        # Expected pulse 1: instruction 0 on core 1, instruction 1
        # data-stalled on core 2 alongside the extra instructions.
        # Expected pulse 2: instruction 1 proceeds on core 2 — the stalled
        # output was not flushed.
        assert simulate(
            tuple(program), HwSpec(ProcessorDesc([], [], cores, []))) == [
                BagValDict(cp_util) for cp_util in
                [{ICaseString("core 1"): [InstrState(0)], ICaseString(
                    "core 2"): starmap(InstrState, more_itertools.prepend(
                        [1, StallState.DATA],
                        ([instr] for instr in extra_instr_seq)))},
                 {ICaseString("core 2"): [InstrState(1)]}]]
Example #6
0
    def test_hazard(self, in_width, in_mem_util, out_unit_params, extra_util):
        """Test detecting structural hazards.

        `self` is this test case.
        `in_width` is the width of the input unit.
        `in_mem_util` is the list of input unit capabilities requiring
                      memory utilization.
        `out_unit_params` are the creation parameters of output units.
        `extra_util` is the extra utilization beyond the second clock
                     pulse.

        """
        # A single input unit feeds all output units.
        in_unit = UnitModel(ICaseString("input"), in_width, ["ALU"],
                            LockInfo(True, False), in_mem_util)
        out_units = (UnitModel(ICaseString(name), width, ["ALU"],
                               LockInfo(False, True), mem_access)
                     for name, width, mem_access in out_unit_params)
        # Wire every output unit to receive instructions from the input unit.
        out_units = (FuncUnit(out_unit, [in_unit]) for out_unit in out_units)
        # First clock pulse: the input unit is filled up to its width.
        cp1_util = {ICaseString("input"): map(InstrState, range(in_width))}
        # The expected utilization is the first-pulse map followed by the
        # caller-supplied utilization for the remaining pulses.
        assert simulate(
            [HwInstruction([], out_reg, "ALU") for out_reg in ["R1", "R2"]],
            HwSpec(ProcessorDesc([in_unit], out_units, [], []))) == list(
                map(BagValDict, more_itertools.prepend(cp1_util, extra_util)))
Example #7
0
 def test_multiple(self):
     """A multi-character value is yielded whole, ahead of the iterator."""
     prefix = 'ab'
     tail = iter('cdefg')
     result = tuple(mi.prepend(prefix, tail))
     self.assertEqual(result, ('ab',) + tuple('cdefg'))
Example #8
0
 def test_basic(self):
     """Prepending a single value restores the full sequence."""
     head = 'a'
     tail = iter('bcdefg')
     self.assertEqual(list(mi.prepend(head, tail)), list('abcdefg'))
Example #9
0
def list_MED_members(session, org_names: dict) -> list:
    """Lists all "tilknytninger" (associations) to an organisation.

    `org_names` maps the key "løn" to the payroll organisation name and
    "MED" to the MED organisation name.

    Returns a list of tuples with titles as first element
    and data on members in subsequent tuples. Example:
    [
        ("Navn", "Email", "Tilknytningstype", "Enhed"),
        ("Fornavn Efternavn", "*****@*****.**", "Formand", "Enhed")
    ]
    """
    alle_enheder = set_of_org_units(session, org_names["løn"])
    alle_MED_enheder = set_of_org_units(session, org_names["MED"])
    # Subquery: AD e-mail per user, excluding addresses marked secret.
    Emails = (
        session.query(Adresse.værdi, Adresse.bruger_uuid)
        .filter(
            Adresse.adressetype_titel == "AD-Email",
            or_(
                Adresse.synlighed_titel.is_(None),
                Adresse.synlighed_titel != "Hemmelig",
            ),
        )
        .subquery()
    )
    # Subquery: AD phone number per user, excluding numbers marked secret.
    Phonenr = (
        session.query(Adresse.værdi, Adresse.bruger_uuid)
        .filter(
            Adresse.adressetype_titel == "AD-Telefonnummer",
            or_(
                Adresse.synlighed_titel.is_(None),
                Adresse.synlighed_titel != "Hemmelig",
            ),
        )
        .subquery()
    )
    # Subquery: employment unit name and path per user, restricted to
    # units under the payroll organisation.
    eng_unit = (
        session.query(
            Enhed.navn, Enhed.organisatorisk_sti, Engagement.bruger_uuid
        ).filter(
            Enhed.uuid == Engagement.enhed_uuid,
            Engagement.enhed_uuid.in_(alle_enheder),
            Engagement.bruger_uuid == Bruger.uuid,
        )
    ).subquery()

    query = (
        session.query(
            Bruger.fornavn + " " + Bruger.efternavn,
            Emails.c.værdi,
            Phonenr.c.værdi,
            Tilknytning.tilknytningstype_titel,
            Enhed.navn,
            eng_unit.c.navn,
            eng_unit.c.organisatorisk_sti,
        )
        .filter(
            Enhed.uuid == Tilknytning.enhed_uuid,
            Tilknytning.enhed_uuid.in_(alle_MED_enheder),
            Tilknytning.bruger_uuid == Bruger.uuid,
        )
        # Outer joins: members without a visible e-mail/phone still appear,
        # with None in those columns.
        .join(Emails, Emails.c.bruger_uuid == Bruger.uuid, isouter=True)
        .join(Phonenr, Phonenr.c.bruger_uuid == Bruger.uuid, isouter=True)
        .join(eng_unit, eng_unit.c.bruger_uuid == Bruger.uuid)
        .order_by(Bruger.efternavn)
    )
    data = query.all()
    data_df = pd.DataFrame(
        data,
        columns=[
            "Navn",
            "Email",
            "Telefonnummer",
            "Tilknytningstype",
            "Tilknytningsenhed",
            "Ansættelsesenhed",
            "Sti",
        ],
    )
    # Split the "Sti" path column into one column per organisational level.
    data_df = expand_org_path(data_df, "Sti")
    # Return data as a list of tuples with columns as the first element
    parsed_data = list(prepend(data_df.columns, data_df.to_records(index=False)))
    return parsed_data
Example #10
0
def list_employees(session, org_name: str) -> list:
    """Lists all employees in organisation.

    Returns a list of tuples with titles as first element
    and data on employees in subsequent tuples. Example:
    [
        ("Navn", "CPR", "Email", "Telefon", "Enhed", "Stilling"),
        ("Fornavn Efternavn", 0123456789,  "*****@*****.**", "12345678",
            "Enhedsnavn", "Stillingsbetegnelse")
    ]
    """
    alle_enheder = set_of_org_units(session, org_name)

    # Subquery: AD e-mail per user, excluding addresses marked secret.
    Emails = (
        session.query(Adresse.værdi, Adresse.bruger_uuid)
        .filter(
            Adresse.adressetype_titel == "AD-Email",
            or_(
                Adresse.synlighed_titel.is_(None),
                Adresse.synlighed_titel != "Hemmelig",
            ),
        )
        .subquery()
    )
    # Subquery: AD phone number per user, excluding numbers marked secret.
    Phonenr = (
        session.query(Adresse.værdi, Adresse.bruger_uuid)
        .filter(
            Adresse.adressetype_titel == "AD-Telefonnummer",
            or_(
                Adresse.synlighed_titel.is_(None),
                Adresse.synlighed_titel != "Hemmelig",
            ),
        )
        .subquery()
    )
    query = (
        session.query(
            Bruger.fornavn + " " + Bruger.efternavn,
            Bruger.cpr,
            Emails.c.værdi,
            Phonenr.c.værdi,
            Enhed.navn,
            Enhed.organisatorisk_sti,
            Engagement.stillingsbetegnelse_titel,
        )
        .filter(
            Enhed.uuid == Engagement.enhed_uuid,
            Engagement.enhed_uuid.in_(alle_enheder),
            Engagement.bruger_uuid == Bruger.uuid,
        )
        # Outer joins: employees without a visible e-mail/phone still
        # appear, with None in those columns.
        .join(Emails, Emails.c.bruger_uuid == Bruger.uuid, isouter=True)
        .join(Phonenr, Phonenr.c.bruger_uuid == Bruger.uuid, isouter=True)
        .order_by(Bruger.efternavn)
    )
    data = query.all()
    data_df = pd.DataFrame(
        data,
        columns=[
            "Navn",
            "CPR",
            "AD-Email",
            "AD-Telefonnummer",
            "Enhed",
            "Sti",
            "Stilling",
        ],
    )
    # Split the "Sti" path column into one column per organisational level.
    data_df = expand_org_path(data_df, "Sti")
    # Return data as a list of tuples with columns as the first element
    parsed_data = list(prepend(data_df.columns, data_df.to_records(index=False)))
    return parsed_data
Example #11
0
 def test_multiple(self):
     """prepend() yields the value as one item, then the iterator's items."""
     item = 'ab'
     rest = iter('cdefg')
     observed = tuple(mi.prepend(item, rest))
     self.assertEqual(observed, ('ab',) + tuple('cdefg'))
Example #12
0
 def test_basic(self):
     """The prepended value comes first, then each iterator element."""
     first = 'a'
     remainder = iter('bcdefg')
     observed = list(mi.prepend(first, remainder))
     self.assertEqual(observed, list('abcdefg'))
Example #13
0
 def __iter__(self) -> Iterator[Card]:
     """Iterate over the cards, yielding the set-aside card first if any."""
     base_iter = super().__iter__()
     if self.set_aside is None:
         return base_iter
     return mitt.prepend(self.set_aside, base_iter)
 def test_multiple(self):
     """A multi-character string counts as a single prepended item."""
     value = "ab"
     source = iter("cdefg")
     self.assertEqual(tuple(mi.prepend(value, source)),
                      ("ab",) + tuple("cdefg"))
 def test_basic(self):
     """Basic prepend: the value first, then the iterator's elements."""
     value = "a"
     source = iter("bcdefg")
     self.assertEqual(list(mi.prepend(value, source)), list("abcdefg"))