Example 1
    def _read_update_serial(self, fname, update=False):
        """Parse existing serial in zonefile, and optionally updates
        the serial. Returns the serial used."""

        all_lines = []
        if os.path.exists(fname):
            with io.open(fname, 'r', encoding='UTF-8') as fin:
                for line in fin:
                    m = ZoneUtils.re_serial.search(line)
                    if m:
                        serial = m.group(1)
                        logger.debug("Old serial: %s" % serial)
                        if not update:
                            return serial
                        if serial[:-2] == time.strftime('%Y%m%d'):
                            serial = int(serial) + 1
                        else:
                            serial = time.strftime('%Y%m%d') + '01'
                        logger.debug("New serial: %s" % serial)
                        line = "%30s ; Serialnumber\n" % serial
                    all_lines.append(line)
        if not update:
            # First time this zone is written
            serial = time.strftime('%Y%m%d') + '01'
            logger.debug("First time; new serial used: %s" % serial)
            return serial
        # Rewrite the entire file in case the serial line length has changed
        f = AtomicFileWriter(fname, 'w', encoding='UTF-8')
        f.write("".join(all_lines))
        f.close()
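
The serial handling above follows the common YYYYMMDDnn zone-serial
convention: the first write of a given day gets sequence number 01, and
every later write that day just increments the serial. A minimal sketch
of that rule (next_serial is a hypothetical helper, not part of the
code above):

import time

def next_serial(old=None):
    # First serial ever, or first write of a new day: YYYYMMDD + '01'.
    today = time.strftime('%Y%m%d')
    if old is None or old[:-2] != today:
        return today + '01'
    # Same day as the previous serial: bump the two-digit counter.
    return str(int(old) + 1)

Like the code above, after 99 writes in one day the increment simply
carries over into the date digits.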
Example 2
def main(args=None):
    ENCODING = 'utf-8'
    logger = Factory.get_logger('cronjob')
    db = Factory.get(b'Database')()
    co = Factory.get(b'Constants')(db)

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-o', '--output', default='/tmp/report.html')
    commands = parser.add_subparsers(help="available commands")

    # name
    name_command = commands.add_parser(
        'name',
        help="Generate report on differences in names.")
    name_command.set_defaults(func=compare_names)
    name_command.set_defaults(check_system=co.system_sap)
    name_command.add_argument(
        'source_system',
        type=partial(argparse_const, db, co.AuthoritativeSystem))

    args = parser.parse_args(args)
    command = args.func
    del args.func

    # Other commands?
    logger.info('Generating report ({!s})'.format(args.output))
    af = AtomicFileWriter(args.output)

    report = command(db, logger, args)
    report.find('head/meta[@charset]').set('charset', ENCODING)
    af.write("<!DOCTYPE html>\n")
    af.write(ElementTree.tostring(report, encoding=ENCODING))

    af.close()
    logger.info('Done')
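
The set_defaults(func=compare_names) line is the standard argparse
idiom for subcommand dispatch: each subparser stores its handler on the
parsed namespace, and the caller invokes it as args.func(args). A
self-contained sketch (do_name is a placeholder handler):

import argparse

def do_name(args):
    # Placeholder handler; the real one would build a report.
    print("comparing names from", args.source_system)

parser = argparse.ArgumentParser()
commands = parser.add_subparsers(help="available commands")
name_cmd = commands.add_parser('name', help="Report name differences.")
name_cmd.add_argument('source_system')
name_cmd.set_defaults(func=do_name)

args = parser.parse_args(['name', 'SAP'])
args.func(args)  # dispatches to do_name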
Example 3
def write_file(filename, persons, skip_incomplete, skip_header=False):
    """Exports info in `persons' and generates file export `filename'.

    :param bool skip_incomplete: Don't write persons without all fields.
    :param bool skip_header: Do not write field header. Default: write header.
    :param [dict()] persons: Person information to write.
    :param basestring filename: The name of the file to write."""
    from string import Template
    f = AtomicFileWriter(filename)
    i = 0
    if not skip_header:
        f.write(
            'title;firstname;lastname;feide_id;'
            'email_address;phone;ssn\n')
    for person in persons:
        if skip_incomplete and not all(person.values()):
            continue
        person = dict(map(lambda (x, y): (x, '' if y is None else y),
                          person.iteritems()))
        f.write(
            Template('$title;$firstname;$lastname;$feide_id;'
                     '$email_address;$phone;$ssn\n').substitute(person))
        i += 1
    f.close()
    logger.info('Wrote %d users to file %s', i, filename)
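
Note that `lambda (x, y): ...` uses Python 2 tuple-parameter syntax and
iteritems() is Python 2 only; in Python 3 the same None-to-empty-string
normalization is a dict comprehension:

person = {'title': None, 'firstname': 'Kari', 'lastname': 'Nordmann'}
# Python 3 equivalent of the map/lambda line above.
person = {key: ('' if value is None else value)
          for key, value in person.items()}
assert person['title'] == ''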
Example 4
def get_sted_info(outfile):
    f = AtomicFileWriter(outfile, 'w')
    f.write(xml.xml_hdr + "<data>\n")

    steder = LT.GetSteder()
    for s in steder:
        column_names = LT.get_column_names(s)
        f.write(xml.xmlify_dbrow(s, xml.conv_colnames(column_names),
                                 'sted', 0) + "\n")
        komm = LT.GetStedKomm(s['fakultetnr'], s['instituttnr'], s['gruppenr'])
        for k in komm:
            column_names2 = LT.get_column_names(k)
            f.write(xml.xmlify_dbrow(k, xml.conv_colnames(column_names2),
                                     'komm') + "\n")
        # od
        f.write("</sted>\n")
    # od
    f.write("</data>\n")
    f.close()
Example 5
def process_employees(filename, db):
    """
    Read all entries from FILENAME and insert information into Cerebrum.
    """

    stream = AtomicFileWriter(filename, "w")

    person = Factory.get("Person")(db)
    const = Factory.get("Constants")(db)
    account = Factory.get("Account")(db)

    total = 0
    failed = 0

    for db_row in person.list_external_ids(const.system_sap,
                                           const.externalid_sap_ansattnr):

        if not locate_person(person, db_row.person_id):
            logger.error(
                "Aiee! list_external_ids returned person id %s, "
                "but person.find() failed", db_row.person_id)
            failed += 1
            continue
        # fi

        sap_id = str(db_row.external_id)
        phone = get_contact(person, const.contact_phone, const)
        cellphone = get_contact(person, const.contact_mobile_phone, const)
        email = get_email(person, account)

        stream.write(tuple_to_sap_row((sap_id, phone, cellphone, email)))
        stream.write("\n")
        total += 1
    # od

    stream.close()
    logger.debug("Total %d record(s) exported; %d record(s) failed", total,
                 failed)
Example 6
    def generate_hosts_file(self, fname, with_comments=False):
        f = AtomicFileWriter(fname, "w")

        # IPv4
        fm = ForwardMap(self._zone)
        order = fm.a_records.keys()
        order.sort(lambda x, y: int(fm.a_records[x]['ipnr'] -
                                    fm.a_records[y]['ipnr']))

        entity_id2comment = {}
        if with_comments:
            for row in DnsOwner.DnsOwner(db).list_traits(co.trait_dns_comment):
                entity_id = int(row['entity_id'])
                entity_id2comment[entity_id] = ' # ' + row['strval']

        # If multiple A-records have the same name with different IP, the
        # dns_owner data is only shown for the first IP.
        shown_owner = {}
        for a_id in order:
            a_ref = fm.a_records[a_id]

            prefix = '%s\t%s' % (a_ref['a_ip'], self._exp_name(a_ref['name']))
            line = ''
            names = list()

            dns_owner_id = int(a_ref['dns_owner_id'])
            if dns_owner_id in shown_owner:
                # raise ValueError, "%s already shown?" % a_ref['name']
                continue
            shown_owner[dns_owner_id] = True

            for c_ref in fm.cnames.get(dns_owner_id, []):
                names.append(c_ref['name'])

            line += " " + " ".join([self._exp_name(n) for n in names])
            line += entity_id2comment.get(int(a_ref['dns_owner_id']), '')

            f.write(self._wrap_line(prefix, line))

        # IPv6
        order = fm.aaaa_records.keys()

        entity_id2comment = {}
        if with_comments:
            for row in DnsOwner.DnsOwner(db).list_traits(co.trait_dns_comment):
                entity_id = int(row['entity_id'])
                entity_id2comment[entity_id] = ' # ' + row['strval']

        # If multiple AAAA-records have the same name with different IP, the
        # dns_owner data is only shown for the first IP.
        shown_owner = {}
        for a_id in order:
            a_ref = fm.aaaa_records[a_id]

            prefix = '%s\t%s' % (a_ref['aaaa_ip'],
                                 self._exp_name(a_ref['name']))
            line = ''
            names = list()

            dns_owner_id = int(a_ref['dns_owner_id'])
            if dns_owner_id in shown_owner:
                # raise ValueError, "%s already shown?" % a_ref['name']
                continue
            shown_owner[dns_owner_id] = True

            for c_ref in fm.cnames.get(dns_owner_id, []):
                names.append(c_ref['name'])

            line += " " + " ".join([self._exp_name(n) for n in names])
            line += entity_id2comment.get(int(a_ref['dns_owner_id']), '')

            f.write(self._wrap_line(prefix, line))

        f.close()
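
The order.sort(lambda x, y: ...) call relies on Python 2's
comparison-function sort, which Python 3 removed; the same numeric-IP
ordering as a key sort would be:

# Python 3 replacement for the cmp-style sort above; `fm` is the
# ForwardMap from the example.
order = sorted(fm.a_records, key=lambda a_id: fm.a_records[a_id]['ipnr'])

(As written, the IPv6 block never sorts its order list, so the AAAA
entries come out in whatever order the keys happen to have.)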
Example 7
class ZoneUtils(object):
    """ Zone file writer.  """

    re_serial = re.compile(r'(\d+)\s*;\s*Serialnumber')

    def __init__(self, zone, origin=None):
        self._zone = zone
        self._as_reversemap = False
        if zone is None:
            self._as_reversemap = True
            self.__origin = origin

    def exp_name(self, name, no_dot=False):
        """ Expands relative names to FQDN for the zone. """
        ret = name
        if not name[-1] == '.':
            ret = name + self._zone.postfix
        if no_dot and ret[-1] == '.':
            ret = ret[:-1]
        return ret

    def trim_name(self, name):
        """ Strips FQDNs to a relative zone name. """
        if name.endswith(self._zone.postfix):
            return name[:-len(self._zone.postfix)]
        return name

    def open(self, fname):
        self._file = AtomicFileWriter(fname, "w", encoding='UTF-8')
        self._fname = fname
        return self

    def write_heads(self, heads, data_dir):
        """ Write template data to the zone file. """
        self.write(HEADER_SPLITTER)
        serial = self._read_update_serial(self._fname)
        logger.debug("write_heads; serial: %s" % serial)
        first = True
        for h in heads:
            logger.debug("Looking at header-file '%s'" % h)
            with io.open(h, "r", encoding='UTF-8') as fin:
                lines = []
                for line in fin:
                    m = ZoneUtils.re_serial.search(line)
                    if m:
                        line = "%30s ; Serialnumber\n" % serial
                    lines.append(line)
                if first and self._as_reversemap and not any(
                        x.startswith('$ORIGIN') for x in lines):
                    lines.insert(0, self.__origin)
                self.write("".join(lines))
            first = False
        self.write(EXTRA_SPLITTER)

    def close(self):
        # Suppress AtomicFileWriter's own rename: the serial must be
        # bumped in the tmp file before it is moved into place.
        self._file.replace = False
        self._file.close()
        if not self._file.discarded:
            self._read_update_serial(self._file.tmpname, update=True)
            os.rename(self._file.tmpname, self._file.name)

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def write(self, s):
        self._file.write(s)

    def _read_update_serial(self, fname, update=False):
        """Parse existing serial in zonefile, and optionally updates
        the serial. Returns the serial used."""

        all_lines = []
        if os.path.exists(fname):
            with io.open(fname, 'r', encoding='UTF-8') as fin:
                for line in fin:
                    m = ZoneUtils.re_serial.search(line)
                    if m:
                        serial = m.group(1)
                        logger.debug("Old serial: %s" % serial)
                        if not update:
                            return serial
                        if serial[:-2] == time.strftime('%Y%m%d'):
                            serial = int(serial) + 1
                        else:
                            serial = time.strftime('%Y%m%d') + '01'
                        logger.debug("New serial: %s" % serial)
                        line = "%30s ; Serialnumber\n" % serial
                    all_lines.append(line)
        if not update:
            # First time this zone is written
            serial = time.strftime('%Y%m%d') + '01'
            logger.debug("First time; new serial used: %s" % serial)
            return serial
        # Rewrite the entire file in case the serial line length has changed
        f = AtomicFileWriter(fname, 'w', encoding='UTF-8')
        f.write("".join(all_lines))
        f.close()
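
ZoneUtils.close() works because AtomicFileWriter writes everything to a
temporary file and would normally rename it over the target on close();
setting replace = False suppresses that rename so the serial can be
bumped in the tmp file first, after which close() renames it manually.
A rough sketch of the contract the code above relies on (a hypothetical
stand-in, not Cerebrum's actual implementation):

import io
import os

class SketchAtomicFileWriter(object):
    def __init__(self, name, mode='w', encoding=None):
        self.name = name              # final destination
        self.tmpname = name + '.tmp'  # everything is written here
        self.replace = True           # rename tmp over name on close?
        self.discarded = False        # set if the content is thrown away
        self._fd = io.open(self.tmpname, mode, encoding=encoding)

    def write(self, data):
        self._fd.write(data)

    def close(self):
        self._fd.close()
        if self.replace and not self.discarded:
            os.rename(self.tmpname, self.name)  # atomic on POSIX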
Example 8
def do_sillydiff(dirname, oldfile, newfile, outfile):
    """ This very silly. Why? """
    today = time.strftime("%d.%m.%Y")
    try:
        oldfile = io.open(os.path.join(dirname, oldfile), "r",
                          encoding="latin1")
        line = oldfile.readline()
        line = line.rstrip()
    except IOError:
        logger.warn("Warning, old file did not exist, assuming first run ever")
        os.link(os.path.join(dirname, newfile),
                os.path.join(dirname, outfile))
        return

    old_dict = dict()
    while line:
        key = line[0:12]
        value = old_dict.get(key, list())
        value.append(line[13:])
        old_dict[key] = value
        line = oldfile.readline()
        line = line.rstrip()
    oldfile.close()

    out = AtomicFileWriter(os.path.join(dirname, outfile), 'w',
                           encoding="latin1")
    newin = io.open(os.path.join(dirname, newfile), encoding="latin1")

    for newline in newin:
        newline = newline.rstrip()
        pnr = newline[0:12]
        data = newline[13:]
        if pnr in old_dict:
            if data not in old_dict[pnr]:
                # Some change, want to update with new values.
                out.write(newline + "\n")
            else:
                old_dict[pnr].remove(data)
            # If nothing else is left, delete the key from the dictionary
            if not old_dict[pnr]:
                del old_dict[pnr]
        else:
            # completely new entry, output unconditionally
            out.write(newline + "\n")

    # Now, there is one problem left: we cannot output the old data blindly,
    # as people's names might have changed. So, we force *every* old record to
    # the current names in Cerebrum. This may result in the exactly same
    # record being output twice, but it should be fine. 
    person = Factory.get("Person")(db)
    const = Factory.get("Constants")(db)
    logger.debug("%d old records left", len(old_dict))
    for leftpnr in old_dict:
        # FIXME: it is unsafe to assume that this will succeed
        first, last = fnr2names(person, const, leftpnr[:-1])
        if not (first and last):
            logger.warn("No name information for %s is available. %d "
                        "entry(ies) will be skipped",
                        leftpnr[:-1], len(old_dict[leftpnr]))
            continue

        for entry in old_dict[leftpnr]:
            vals = entry.split(";")
            vals[2] = first
            vals[3] = last
            vals[13] = today
            vals[17] = ""
            out.write("%s;%s\n" % (leftpnr, ";".join(vals)))
    out.close()
    newin.close()
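
Stripped of the I/O and the Cerebrum name lookup, the diff itself is
small: lines are keyed on their first 12 characters (the person number),
and a line is emitted when its data part is not among the old lines for
the same key. A sketch with a hypothetical changed_lines generator:

def changed_lines(old_lines, new_lines):
    # Index the old records: person number -> list of data parts.
    old = {}
    for line in old_lines:
        old.setdefault(line[:12], []).append(line[13:])
    for line in new_lines:
        key, data = line[:12], line[13:]
        if data in old.get(key, []):
            old[key].remove(data)  # unchanged record: consume it
        else:
            yield line             # new or changed record
    # Whatever is left in `old` afterwards are records that disappeared;
    # the function above re-emits those with refreshed names instead.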
Example 9
def get_person_info(outfile):
    """
    Henter info om alle personer i LT som er av interesse.  Ettersom
    opplysningene samles fra flere datakilder, lagres de først i en dict
    persondta
    """

    # Build a mapping from stillingskodenr to title (e.g. 'overing')
    skode2tittel = {}
    for t in LT.GetTitler():
        skode2tittel[t['stillingkodenr']] = (t['tittel'], t['univstkatkode'])
    # od

    # Build a mapping from univstkatkode to hovedkatkode (VIT etc.)
    kate2hovedkat = {}
    for t in LT.GetHovedkategorier():
        kate2hovedkat[t['univstkatkode']] = t['hovedkatkode']
    # od

    # Fetch all active employments
    tils = LT.GetTilsettinger()
    persondta = {}
    for t in tils:
        key = '-'.join(["%i" % x for x in [t['fodtdag'], t['fodtmnd'],
                                           t['fodtar'], t['personnr']]])
        if not persondta.has_key(key):
            persondta[key] = {}
        # fi

        persondta[key]['tils'] = persondta[key].get('tils', []) + [t]
    # od

    # Fetch all reservations
    res = LT.GetReservasjoner()
    reservasjoner = {}
    for r in res:
        key = '-'.join(["%i" % x for x in [r['fodtdag'], r['fodtmnd'],
                                           r['fodtar'], r['personnr']]])
        if not reservasjoner.has_key(key):
            reservasjoner[key] = {}
        # fi

        reservasjoner[key]['res'] = reservasjoner[key].get('res', []) + [r]
    # od

    # Fetch all salary postings from the last 30 days.
    #
    # We used to cache this data so that we could search over fewer
    # days, but that no longer seems necessary, as the search takes no
    # more than about a minute.
    tid = time.strftime("%Y%m%d", time.gmtime(time.time() - (3600*24*30)))
    lonnspost = LT.GetLonnsPosteringer(tid)
    for lp in lonnspost:
        key = '-'.join(["%i" % x for x in [lp['fodtdag'], lp['fodtmnd'],
                                           lp['fodtar'], lp['personnr']]])
        if not persondta.has_key(key):
            persondta[key] = {}
        # fi

        persondta[key]['bil'] = persondta[key].get('bil', []) + [lp]
    # od

    gjester = LT.GetGjester()
    for g in gjester:
        key = '-'.join(["%i" % x for x in [g['fodtdag'], g['fodtmnd'],
                                           g['fodtar'], g['personnr']]])
        if not persondta.has_key(key):
            persondta[key] = {}
        # fi

        persondta[key]['gjest'] = persondta[key].get('gjest', []) + [g]
    # od

    permisjoner = LT.GetPermisjoner()
    for p in permisjoner:
        key = '-'.join([str(x) for x in [p["fodtdag"], p["fodtmnd"],
                                         p["fodtar"], p["personnr"]]])
        if not persondta.has_key(key):
            persondta[key] = {}
        # fi

        if not persondta[key].has_key("permisjon"):
            persondta[key]["permisjon"] = {}
        # fi

        # Since LT.Permisjon(key, tilsnr) is the PK, this assignment will
        # never overwrite any information
        pkey = str(p.fields.tilsnr)
        if not persondta[key]["permisjon"].has_key(pkey):
            persondta[key]["permisjon"][pkey] = []
        # fi
        
        persondta[key]["permisjon"][pkey].append(p)
    # od


    # Write out information about the persons fetched above, and fetch
    # some additional information about them
    f = AtomicFileWriter(outfile, 'w')
    f.write(xml.xml_hdr + "<data>\n")
    for p in persondta.keys():
        fodtdag, fodtmnd, fodtar, personnr = p.split('-')
        pi = LT.GetPersonInfo(fodtdag, fodtmnd, fodtar, personnr)
        picols = LT.get_column_names(pi)
        f.write(
            xml.xmlify_dbrow(pi[0], xml.conv_colnames(picols), 'person', 0,
                             extra_attr={'fodtdag': fodtdag,
                                         'fodtmnd': fodtmnd,
                                         'fodtar': fodtar,
                                         'personnr': personnr}) + "\n")
        tlf = LT.GetArbTelefon(fodtdag, fodtmnd, fodtar, personnr)
        tlfcols = LT.get_column_names(tlf)
        for t in tlf:
            f.write("  " + xml.xmlify_dbrow(
                t, xml.conv_colnames(tlfcols), 'arbtlf') + "\n")
        # od

        komm = LT.GetPersKomm(fodtdag, fodtmnd, fodtar, personnr)
        kcols = LT.get_column_names(komm)
        for k in komm:
            f.write("  " + xml.xmlify_dbrow(
                k, xml.conv_colnames(kcols), 'komm') + "\n")
        # od

        roller = LT.GetPersonRoller(fodtdag, fodtmnd, fodtar, personnr)
        rcols = LT.get_column_names(roller)
        for r in roller:
            f.write("  " + xml.xmlify_dbrow(
                r, xml.conv_colnames(rcols), 'rolle') + "\n")
        # od

        permisjoner = persondta[p].get("permisjon", {})
        for t in persondta[p].get("tils", ()):
            attr = dict([(key, t[key]) for key in ("fakultetnr_utgift",
                                                   "instituttnr_utgift",
                                                   "gruppenr_utgift",
                                                   "prosent_tilsetting",
                                                   "dato_fra", "dato_til",
                                                   "tilsnr")])
            key = "stillingkodenr_beregnet_sist"
            attr[key] = int(t[key])
            sk = skode2tittel[t[key]]
            attr["hovedkat"] = kate2hovedkat[sk[1]]
            attr["tittel"] = sk[0]
            f.write("  " +
                    xml.xmlify_dbrow(attr.values(), attr.keys(),
                                     "tils", close_tag=0) +
                    "\n")

            formatted_leaves = output_leaves(t, permisjoner)
            for leave in formatted_leaves:
                attr = dict(leave)
                f.write("  " +
                        xml.xmlify_dbrow(attr.values(),
                                         attr.keys(), "permisjon")
                        + "\n")
            # od
            
            f.write("</tils>\n" )
        # od

        if reservasjoner.has_key(p):
            for r in reservasjoner[p].get('res', ()):
                attr = dict([(key, r[key]) for key in ("katalogkode",
                                                       "felttypekode",
                                                       "resnivakode",)])
                f.write("  " +
                        xml.xmlify_dbrow(attr.values(), attr.keys(),
                                         "res") + "\n")
            # od
        # fi
            
        prev = None
        # Order by 'stedkode', then by reverse date
        persondta[p].get('bil', []).sort(lambda x, y:
                                         cmp(make_key(x), make_key(y))
                                         or cmp(y["dato_oppgjor"],
                                                x["dato_oppgjor"]))
        for t in persondta[p].get('bil', []):
            if make_key(t) == make_key(prev):
                continue
            # fi

            attr = dict([(key, t[key]) for key in ("dato_oppgjor",
                                                   "fakultetnr_kontering",
                                                   "instituttnr_kontering",
                                                   "gruppenr_kontering",)])
            f.write("  " +
                    xml.xmlify_dbrow(attr.values(), attr.keys(),
                                     "bilag") + "\n")
            prev = t
        # od

        for g in persondta[p].get('gjest', ()):
            attr = dict([(key, g[key]) for key in ("fakultetnr",
                                                   "instituttnr",
                                                   "gruppenr",
                                                   "gjestetypekode",
                                                   "dato_fra",
                                                   "dato_til",)])
            f.write("  "
                    + xml.xmlify_dbrow(attr.values(), attr.keys(), "gjest")
                    + "\n")
        # od
 
        f.write("</person>\n")

    f.write("</data>\n")
    f.close()
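
One subtlety in the export above: xmlify_dbrow(attr.values(),
attr.keys(), ...) depends on keys() and values() of the same,
unmodified dict pairing up positionally. Both Python 2 and Python 3
guarantee this:

attr = {"dato_fra": "2004-01-01", "dato_til": "2004-12-31"}
# keys() and values() of an unmodified dict are always in matching order.
assert list(zip(attr.keys(), attr.values())) == list(attr.items())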