Example #1
def write_fnrupdate_info(outfile):
    """Lager fil med informasjon om alle fødselsnummerendringer"""
    logger.info("Writing fnrupdate info to '%s'" % outfile)
    stream = AtomicFileWriter(outfile, 'w')
    writer = xmlprinter.xmlprinter(stream,
                                   indent_level = 2,
                                   # Human-readable output
                                   data_mode = True,
                                   input_encoding = "latin1")
    writer.startDocument(encoding = "iso8859-1")

    db = Factory.get("Database")()
    const = Factory.get("Constants")(db)

    writer.startElement("data", {"source_system" : str(const.system_fs)})

    data = fs.person.list_fnr_endringer()
    for row in data:
        # Make the format resemble the corresponding FS output as closely as
        # possible.
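        # (A fødselsnummer is 11 digits: a 6-digit DDMMYY birth date followed
        # by a 5-digit personnr, hence the "%06d%05d" formatting below.)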
        attributes = { "type" : str(const.externalid_fodselsnr), 
                       "new"  : "%06d%05d" % (row["fodselsdato_naverende"],
                                              row["personnr_naverende"]),
                       "old"  : "%06d%05d" % (row["fodselsdato_tidligere"],
                                              row["personnr_tidligere"]),
                       "date" : str(row["dato_foretatt"]),
                     }
        
        writer.emptyElement("external_id", attributes)
    # od

    writer.endElement("data")
    writer.endDocument()
    stream.close()
Example #2
def main():
    """Main driver for the file generation."""

    global xmlwriter, db, const, logger

    db = Factory.get("Database")()
    const = Factory.get("Constants")(db)
    logger = Factory.get_logger("cronjob")

    try:
        opts, args = getopt.getopt(sys.argv[1:], "o:",
                                   ["out-file="])
    except getopt.GetoptError:
        usage(1)

    filename = None
    for opt, val in opts:
        if opt in ('-o', '--out-file'):
            filename = val
    if not filename:
        usage(1)    

    stream = AtomicFileWriter(filename)
    xmlwriter = xmlprinter.xmlprinter(stream,
                                      indent_level=2,
                                      # human-friendly output
                                      data_mode=True,
                                      input_encoding="UTF-8")
    # Get information about persons
    persons = fetch_person_data()
    # Get information about courses (kurs)
    courses = fetch_course_data()
    # Generate and write document
    generate_document(persons, courses)
    stream.close()
Example #3
def main():
    """Main driver for the file generation."""

    global xmlwriter, db, const, logger

    db = Factory.get("Database")()
    const = Factory.get("Constants")(db)
    logger = Factory.get_logger("cronjob")

    try:
        opts, args = getopt.getopt(sys.argv[1:], "o:",
                                   ["out-file="])
    except getopt.GetoptError:
        usage(1)

    filename = None
    for opt, val in opts:
        if opt in ('-o', '--out-file'):
            filename = val
    if not filename:
        usage(1)    

    stream = AtomicFileWriter(filename)
    xmlwriter = xmlprinter.xmlprinter(stream,
                                      indent_level=2,
                                      # human-friendly output
                                      data_mode=True,
                                      input_encoding="UTF-8")
    # Get information about persons
    persons = fetch_person_data()
    # Get information about courses (kurs)
    courses = fetch_course_data()
    # Generate and write document
    generate_document(persons, courses)
    stream.close()
Example #4
    def _read_update_serial(self, fname, update=False):
        """Parse existing serial in zonefile, and optionally updates
        the serial. Returns the serial used."""

        all_lines = []
        if os.path.exists(fname):
            with io.open(fname, 'r', encoding='UTF-8') as fin:
                for line in fin:
                    m = ZoneUtils.re_serial.search(line)
                    if m:
                        serial = m.group(1)
                        logger.debug("Old serial: %s" % serial)
                        if not update:
                            return serial
                        if serial[:-2] == time.strftime('%Y%m%d'):
                            serial = int(serial) + 1
                        else:
                            serial = time.strftime('%Y%m%d') + '01'
                        logger.debug("New serial: %s" % serial)
                        line = "%30s ; Serialnumber\n" % serial
                    all_lines.append(line)
        if not update:
            # First time this zone is written
            serial = time.strftime('%Y%m%d') + '01'
            logger.debug("First time; new serial used: %s" % serial)
            return serial
        # Rewrite the entire file in case the serial line length has changed
        f = AtomicFileWriter(fname, 'w', encoding='UTF-8')
        f.write("".join(all_lines))
        f.close()
Example #5
def write_fnrupdate_info(outfile):
    """Lager fil med informasjon om alle fødselsnummerendringer"""
    stream = AtomicFileWriter(outfile, 'w')
    writer = xmlprinter.xmlprinter(stream,
                                   indent_level = 2,
                                   # Human-readable output
                                   data_mode = True,
                                   input_encoding = "latin1")
    writer.startDocument(encoding = "iso8859-1")

    db = Factory.get("Database")()
    const = Factory.get("Constants")(db)

    writer.startElement("data", {"source_system" : str(const.system_fs)})

    data = fs.person.list_fnr_endringer()
    for row in data:
        # Make the format resemble the corresponding FS output as closely as
        # possible.
        attributes = { "type" : str(const.externalid_fodselsnr), 
                       "new"  : "%06d%05d" % (row["fodselsdato_naverende"],
                                              row["personnr_naverende"]),
                       "old"  : "%06d%05d" % (row["fodselsdato_tidligere"],
                                              row["personnr_tidligere"]),
                       "date" : str(row["dato_foretatt"]),
                     }
        
        writer.emptyElement("external_id", attributes)
    # od

    writer.endElement("data")
    writer.endDocument()
    stream.close()
Example #6
def main():
    global logger, const, cerebrum_db, xmlwriter
    logger = Factory.get_logger("cronjob")
    logger.info("generating a new XML for export_ACL")

    cerebrum_db = Factory.get("Database")()
    const = Factory.get("Constants")(cerebrum_db)

    opts, rest = getopt.getopt(sys.argv[1:], "f:", [
        "--out-file=",
    ])
    filename = None
    for option, value in opts:
        if option in ("-f", "--out-file"):
            filename = value
        # fi
    # od

    _cache_id_types()
    stream = AtomicFileWriter(filename)
    xmlwriter = xmlprinter.xmlprinter(
        stream,
        indent_level=2,
        # Human-readable output
        data_mode=True,
        input_encoding="latin1")
    generate_report()
    stream.close()
Example #7
def main():
    global logger, const, cerebrum_db, xmlwriter
    logger = Factory.get_logger("cronjob")
    logger.info("generating a new XML for export_ACL")

    cerebrum_db = Factory.get("Database")()
    const = Factory.get("Constants")(cerebrum_db)

    opts, rest = getopt.getopt(sys.argv[1:], "f:",
                               ["out-file="])
    filename = None
    for option, value in opts:
        if option in ("-f", "--out-file"):
            filename = value
        # fi
    # od

    _cache_id_types()
    stream = AtomicFileWriter(filename)
    xmlwriter = xmlprinter.xmlprinter(stream,
                                      indent_level = 2,
                                      # Human-readable output
                                      data_mode = True,
                                      input_encoding = "latin1")
    generate_report()
    stream.close()
Example #8
    def get_output_stream(self, filename, codec):
        """ Get a unicode-compatible stream to write. """
        if filename == '-':
            self.stream = sys.stdout
        else:
            self.stream = AtomicFileWriter(filename,
                                           mode='w',
                                           encoding=codec.name)
Example #9
def main():
    """
    Entry point for this script.
    """
    global logger, db

    db = Factory.get("Database")()
    logger = Factory.get_logger("cronjob")
    logger.info("Generating UA dump")

    try:
        options, rest = getopt.getopt(sys.argv[1:], "i:o:hdes", [
            "input-file=",
            "output-directory=",
            "help",
            "distribute",
            "employees",
            "students",
        ])
    except getopt.GetoptError:
        logger.exception("foo")
        usage(1)

    output_directory = None
    sysname = None
    person_file = None
    distribute = False
    do_employees = False
    do_students = False
    for option, value in options:
        if option in ("-o", "--output-directory"):
            output_directory = value
        elif option in ("-i", "--input-file"):
            sysname, person_file = value.split(":")
        elif option in ("-h", "--help"):
            usage(2)
        elif option in ("-d", "--distribute"):
            distribute = True
        elif option in ("-e", "--employees"):
            do_employees = True
        elif option in ("-s", "--students"):
            do_students = True

    output_file = AtomicFileWriter(
        os.path.join(output_directory, "uadata.new"), "w")
    generate_output(output_file, do_employees, do_students, sysname,
                    person_file)
    output_file.close()

    diff_file = "uadata.%s" % time.strftime("%Y-%m-%d")
    do_sillydiff(output_directory, "uadata.old", "uadata.new", diff_file)
    os.rename(os.path.join(output_directory, "uadata.new"),
              os.path.join(output_directory, "uadata.old"))

    if distribute:
        passwd = db._read_password(cereconf.UA_FTP_HOST, cereconf.UA_FTP_UNAME)
        ftpput(cereconf.UA_FTP_HOST, cereconf.UA_FTP_UNAME, passwd,
               output_directory, diff_file, "ua-lt")
Example #10
def main():
    parser = argparse.ArgumentParser(
        description='Generates a dump file for the UA database')
    parser.add_argument(
        '-i', '--input-file',
        type=text_type,
        help='system name and input file (e.g. system_sap:/path/to/file)')
    parser.add_argument(
        '-o', '--output-directory',
        type=text_type,
        help='output directory')
    parser.add_argument(
        '-d', '--distribute',
        action='store_true',
        dest='distribute',
        default=False,
        help='transfer file')
    parser.add_argument(
        '-e', '--employees',
        action='store_true',
        dest='do_employees',
        default=False,
        help='include employees in the output')
    args = parser.parse_args()

    logger.info("Generating UA dump")

    sysname, person_file = args.input_file.split(":")

    output_file = AtomicFileWriter(
        os.path.join(args.output_directory, "uadata.new"), "w",
        encoding="latin1")
    generate_output(output_file, args.do_employees, sysname, person_file)
    output_file.close()

    diff_file = "uadata.%s" % time.strftime("%Y-%m-%d")
    do_sillydiff(args.output_directory, "uadata.old", "uadata.new", diff_file)
    os.rename(os.path.join(args.output_directory, "uadata.new"),
              os.path.join(args.output_directory, "uadata.old"))

    if args.distribute:
        logger.info('Uploading file to %s', cereconf.UA_FTP_HOST)
        passwd = read_password(cereconf.UA_FTP_UNAME, cereconf.UA_FTP_HOST)
        ftpput(host=cereconf.UA_FTP_HOST,
               uname=cereconf.UA_FTP_UNAME,
               password=passwd,
               local_dir=args.output_directory,
               file=diff_file,
               remote_dir="ua-lt")

    logger.info('Done')
Example #11
def main():
    """
    Entry point for this script.
    """
    global logger, db

    db = Factory.get("Database")()
    logger = Factory.get_logger("cronjob")
    logger.info("Generating UA dump")

    try:
        options, rest = getopt.getopt(
            sys.argv[1:],
            "i:o:hdes",
            ["input-file=", "output-directory=", "help", "distribute", "employees", "students"],
        )
    except getopt.GetoptError:
        logger.exception("foo")
        usage(1)

    output_directory = None
    sysname = None
    person_file = None
    distribute = False
    do_employees = False
    do_students = False
    for option, value in options:
        if option in ("-o", "--output-directory"):
            output_directory = value
        elif option in ("-i", "--input-file"):
            sysname, person_file = value.split(":")
        elif option in ("-h", "--help"):
            usage(2)
        elif option in ("-d", "--distribute"):
            distribute = True
        elif option in ("-e", "--employees"):
            do_employees = True
        elif option in ("-s", "--students"):
            do_students = True

    output_file = AtomicFileWriter(os.path.join(output_directory, "uadata.new"), "w")
    generate_output(output_file, do_employees, do_students, sysname, person_file)
    output_file.close()

    diff_file = "uadata.%s" % time.strftime("%Y-%m-%d")
    do_sillydiff(output_directory, "uadata.old", "uadata.new", diff_file)
    os.rename(os.path.join(output_directory, "uadata.new"), os.path.join(output_directory, "uadata.old"))

    if distribute:
        passwd = db._read_password(cereconf.UA_FTP_HOST, cereconf.UA_FTP_UNAME)
        ftpput(cereconf.UA_FTP_HOST, cereconf.UA_FTP_UNAME, passwd, output_directory, diff_file, "ua-lt")
Example #12
def write_file(filename, persons, skip_incomplete, skip_header=False):
    """Exports info in `persons' and generates file export `filename'.

    :param bool skip_incomplete: Don't write persons without all fields.
    :param bool skip_header: Do not write field header. Default: write header.
    :param [dict()] persons: Person information to write.
    :param basestring filename: The name of the file to write."""
    from string import Template
    f = AtomicFileWriter(filename)
    i = 0
    if not skip_header:
        f.write('title;firstname;lastname;feide_id;'
                'email_address;phone;ssn\n')
    for person in persons:
        if skip_incomplete and not all(person.values()):
            continue
        person = dict(
            map(lambda (x, y): (x, ''
                                if y is None else y), person.iteritems()))
        f.write(
            Template('$title;$firstname;$lastname;$feide_id;'
                     '$email_address;$phone;$ssn\n').substitute(person))
        i += 1
    f.close()
    logger.info('Wrote %d users to file %s', i, filename)
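
# Aside (not from the original source): the None-to-'' normalization above,
# restated as a standalone Python 2 sketch.
_person_sketch = {'title': None, 'firstname': 'Kari'}
_person_sketch = dict((k, '' if v is None else v)
                      for k, v in _person_sketch.iteritems())
assert _person_sketch == {'title': '', 'firstname': 'Kari'}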
Example #13
def main(args=None):
    ENCODING = 'utf-8'
    logger = Factory.get_logger('cronjob')
    db = Factory.get(b'Database')()
    co = Factory.get(b'Constants')(db)

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-o', '--output', default='/tmp/report.html')
    commands = parser.add_subparsers(help="available commands")

    # name
    name_command = commands.add_parser(
        'name', help="Generate report on differences in names.")
    name_command.set_defaults(func=compare_names)
    name_command.set_defaults(check_system=co.system_sap)
    name_command.add_argument('source_system',
                              type=partial(argparse_const, db,
                                           co.AuthoritativeSystem))

    args = parser.parse_args(args)
    command = args.func
    del args.func

    # Other commands?
    logger.info('Generating report ({!s})'.format(args.output))
    af = AtomicFileWriter(args.output)

    report = command(db, logger, args)
    report.find('head/meta[@charset]').set('charset', ENCODING)
    af.write("<!DOCTYPE html>\n")
    af.write(ElementTree.tostring(report, encoding=ENCODING))

    af.close()
    logger.info('Done')
Example #14
    def build_export(self, outfile):
        """Build and create the export file."""
        logger.info("Start building export, writing to %s", outfile)

        # The header is in a non-standard format, starting with a hash mark.
        # We "fix" it by just naming the first field #username.
        fields = [
            '#username', 'fnr', 'firstname', 'lastname', 'worktitle',
            'primary_mail', 'affiliation'
        ]

        persons = []
        for person_id, export_attrs in self._export_attrs.items():
            affs = self._person_affs.get(person_id)
            aff_str = self._aff_char_separator.join(affs)
            person = {
                '#username': export_attrs[0],
                'fnr': export_attrs[1],
                'firstname': export_attrs[2],
                'lastname': export_attrs[3],
                'worktitle': export_attrs[4],
                'primary_mail': export_attrs[5],
                'affiliation': aff_str,
            }
            persons.append(person)
        logger.info("Starting write export")
        with AtomicFileWriter(outfile, mode='w', encoding='utf-8') as stream:
            writer = _csvutil.UnicodeDictWriter(stream,
                                                fields,
                                                dialect=CerebrumDialect)
            writer.writeheader()
            writer.writerows(persons)
        logger.info("Wrote data to %r", outfile)
Example #15
def write_file(filename, codec, persons, skip_incomplete, skip_header=False):
    """Exports info in `persons' and generates file export `filename'.

    :param bool skip_incomplete: Don't write persons without all fields.
    :param bool skip_header: Do not write field header. Default: write header.
    :param [dict()] persons: Person information to write.
    :param basestring filename: The name of the file to write.
    """
    fields = [
        'title', 'firstname', 'lastname', 'feide_id', 'email_address', 'phone',
        'ssn'
    ]
    i = 0
    with AtomicFileWriter(filename, mode='w', encoding=codec.name) as stream:
        writer = _csvutils.UnicodeDictWriter(stream,
                                             fields,
                                             dialect=BergHansenDialect)

        if not skip_header:
            writer.writeheader()

        for i, person in enumerate(persons, 1):
            if skip_incomplete and not all(person.values()):
                continue
            person = dict(
                map(lambda t: (t[0], '' if t[1] is None else t[1]),
                    person.items()))
            writer.writerow(person)
    logger.info('Wrote %d users to file %s', i, filename)
Example #16
def ldif_outfile(tree, filename=None, default=None, explicit_default=False,
                 max_change=None, module=cereconf):
    """(Open and) return LDIF outfile for <tree>.

    Use <filename> if specified,
    otherwise module.LDAP_<tree>['file'] unless <explicit_default>,
    otherwise return <default> (an open filehandle) if that is not None.
    (explicit_default should be set if <default> was opened from a
    <filename> argument and not from module.LDAP*['file'].)

    When opening a file, use SimilarSizeWriter where close() fails if
    the resulting file has changed more than <max_change>, or
    module.LDAP_<tree>['max_change'], or module.LDAP['max_change'].
    If max_change is unset or >= 100, just open the file normally.
    """
    if not (filename or explicit_default):
        filename = getattr(module, 'LDAP_' + tree).get('file')
        if filename:
            filename = os.path.join(module.LDAP['dump_dir'], filename)
    if filename:
        if max_change is None:
            max_change = ldapconf(tree, 'max_change', default=ldapconf(
                None, 'max_change', default=100, module=module),
                module=module)
        if max_change < 100:
            f = SimilarSizeWriter(filename, 'w')
            f.max_pct_change = max_change
        else:
            f = AtomicFileWriter(filename, 'w')
        return f
    if default:
        return default
    raise _Errors.CerebrumError(
        'Outfile not specified and LDAP_{0}["file"] not set'.format(tree))
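
# A hedged usage sketch (the config values below are assumptions, not from
# the source). With module.LDAP = {'dump_dir': '/tmp', 'max_change': 100}
# and module.LDAP_ORG = {'file': 'org.ldif', 'max_change': 10}:
#
#     f = ldif_outfile('ORG')     # resolves /tmp/org.ldif from the config
#     f.write('dn: dc=example,dc=org\n')
#     f.close()                   # SimilarSizeWriter rejects >10% size change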
Example #17
    def _build_xml(self):
        """Generate the xml files."""
        with AtomicFileWriter(self.userfile_pay, 'wb') as fh_pay, \
                AtomicFileWriter(self.userfile_track, 'wb') as fh_trk:

            logger.info("Start building pay export, writing to %s" %
                        self.userfile_pay)
            xml_pay = xmlprinter(fh_pay,
                                 indent_level=2,
                                 data_mode=True,
                                 input_encoding='utf-8')
            xml_pay.startDocument(encoding='utf-8')
            xml_pay.startElement('UserList')
            logger.info("Start building track export, writing to %s" %
                        self.userfile_track)
            xml_trk = xmlprinter(fh_trk,
                                 indent_level=2,
                                 data_mode=True,
                                 input_encoding='utf-8')
            xml_trk.startDocument(encoding='utf-8')
            xml_trk.startElement('UserList')

            for item in self.export_users:
                if item['Mode'] == "Pay":
                    xml_pay.startElement('User')
                    xml_pay.dataElement('UserLogon', item['UserLogon'])
                    xml_pay.dataElement('CostCode', item['CostCode'])
                    xml_pay.dataElement('FullName', item['FullName'])
                    xml_pay.dataElement('Email', item['Email'])
                    xml_pay.endElement('User')
                elif item['Mode'] == "Track":
                    xml_trk.startElement('User')
                    xml_trk.dataElement('UserLogon', item['UserLogon'])
                    xml_trk.dataElement('CostCode', item['CostCode'])
                    xml_trk.dataElement('FullName', item['FullName'])
                    xml_trk.dataElement('Email', item['Email'])
                    xml_trk.endElement('User')
                else:
                    logger.error("MODE invalid: %s" % (item['Mode'], ))

            xml_pay.endElement('UserList')
            xml_pay.endDocument()
            xml_trk.endElement('UserList')
            xml_trk.endDocument()
            logger.info("Writing done")
Example #18
def write_file(filename, persons, skip_incomplete, skip_header=False):
    """Exports info in `persons' and generates file export `filename'.

    :param bool skip_incomplete: Don't write persons without all fields.
    :param bool skip_header: Do not write field header. Default: write header.
    :param [dict()] persons: Person information to write.
    :param basestring filename: The name of the file to write."""
    from string import Template
    f = AtomicFileWriter(filename)
    i = 0
    if not skip_header:
        f.write(
            'title;firstname;lastname;feide_id;'
            'email_address;phone;ssn\n')
    for person in persons:
        if skip_incomplete and not all(person.values()):
            continue
        person = dict(map(lambda (x, y): (x, '' if y is None else y),
                          person.iteritems()))
        f.write(
            Template('$title;$firstname;$lastname;$feide_id;'
                     '$email_address;$phone;$ssn\n').substitute(person))
        i += 1
    f.close()
    logger.info('Wrote %d users to file %s', i, filename)
Example #19
def main(args=None):
    ENCODING = 'utf-8'
    logger = Factory.get_logger('cronjob')
    db = Factory.get(b'Database')()
    co = Factory.get(b'Constants')(db)

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-o', '--output', default='/tmp/report.html')
    commands = parser.add_subparsers(help="available commands")

    # name
    name_command = commands.add_parser(
        'name',
        help="Generate report on differences in names.")
    name_command.set_defaults(func=compare_names)
    name_command.set_defaults(check_system=co.system_sap)
    name_command.add_argument(
        'source_system',
        type=partial(argparse_const, db, co.AuthoritativeSystem))

    args = parser.parse_args(args)
    command = args.func
    del args.func

    # Other commands?
    logger.info('Generating report ({!s})'.format(args.output))
    af = AtomicFileWriter(args.output)

    report = command(db, logger, args)
    report.find('head/meta[@charset]').set('charset', ENCODING)
    af.write("<!DOCTYPE html>\n")
    af.write(ElementTree.tostring(report, encoding=ENCODING))

    af.close()
    logger.info('Done')
Example #20
class XMLWriter(object):
    # TODO: Move to separate file
    # TODO: should produce indented XML for easier readability

    def __init__(self, fname):
        self.__file = AtomicFileWriter(fname, 'wb')
        self.gen = xmlprinter.xmlprinter(self.__file,
                                         indent_level=2,
                                         data_mode=1,
                                         input_encoding='UTF-8')

    def startTag(self, tag, attrs={}):
        a = {}
        # saxutils doesn't like integers as values (convert to str)
        for k, v in attrs.iteritems():
            a[k] = str(v)
        self.gen.startElement(tag, a)

    def endTag(self, tag):
        self.gen.endElement(tag)

    def emptyTag(self, tag, attrs={}):
        a = {}
        # saxutils doesn't like integers as values (convert to str)
        for k, v in attrs.iteritems():
            a[k] = str(v)
        self.gen.emptyElement(tag, a)

    def dataElement(self, tag, data, attrs={}):
        a = {}
        for k, v in attrs.iteritems():
            a[k] = str(v)
        self.gen.dataElement(tag, data, a)

    def comment(self, data):  # TODO: implement
        self.gen.comment(data)

    def startDocument(self, encoding):
        self.gen.startDocument(encoding)

    def endDocument(self):
        self.gen.endDocument()
        self.__file.close()
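
# A minimal usage sketch (file and element names are illustrative only):
#
#     w = XMLWriter('/tmp/export.xml')
#     w.startDocument(encoding='UTF-8')
#     w.startTag('data', {'count': 1})    # int attrs are str()-converted
#     w.dataElement('name', 'Ola Nordmann')
#     w.endTag('data')
#     w.endDocument()                     # also closes the underlying file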
Example #21
class XMLWriter(object):
    # TODO: Move to separate file
    # TODO: should produce indented XML for easier readability

    def __init__(self, fname):
        self.__file = AtomicFileWriter(fname, 'wb')
        self.gen = xmlprinter.xmlprinter(
            self.__file, indent_level=2, data_mode=1,
            input_encoding='UTF-8')

    def startTag(self, tag, attrs={}):
        a = {}
        # saxutils doesn't like integers as values (convert to str)
        for k, v in attrs.iteritems():
            a[k] = str(v)
        self.gen.startElement(tag, a)

    def endTag(self, tag):
        self.gen.endElement(tag)

    def emptyTag(self, tag, attrs={}):
        a = {}
        # saxutils doesn't like integers as values (convert to str)
        for k, v in attrs.iteritems():
            a[k] = str(v)
        self.gen.emptyElement(tag, a)

    def dataElement(self, tag, data, attrs={}):
        a = {}
        for k, v in attrs.iteritems():
            a[k] = str(v)
        self.gen.dataElement(tag, data, a)

    def comment(self, data):  # TODO: implement
        self.gen.comment(data)

    def startDocument(self, encoding):
        self.gen.startDocument(encoding)

    def endDocument(self):
        self.gen.endDocument()
        self.__file.close()
Example #22
def generate_file(filename):
    """Write the data about everyone to L{filename}.

    :type filename: basestring
    :param filename:
      Output filename
    """
    ostream = AtomicFileWriter(filename, mode='wb')
    writer = csv.writer(ostream,
                        delimiter=';',
                        quotechar='',
                        quoting=csv.QUOTE_NONE,
                        # Make sure that lines end with a Unix-style linefeed
                        lineterminator='\n')
    db = Factory.get("Database")()
    person = Factory.get("Person")(db)
    const = Factory.get("Constants")()
    logger.debug("Preloading uname/account data...")
    cache = build_cache(db)
    fetch_person_fields.cache = cache
    logger.debug("...finished")
    processed = set()
    for person_id in generate_people(db):
        if person_id in processed:
            continue
        logger.debug("Processing person id=%d", person_id)
        processed.add(person_id)
        person.clear()
        person.find(person_id)
        fields = fetch_person_fields(person, const)
        if fields is not None:
            # csv module does not support unicode (str is called on all
            # non-string variables), so we encode in UTF-8 before writing CSV
            # rows
            # PYTHON3 remove encoding
            writer.writerow(
                [x.encode('UTF-8') if isinstance(x, unicode) else x for x in fields])
        else:
            logger.info("ansattnr is missing for person id=%d", person_id)
    logger.debug("Output %d people", len(processed))
    ostream.close()
Example #23
@contextlib.contextmanager
def atomic_or_stdout(filename):
    """ A writable stream context.

    This context wraps the AtomicFileWriter context, so that we can handle the
    special case '-', where we want a stdout stream that doesn't close on
    context exit.
    """
    if filename == '-':
        yield sys.stdout
    else:
        with AtomicFileWriter(filename) as f:
            yield f
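
# A minimal usage sketch; with '-' the stream is sys.stdout and is left open
# on context exit, otherwise AtomicFileWriter handles the atomic replace:
#
#     with atomic_or_stdout(filename) as stream:
#         stream.write('hello\n')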
Example #24
    def _read_update_serial(self, fname, update=False):
        """Parse existing serial in zonefile, and optionally updates
        the serial. Returns the serial used."""

        all_lines = []
        if os.path.exists(fname):
            with io.open(fname, 'r', encoding='UTF-8') as fin:
                for line in fin:
                    m = ZoneUtils.re_serial.search(line)
                    if m:
                        serial = m.group(1)
                        logger.debug("Old serial: %s" % serial)
                        if not update:
                            return serial
                        if serial[:-2] == time.strftime('%Y%m%d'):
                            serial = int(serial) + 1
                        else:
                            serial = time.strftime('%Y%m%d') + '01'
                        logger.debug("New serial: %s" % serial)
                        line = "%30s ; Serialnumber\n" % serial
                    all_lines.append(line)
        if not update:
            # First time this zone is written
            serial = time.strftime('%Y%m%d') + '01'
            logger.debug("First time; new serial used: %s" % serial)
            return serial
        # Rewrite the entire file in case the serial line length has changed
        f = AtomicFileWriter(fname, 'w', encoding='UTF-8')
        f.write("".join(all_lines))
        f.close()
Example #25
def get_fnr_update_info(filename):
    """
    Fetch updates in Norwegian social security numbers (fødselsnummer) from
    LT and generate a suitable xml dump containing the changes.
    """

    output_stream = AtomicFileWriter(filename, "w")
    writer = xmlprinter.xmlprinter(output_stream,
                                   indent_level = 2,
                                   # Output is for humans too
                                   data_mode = True,
                                   input_encoding = 'latin1')
    writer.startDocument(encoding = "iso8859-1")

    db = Factory.get("Database")()
    const = Factory.get("Constants")(db)
    writer.startElement("data", {"source_system" : str(const.system_lt)})
    
    for row in LT.GetFnrEndringer():
        # Make the format resemble the corresponding FS output as closely as
        # possible.
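        # (Here the 6-digit DDMMYY birth date is assembled from separate day,
        # month and year fields, yielding the same 11-digit fødselsnummer.)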
        attributes = { "type" : str(const.externalid_fodselsnr), 
                       "new"  : "%02d%02d%02d%05d" % (row["fodtdag_ble_til"],
                                                      row["fodtmnd_ble_til"],
                                                      row["fodtar_ble_til"],
                                                      row["personnr_ble_til"]),
                       "old"  : "%02d%02d%02d%05d" % (row["fodtdag_kom_fra"],
                                                      row["fodtmnd_kom_fra"],
                                                      row["fodtar_kom_fra"],
                                                      row["personnr_kom_fra"]),
                       "date" : str(row["dato_endret"]),
                     }
        
        writer.emptyElement("external_id", attributes)
    # od

    writer.endElement("data")
    writer.endDocument()
    output_stream.close()
Example #26
def output_xml(output_file, sysname, personfile, oufile):
    """Output the data from sysname source."""

    output_stream = AtomicFileWriter(output_file, "w")
    writer = xmlprinter.xmlprinter(output_stream,
                                   indent_level=2,
                                   data_mode=True,
                                   input_encoding="latin1")

    # Hardcoded headers
    writer.startDocument(encoding="iso8859-1")

    writer.startElement("fridaImport")

    writer.startElement("beskrivelse")
    output_element(writer, "UIO", "kilde")
    # ISO8601 style -- the *only* right way :)
    output_element(writer, time.strftime("%Y-%m-%d %H:%M:%S"), "dato")
    output_element(writer, "UiO-FRIDA", "mottager")
    writer.endElement("beskrivelse")

    writer.startElement("institusjon")
    output_element(writer, cereconf.DEFAULT_INSTITUSJONSNR, "institusjonsnr")
    output_element(writer, "Universitetet i Oslo", "navnBokmal")
    output_element(writer, "University of Oslo", "navnEngelsk")
    output_element(writer, "UiO", "akronym")
    output_element(writer, "1110", "NSDKode")
    writer.endElement("institusjon")

    # Dump all OUs
    ou_cache = output_OUs(writer, sysname, oufile)

    # Dump all people
    output_people(writer, sysname, personfile, ou_cache)

    writer.endElement("fridaImport")
    writer.endDocument()
    output_stream.close()
Example #27
def output_xml(output_file, sysname, personfile, oufile):
    """Output the data from sysname source."""

    output_stream = AtomicFileWriter(output_file, "w")
    writer = xmlprinter.xmlprinter(output_stream,
                                   indent_level=2,
                                   data_mode=True,
                                   input_encoding="latin1")

    # Hardcoded headers
    writer.startDocument(encoding="iso8859-1")

    writer.startElement("fridaImport")

    writer.startElement("beskrivelse")
    output_element(writer, "UIO", "kilde")
    # ISO8601 style -- the *only* right way :)
    output_element(writer, time.strftime("%Y-%m-%d %H:%M:%S"), "dato")
    output_element(writer, "UiO-FRIDA", "mottager")
    writer.endElement("beskrivelse")

    writer.startElement("institusjon")
    output_element(writer, cereconf.DEFAULT_INSTITUSJONSNR, "institusjonsnr")
    output_element(writer, "Universitetet i Oslo", "navnBokmal")
    output_element(writer, "University of Oslo", "navnEngelsk")
    output_element(writer, "UiO", "akronym")
    output_element(writer, "1110", "NSDKode")
    writer.endElement("institusjon")

    # Dump all OUs
    ou_cache = output_OUs(writer, sysname, oufile)

    # Dump all people
    output_people(writer, sysname, personfile, ou_cache)

    writer.endElement("fridaImport")
    writer.endDocument()
    output_stream.close()
Example #28
def generate_file(filename):
    """Write the data about everyone to L{filename}.

    :type filename: basestring
    :param filename:
      Output filename
    """
    ostream = AtomicFileWriter(filename)
    writer = csv.writer(
        ostream,
        delimiter=';',
        quotechar='',
        quoting=csv.QUOTE_NONE,
        # Make sure that lines end with a Unix-style linefeed
        lineterminator='\n')
    db = Factory.get("Database")()
    person = Factory.get("Person")(db)
    const = Factory.get("Constants")()
    logger.debug("Preloading uname/account data...")
    cache = build_cache(db)
    fetch_person_fields.cache = cache
    logger.debug("...finished")
    processed = set()
    for person_id in generate_people(db):
        if person_id in processed:
            continue
        logger.debug("Processing person id=%d", person_id)
        processed.add(person_id)
        person.clear()
        person.find(person_id)
        fields = fetch_person_fields(person, const)
        if fields is not None:
            writer.writerow(fields)
        else:
            logger.info("ansattnr is missing for person id=%d", person_id)
    logger.debug("Output %d people", len(processed))
    ostream.close()
Example #29
def generate_file(filename):
    """Write the data about everyone to L{filename}.

    :type filename: basestring
    :param filename:
      Output filename
    """
    ostream = AtomicFileWriter(filename)
    writer = csv.writer(ostream,
                        delimiter=';',
                        quotechar='',
                        quoting=csv.QUOTE_NONE,
                        # Make sure that lines end with a Unix-style linefeed
                        lineterminator='\n')
    db = Factory.get("Database")()
    person = Factory.get("Person")(db)
    const = Factory.get("Constants")()
    logger.debug("Preloading uname/account data...")
    cache = build_cache(db)
    fetch_person_fields.cache = cache
    logger.debug("...finished")
    processed = set()
    for person_id in generate_people(db):
        if person_id in processed:
            continue
        logger.debug("Processing person id=%d", person_id)
        processed.add(person_id)
        person.clear()
        person.find(person_id)
        fields = fetch_person_fields(person, const)
        if fields is not None:
            writer.writerow(fields)
        else:
            logger.info("ansattnr is missing for person id=%d", person_id)
    logger.debug("Output %d people", len(processed))
    ostream.close()
Example #30
def report_users(stream_name, databases):
    """
    Prepare status report about users in various databases.
    """

    def report_no_exc(user, report_missing, item, acc_name, func_list):
        """We don't want to bother with ignore/'"""

        try:
            return make_report(user, report_missing, item, acc_name,
                               check_expired, *func_list)
        except Exception:
            logger.exception("Failed accessing db=%s (accessor=%s):",
                             item["dbname"], acc_name)

    db_cerebrum = Factory.get("Database")()
    person = Factory.get("Person")(db_cerebrum)
    constants = Factory.get("Constants")(db_cerebrum)

    with AtomicFileWriter(stream_name, "w", encoding='UTF-8') as report_stream:

        for item in databases:
            # Report expired users for all databases
            message = report_no_exc(item.dbuser, item.report_missing,
                                    item, item["report_accessor"],
                                    [
                                        lambda acc: check_spread(
                                            acc,
                                            constants.spread_uio_nis_user),
                                        lambda acc: check_owner_status(
                                            person, constants, acc.owner_id,
                                            acc.account_name)]
                                    if item.report_missing
                                    else [])
            if message:
                report_stream.write("{} contains these {} accounts:\n"
                                    .format(item.dbname,
                                            "strange"
                                            if item.report_missing
                                            else "expired"))
                report_stream.write(message)
                report_stream.write("\n")
Example #31
def main():
    global group, constants

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-s',
                        '--spread',
                        required=True,
                        help="choose all groups with given spread")
    parser.add_argument('-o',
                        '--outfile',
                        default=DEFAULT_OUTFILE,
                        help="override default file name (%(default)s)")
    parser.add_argument(
        '-f',
        '--flat',
        default=False,
        action='store_true',
        help=("flatten out groups (find all account-members of groups and "
              "their groupmembers)"))
    args = parser.parse_args()

    logger = Factory.get_logger('cronjob')
    db = Factory.get('Database')()
    constants = Factory.get('Constants')(db)
    group = Factory.get("Group")(db)

    spread = int(constants.Spread(args.spread))

    with AtomicFileWriter(args.outfile, 'w') as stream:

        logger.info("Getting groups")
        grps = group.search(spread=spread)

        logger.info("Processing groups")
        groups_and_members = make_groups_list(args.flat, grps)

        logger.info("Writing groups file.")
        for k, v in groups_and_members.iteritems():
            stream.write(k + ';' + v)
            stream.write('\n')
    logger.info("All done.")
Example #32
def process_employees(filename, db):
    """
    Read all entries from FILENAME and insert information into Cerebrum.
    """

    stream = AtomicFileWriter(filename, "w")

    person = Factory.get("Person")(db)
    const = Factory.get("Constants")(db)
    account = Factory.get("Account")(db)

    total = 0
    failed = 0

    for db_row in person.list_external_ids(const.system_sap,
                                           const.externalid_sap_ansattnr):

        if not locate_person(person, db_row.person_id):
            logger.error(
                "Aiee! list_external_ids returned person id %s,"
                "but person.find() failed", db_row.person_id)
            failed += 1
            continue
        # fi

        sap_id = str(db_row.external_id)
        phone = get_contact(person, const.contact_phone, const)
        cellphone = get_contact(person, const.contact_mobile_phone, const)
        email = get_email(person, account)

        stream.write(tuple_to_sap_row((sap_id, phone, cellphone, email)))
        stream.write("\n")
        total += 1
    # od

    stream.close()
    logger.debug("Total %d record(s) exported; %d record(s) failed", total,
                 failed)
Example #33
def main():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        '-o', '--output',
        type=text_type,
        default=cereconf.LDAP_PERSON.get('entitlements_file', ''),
        dest='output',
        help='output file')
    args = parser.parse_args()

    if not args.output:
        parser.exit('No output file specified')

    logger.info('Start')
    logger.info('Fetching groups...')
    groups = get_groups_with_entitlement()
    logger.info('Mapping entitlements to person...')
    entitlements_per_person = map_entitlements_to_persons(groups)
    logger.info('Writing to %s', args.output)
    data = json.dumps(entitlements_per_person, ensure_ascii=False)
    with AtomicFileWriter(args.output, 'w') as fd:
        fd.write(data)
    logger.info('Done')
Example #34
def process_employees(filename, db):
    """
    Read all entries from FILENAME and insert information into Cerebrum.
    """

    stream = AtomicFileWriter(filename, "w")

    person = Factory.get("Person")(db)
    const = Factory.get("Constants")(db)
    account = Factory.get("Account")(db)

    total = 0
    failed = 0

    for db_row in person.list_external_ids(const.system_sap,
                                           const.externalid_sap_ansattnr):

        if not locate_person(person, db_row.person_id):
            logger.error("Aiee! list_external_ids returned person id %s,"
                         "but person.find() failed",
                         db_row.person_id)
            failed += 1
            continue
        # fi

        sap_id = str(db_row.external_id)
        phone = get_contact(person, const.contact_phone, const)
        cellphone = get_contact(person, const.contact_mobile_phone, const)
        email = get_email(person, account)
            
        stream.write(tuple_to_sap_row((sap_id, phone, cellphone, email)))
        stream.write("\n")
        total += 1
    # od

    stream.close()
    logger.debug("Total %d record(s) exported; %d record(s) failed",
                 total, failed)
Example #35
class SrcExtidRemover(object):
    def __init__(self, co, pe, ssys, external_id_type):
        self.co = co
        self.pe = pe
        self.ssys = ssys
        self.external_id_type = external_id_type
        self.other_ssys = set(
            co.human2constant(s)
            for s in SYSTEM_LOOKUP_ORDER) - set([self.ssys])
        log_ents = ['Checking:'] + [text_type(s) for s in self.other_ssys]
        logger.debug(' '.join(log_ents))
        self.dump = []
        self.stream = None

    def get_persons(self):
        """
        :return generator:
            A generator that yields persons with the given id types.
        """
        logger.debug('get_persons ...')
        for row in self.pe.search_external_ids(
                source_system=self.ssys,
                id_type=self.external_id_type,
                entity_type=self.co.entity_person,
                fetchall=False):
            yield {
                'entity_id': int(row['entity_id']),
                'ext_id': int(row['external_id']),
            }

    def in_other_ssys(self):
        """
        :return bool:
            True iff external id exists in any other relevant source system
        """
        for o_ssys in self.other_ssys:
            if self.pe.get_external_id(o_ssys, self.external_id_type):
                return True
        return False

    def remover(self):
        """
        Delete the external id from this source system if it also exists
        in another relevant source system.
        """
        logger.debug('start remover ...')
        for i, person in enumerate(self.get_persons()):
            self.pe.clear()
            self.pe.find(person['entity_id'])
            if self.in_other_ssys():
                self.pe._delete_external_id(self.ssys, self.external_id_type)
                self.dump.append(person)
            if not (i + 1) % 10000:
                logger.debug(' remover: Treated {} entities'.format(i + 1))

    def get_output_stream(self, filename, codec):
        """ Get a unicode-compatible stream to write. """
        if filename == '-':
            self.stream = sys.stdout
        else:
            self.stream = AtomicFileWriter(filename,
                                           mode='w',
                                           encoding=codec.name)

    def write_csv_report(self):
        """ Write a CSV report to a stream.

        :param stream: file-like object that can write unicode strings
        :param persons: iterable with mappings that has keys ('ext_id', 'name')
        """
        writer = UnicodeWriter(self.stream, dialect=CerebrumDialect)
        for person in self.dump:
            writer.writerow((person['ext_id'], person['entity_id'],
                             time.strftime('%m/%d/%Y %H:%M:%S')))
        self.stream.flush()
        if self.stream is not sys.stdout:
            self.stream.close()
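
# A hedged driver sketch (the constant names below are assumptions, not from
# the source):
#
#     import codecs
#     remover = SrcExtidRemover(co, pe, ssys=co.system_fs,
#                               external_id_type=co.externalid_fodselsnr)
#     remover.remover()              # drop ids that also exist elsewhere
#     remover.get_output_stream('removed.csv', codecs.lookup('utf-8'))
#     remover.write_csv_report()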
Example #36
    def open(self, fname):
        self._file = AtomicFileWriter(fname, "w", encoding='UTF-8')
        self._fname = fname
        return self
Example #37
    def generate_hosts_file(self, fname, with_comments=False):
        f = AtomicFileWriter(fname, "w")

        # IPv4
        fm = ForwardMap(self._zone)
        order = sorted(fm.a_records,
                       key=lambda a_id: fm.a_records[a_id]['ipnr'])

        entity_id2comment = {}
        if with_comments:
            for row in DnsOwner.DnsOwner(db).list_traits(co.trait_dns_comment):
                entity_id = int(row['entity_id'])
                entity_id2comment[entity_id] = ' # ' + row['strval']

        # If multiple A-records have the same name with different IP, the
        # dns_owner data is only shown for the first IP.
        shown_owner = {}
        for a_id in order:
            a_ref = fm.a_records[a_id]

            prefix = '%s\t%s' % (a_ref['a_ip'], self._exp_name(a_ref['name']))
            line = ''
            names = []

            dns_owner_id = int(a_ref['dns_owner_id'])
            if dns_owner_id in shown_owner:
                # raise ValueError, "%s already shown?" % a_ref['name']
                continue
            shown_owner[dns_owner_id] = True

            for c_ref in fm.cnames.get(dns_owner_id, []):
                names.append(c_ref['name'])

            line += " " + " ".join([self._exp_name(n) for n in names])
            line += entity_id2comment.get(int(a_ref['dns_owner_id']), '')

            f.write(self._wrap_line(prefix, line))

        # IPv6
        order = fm.aaaa_records.keys()

        entity_id2comment = {}
        if with_comments:
            for row in DnsOwner.DnsOwner(db).list_traits(co.trait_dns_comment):
                entity_id = int(row['entity_id'])
                entity_id2comment[entity_id] = ' # ' + row['strval']

        # If multiple AAAA-records have the same name with different IPs, the
        # dns_owner data is only shown for the first IP.
        shown_owner = {}
        for a_id in order:
            a_ref = fm.aaaa_records[a_id]

            prefix = '%s\t%s' % (a_ref['aaaa_ip'],
                                 self._exp_name(a_ref['name']))
            line = ''
            names = []

            dns_owner_id = int(a_ref['dns_owner_id'])
            if dns_owner_id in shown_owner:
                # raise ValueError, "%s already shown?" % a_ref['name']
                continue
            shown_owner[dns_owner_id] = True

            for c_ref in fm.cnames.get(dns_owner_id, []):
                names.append(c_ref['name'])

            line += " " + " ".join([self._exp_name(n) for n in names])
            line += entity_id2comment.get(int(a_ref['dns_owner_id']), '')

            f.write(self._wrap_line(prefix, line))

        f.close()
Example #38
class ZoneUtils(object):
    """ Zone file writer.  """

    re_serial = re.compile(r'(\d+)\s*;\s*Serialnumber')

    def __init__(self, zone, origin=None):
        self._zone = zone
        self._as_reversemap = False
        if zone is None:
            self._as_reversemap = True
            self.__origin = origin

    def exp_name(self, name, no_dot=False):
        """ Expands relative names to FQDN for the zone. """
        ret = name
        if not name[-1] == '.':
            ret = name+self._zone.postfix
        if no_dot and ret[-1] == '.':
            ret = ret[:-1]
        return ret

    def trim_name(self, name):
        """ Strips FQDNs to a relative zone name. """
        if name.endswith(self._zone.postfix):
            return name[:-len(self._zone.postfix)]
        return name

    def open(self, fname):
        self._file = AtomicFileWriter(fname, "w", encoding='UTF-8')
        self._fname = fname
        return self

    def write_heads(self, heads, data_dir):
        """ Write template data to the zone file. """
        self.write(HEADER_SPLITTER)
        serial = self._read_update_serial(self._fname)
        logger.debug("write_heads; serial: %s" % serial)
        first = True
        for h in heads:
            logger.debug("Looking at header-file '%s'" % h)
            with io.open(h, "r", encoding='UTF-8') as fin:
                lines = []
                for line in fin:
                    m = ZoneUtils.re_serial.search(line)
                    if m:
                        line = "%30s ; Serialnumber\n" % serial
                    lines.append(line)
                if first and self._as_reversemap and not [
                        x for x in lines if x.startswith('$ORIGIN')]:
                    lines.insert(0, self.__origin)
                self.write("".join(lines))
            first = False
        self.write(EXTRA_SPLITTER)

    def close(self):
        self._file.replace = False
        self._file.close()
        if not self._file.discarded:
            self._read_update_serial(self._file.tmpname, update=True)
            os.rename(self._file.tmpname, self._file.name)

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def write(self, s):
        self._file.write(s)

    def _read_update_serial(self, fname, update=False):
        """Parse existing serial in zonefile, and optionally updates
        the serial. Returns the serial used."""

        all_lines = []
        if os.path.exists(fname):
            with io.open(fname, 'r', encoding='UTF-8') as fin:
                for line in fin:
                    m = ZoneUtils.re_serial.search(line)
                    if m:
                        serial = m.group(1)
                        logger.debug("Old serial: %s" % serial)
                        if not update:
                            return serial
                        if serial[:-2] == time.strftime('%Y%m%d'):
                            serial = int(serial) + 1
                        else:
                            serial = time.strftime('%Y%m%d') + '01'
                        logger.debug("New serial: %s" % serial)
                        line = "%30s ; Serialnumber\n" % serial
                    all_lines.append(line)
        if not update:
            # First time this zone is written
            serial = time.strftime('%Y%m%d') + '01'
            logger.debug("First time; new serial used: %s" % serial)
            return serial
        # Rewrite the entire file in case the serial line length has changed
        f = AtomicFileWriter(fname, 'w', encoding='UTF-8')
        f.write("".join(all_lines))
        f.close()
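
# The serial scheme above is YYYYMMDDnn. A standalone sketch (not from the
# original source) of the same increment rule used by _read_update_serial:
def _next_serial_sketch(old_serial=None):
    import time
    today = time.strftime('%Y%m%d')
    if old_serial and old_serial[:-2] == today:
        # Same day as the old serial: bump the two-digit revision counter.
        return str(int(old_serial) + 1)
    # First write of the day (or of the zone): revision 01.
    return today + '01'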
Example #39
def get_sted_info(outfile):
    f = AtomicFileWriter(outfile, 'w')
    f.write(xml.xml_hdr + "<data>\n")

    steder = LT.GetSteder()
    for s in steder:
        column_names = LT.get_column_names(s)
        f.write(xml.xmlify_dbrow(s, xml.conv_colnames(column_names), 'sted', 0) + "\n")
        komm = LT.GetStedKomm(s['fakultetnr'], s['instituttnr'], s['gruppenr'])
        for k in komm:
            column_names2 = LT.get_column_names(k)
            f.write(xml.xmlify_dbrow(k, xml.conv_colnames(column_names2), 'komm') + "\n")
        # od
        f.write("</sted>\n")
    # od 
    f.write("</data>\n")
    f.close()
Example #40
def main():
    global cerebrum_db, constants, fs_db, xmlwriter, logger, with_email, \
            with_cell, extra_contact_fields

    cerebrum_db = Factory.get("Database")()
    constants = Factory.get("Constants")(cerebrum_db)

    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('-f',
                        '--out-file',
                        dest='filename',
                        help='XML-file to be generated',
                        required=True)
    parser.add_argument('-i',
                        '--institution',
                        dest='institution',
                        help='Name of institution to put in report',
                        required=True)
    parser.add_argument('-e',
                        '--with-email',
                        dest='with_email',
                        action='store_true',
                        default=False,
                        help='Include email info')
    parser.add_argument('-c',
                        '--with-cellular',
                        dest='with_cell',
                        action='store_true',
                        default=False,
                        help='Include cellphone data')
    parser.add_argument('-x',
                        '--extra-contact-fields',
                        dest='extra_contact_fields',
                        default=None,
                        help=('Add extra contact-fields to the export. '
                              'Format: xml_name:contact_type:source_system. '
                              'contact_type and source_system must be valid '
                              'constant names.'))
    parser.add_argument('-l',
                        '--logger-name',
                        dest='logger',
                        help='Logger instance to use (default: cronjob)',
                        default='cronjob')
    args = parser.parse_args()

    if args.extra_contact_fields is not None:
        extra_fields_unparsed = args.extra_contact_fields.split(',')
        extra_fields_unparsed = [
            field_entry.strip() for field_entry in extra_fields_unparsed
        ]
        extra_contact_fields = []
        for unparsed_field in extra_fields_unparsed:
            field_raw_data = unparsed_field.split(':')
            field_dict = dict()
            field_dict['xml_name'] = field_raw_data[0]
            field_dict['contact_type'] = field_raw_data[1]
            field_dict['source_system'] = field_raw_data[2]
            extra_contact_fields.append(field_dict)
    else:
        extra_contact_fields = None

    logger = Factory.get_logger(args.logger)
    logger.info("generating ABC export")

    with_email = args.with_email
    with_cell = args.with_cell

    _cache_id_types()
    fs_db = make_fs()
    stream = AtomicFileWriter(args.filename)
    xmlwriter = xmlprinter.xmlprinter(
        stream,
        indent_level=2,
        # human-friendly output
        data_mode=True,
        input_encoding="latin1")
    generate_report(args.institution)
    stream.close()
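A hypothetical invocation (the script name is assumed) showing the -x format the parser above expects, with two comma-separated field specs:

python generate_abc_export.py -f abc.xml -i 'Example University' -e -c \
    -x 'workPhone:PHONE:SYSTEM_FS,fax:FAX:SYSTEM_SAP'

This would leave extra_contact_fields as [{'xml_name': 'workPhone', 'contact_type': 'PHONE', 'source_system': 'SYSTEM_FS'}, {'xml_name': 'fax', 'contact_type': 'FAX', 'source_system': 'SYSTEM_SAP'}]; the constant names here are made up for the example and would have to match real contact_type/source_system constants.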
Example #41
def get_person_info(outfile):
    """
    Henter info om alle personer i LT som er av interesse.  Ettersom
    opplysningene samles fra flere datakilder, lagres de først i en dict
    persondta
    """

    # Build a mapping from position code (stillingskodenr) to title (e.g. 'overing')
    skode2tittel = {}
    for t in LT.GetTitler():
        skode2tittel[t['stillingkodenr']] = (t['tittel'], t['univstkatkode'])
    # od

    # Build a mapping from univstkatkode to main category code (VIT etc.)
    kate2hovedkat = {}
    for t in LT.GetHovedkategorier():
        kate2hovedkat[t['univstkatkode']] = t['hovedkatkode']
    # od

    # Fetch all active employments
    tils = LT.GetTilsettinger()
    persondta = {}
    for t in tils:
        key = '-'.join(["%i" % x for x in [t['fodtdag'], t['fodtmnd'],
                                           t['fodtar'], t['personnr']]])
        if key not in persondta:
            persondta[key] = {}
        # fi

        persondta[key]['tils'] = persondta[key].get('tils', []) + [t]
    # od

    # Fetch all reservations
    res = LT.GetReservasjoner()
    reservasjoner = {}
    for r in res:
        key = '-'.join(["%i" % x for x in [r['fodtdag'], r['fodtmnd'],
                                           r['fodtar'], r['personnr']]])
        if key not in reservasjoner:
            reservasjoner[key] = {}
        # fi

        reservasjoner[key]['res'] = reservasjoner[key].get('res', []) + [r]
    # od

    # Fetch all salary postings from the last 30 days.
    #
    # We used to cache this data so that we could search over fewer
    # days, but that no longer seems necessary, as the query takes no
    # more than about a minute.
    tid = time.strftime("%Y%m%d", time.gmtime(time.time() - (3600*24*30)))
    lonnspost = LT.GetLonnsPosteringer(tid)
    for lp in lonnspost:
        key = '-'.join(["%i" % x for x in [lp['fodtdag'], lp['fodtmnd'],
                                           lp['fodtar'], lp['personnr']]])
        if key not in persondta:
            persondta[key] = {}
        # fi

        persondta[key]['bil'] = persondta[key].get('bil', []) + [lp]
    # od

    gjester = LT.GetGjester()
    for g in gjester:
        key = '-'.join(["%i" % x for x in [g['fodtdag'], g['fodtmnd'],
                                           g['fodtar'], g['personnr']]])
        if key not in persondta:
            persondta[key] = {}
        # fi

        persondta[key]['gjest'] = persondta[key].get('gjest', []) + [g]
    # od

    permisjoner = LT.GetPermisjoner()
    for p in permisjoner:
        key = '-'.join(["%i" % x for x in [p["fodtdag"], p["fodtmnd"],
                                           p["fodtar"], p["personnr"]]])
        if key not in persondta:
            persondta[key] = {}
        # fi

        if "permisjon" not in persondta[key]:
            persondta[key]["permisjon"] = {}
        # fi

        # Since LT.Permisjon(key, tilsnr) is the PK, this assignment will
        # never overwrite any information
        pkey = str(p.fields.tilsnr)
        if pkey not in persondta[key]["permisjon"]:
            persondta[key]["permisjon"][pkey] = []
        # fi
        
        persondta[key]["permisjon"][pkey].append(p)
    # od


    # Write out information about the persons gathered above, and
    # fetch some additional information about them
    f = AtomicFileWriter(outfile, 'w')
    f.write(xml.xml_hdr + "<data>\n")
    for p in persondta.keys():
        fodtdag, fodtmnd, fodtar, personnr = p.split('-')
        pi = LT.GetPersonInfo(fodtdag, fodtmnd, fodtar, personnr)
        picols = LT.get_column_names(pi)
        f.write(
            xml.xmlify_dbrow(pi[0],  xml.conv_colnames(picols), 'person', 0,
                             extra_attr={'fodtdag': fodtdag, 'fodtmnd':fodtmnd,
                                         'fodtar':fodtar, 'personnr': personnr}
                             ) + "\n")
        tlf = LT.GetArbTelefon(fodtdag, fodtmnd, fodtar, personnr)
        tlfcols = LT.get_column_names(tlf)
        for t in tlf:
            f.write("  "+xml.xmlify_dbrow(
                t, xml.conv_colnames(tlfcols), 'arbtlf') + "\n")
        # od

        komm = LT.GetPersKomm(fodtdag, fodtmnd, fodtar, personnr)
        kcols = LT.get_column_names(komm)
        for k in komm:
            f.write("  "+xml.xmlify_dbrow(
                k,  xml.conv_colnames(kcols), 'komm') + "\n")
        # od

        roller = LT.GetPersonRoller(fodtdag, fodtmnd, fodtar, personnr)
        rcols = LT.get_column_names(roller)
        for r in roller:
            f.write("  "+xml.xmlify_dbrow(
                r, xml.conv_colnames(rcols), 'rolle') +"\n")
        # od

        permisjoner = persondta[p].get("permisjon", {})
        for t in persondta[p].get("tils", ()):
            attr = dict([(key, t[key]) for key in ("fakultetnr_utgift",
                                                   "instituttnr_utgift",
                                                   "gruppenr_utgift",
                                                   "prosent_tilsetting",
                                                   "dato_fra", "dato_til",
                                                   "tilsnr")])
            key = "stillingkodenr_beregnet_sist"
            attr[key] = int(t[key])
            sk = skode2tittel[t[key]]
            attr["hovedkat"] = kate2hovedkat[sk[1]]
            attr["tittel"] = sk[0]
            f.write("  " +
                    xml.xmlify_dbrow(attr.values(), attr.keys(),
                                     "tils", close_tag=0) +
                    "\n")

            formatted_leaves = output_leaves(t, permisjoner)
            for leave in formatted_leaves:
                attr = dict(leave)
                f.write("  " +
                        xml.xmlify_dbrow(attr.values(),
                                         attr.keys(), "permisjon")
                        + "\n")
            # od
            
            f.write("</tils>\n" )
        # od

        if p in reservasjoner:
            for r in reservasjoner[p].get('res', ()):
                attr = dict([(key, r[key]) for key in ("katalogkode",
                                                       "felttypekode",
                                                       "resnivakode",)])
                f.write("  " +
                        xml.xmlify_dbrow(attr.values(), attr.keys(),
                                         "res") + "\n")
            # od
        # fi
            
        prev = None
        # Order by 'stedkode', then by reverse date (two stable sorts)
        bilag = persondta[p].get('bil', [])
        bilag.sort(key=lambda x: x["dato_oppgjor"], reverse=True)
        bilag.sort(key=make_key)
        for t in bilag:
            if make_key(t) == make_key(prev):
                continue
            # fi

            attr = dict([(key, t[key]) for key in ("dato_oppgjor",
                                                   "fakultetnr_kontering",
                                                   "instituttnr_kontering",
                                                   "gruppenr_kontering",)])
            f.write("  " +
                    xml.xmlify_dbrow(attr.values(), attr.keys(),
                                     "bilag") + "\n")
            prev = t
        # od

        for g in persondta[p].get('gjest', ()):
            attr = dict([(key, g[key]) for key in ("fakultetnr",
                                                   "instituttnr",
                                                   "gruppenr",
                                                   "gjestetypekode",
                                                   "dato_fra",
                                                   "dato_til",)])
            f.write("  "
                    + xml.xmlify_dbrow(attr.values(), attr.keys(), "gjest")
                    + "\n")
        # od
 
        f.write("</person>\n")

    f.write("</data>\n")
    f.close()
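The same dash-separated person key is rebuilt five times in get_person_info; a sketch of a helper that would centralize it (hypothetical, not in the source):

def person_key(row):
    # 'fodtdag-fodtmnd-fodtar-personnr', e.g. '6-5-70-12345'
    return '-'.join("%i" % row[col]
                    for col in ('fodtdag', 'fodtmnd', 'fodtar', 'personnr'))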
Example #42
def do_sillydiff(dirname, oldfile, newfile, outfile):
    """ This very silly. Why? """
    today = time.strftime("%d.%m.%Y")
    try:
        oldfile = io.open(os.path.join(dirname, oldfile), "r",
                          encoding="latin1")
        line = oldfile.readline()
        line = line.rstrip()
    except IOError:
        logger.warn("Warning, old file did not exist, assuming first run ever")
        os.link(os.path.join(dirname, newfile),
                os.path.join(dirname, outfile))
        return

    old_dict = dict()
    while line:
        key = line[0:12]
        value = old_dict.get(key, list())
        value.append(line[13:])
        old_dict[key] = value
        line = oldfile.readline()
        line = line.rstrip()
    oldfile.close()

    out = AtomicFileWriter(os.path.join(dirname, outfile), 'w',
                           encoding="latin1")
    newin = io.open(os.path.join(dirname, newfile), encoding="latin1")

    for newline in newin:
        newline = newline.rstrip()
        pnr = newline[0:12]
        data = newline[13:]
        if pnr in old_dict:
            if data not in old_dict[pnr]:
                # Some change, want to update with new values.
                out.write(newline + "\n")
            else:
                old_dict[pnr].remove(data)
            # If nothing else is left, delete the key from the dictionary
            if not old_dict[pnr]:
                del old_dict[pnr]
        else:
            # completely new entry, output unconditionally
            out.write(newline + "\n")

    # Now, there is one problem left: we cannot output the old data blindly,
    # as people's names might have changed. So, we force *every* old record
    # to the current names in Cerebrum. This may result in exactly the same
    # record being output twice, but that should be fine.
    person = Factory.get("Person")(db)
    const = Factory.get("Constants")(db)
    logger.debug("%d old records left", len(old_dict))
    for leftpnr in old_dict:
        # FIXME: it is unsafe to assume that this will succeed
        first, last = fnr2names(person, const, leftpnr[:-1])
        if not (first and last):
            logger.warn("No name information for %s is available. %d "
                        "entry(ies) will be skipped",
                        leftpnr[:-1], len(old_dict[leftpnr]))
            continue

        for entry in old_dict[leftpnr]:
            vals = entry.split(";")
            vals[2] = first
            vals[3] = last
            vals[13] = today
            vals[17] = ""
            out.write("%s;%s\n" % (leftpnr, ";".join(vals)))
    out.close()
    newin.close()
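Stripped of the I/O and the name refreshing, do_sillydiff reduces to a multiset difference keyed on the leading person id; a minimal sketch of that core (hypothetical helper, assuming the same 12-character key layout):

def changed_lines(old_lines, new_lines):
    # Key each line on its first 12 characters; compare the rest.
    old = {}
    for line in old_lines:
        old.setdefault(line[:12], []).append(line[13:])
    for line in new_lines:
        key, data = line[:12], line[13:]
        if data in old.get(key, []):
            old[key].remove(data)   # unchanged; drop it from the leftovers
        else:
            yield line              # new or changed entry
    # whatever remains in 'old' are records missing from the new file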
Example #43
 def open(self, fname):
     self._file = AtomicFileWriter(fname, "w")
     self._fname = fname
Example #45
 def __init__(self, fname):
     self.__file = AtomicFileWriter(fname, 'wb')
     self.gen = xmlprinter.xmlprinter(self.__file,
                                      indent_level=2,
                                      data_mode=1,
                                      input_encoding='UTF-8')
Example #47
 def open(self, fname):
     self._file = AtomicFileWriter(fname, "w", encoding='UTF-8')
     self._fname = fname
     return self
Example #48
class ZoneUtils(object):
    """ Zone file writer.  """

    re_serial = re.compile(r'(\d+)\s*;\s*Serialnumber')

    def __init__(self, zone, origin=None):
        self._zone = zone
        self._as_reversemap = False
        if zone is None:
            self._as_reversemap = True
            self.__origin = origin

    def exp_name(self, name, no_dot=False):
        """ Expands relative names to FQDN for the zone. """
        ret = name
        if name[-1] != '.':
            ret = name + self._zone.postfix
        if no_dot and ret[-1] == '.':
            ret = ret[:-1]
        return ret

    def trim_name(self, name):
        """ Strips FQDNs to a relative zone name. """
        if name.endswith(self._zone.postfix):
            return name[:-len(self._zone.postfix)]
        return name

    def open(self, fname):
        self._file = AtomicFileWriter(fname, "w", encoding='UTF-8')
        self._fname = fname
        return self

    def write_heads(self, heads, data_dir):
        """ Write template data to the zone file. """
        self.write(HEADER_SPLITTER)
        serial = self._read_update_serial(self._fname)
        logger.debug("write_heads; serial: %s" % serial)
        first = True
        for h in heads:
            logger.debug("Looking at header-file '%s'" % h)
            with io.open(h, "r", encoding='UTF-8') as fin:
                lines = []
                for line in fin:
                    m = ZoneUtils.re_serial.search(line)
                    if m:
                        line = "%30s ; Serialnumber\n" % serial
                    lines.append(line)
                if (first and self._as_reversemap
                        and not any(x.startswith('$ORIGIN')
                                    for x in lines)):
                    lines.insert(0, self.__origin)
                self.write("".join(lines))
            first = False
        self.write(EXTRA_SPLITTER)

    def close(self):
        self._file.replace = False
        self._file.close()
        if not self._file.discarded:
            self._read_update_serial(self._file.tmpname, update=True)
            os.rename(self._file.tmpname, self._file.name)

    def __enter__(self):
        return self

    def __exit__(self, *exc_info):
        self.close()

    def write(self, s):
        self._file.write(s)

    def _read_update_serial(self, fname, update=False):
        """Parse existing serial in zonefile, and optionally updates
        the serial. Returns the serial used."""

        all_lines = []
        if os.path.exists(fname):
            with io.open(fname, 'r', encoding='UTF-8') as fin:
                for line in fin:
                    m = ZoneUtils.re_serial.search(line)
                    if m:
                        serial = m.group(1)
                        logger.debug("Old serial: %s" % serial)
                        if not update:
                            return serial
                        if serial[:-2] == time.strftime('%Y%m%d'):
                            serial = int(serial) + 1
                        else:
                            serial = time.strftime('%Y%m%d') + '01'
                        logger.debug("New serial: %s" % serial)
                        line = "%30s ; Serialnumber\n" % serial
                    all_lines.append(line)
        if not update:
            # First time this zone is written
            serial = time.strftime('%Y%m%d') + '01'
            logger.debug("First time; new serial used: %s" % serial)
            return serial
        # Rewrite the entire file in case the serial line length has changed
        f = AtomicFileWriter(fname, 'w', encoding='UTF-8')
        f.write("".join(all_lines))
        f.close()
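Because open() returns self and close() both bumps the serial and renames the temporary file into place, ZoneUtils is intended to be driven as a context manager. A hypothetical usage sketch (zone object, file names and record data assumed):

with ZoneUtils(zone).open('/var/named/example.zone') as zu:
    zu.write_heads(head_files, data_dir)
    zu.write('www  IN  A  192.0.2.10\n')
# __exit__ calls close(): the serial is updated in the temporary file,
# which is then renamed over the previous zone file.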
Example #49
    def generate_hosts_file(self, fname, with_comments=False):
        f = AtomicFileWriter(fname, "w")

        # IPv4
        fm = ForwardMap(self._zone)
        order = sorted(fm.a_records,
                       key=lambda a_id: fm.a_records[a_id]['ipnr'])

        entity_id2comment = {}
        if with_comments:
            for row in DnsOwner.DnsOwner(db).list_traits(co.trait_dns_comment):
                entity_id = int(row['entity_id'])
                entity_id2comment[entity_id] = ' # ' + row['strval']

        # If multiple A-records share the same name with different IPs, the
        # dns_owner data is only shown for the first IP.
        shown_owner = {}
        for a_id in order:
            a_ref = fm.a_records[a_id]

            prefix = '%s\t%s' % (a_ref['a_ip'], self._exp_name(a_ref['name']))
            line = ''
            names = list()

            dns_owner_id = int(a_ref['dns_owner_id'])
            if dns_owner_id in shown_owner:
                # raise ValueError, "%s already shown?" % a_ref['name']
                continue
            shown_owner[dns_owner_id] = True

            for c_ref in fm.cnames.get(dns_owner_id, []):
                names.append(c_ref['name'])

            line += " " + " ".join([self._exp_name(n) for n in names])
            line += entity_id2comment.get(int(a_ref['dns_owner_id']), '')

            f.write(self._wrap_line(prefix, line))

        # IPv6
        order = fm.aaaa_records.keys()

        entity_id2comment = {}
        if with_comments:
            for row in DnsOwner.DnsOwner(db).list_traits(co.trait_dns_comment):
                entity_id = int(row['entity_id'])
                entity_id2comment[entity_id] = ' # ' + row['strval']

        # If multiple AAAA-records share the same name with different IPs,
        # the dns_owner data is only shown for the first IP.
        shown_owner = {}
        for a_id in order:
            a_ref = fm.aaaa_records[a_id]

            prefix = '%s\t%s' % (a_ref['aaaa_ip'],
                                 self._exp_name(a_ref['name']))
            line = ''
            names = list()

            dns_owner_id = int(a_ref['dns_owner_id'])
            if dns_owner_id in shown_owner:
                # raise ValueError, "%s already shown?" % a_ref['name']
                continue
            shown_owner[dns_owner_id] = True

            for c_ref in fm.cnames.get(dns_owner_id, []):
                names.append(c_ref['name'])

            line += " " + " ".join([self._exp_name(n) for n in names])
            line += entity_id2comment.get(int(a_ref['dns_owner_id']), '')

            f.write(self._wrap_line(prefix, line))

        f.close()
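Each emitted line follows the usual hosts(5) shape: address, tab, canonical name, any CNAME aliases, and optionally the comment trait. With illustrative values:

192.0.2.10	host.example.com. www.example.com. # machine-room 3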
Example #50
            elif value in id_list:
                logger.warn("Duplicate ID value %s (duplicate ignored)", value)
            else:
                id_list.append(value)

        elif option in ("-f", "--file"):
            output_filename = value

    # Option "--all" overrides specific id-lists
    for option, value in options:
        if option in ("-a", "--all"):
            logger.info("Option '--all' specified; all id-types will be included")
            id_list = selectors.keys()

    if not id_list:
        logger.warn("No IDs specified for export. No XML file generated")
        return 2

    stream = AtomicFileWriter(output_filename, "w")
    writer = xmlprinter.xmlprinter(stream,
                                   indent_level = 2,
                                   # Human-readable output
                                   data_mode = True,
                                   input_encoding = "latin1")
    generate_export(writer, id_list)
    stream.close()


if __name__ == "__main__":
    sys.exit(main())
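A hypothetical invocation of this export (script name assumed), relying on --all to select every known id-type:

python export_ids.py --all --file ids.xml

Note that --all is applied in a second pass over the options, so it overrides any id-types listed explicitly earlier on the command line.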