def check_schema_versions(db, strict=False):
    modules = {
        'ad': 'Cerebrum.modules.ADObject',
        'ad_email': 'Cerebrum.modules.no.uit.ad_email',
        'apikeys': 'Cerebrum.modules.apikeys',
        'auditlog': 'Cerebrum.modules.audit',
        'bofhd_requests': 'Cerebrum.modules.bofhd_requests.request',
        'changelog': 'Cerebrum.modules.ChangeLog',
        'disk_quota': 'Cerebrum.modules.disk_quota',
        'dns': 'Cerebrum.modules.dns',
        'email': 'Cerebrum.modules.Email',
        'entity_expire': 'Cerebrum.modules.entity_expire',
        'entity_trait': 'Cerebrum.modules.EntityTrait',
        'eventlog': 'Cerebrum.modules.EventLog',
        'events': 'Cerebrum.modules.event_publisher',
        'hostpolicy': 'Cerebrum.modules.hostpolicy',
        'legacy_users': 'Cerebrum.modules.legacy_users',
        'note': 'Cerebrum.modules.Note',
        'password_history': 'Cerebrum.modules.pwcheck.history',
        'posixuser': '******',
        'stedkode': 'Cerebrum.modules.no.Stedkode',
        'stillingskoder': 'Cerebrum.modules.no.stillingskoder',
        'consent': 'Cerebrum.modules.consent.Consent',
        'employment': 'Cerebrum.modules.no.PersonEmployment',
        'gpg': 'Cerebrum.modules.gpg',
    }
    meta = Metainfo.Metainfo(db)
    for name, value in meta.list():
        if isinstance(value, tuple):
            print("WARNING: The version number of module {modulename} is "
                  "saved as a tuple.".format(modulename=name))
            value = "%d.%d.%d" % value
        if name == Metainfo.SCHEMA_VERSION_KEY:
            if not Cerebrum.__version__ == value:
                print("WARNING: cerebrum version %s does not"
                      " match schema version %s"
                      % (Cerebrum.__version__, value))
                if strict:
                    exit(1)
        elif name.startswith('sqlmodule_'):
            name = name[len('sqlmodule_'):]
            if name not in modules:
                # print "WARNING: unknown module %s" % name
                # if strict: exit(1)
                continue
            try:
                module = dyn_import(modules[name])
                version = module.__version__
            except Exception as e:
                print("ERROR: can't find version of module %s: %s"
                      % (name, e))
                continue
            if not version == value:
                print("WARNING: module %s version %s does"
                      " not match schema version %s"
                      % (name, version, value))
                if strict:
                    exit(1)
        else:
            print("ERROR: unknown metainfo %s: %s" % (name, value))
            if strict:
                exit(1)
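

# Illustrative sketch only, not part of makedb's actual CLI: one way
# check_schema_versions() could be driven standalone.  The Factory import is
# the usual way to obtain a Cerebrum database handle; the strict-mode wiring
# here is an assumption for the example.
def _example_verify_schema(strict=True):
    from Cerebrum.Utils import Factory
    db = Factory.get('Database')()
    # With strict=True, any mismatch between a module's __version__ and the
    # version recorded in the metainfo table terminates with exit(1).
    check_schema_versions(db, strict=strict)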
def migrate_to_1_1():
    # Add extra columns to tables
    for sql in ("ALTER TABLE paid_quota_status "
                " ADD COLUMN accum_quota NUMERIC(8)",
                "ALTER TABLE paid_quota_status "
                " ADD COLUMN kroner NUMERIC(7,2)",
                "ALTER TABLE paid_quota_history "
                " ADD COLUMN pageunits_accum NUMERIC(6,0)",
                "ALTER TABLE paid_quota_history "
                " ADD COLUMN kroner NUMERIC(7,2)"):
        print sql
        db.execute(sql)

    # Fill new columns with data
    for sql in (
        "UPDATE paid_quota_status SET kroner=paid_quota*0.3, accum_quota=0",
        # pageunits_paid -> kroner
        "UPDATE paid_quota_history "
        "SET kroner=0.3*pageunits_paid, pageunits_accum=0",
        # pageunits_paid should no longer be touched by payments
        "UPDATE paid_quota_history SET pageunits_paid=0 "
        "WHERE transaction_type=%i" % co.pqtt_quota_fill_pay,
        #
        # Payments could in theory have been converted with the query
        # below, but then the total would not match
        # paid_quota_status.paid_quota*0.3.  We therefore 'fake' the paid
        # amount, so that 200.10 kr is logged even though the student only
        # paid 200.00 kr.  The difference adds up to roughly 2400 kr in
        # total, in the students' favour.
        #
        # "UPDATE paid_quota_history SET kroner=t.kroner "
        # "FROM paid_quota_transaction t "
        # "WHERE paid_quota_history.job_id=t.job_id"
    ):
        print sql
        db.execute(sql)

    # Remove obsolete columns
    for sql in ("ALTER TABLE paid_quota_status DROP COLUMN paid_quota",
                "ALTER TABLE paid_quota_transaction DROP COLUMN kroner",
                "ALTER TABLE paid_quota_status"
                " ALTER COLUMN accum_quota SET NOT NULL",
                "ALTER TABLE paid_quota_status"
                " ALTER COLUMN kroner SET NOT NULL",
                "ALTER TABLE paid_quota_history"
                " ALTER COLUMN pageunits_accum SET NOT NULL",
                "ALTER TABLE paid_quota_history"
                " ALTER COLUMN kroner SET NOT NULL"):
        print sql
        db.execute(sql)

    meta = Metainfo.Metainfo(db)
    meta.set_metainfo('sqlmodule_%s' % 'printer_quota', '1.1')
    db.commit()
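

# Worked example of the 0.3 kr/page-unit conversion above (illustrative
# numbers, not taken from production data): 200.00 kr of payments buys
# about 667 page units, and converting those back gives 667 * 0.3 =
# 200.10 kr; that is the kind of rounding difference the comment above
# accepts in the students' favour.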
def update_sql_metainfo():
    # unprocessed = get_unprocessed_count()
    # if unprocessed != 0:
    #     raise Exception(
    #         'Cannot update changelog version, still {} unprocessed'.format(
    #             unprocessed))
    from Cerebrum import Metainfo
    db = Factory.get("Database")()
    meta = Metainfo.Metainfo(db)
    meta.set_metainfo("sqlmodule_changelog", "1.4")
    db.commit()
    logger.info("changelog version set to 1.4")
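

# Illustrative sketch only: the bump above can be confirmed by reading the
# value back through Metainfo.list(), the same accessor that makedb's
# check_schema_versions() iterates over.  The helper name is made up for
# this example.
def _example_show_changelog_version():
    from Cerebrum import Metainfo
    from Cerebrum.Utils import Factory
    db = Factory.get("Database")()
    meta = Metainfo.Metainfo(db)
    for name, value in meta.list():
        if name == "sqlmodule_changelog":
            # Expected to show '1.4' once update_sql_metainfo() has run.
            print("changelog schema version: %s" % value)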
def check_schema_versions(db, strict=False):
    modules = {
        'ad': 'Cerebrum.modules.ADObject',
        'changelog': 'Cerebrum.modules.ChangeLog',
        'dns': 'Cerebrum.modules.dns',
        'email': 'Cerebrum.modules.Email',
        'entity_trait': 'Cerebrum.modules.EntityTrait',
        'eventlog': 'Cerebrum.modules.EventLog',
        'event-publisher': 'Cerebrum.modules.event_publisher',
        'hostpolicy': 'Cerebrum.modules.hostpolicy',
        'note': 'Cerebrum.modules.Note',
        'password_history': 'Cerebrum.modules.pwcheck.history',
        'posixuser': '******',
        'stedkode': 'Cerebrum.modules.no.Stedkode',
        'consent': 'Cerebrum.modules.consent.Consent',
        'employment': 'Cerebrum.modules.no.PersonEmployment',
        'virtual_group': 'Cerebrum.modules.virtualgroup',
        'virtual_group_ou': 'Cerebrum.modules.virtualgroup.OUGroup',
        'gpg': 'Cerebrum.modules.gpg',
    }
    meta = Metainfo.Metainfo(db)
    for name, value in meta.list():
        if name == Metainfo.SCHEMA_VERSION_KEY:
            if not Cerebrum._version == value:
                print(
                    "WARNING: cerebrum version %s does not"
                    " match schema version %s" % (
                        "%d.%d.%d" % Cerebrum._version,
                        "%d.%d.%d" % value))
                if strict:
                    exit(1)
        elif name.startswith('sqlmodule_'):
            name = name[len('sqlmodule_'):]
            if name not in modules:
                # print "WARNING: unknown module %s" % name
                # if strict: exit(1)
                continue
            try:
                module = dyn_import(modules[name])
                version = module.__version__
            except Exception, e:
                print "ERROR: can't find version of module %s: %s" % (name, e)
                continue
            if not version == value:
                print(
                    "WARNING: module %s version %s does"
                    " not match schema version %s" % (name, version, value))
                if strict:
                    exit(1)
        else:
            print "ERROR: unknown metainfo %s: %s" % (name, value)
            if strict:
                exit(1)
def runfile(fname, db, debug, phase):
    """Execute an SQL definition file.

    @type fname: str
    @param fname:
        The file path for the given SQL definition file.

    @type db: Cerebrum.database.Database
    @param db:
        The Cerebrum database object, used for communicating with the db.

    @type debug: int
    @param debug:
        Sets how much debug information that should be printed out, e.g.
        traceback of errors.

    @type phase: str
    @param phase:
        What phase/category/stage that should be executed. This is used to
        decide what should be executed from the SQL file.
    """
    global all_ok
    print("Reading file (phase=%s): <%s>" % (phase, fname))
    statements = list(sql_parser.parse_sql_file(fname))
    output_col = None
    max_col = 78
    metainfo = {}
    for _, for_phase, for_rdbms, stmt in sql_parser.categorize(statements):
        if for_phase == sql_parser.PHASE_METAINFO:
            key, value = sql_parser.parse_metainfo(stmt)
            metainfo[key] = value
            continue
        if for_phase != phase:
            continue
        if (for_rdbms is not None and for_rdbms != db.rdbms_id):
            continue
        try:
            status = "."
            try:
                db.execute(stmt)
            except db.DatabaseError as e:
                all_ok = False
                status = "E"
                print("\n ERROR: [%s]" % stmt)
                print(e)
                if debug:
                    print(" Database error: ", end="")
                    if debug >= 2:
                        # Re-raise error, causing us to (at least)
                        # break out of this for loop.
                        raise
                    else:
                        traceback.print_exc(file=sys.stdout)
            except Exception as e:
                all_ok = False
                status = "E"
                print("\n ERROR: [%s]" % (stmt, ))
                print(e)
                traceback.print_exc(file=sys.stdout)
                raise
        finally:
            if not output_col:
                status = " " + status
                output_col = 0
            sys.stdout.write(status)
            output_col += len(status)
            if output_col >= max_col:
                sys.stdout.write("\n")
                output_col = 0
            sys.stdout.flush()

        # TODO: Why commit after each statement?  Wouldn't it be better to
        # execute statements, and do rollback on failures?  This just causes
        # us to end up in a broken state if anything goes wrong...
        db.commit()

    if phase in {
            sql_parser.PHASE_MAIN,
            sql_parser.PHASE_METAINFO,
            sql_parser.PHASE_DROP}:
        # Update metainfo
        meta = Metainfo.Metainfo(db)
        if metainfo['name'] == 'core':
            name = Metainfo.SCHEMA_VERSION_KEY
            # tuple, for some reason?
            version = metainfo['version'].version
        else:
            name = 'sqlmodule_%s' % metainfo['name']
            version = str(metainfo['version'])
        if phase == 'drop':
            meta.del_metainfo(name)
        else:
            meta.set_metainfo(name, version)
        db.commit()

    if output_col is not None:
        print("")
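

# Illustrative sketch only, not part of makedb: installing a single module
# file means running it through runfile() once per phase, in the same order
# the test setup below uses ('code' before 'main'); the 'drop' phase
# reverses this and also deletes the module's metainfo entry.  The file
# path here is hypothetical.
def _example_install_module(db, debug=0):
    fname = '/path/to/design/mod_example.sql'  # hypothetical path
    runfile(fname, db, debug, 'code')   # code value definitions first
    runfile(fname, db, debug, 'main')   # then tables, constraints, metainfo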
def setupCerebrum(cls):
    """Sets up an empty Cerebrum database and fill it with needed data.
    Imitates makedb.py for doing this."""
    # TODO: does changing cereconf affect the different modules?
    # TODO: doesn't work correctly when several classes needs setup and
    #       teardown... fix.
    cls.dbname = 'nosetest_individuation_%s' % int(random.random() *
                                                   1000000000)
    print "Database: ", cls.dbname
    cereconf.CEREBRUM_DATABASE_NAME_new = cls.dbname
    cereconf.CEREBRUM_DATABASE_NAME_original = \
        cereconf.CEREBRUM_DATABASE_NAME
    cls.dbuser = (cereconf.CEREBRUM_DATABASE_CONNECT_DATA['table_owner']
                  or cereconf.CEREBRUM_DATABASE_CONNECT_DATA['user'])

    def read_password(user, system, host=None, *args):
        """Mockup of Util's password reader"""
        # Would prefer to overwrite only 'system' and run original function:
        # return Utils.read_password_original(
        #     user=user,
        #     system=cereconf.CEREBRUM_DATABASE_NAME_original)
        if system == cereconf.CEREBRUM_DATABASE_NAME_new:
            system = cereconf.CEREBRUM_DATABASE_NAME_original
        filename = cls.helper_generate_password_filename(user, system, host)
        f = file(filename)
        try:
            # .rstrip() removes any trailing newline, if present.
            dbuser, dbpass = f.readline().rstrip('\n').split('\t', 1)
            assert dbuser == user
            return dbpass
        finally:
            f.close()
    # Utils.read_password_original = Utils.read_password
    # could this be referenced to somehow?
    Utils.read_password = read_password

    # create a password file for the new database
    os.link(
        cls.helper_generate_password_filename(
            cls.dbuser,
            cereconf.CEREBRUM_DATABASE_NAME_original,
            cereconf.CEREBRUM_DATABASE_CONNECT_DATA['host']),
        cls.helper_generate_password_filename(
            cls.dbuser,
            cls.dbname,
            cereconf.CEREBRUM_DATABASE_CONNECT_DATA['host']))

    # TODO: This requires an already existing database to first connect to,
    # for creating temporary. Could it be done without any db instead?
    # TODO: Could try to connect to a default database instead, i.e.
    # 'postgres', or maybe 'cerebrum', if the first connection attempt
    # doesn't work.
    db = Factory.get('Database')(user=cls.dbuser)
    db.execute('rollback')
    db.execute('create database %s' % cls.dbname)
    db.commit()
    db.close()
    del db

    cereconf.CEREBRUM_DATABASE_NAME = cls.dbname
    cereconf.CEREBRUM_DDL_DIR = os.path.join(os.path.dirname(__file__),
                                             "../../../design")
    cls.db = Factory.get('Database')(user=cls.dbuser)
    cls.db.cl_init(change_program='nosetest')

    # Force all Constants-writing to use the same db-connection
    from Cerebrum.Constants import _CerebrumCode
    _CerebrumCode.sql.fset(None, cls.db)

    from Cerebrum import Metainfo
    import makedb
    reload(makedb)  # TODO: does this fix that cereconf gets updated
                    #       correctly for the module?
    global meta
    meta = Metainfo.Metainfo(cls.db)
    makedb.meta = meta

    debug = 0
    extra_files = []
    # TODO: it seems a bit complex having to include all the sql files,
    # just to avoid dependencies between them? E.g. should ephorte
    # relevant data be referenced by mod_auth?
    for f in ('mod_changelog.sql', 'mod_entity_trait.sql',
              'mod_password_history.sql', 'mod_posix_user.sql',
              'mod_email.sql', 'mod_employment.sql', 'mod_sap.sql',
              'mod_printer_quota.sql', 'mod_stedkode.sql',
              'mod_ephorte.sql', 'mod_voip.sql', 'bofhd_tables.sql',
              'bofhd_auth.sql', 'mod_hostpolicy.sql', 'mod_dns.sql',
              ):
        extra_files.append(os.path.join(cereconf.CEREBRUM_DDL_DIR, f))
    # bofhd_tables.sql, bofhd_auth.sql, mod_job_runner.sql

    for f in makedb.get_filelist(cls.db, extra_files=extra_files):
        makedb.runfile(f, cls.db, debug, 'code')
    cls.db.commit()
    makedb.insert_code_values(cls.db, debug=debug)
    cls.db.commit()
    # TODO: check if this loop is necessary for testing
    for f in makedb.get_filelist(cls.db, extra_files=extra_files):
        makedb.runfile(f, cls.db, debug, 'main')
    cls.db.commit()
    makedb.makeInitialUsers(cls.db)

    # Other tweaks
    cls.setupCerebrumForIndividuation()
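

# Illustrative sketch only: a matching teardown for the setup above.  The
# name and the exact cleanup steps are assumptions; the point is that the
# temporary database has to be dropped from a connection to the original
# database, after cls.db has been closed.
def _example_teardownCerebrum(cls):
    cls.db.close()
    # Point cereconf back at the original database before reconnecting.
    cereconf.CEREBRUM_DATABASE_NAME = \
        cereconf.CEREBRUM_DATABASE_NAME_original
    db = Factory.get('Database')(user=cls.dbuser)
    db.execute('rollback')
    db.execute('drop database %s' % cls.dbname)
    db.commit()
    db.close()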
    if db_user is None:
        db_user = cereconf.CEREBRUM_DATABASE_CONNECT_DATA['user']
        if db_user is not None:
            print "'table_owner' not set in CEREBRUM_DATABASE_CONNECT_DATA."
            print "Will use regular 'user' (%s) instead." % db_user
    db = Factory.get('Database')(user=db_user)
    db.cl_init(change_program="makedb")

    # Force all Constants-writing to use the same db-connection
    # as CREATE TABLE++
    # TDB: could _CerebrumCode have a classmethod to do this, and
    # also empty all cached constants?
    from Cerebrum.Constants import _CerebrumCode
    _CerebrumCode.sql.fset(None, db)

    meta = Metainfo.Metainfo(db)
    for opt, val in opts:
        if opt in ('-h', '--help'):
            usage()
        if opt in ('-d', '--debug'):
            debug += 1
        elif opt == '--drop':
            # We won't drop any tables (which might be holding data)
            # unless we're explicitly asked to do so.
            do_drop = True
        elif opt == '--clean-codes-from-cl':
            clean_codes_from_change_log(db)
            sys.exit()
        elif opt == '--only-insert-codes':
            insert_code_values(db)
            check_schema_versions(db)