def __call__(self, *args, **kwargs):
    """Invoke the decorated callable; on any exception, roll back the session.

    Non-DataException errors are logged with their stack trace.  If an
    ``OperationalError`` escapes and a callback was registered on
    ``RollbackDecorator.callback_operational_error``, it is invoked with
    the exception before re-raising.
    """
    global mainlog
    global session
    # FIXME That's not right : how can we be sure that's the actual
    # session that has thrown the exception ?
    try:
        return self.call_decorated(*args, **kwargs)
    except Exception as e:
        session().rollback()

        # isinstance (not type ==) so DataException subclasses are also
        # treated as "already handled properly" and not noisily logged.
        if not isinstance(e, DataException):
            mainlog.info("Rollback done because of an exception")
            mainlog.exception(str(e))
            log_stacktrace()

        if RollbackDecorator.callback_operational_error is not None and isinstance(
                e, OperationalError):
            f = RollbackDecorator.callback_operational_error[0]
            f(e)

        # Bare raise re-raises the active exception with its original
        # traceback intact.
        raise
def create_all_tables():
    """Create every table, sequence and stored function of the application.

    Tables come from the SQLAlchemy metadata (existing ones are left
    untouched thanks to ``checkfirst``); the PostgreSQL functions and
    triggers are created afterwards by :func:`create_functions`.
    """
    mainlog.info("Creating all the tables and sequences")
    engine = db_engine()
    Base.metadata.create_all(engine, checkfirst=True)
    session().commit()

    mainlog.info("Creating all the functions in the database")
    create_functions(session())
    session().commit()
def extractAll(zipName, tmp_dir=""):
    """Extract a whole zip archive into ``tmp_dir``, recreating directories.

    :param zipName: path to the zip archive to read.
    :param tmp_dir: destination directory (current directory by default).
    """
    mainlog.info("Unzipping {} to {}".format(zipName, tmp_dir))
    # Context manager closes the archive even if an entry fails to
    # extract -- the original version leaked the file handle.
    with ZipFile(zipName) as z:
        for f in z.namelist():
            dest = os.path.join(tmp_dir, f.replace('/', os.sep))
            if f.endswith('/'):
                # Directory entry: nothing to extract, just create it.
                os.makedirs(dest)
            else:
                z.extract(f, tmp_dir)
def check_db_connection(db_url):
    """Check that the database answers a trivial query.

    :param db_url: database URL, or a list of URLs (only the first is used
        for the ping diagnosis).
    :return: ``True`` on success, ``False`` if no URL was given, otherwise
        a string describing the failure, augmented with the result of a
        ping to the database host (Windows only -- the ping binary path is
        hard coded).
    """
    # I need DB url because I didn't find a way to get that information
    # from the session(), connection()...
    import subprocess
    import re

    if not db_url:
        return False

    mainlog.debug("check_db_connection: Trying to connect to the database")
    try:
        session().connection().execute(
            "SELECT count(*) from {}.employees".format(DATABASE_SCHEMA))
        mainlog.debug("check_db_connection: Executed query")
        session().commit()
        mainlog.debug("check_db_connection: commited")
        return True
    except Exception as ex:
        mainlog.error("Can't query the database !!! Is it connected ?")
        ret = str(ex)

        if type(db_url) == list:
            db_url = db_url[0]

        # Pull the host part out of "user:password@host:port/db".
        match = re.search("(@.*:)", db_url)
        if not match:
            # Malformed URL: nothing to ping, report the DB error only
            # (the original crashed with AttributeError here).
            return ret
        server_host = match.groups()[0].replace("@", "").replace(":", "")

        try:
            mainlog.info("I'll try a ping at {}".format(server_host))
            # List argv (not a single string) so this works without a
            # shell; path to ping is Windows specific.
            r = subprocess.Popen(
                ["\\Windows\\System32\\ping", "-n", "1", server_host],
                stdout=PIPE,
                shell=False).stdout.read()
            # Popen pipes yield bytes under Python 3; decode before any
            # substring test ('"Reply" in r' raised TypeError on bytes).
            if isinstance(r, bytes):
                r = r.decode('ascii', 'replace')
            mainlog.info("Ping to {} result is : {}".format(server_host, r))

            ret += "<br/><br/>"
            if "Reply" in r:
                mainlog.info(
                    "Ping was successful, the DB server machine seems up")
                ret += _(
                    " A ping was successful (so host is up, database is down)")
            else:
                ret += _(" A ping was not successful (so host is down)")
            return ret
        except Exception as ex:
            return _("Ping failed, the host is down.")
def create_blank_database(admin_url, client_url):
    """Build a fresh, fully initialised database from scratch.

    Administrative steps (users, schema, tables, functions) are done with
    the admin connection; base data and document templates are inserted
    with the client connection.
    """
    # Administrative level: schema, tables, functions.
    set_up_database(admin_url, client_url)
    init_db_session(admin_url)
    create_all_tables()
    disconnect_db()

    # Client level: root account and base rows.
    init_db_session(client_url, metadata, False)
    mainlog.info("Creating administration user/employee")
    create_root_account()
    do_basic_inserts()
    session().commit()

    # Insert some basic files
    # template_id = documents_service.save_template( open( os.path.join(resource_dir, "order_confirmation_report.docx"), "rb"), "order_confirmation_report.docx")
    # documents_service.update_template_description( template_id, _("Order confirmation template"), "order_confirmation_report.docx", HORSE_TEMPLATE)

    def add_template(description, filename, reference):
        # One template = a file from the server resource dir plus its
        # description/reference in the documents service.
        with open(os.path.join(resource_dir, "server", filename), "rb") as f:
            template_id = documents_service.save_template(f, filename)
            documents_service.update_template_description(
                template_id, description, filename, reference)

    default_templates = (
        (HORSE_TITLE_PREORDER, HORSE_TEMPLATE_PREORDER,
         HORSE_REFERENCE_PREORDER),
        (HORSE_TITLE_ORDER_CONFIRMATION, HORSE_TEMPLATE_ORDER_CONFIRMATION,
         HORSE_REFERENCE_ORDER_CONFIRMATION),
    )
    for description, filename, reference in default_templates:
        add_template(description, filename, reference)
def do_basic_inserts(do_sequence=True):
    """Populate a blank database with its mandatory initial content.

    :param do_sequence: when True, also seed the gapless sequences table
        (tests may skip this).
    """
    mainlog.info("Initialising content in the database")

    # The two presence tasks every installation needs.
    for presence_kind in (TaskForPresenceType.regular_time,
                          TaskForPresenceType.unemployment_time):
        task = TaskForPresence()
        task.kind = presence_kind
        session().add(task)

    if do_sequence:
        # Seed the gapless sequences (see create_functions for their use).
        session().connection().execute(
            "INSERT INTO {}.gapless_seq VALUES('delivery_slip_id', '1')".
            format(DATABASE_SCHEMA))
        session().connection().execute(
            "INSERT INTO {}.gapless_seq VALUES('order_id','1')".format(
                DATABASE_SCHEMA))
        session().connection().execute(
            "INSERT INTO {}.gapless_seq VALUES('preorder_id','1')".format(
                DATABASE_SCHEMA))
        session().connection().execute(
            "INSERT INTO {}.gapless_seq VALUES('supply_order_id','1')".format(
                DATABASE_SCHEMA))

    session().commit()

    # Default document categories, committed one by one as before.
    for full_name, short_name in (("Qualité", "Qual."),
                                  ("Sales", "Sale"),
                                  ("Production", "Prod")):
        category = DocumentCategory()
        category.full_name = full_name
        category.short_name = short_name
        session().add(category)
        session().commit()

    # This shall pick the root account because at this point
    # we expect there's only one account in the database.
    employee = session().query(Employee).first()
    assert employee

    # Default shared filter queries, owned by the root account.
    default_filters = (
        (FilterQuery.ORDER_PARTS_OVERVIEW_FAMILY,
         "Status = ready_for_production", "In production"),
        (FilterQuery.ORDER_PARTS_OVERVIEW_FAMILY,
         "Status = completed", "Completed"),
        (FilterQuery.DELIVERY_SLIPS_FAMILY,
         "CreationDate IN CurrentMonth", "This month's slips"),
        (FilterQuery.SUPPLIER_ORDER_SLIPS_FAMILY,
         "CreationDate IN CurrentMonth", "Delivered this month"),
    )
    for family, query, name in default_filters:
        fq = FilterQuery()
        fq.family = family
        fq.query = query
        fq.owner_id = employee.employee_id
        fq.name = name
        fq.shared = True
        session().add(fq)

    session().commit()
def set_up_database(url_admin, url_client):
    """
    Create the very basic Koi database. That is : the client user,
    the admin user, the schema, grant privileges.

    :param url_admin: connection URL of the administrative user.
    :param url_client: connection URL of the regular client user.
    :return:
    """
    # The administrative user must be "horse_adm"
    # He must have the right to create databases and roles

    # Just to be sure we're outside any connection
    disconnect_db()

    # Split both URLs into their credential / location components.
    db_url, params = parse_db_url(url_client)
    login, password, dbname, host, port = extract_db_params_from_url(db_url)

    db_url, params = parse_db_url(url_admin)
    login_adm, password_adm, dbname, host, port = extract_db_params_from_url(
        db_url)

    mainlog.info("Admin user is {}, regular user is {}".format(
        login_adm, login))

    # Connect to the "template1" maintenance database: one cannot drop or
    # create the target database while connected to it.
    db_url, params = template1_connection_parameters(url_admin)
    init_db_session(db_url, params=params)

    mainlog.info("creating database")
    conn = db_engine().connect()
    # "commit" leaves the implicit transaction -- CREATE/DROP DATABASE
    # cannot run inside a transaction block in PostgreSQL.
    conn.execute("commit")
    conn.execute("drop database if exists {}".format(dbname))

    # Only (re)create the client role when it differs from the admin one.
    if login_adm != login:
        conn.execute("drop role if exists {}".format(login))
        conn.execute("CREATE ROLE {} LOGIN PASSWORD '{}'".format(
            login, password))
        # Kill runaway client queries after 30 seconds.
        conn.execute(
            "ALTER ROLE {} SET statement_timeout = 30000".format(login))

    conn.execute("commit")  # Leave transaction
    conn.execute("CREATE DATABASE {}".format(dbname))
    conn.execute("ALTER DATABASE {} SET search_path TO {},public".format(
        dbname, DATABASE_SCHEMA))
    conn.close()
    disconnect_db()

    # Reconnect, this time to the freshly created database.
    init_db_session(url_admin)
    session().commit()  # Leave SQLA's transaction

    # Schema will be created for current database (i.e. horse or horse_test)
    mainlog.info("Creating schema {}".format(DATABASE_SCHEMA))
    session().connection().execute("create schema {}".format(DATABASE_SCHEMA))

    if login_adm != login:
        mainlog.info("Granting privileges to {}".format(login))
        session().connection().execute("grant usage on schema {} to {}".format(
            DATABASE_SCHEMA, login))

        # Alter the default privileges so that every tables and sequences
        # created right after will be usable by horse_clt
        # Also, if one adds tables, etc. afterwards, they'll benefit from
        # the privileges as well
        session().connection().execute(
            """ALTER DEFAULT PRIVILEGES FOR ROLE {} IN SCHEMA {} GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO {}""".format(
                login_adm, DATABASE_SCHEMA, login))
        session().connection().execute(
            """ALTER DEFAULT PRIVILEGES FOR ROLE {} IN SCHEMA {} GRANT SELECT, UPDATE ON SEQUENCES TO {}""".format(
                login_adm, DATABASE_SCHEMA, login))

    session().commit()
    disconnect_db()
    mainlog.info("Database setup complete")
def drop_all_tables(current_session):
    """Drop every stored function, then every table of the application.

    Tables are dropped children-first so foreign keys never block a DROP;
    the order of the list below is therefore significant.  A commit after
    each drop keeps PostgreSQL from accumulating locks.

    :param current_session: SQLAlchemy session to work with.
    """
    mainlog.info("Dropping all the functions in the database")
    drop_functions(current_session)

    # WARNING Pay attention with following code, it destroys the session !
    # But the session may be in use in other components (dao for example)

    # Close the connection to PG
    # This avoids DROP's to lock
    # http://www.sqlalchemy.org/trac/wiki/FAQ#MyprogramishangingwhenIsaytable.dropmetadata.drop_all
    # current_session.connection().close()
    # current_session.close()
    # current_session.bind.dispose()
    # current_session = session_factory()

    mainlog.info("Dropping all the tables in the database")

    # Ordered children-first. The original code repeated
    # "X.__table__.drop(...); commit()" once per table; the list keeps
    # exactly the same tables in exactly the same order.
    tables_in_drop_order = [
        Comment.__table__,
        CommentLocation.__table__,
        # StockItem.__table__,  # was already disabled in the original
        DayEvent.__table__,
        TemplateDocument.__table__,
        documents_order_parts_table,
        documents_orders_table,
        documents_quality_events_table,
        QualityEvent.__table__,
        Document.__table__,
        DocumentCategory.__table__,
        AuditTrail.__table__,
        FilterQuery.__table__,
        MonthTimeSynthesis.__table__,
        SpecialActivity.__table__,
        TaskActionReport.__table__,
        TimeTrack.__table__,
        TaskForPresence.__table__,
        TaskOnOperation.__table__,
        TaskOnOrder.__table__,
        TaskOnNonBillable.__table__,
        Task.__table__,
        Operation.__table__,
        ProductionFile.__table__,
        DeliverySlipPart.__table__,
        OrderPart.__table__,
        Order.__table__,
        DeliverySlip.__table__,
        OfferPart.__table__,
        Offer.__table__,
        Customer.__table__,
        OperationDefinitionPeriod.__table__,
        Machine.__table__,
        OperationDefinition.__table__,
        DayTimeSynthesis.__table__,
        # NOTE(review): MonthTimeSynthesis was dropped twice in the
        # original code; harmless with checkfirst=True, kept for fidelity.
        MonthTimeSynthesis.__table__,
        Employee.__table__,
        SupplyOrderPart.__table__,
        SupplyOrder.__table__,
        Supplier.__table__,
        Resource.__table__,
        UserClass.__table__,
        gapless_seq_table,
    ]

    for table in tables_in_drop_order:
        table.drop(db_engine(), checkfirst=True)
        #current_session.connection().execute("DROP TYPE IF EXISTS ck_task_action_report_type")
        current_session.commit()
def drop_functions(current_session):
    """Drop the gapless-sequence triggers and stored functions.

    Tolerates a database where the schema or the objects don't exist yet.

    :param current_session: SQLAlchemy session to work with.
    """
    try:
        # NOTE(review): "horse" is hard coded here while the rest of the
        # module uses DATABASE_SCHEMA -- looks like an oversight, confirm
        # before unifying.
        current_session.connection().execute(
            "SET search_path TO {}".format("horse"))
    except Exception as ex:
        # Schema's not there, so nothing to delete
        mainlog.exception(ex)
        return

    current_session.connection().execute("BEGIN")
    try:
        # current_session.connection().execute("DROP TRIGGER IF EXISTS control_orders_accounting ON orders")
        # current_session.connection().execute("DROP TRIGGER IF EXISTS control_orders_accounting2 ON orders")
        # current_session.connection().execute("DROP TRIGGER IF EXISTS control_orders_accounting3 ON orders")

        # Triggers are attached to their table, hence the table name in
        # each DROP.  Same triggers/tables, same order as the original.
        for trigger_name, table_name in (
                ("control_orders_accounting_delete", "orders"),
                ("control_orders_accounting_update", "orders"),
                ("control_orders_accounting_insert", "orders"),
                ("control_delivery_slips_delete", "delivery_slip"),
                ("control_delivery_slips_update", "delivery_slip"),
                ("control_delivery_slips_insert", "delivery_slip")):
            current_session.connection().execute(
                "DROP TRIGGER IF EXISTS {} ON {}.{}".format(
                    trigger_name, DATABASE_SCHEMA, table_name))
        current_session.connection().execute("COMMIT")
    except Exception:
        # Was a bare "except:": narrowed so SystemExit/KeyboardInterrupt
        # are no longer swallowed; the rollback behaviour is unchanged.
        current_session.connection().execute("ROLLBACK")

    # (the dead, unused "schema_name" local of the original was removed)
    current_session.connection().execute("BEGIN")
    for signature in (
            "check_orders_gapless_sequence()",
            "check_orders_gapless_sequence_insert()",
            "check_orders_gapless_sequence_delete()",
            "check_orders_gapless_sequence_update()",
            "check_delivery_slips_gapless_sequence_insert()",
            "check_delivery_slips_gapless_sequence_delete()",
            "check_delivery_slips_gapless_sequence_update()",
            "gseq_nextval(t text)"):
        current_session.connection().execute(
            "DROP FUNCTION IF EXISTS {0}.{1}".format(
                DATABASE_SCHEMA, signature))
    current_session.connection().execute("COMMIT")
    mainlog.info("Dropped all functions")
def create_functions(current_session):
    """Create the stored functions and triggers enforcing gapless sequences.

    Orders and delivery slips must be numbered with strictly consecutive
    integers (no holes).  The plpgsql below implements a nextval() over
    the gapless_seq table and installs triggers that refuse any
    INSERT/UPDATE/DELETE which would break that numbering.

    :param current_session: SQLAlchemy session to work with.
    """
    mainlog.info("Creating all function")
    current_session.connection().execute("BEGIN")

    # nextval() equivalent for gapless sequences. SELECT ... FOR UPDATE
    # locks the sequence row until the enclosing transaction ends.
    current_session.connection().execute(
        """CREATE FUNCTION {0}.gseq_nextval(t text) RETURNS integer AS $BODY$
DECLARE
    n integer;
BEGIN
    -- The select also puts a LOCK on the table, ensuring nobody
    -- else can increase the number while we're in our transaction
    SELECT INTO n gseq_value+1 FROM {0}.gapless_seq WHERE gseq_name = t FOR UPDATE;
    IF n IS NULL THEN
        RAISE EXCEPTION 'Gapless sequence does not exist in the gapless sequence table';
    END IF;
    -- Update will release the lock once the current TRANSACTION ends
    UPDATE {0}.gapless_seq SET gseq_value = n WHERE gseq_name = t;
    RETURN n;
END;$BODY$ LANGUAGE plpgsql VOLATILE;""".format(DATABASE_SCHEMA))

    # AFTER INSERT check on orders: min/max/count of accounting labels
    # must describe a contiguous range.
    current_session.connection().execute(
        """CREATE FUNCTION {0}.check_orders_gapless_sequence_insert() RETURNS trigger AS $$
DECLARE
    difference NUMERIC;
    min_id NUMERIC;
    max_id NUMERIC;
    cnt NUMERIC;
BEGIN
    -- Call this AFTER INSERT
    IF NEW.accounting_label IS NULL THEN
        -- we're only interested in orders; not pre orders
        RETURN NULL;
    END IF;
    SELECT min(accounting_label) INTO min_id FROM {0}.orders;
    SELECT max(accounting_label) INTO max_id FROM {0}.orders;
    SELECT count(*) INTO cnt FROM {0}.orders WHERE accounting_label IS NOT NULL;
    IF cnt > 1 AND max_id - min_id + 1 <> cnt THEN
        RAISE EXCEPTION 'Gapless sequence has been broken';
    ELSE
        RETURN NEW;
    END IF;
END;
$$ LANGUAGE plpgsql;""".format(DATABASE_SCHEMA))

    # AFTER DELETE check on orders: only the last order may be deleted,
    # and the gapless sequence counter is decremented accordingly.
    current_session.connection().execute(
        """CREATE FUNCTION {0}.check_orders_gapless_sequence_delete() RETURNS trigger AS $$
DECLARE
    difference NUMERIC;
    min_id NUMERIC;
    max_id NUMERIC;
    cnt NUMERIC;
    n INTEGER;
BEGIN
    -- Call this *only* AFTER DELETE
    -- Rememeber ! OLD is a standard PostgreSQL parameter denoting the row
    -- we're about to delete
    IF OLD.accounting_label IS NULL THEN
        -- we're only interested in orders; not pre orders
        RETURN NULL;
    END IF;
    -- The following select is done only to lock the gseq row.
    -- The consequence of this is that all inserts will be delayed
    -- and, consequently, the following three selects (max_id,min_id,count)
    -- will be reliable. That is, they will concern the result of
    -- the DELETE statement *only* (ie not mixed with inserts).
    -- Moreover, since the UPDATE don't allow any change of
    -- accouting label, the UPDATE won't interfere with the next 3
    -- selects
    SELECT INTO n gseq_value FROM {0}.gapless_seq WHERE gseq_name = 'order_id' FOR UPDATE;
    SELECT min(accounting_label) INTO min_id FROM {0}.orders WHERE accounting_label IS NOT NULL;
    SELECT max(accounting_label) INTO max_id FROM {0}.orders WHERE accounting_label IS NOT NULL;
    SELECT count(*) INTO cnt FROM {0}.orders WHERE accounting_label IS NOT NULL;
    IF cnt > 0 AND (max_id - min_id + 1) <> cnt THEN
        -- Pay attention, the condition above is incomplete because it allows
        -- delete of either first row or last row
        RAISE EXCEPTION 'DB: Gapless sequence has been broken';
    ELSE
        -- We allow a DELETE on the last order. Everything else
        -- will trigger an exception.
        IF OLD.accounting_label = n THEN
            UPDATE {0}.gapless_seq SET gseq_value = n - 1 WHERE gseq_name = 'order_id';
            RETURN NULL;
        ELSE
            RAISE EXCEPTION 'DB: One can only delete the last order';
        END IF;
    END IF;
END;
$$ LANGUAGE plpgsql;""".format(DATABASE_SCHEMA))

    # BEFORE UPDATE check on orders: the accounting label is immutable.
    current_session.connection().execute(
        """CREATE FUNCTION {0}.check_orders_gapless_sequence_update() RETURNS trigger AS $$
DECLARE
    cnt NUMERIC;
BEGIN
    IF (OLD.accounting_label IS NOT NULL) AND (NEW.accounting_label IS NULL OR NEW.accounting_label <> OLD.accounting_label) THEN
        RAISE EXCEPTION 'One cannot change the id of an order';
    ELSE
        RETURN NEW;
    END IF;
END;
$$ LANGUAGE plpgsql;""".format(DATABASE_SCHEMA))

    # Same three checks, this time for delivery slips (keyed on
    # delivery_slip_id instead of accounting_label).
    current_session.connection().execute(
        """CREATE FUNCTION {0}.check_delivery_slips_gapless_sequence_insert() RETURNS trigger AS $$
DECLARE
    difference NUMERIC;
    min_id NUMERIC;
    max_id NUMERIC;
    cnt NUMERIC;
BEGIN
    -- Call this AFTER INSERT
    IF NEW.delivery_slip_id IS NULL THEN
        -- we're only interested in orders; not pre orders
        RETURN NULL;
    END IF;
    SELECT min(delivery_slip_id) INTO min_id FROM {0}.delivery_slip;
    SELECT max(delivery_slip_id) INTO max_id FROM {0}.delivery_slip;
    SELECT count(*) INTO cnt FROM {0}.delivery_slip WHERE delivery_slip_id IS NOT NULL;
    IF cnt > 1 AND max_id - min_id + 1 <> cnt THEN
        RAISE EXCEPTION 'Gapless sequence has been broken';
    ELSE
        RETURN NEW;
    END IF;
END;
$$ LANGUAGE plpgsql;""".format(DATABASE_SCHEMA))

    current_session.connection().execute(
        """CREATE FUNCTION {0}.check_delivery_slips_gapless_sequence_delete() RETURNS trigger AS $$
DECLARE
    difference NUMERIC;
    min_id NUMERIC;
    max_id NUMERIC;
    cnt NUMERIC;
    n INTEGER;
BEGIN
    -- Call this *only* AFTER DELETE
    -- Rememeber ! OLD is a standard PostgreSQL parameter denoting the row
    -- we're about to delete
    IF OLD.delivery_slip_id IS NULL THEN
        -- we're only interested in orders; not pre orders
        RETURN NULL;
    END IF;
    -- The following select is done only to lock the gseq row.
    -- The consequence of this is that all inserts will be delayed
    -- and, consequently, the following three selects (max_id,min_id,count)
    -- will be reliable. That is, they will concern the result of
    -- the DELETE statement *only* (ie not mixed with inserts).
    -- Moreover, since the UPDATE don't allow any change of
    -- accouting label, the UPDATE won't interfere with the next 3
    -- selects
    SELECT INTO n gseq_value FROM {0}.gapless_seq WHERE gseq_name = 'delivery_slip_id' FOR UPDATE;
    SELECT min(delivery_slip_id) INTO min_id FROM {0}.delivery_slip;
    SELECT max(delivery_slip_id) INTO max_id FROM {0}.delivery_slip;
    SELECT count(*) INTO cnt FROM {0}.delivery_slip;
    IF cnt > 0 AND (max_id - min_id + 1) <> cnt THEN
        -- Pay attention, the condition above is incomplete because it allows
        -- delete of either first row or last row
        RAISE EXCEPTION 'Gapless sequence has been broken in delivery slips';
    ELSE
        -- We allow a DELETE on the last order. Everything else
        -- will trigger an exception.
        IF OLD.delivery_slip_id = n THEN
            UPDATE {0}.gapless_seq SET gseq_value = n - 1 WHERE gseq_name = 'delivery_slip_id';
            RETURN NULL;
        ELSE
            RAISE EXCEPTION 'One can only delete the last delivery slip';
        END IF;
    END IF;
END;
$$ LANGUAGE plpgsql;""".format(DATABASE_SCHEMA))

    current_session.connection().execute(
        """CREATE FUNCTION {0}.check_delivery_slips_gapless_sequence_update() RETURNS trigger AS $$
DECLARE
    cnt NUMERIC;
BEGIN
    IF (OLD.delivery_slip_id IS NOT NULL) AND (NEW.delivery_slip_id IS NULL OR NEW.delivery_slip_id <> OLD.delivery_slip_id) THEN
        RAISE EXCEPTION 'One cannot change the id of a slip';
    ELSE
        RETURN NEW;
    END IF;
END;
$$ LANGUAGE plpgsql;""".format(DATABASE_SCHEMA))

    # Triggers' names are not schema-qualified because they are inherited
    # from the table they are attached to.
    current_session.connection().execute(
        """CREATE TRIGGER control_orders_accounting_delete AFTER DELETE ON {0}.orders FOR EACH ROW EXECUTE PROCEDURE {0}.check_orders_gapless_sequence_delete()"""
        .format(DATABASE_SCHEMA))
    current_session.connection().execute(
        """CREATE TRIGGER control_orders_accounting_insert AFTER INSERT ON {0}.orders FOR EACH ROW EXECUTE PROCEDURE {0}.check_orders_gapless_sequence_insert()"""
        .format(DATABASE_SCHEMA))
    current_session.connection().execute(
        """CREATE TRIGGER control_orders_accounting_update BEFORE UPDATE ON {0}.orders FOR EACH ROW EXECUTE PROCEDURE {0}.check_orders_gapless_sequence_update()"""
        .format(DATABASE_SCHEMA))
    current_session.connection().execute(
        """CREATE TRIGGER control_delivery_slips_delete AFTER DELETE ON {0}.delivery_slip FOR EACH ROW EXECUTE PROCEDURE {0}.check_delivery_slips_gapless_sequence_delete()"""
        .format(DATABASE_SCHEMA))
    current_session.connection().execute(
        """CREATE TRIGGER control_delivery_slips_insert AFTER INSERT ON {0}.delivery_slip FOR EACH ROW EXECUTE PROCEDURE {0}.check_delivery_slips_gapless_sequence_insert()"""
        .format(DATABASE_SCHEMA))
    current_session.connection().execute(
        """CREATE TRIGGER control_delivery_slips_update BEFORE UPDATE ON {0}.delivery_slip FOR EACH ROW EXECUTE PROCEDURE {0}.check_delivery_slips_gapless_sequence_update()"""
        .format(DATABASE_SCHEMA))
def upgrade_process(args):
    """Self-update mechanism (Windows only).

    Compares this executable's version with the highest version already
    downloaded and with the version published on the update server.  If a
    newer version exists, downloads and unzips it, then transfers control
    to the new executable (started with --no-update to avoid an endless
    upgrade loop) and exits.

    :param args: parsed command line arguments; only ``no_update`` is read.
    :return: False on a failed control transfer; None otherwise (may also
        terminate the process via sys.exit on success).
    """
    if platform.system() != 'Windows':
        mainlog.info(
            "The upgrade process won't work on something else than Windows... I skip that."
        )
        return

    this_version = configuration.this_version  # the one of this very code
    mainlog.debug("Client version is {}".format(this_version))

    if args.no_update:
        # We were started by a previous instance that already upgraded:
        # only apply the PySide DLL-path workaround, then return.
        mainlog.info("Skipping update process because --no-update is set")

        # This is rather strange. If we are started by regular Windows ways
        # (double click, cmd,...) PySide finds its DLL fine.
        # But, if it is started through the upgrade process (via Popen), then
        # it doesn't because Windows can't expand junction points correctly
        # (according to what I saw, this is not a bug in windows, but rather a
        # feature to prevent old code to misuse junction points)
        # So, for this code to work, one has to make sure that _setupQtDir
        # is not called during the import but right after (else it crashes).
        # This is how to patch the __init__py of PySide :
        # def _setupQtDirectories(zedir=None):
        #     import sys
        #     import os
        #     from . import _utils
        #
        #     if zedir:
        #         pysideDir = zedir
        #     else:
        #         pysideDir = _utils.get_pyside_dir()

        try:
            from PySide import _setupQtDirectories
        except Exception as ex:
            mainlog.error(
                "Unable to import _setupQtDirectories. Remember this was a bug fix, make sure "
                +
                "_setupQtDirectories is not called at the end of the __init__.py of pyside. "
                + "Check the comments in the code for more info.")
            mainlog.exception(ex)
            return

        if getattr(sys, 'frozen', False):
            # Frozen
            mainlog.debug("Fixing Qt import on frozen exe {}".format(
                os.path.normpath(os.getcwd())))
            _setupQtDirectories(os.path.normpath(os.getcwd()))
        else:
            mainlog.debug("Fixed Qt import on NON frozen exe")
            _setupQtDirectories()
        return

    next_version = get_server_version(
        configuration.update_url_version
    )  # available on the server (abd maybe already downloaded)
    current_version = find_highest_installed_version(
    )  # one we have downloaded in the past
    mainlog.info(
        "This version is {}, last downloaded version = {}, version available on server = {}"
        .format(this_version, current_version, next_version))

    # Nothing to do when neither the downloaded nor the server version is
    # strictly newer than the running one.
    if (not current_version or (current_version and this_version >= current_version)) and \
       (not next_version or (next_version and this_version >= next_version)):
        mainlog.info(
            "The available versions are not more recent than the current one. No update necessary."
        )
        return

    codename = configuration.get("Globals", "codename")

    # Update only if we have no current version or if the
    # next version is higher than ours
    if next_version and (not current_version
                         or next_version > current_version):
        try:
            tmpfile = make_temp_file(prefix='NewVersion_' +
                                     version_to_str(next_version),
                                     extension='.zip')
            download_file(configuration.update_url_file, tmpfile)
            newdir = os.path.join(
                get_data_dir(),
                "{}-{}".format(codename, version_to_str(next_version)))
            extractAll(tmpfile, newdir)

            # show that we actually downloaded something
            current_version = next_version
        except Exception as ex:
            # Best effort: a failed download falls back to the version we
            # already have (if any).
            mainlog.error(
                "The download of version {} failed. Therefore, I'll go on with the current one."
                .format(next_version))
            mainlog.exception(ex)

    # If we were able to download a version now or in the
    # past, then use this one. If not, then we run the
    # program (that is, the version that was installed
    # by the user)
    if current_version:
        current_dir = os.path.join(
            get_data_dir(),
            "{}-{}".format(codename, version_to_str(current_version)))

        # --no-update "signals" the control transfer (without it we'd
        # try to update with the latest version again creating an
        # endless loop)
        # os.chdir(os.path.join(current_dir,codename)) # FIXME Not sure this is useful; too tired to test
        cmd = [
            os.path.join(os.path.join(current_dir, codename),
                         codename + '.exe'), '--no-update'
        ]
        mainlog.info("Transferring control to {}".format(' '.join(cmd)))

        # DETACHED_PROCESS = 0x00000008
        # CREATE_NEW_PROCESS_GROUP = 0x00000200
        # subprocess.Popen( cmd,cwd=os.path.join(current_dir,'xxx'),creationflags=DETACHED_PROCESS|CREATE_NEW_PROCESS_GROUP)

        # From what I can see WinExec don't run in os.getcwd(), so I give it an absolute path.
        try:
            # win32api.WinExec will *NOT* block. The new version is run
            # in parallel. This allow us to quit so we don't have two
            # instances of Koi running simulatenaously
            # Unfortunaately, because of that it's hard to build a watch
            # dog that will protect us against a broken upgrade.
            # For example, we'd have to release our log files...
            res = win32api.WinExec(" ".join(cmd), win32con.SW_SHOWMAXIMIZED)
            sys.exit(RETURN_CODE_SUCCESS)
        except Exception as ex:
            mainlog.error(
                "Control transfer failed. There was an error while starting the newer version {}"
                .format(current_version))
            mainlog.exception(ex)
            return False