def precheck_for_access_verify_db(config):
    """Precheck: ensure the '<database_name>_verify' database exists and is accessible.

    Tries to connect to the verification database. If the connection fails,
    attempts to create the database (only when admin credentials are given or
    the DB host is local); otherwise raises PrecheckFailed with remediation
    instructions for the operator.

    :param config: installer configuration (uConfig); not mutated
    :raises PrecheckFailed: when the verify DB is inaccessible and cannot be created
    """
    uLogging.info("Checking verify_db exists...")
    import uDBValidator
    import uConfig
    # Work on a shallow copy so the caller's config is not mutated.
    config_v = copy.copy(config)
    config_v.database_name = config_v.database_name + '_verify'
    try:
        uSysDB.init(config_v)
        con_v = uSysDB.connect()
    except Exception, e:
        uLogging.debug(str(e))
        uLogging.warn("%s DB does not exist" % config_v.database_name)
        try:
            # Creation is only possible with admin credentials or local access.
            if config_v.admin_db_password or uUtil.isLocalAddress(
                    config_v.database_host):
                # try to create verify db:
                uLogging.info("Trying to create '%s' database..." % config_v.database_name)
                uDBValidator._recreate_verify_db(config_v)
                uLogging.info("'%s' is created successfully." % config_v.database_name)
            else:
                raise Exception(
                    "Postgres admin user credentials are required to create %s" % config_v.database_name)
        except Exception, e:
            uLogging.debug(str(e))
            # Azure-style logins look like 'user@host'; the Postgres role name
            # has no '@host' suffix, so strip it for the operator hint.
            dsn_login_short = re.sub("@.*", "", config_v.dsn_login)
            raise PrecheckFailed(
                reason="'%s' database is not accessible or does not exist" % config_v.database_name,
                what_to_do=
                "Connect to %s Postgres server as admin user and create database: 'CREATE DATABASE %s OWNER %s'. If database '%s' already exists, make sure its owner is '%s'." % (
                    config_v.database_host, config_v.database_name, dsn_login_short,
                    config_v.database_name, dsn_login_short))
def update_slave(task_item, config):
    """Upgrade the PA agent on a single slave host.

    :param task_item: upgrade task descriptor; task_item.host is the target host
    :param config: uConfig.Config
    :return: True when the slave was upgraded; None when nothing was done
             (Windows host without an agent distributive URL)
    :raises: re-raises any error that happened during the upgrade
    """
    is_slave_updated = None
    try:
        if Const.isOsaWinPlatform(task_item.host.platform.os):
            if task_item.paagent_dist_url:
                schedule_update_windows_slave(task_item)
                uAction.ntimes_retried(check_update_windows_slave, 8, 30)(task_item, config)
                is_slave_updated = True
        else:
            linux_update_result = update_linux_slave(task_item, config)
            check_update_linux_slave(task_item.host, linux_update_result)
            # This is a workaround: see async restart code in pa-agent RPM
            async_upgrade_finished_sec = 11
            uLogging.debug("Waiting {async_upgrade_finished_sec} sec for agent upgrade process is finished "
                           "on host {host}".format(host=task_item.host,
                                                   async_upgrade_finished_sec=async_upgrade_finished_sec))
            sleep(async_upgrade_finished_sec)
            uAction.ntimes_retried(wait_for_linux_slave, 8, 30)(task_item)
            is_slave_updated = True
    except Exception:
        uLogging.warn("Error happen during upgrade slave {host}, check details in log or wait for moment when "
                      "the result will be processed".format(host=task_item.host))
        uLogging.save_traceback()
        # Bare 'raise' re-raises the original exception with its traceback
        # intact; 'raise e' would reset the traceback on Python 2.
        raise
    return is_slave_updated
def findAgentConfig(rootpath):
    """Read the pa-agent (pleskd) configuration located under *rootpath*.

    Parses <rootpath>/etc/pleskd.props when present; otherwise returns the
    built-in defaults (valid for fresh install only).

    :param rootpath: agent installation root directory
    :return: dict with keys 'commip', 'kernel_pub_key', 'encryption_key',
             'system_password' and 'pleskd_endpoint_port'
    """
    commip = None
    kernel_pub_key = ""
    encryption_key = ""
    system_password = ""
    pleskd_endpoint_port = '8352'  # default ORB endpoint port
    etc_dir = os.path.join(rootpath, "etc")
    conf_path = os.path.join(etc_dir, "pleskd.props")
    if os.path.isfile(conf_path):
        # open() instead of the deprecated file() builtin; 'with' guarantees
        # the handle is closed even if property parsing raises.
        with open(conf_path) as conf:
            pp = uUtil.readPropertiesFile(conf)
        commip = pp.get('communication.ip', commip)
        kernel_pub_key = pp.get('kernel_pub_key', kernel_pub_key)
        encryption_key = pp.get('encryption_key', encryption_key)
        system_password = pp.get('passwd', system_password)
        pleskd_endpoint_port = pp.get('orb.endpoint.port', pleskd_endpoint_port)
        uLogging.debug('read agent config from %s' % conf_path)
    else:
        uLogging.debug(
            'no agent config, taking defaults. valid for fresh install only!')
    rv = {
        'commip': commip,
        'kernel_pub_key': kernel_pub_key,
        'encryption_key': encryption_key,
        'system_password': system_password,
        'pleskd_endpoint_port': pleskd_endpoint_port
    }
    return rv
def check_mount_bind_bug(): """Check that is possible to perform "mount --bind" command. This bug appear on PCS containers with outdated systemd. It was fixed (POA-99460). """ # It reproducible only on containers with RHEL/CentOS 7.2, so we need to check this. if determinePlatform().osverfull[:2] != ('7', '2') or not os.path.isfile("/proc/user_beancounters"): return True # Trying to bindmount inner_dir to outer_dir, raising an error if "mount --bind" failed. outer_dir = "/root/bind_test_dir_outer" inner_dir = "/root/bind_test_dir_inner" uUtil.execCommand(["mkdir", outer_dir, inner_dir]) try: uLogging.debug("Testing execution of \"mount --bind\" command started. ") uUtil.execCommand(["mount", "--bind", outer_dir, inner_dir]) # Waiting 2 sec after mounting (as pointed out by Vasily Averin), # if inner_dir is absent in mtab, umount will return an error. time.sleep(2) uUtil.execCommand(["umount", inner_dir]) uUtil.execCommand(["rmdir", outer_dir, inner_dir]) uLogging.debug("Testing execution of \"mount --bind\" command successfully passed. ") return True except: uUtil.execCommand(["rmdir", outer_dir, inner_dir]) uLogging.err('Precheck error: "mount --bind" command executed idly.\n') import sys sys.tracebacklimit = 0 raise Exception('\nUnable to complete the installation. ' 'The precheck error occurred: "mount --bind" command executed idly. ' 'This functionality is critical for the named daemon. If you use Virtuozzo container, ' 'then probably the systemd version in your OS repository is outdated. In this case, ' 'you should update your systemd to the version "systemd-219-19.el7_2.3" or higher ' 'so the "mount --bind" can be executed correctly.')
def installPOACoreBinaries(config, progress, build_rpms):
    """Install POA core binaries: first the yum-provided dependencies, then the core RPM files themselves."""
    core_deps = ["libselinux-utils", "libgcc.x86_64", "glibc.x86_64", "libgcc.i686", "glibc.i686"]
    uLogging.debug("Installing core RPM dependencies")
    uUtil.execCommand(["yum", "-y", "-e", "0", "install"] + core_deps)
    uLogging.debug("Installing core RPM files")
    yum_local_install_package(build_rpms)
def runHCLCmd(hostId, commandText):
    """Execute a shell command on a host via an HCL request and return its stdout.

    :param hostId: id of the target host
    :param commandText: shell command line to run
    :return: stdout of the command (also logged at debug level)
    """
    uLogging.debug(commandText)
    # Bug fix: the original opened a DB connection here ('con = uSysDB.connect()')
    # that was never used and never closed — a connection leak.
    host = uPEM.getHost(hostId)
    # Escape '${' so HCL does not treat it as a variable reference,
    # for more details see following issue https://jira.int.zone/browse/POA-109131
    commandText = commandText.replace("${", '$${')
    rq = Request(user='******', group='root')
    rq.command(commandText, stdout='stdout', stderr='stderr', valid_exit_codes=[0])
    # Select the shell matching the host platform.
    if Const.isOsaWinPlatform(host.platform.os):
        rq.export("default_shell_name", "cmd.exe", True)
        rq.export("shell_cmd_switch", "/C", True)
    else:
        rq.export("default_shell_name", "/bin/sh", True)
        rq.export("shell_cmd_switch", "-c", True)
    try:
        rqRv = rq.send(host)
    except uUtil.ExecFailed:
        # Fall back to the raw request path when sending fails.
        rqRv = rq.performRaw(host)
    o = rqRv["stdout"]
    if o:
        uLogging.debug(o)
    return o
def removeDuplicates(table, con):
    """Remove duplicate rows from *table*, keeping exactly one copy of each.

    Duplicates are found by grouping on every column; each duplicated row is
    deleted entirely and then re-inserted once.

    :param table: table descriptor exposing .name and .columns (columns have
                  .name and .type)
    :param con: open DB connection; this function does not commit
    """
    # XXX: hack, truncate all timestamp columns to 1 second precision for
    # postgres, because PyPgSQL datetime does not hold enough data to make
    # correct equal comparisions with timestamp
    if uSysDB.DBType == uSysDB.PgSQL:
        dt_column_names = [c.name for c in table.columns if isinstance(c.type, uDBTypes.TimestampType)]
        if dt_column_names:
            query = "UPDATE %s SET " % table.name
            query += ', '.join(["%s = date_trunc('seconds', %s)" % (x, x) for x in dt_column_names])
            cur = con.cursor()
            cur.execute(query)
    columns_str = ', '.join([str(c.name) for c in table.columns])
    column_names = [c.name for c in table.columns]
    columns_placeholders_str = ','.join(['%s'] * len(table.columns))
    cur = con.cursor()
    # A row is duplicated when the full column tuple occurs more than once.
    cur.execute("SELECT %(columns)s FROM %(tab)s GROUP BY %(columns)s HAVING COUNT(*) > 1" %
                {'columns': columns_str, 'tab': table.name})
    upd_cur = con.cursor()
    for row in cur.fetchall():
        uLogging.debug("Removing duplicate records %s", row)
        # Delete every copy of the row, then insert a single copy back.
        cond, params = generateCondition(column_names, row)
        row = tuple(row)
        upd_cur.execute(("DELETE FROM %s WHERE %s" % (table.name, cond)), params)
        upd_cur.execute(("INSERT INTO %s (%s) VALUES (%s)" %
                         (table.name, columns_str, columns_placeholders_str)), row)
def copyFileToRemote(localFile, remoteDir, remoteAddress, userLogin, userPassword):
    """Copy *localFile* to *remoteDir* on *remoteAddress* via scp as *userLogin*.

    A temporary SSH_ASKPASS helper (created from *userPassword*) lets scp
    receive the password non-interactively; the helper file is always removed.

    :return: captured scp output (also logged at debug level)
    :raises Exception: when scp exits with a non-zero status
    """
    tmpname = getSSH_ASKPASS(userPassword)
    try:
        # NOTE(review): the literal below appears sanitized ('******' where a
        # placeholder is expected) — it is interpolated with 5 arguments
        # (tmpname, localFile, userLogin, remoteAddress, remoteDir); confirm
        # SSH_ASKPASS is really '%s' in the unredacted source.
        remoteCommand = """DISPLAY=:0 SSH_ASKPASS='******' setsid scp -p -2 -oLogLevel=error \
            -oStrictHostKeyChecking=no \
            -oCheckHostIP=no \
            -oUserKnownHostsFile=/dev/null \
            -oPreferredAuthentications=publickey,password,keyboard-interactive \
            '%s' '%s@%s:%s'""" % (tmpname, localFile, userLogin, remoteAddress, remoteDir)
        uLogging.debug(remoteCommand)
        pp = os.popen(remoteCommand)
        o = pp.read()
        s = pp.close()
        # popen close() returns None on success, otherwise the exit status.
        if s is not None:
            s = int(s)
        else:
            s = 0
        if o:
            uLogging.debug(o)
        if s != 0:
            raise Exception("Command exited with status %d" % (s, ))
        return o
    finally:
        if os.path.isfile(tmpname):
            os.unlink(tmpname)  #delete temporary file if exists just for sure
def importPackages(newestPackages, plesk_root, pkg_counter=None):
    """Import new packages into the system.

    pleskd (agent) packages are imported first because they register
    platforms in the system.

    :param newestPackages: {platform: {ctype: {name: package}}} mapping
    :param plesk_root: installation root directory
    :param pkg_counter: optional progress counter; a default one is created
                        when not supplied
    """
    # import pleskds - it registers platforms in system.
    uLogging.info("Importing new packages")
    new_pleskds = []
    # Count everything up front so the progress counter has a correct total.
    total_packages = 0
    for pform_ctypes in newestPackages.values():
        for ctype_packages in pform_ctypes.values():
            total_packages += len(ctype_packages)
    if not pkg_counter:
        pkg_counter = uUtil.CounterCallback()
    pkg_counter.set_total(total_packages)
    for pform in newestPackages:
        # 'in' instead of dict.has_key(), which is removed in Python 3.
        if 'other' in newestPackages[pform] and 'pleskd' in newestPackages[pform]['other']:
            new_pleskds.append(newestPackages[pform]['other']['pleskd'])
    if new_pleskds:
        uLogging.debug("importing new agent packages")
        uAction.retriable(doMassImport)(plesk_root, new_pleskds, pkg_counter)
        to_update = [x for x in new_pleskds if x.old]
        if to_update:
            updateManifestSources(plesk_root, to_update, pkg_counter)
    for pform in newestPackages:
        uLogging.debug("importing new packages for %s", pform)
        for ctype in newestPackages[pform]:
            if newestPackages[pform][ctype]:
                uAction.retriable(doMassImport)(
                    plesk_root, newestPackages[pform][ctype].values(), pkg_counter)
def upgrade_aps_db(scriptsDir, con=None):
    """Apply all pending APS DB upgrade scripts found in *scriptsDir*.

    Installs the APS schema first when it is missing, then runs every update
    script whose version is newer than the current DB version, committing and
    bumping the stored version after each one.

    :param scriptsDir: directory holding APS DB scripts
    :param con: optional open DB connection; a new one is made when omitted
    """
    if con is None:
        con = uSysDB.connect()
    # To be sure the schema exists before looking for upgrades.
    if not is_aps_db_installed(con):
        install_aps_db(scriptsDir, con)
    db_ver = get_db_version(con)
    uLogging.debug("APS DB v%d.%d found." % (db_ver[0], db_ver[1]))
    uLogging.info("Looking for application upgrades in '%s'" % scriptsDir)
    cursor = con.cursor()
    upgrade_found = None
    sc_pattern = get_update_scripts_pattern()
    for script_name in list_db_scripts(scriptsDir, sc_pattern):
        matcher = sc_pattern.match(script_name)
        script_ver = (int(matcher.group(1)), int(matcher.group(2)))
        # Tuple comparison: (major, minor) newer than the current DB version.
        if script_ver > db_ver:
            script_path = os.path.join(scriptsDir, script_name)
            execute_db_script(script_path, cursor)
            set_db_version(con, script_ver[0], script_ver[1])
            con.commit()
            uLogging.info("'%s' applied." % script_path)
            upgrade_found = True
    db_ver = get_db_version(con)
    uSysDB.close(con)
    if upgrade_found:
        uLogging.info("APS DB upgraded to v%d.%d." % (db_ver[0], db_ver[1]))
    else:
        uLogging.info("No new upgrades for APS DB found.")
def deploy_file_for_test(self):
    """Create the dummy file used for the network speed test and stamp it with the fake magic number."""
    uLogging.debug("Bloating dummy file for speed test in {deployed_path}".format(deployed_path=self.deployed_path))
    dd_command = "dd if=/dev/zero of={deployed_path} bs=1M count={file_size_mb}".format(
        deployed_path=self.deployed_path, file_size_mb=self.file_size_mb)
    uUtil.execCommand(dd_command)
    uLogging.debug("Modifying magic number for recognizing fake file by processor for the 'fetch' HCL command.")
    # Overwrite the file header in place so the processor recognizes the fake file.
    with open(self.deployed_path, "r+b") as test_file:
        test_file.write(self.fake_magic_number)
def terminateLocked(self):
    """Terminate all workers in the pool, releasing locked ones first so they can exit."""
    uLogging.debug("Terminating locked workers in pool")
    self.terminateFlag = True
    self.release_locked()
    # Block until every worker thread has finished.
    for worker in self.threads:
        worker.join()
def put(self, item):
    """Enqueue *item* for the worker threads and wake all waiting workers.

    :param item: unit of work appended to the input queue
    """
    uLogging.debug('Putting {item} in the thread pool'.format(item=item))
    # 'with' releases the condition's lock even if append/notify raises,
    # unlike the previous manual acquire()/release() pair which would leave
    # the pool deadlocked on an exception.
    with self.condition:
        self.inputQ.append(item)
        self.tasks_number += 1
        self.condition.notifyAll()
def performRaw(self, host):
    """Execute this HCL request on *host* with a plain shell, retrying once on stale CORBA cache errors.

    Exports the platform-appropriate shell (cmd.exe /C on Windows,
    /bin/sh -c otherwise) before performing the request.

    :param host: target host object; host.platform.os selects the shell
    :return: result of the HCL request
    :raises uUtil.ExecFailed: when the request fails for a non-cache reason
    """
    if Const.isOsaWinPlatform(host.platform.os):
        shell = "cmd.exe"
        cmd_switch = "/C"
    else:
        shell = "/bin/sh"
        cmd_switch = "-c"
    self.export("default_shell_name", shell, True)
    self.export("shell_cmd_switch", cmd_switch, True)
    try:
        return self.__performHCLRequest(host, self.__document)
    except uUtil.ExecFailed, e:
        # Notation "ex_type_id:'103'" in stderr it is a sign that exception OBJECT_NOT_EXIST is raised
        # (from modules/platform/u/EAR/poakernel-public/Common.edl).
        # We need to retry HCL request because the reason may be an outdated CORBA cache.
        # Cache will be invalidated in this case, so repeated request will pass
        # (modules/platform/cells/pem_client/cpp/Naming/Naming.cpp).
        deprecated_cache_error_pattern = re.compile(
            "OBJ_ADAPTER|OBJECT_NOT_EXIST|ex_type_id:'103'")
        if re.search(deprecated_cache_error_pattern, e.err):
            uLogging.debug("HCL request exec failed. Retrying...")
            return self.__performHCLRequest(host, self.__document)
        else:
            raise
def execute(self, readonly, precheck=False):
    """Run this SQL script's statements against the control DB in one implicit transaction.

    BEGIN/COMMIT/ROLLBACK statements inside the script are ignored because the
    action itself begins and commits the transaction. Skips execution entirely
    when the owning service controller is not installed.

    :param readonly: unsupported; raises when True
    :param precheck: unused here, kept for interface compatibility
    :return: always None
    """
    if readonly:
        raise Exception("SQLScript does not support readonly operations")
    if not uPEM.is_sc_installed(self.owner):
        uLogging.info("%s is not installed, skipping", self.owner)
        return None
    con = uSysDB.connect()
    cur = con.cursor()
    for stmt in self.get_code():
        uLogging.debug('executing %s', stmt)
        kind = stmtKind(stmt)
        if kind in ('BEGIN', 'COMMIT', 'ROLLBACK'):
            uLogging.warn(
                '%s statements are ignored, <SQL> action always implicitly begins and commits transaction', kind)
            continue
        cur.execute(stmt)
    con.commit()
    uSysDB.close(con)
    return None
def scheduleUsual(self, task):
    """Schedule a regular task via the common scheduling path, translating task attributes into API args."""
    uLogging.debug("APITaskManagement.scheduleUsual %s" % task.name)
    args = {}
    if task.mutex:
        args['mutex'] = task.mutex
    if task.weight:
        # weight is a (weight_on, weight) pair
        args['weight_on'] = task.weight[0]
        args['weight'] = task.weight[1]
    if task.retries > 0:
        args['retries'] = {'number': task.retries}
    if task.timeout > 0:
        args['timeout'] = task.timeout
    if task.delay > 0:
        args['delay_sec'] = task.delay
    if task.ignore_failures == 'y':
        args['ignore_failures'] = True
    if task.params:
        param_list = []
        for param_name, param_value in task.params.items():
            param_list.append({'name': str(param_name), 'value': str(param_value)})
        args['params'] = param_list
    return self.__scheduleCommon(task, args)
def logLastException():
    """Log the currently handled exception: full traceback at debug level, message at error level."""
    import traceback
    uLogging.debug("Error trace:")
    uLogging.debug(traceback.format_exc())
    exc_value = sys.exc_info()[1]
    uLogging.err(str(exc_value))
def _get_billing_hosts(host_types): res = {} for c in host_types: try: res[c] = c.get_host_id() except: uLogging.debug('Component %s does not exist' % c.name) return res
def measure_hcl_execution_time(request):
    """Perform the given HCL request and return its wall-clock duration as a timedelta."""
    uLogging.debug("Measuring HCL execution time")
    started_at = datetime.datetime.now()
    request.perform()
    elapsed = datetime.datetime.now() - started_at
    uLogging.debug("HCL execution time measured: {s} s".format(s=timedelta_total_seconds(elapsed)))
    return elapsed
def reinstallPackageToHost(host_id, name, ctype):
    """Reinstall the package (ctype, name) on the given host via the OpenAPI.

    :param host_id: id of the target host
    :param name: package name
    :param ctype: package component type
    """
    component_id, version = findHostComponentId(host_id, name, ctype)
    uLogging.debug("Reinstalling package (%s-%s-%s) to host %s", ctype, name, version, host_id)
    api = openapi.OpenAPI()
    # Bug fix: the call previously hard-coded host_id=1 (the management node),
    # silently ignoring the 'host_id' argument and reinstalling on the wrong host.
    api.pem.packaging.reinstallPackageByNameSync(host_id=host_id, pname=name, ptype=ctype)
def dropColumnDefault(oid, tname, name, cur):
    """Drop every MSSQL default constraint bound to column *name* of table *tname*.

    :param oid: object id of the table (sys.columns.object_id)
    :param tname: table name used in the ALTER TABLE statement
    :param name: column name whose defaults are dropped
    :param cur: open DB cursor
    """
    cur.execute(
        "SELECT dc.name FROM sys.default_constraints dc JOIN sys.columns c ON (c.column_id = dc.parent_column_id AND c.object_id = dc.parent_object_id) WHERE c.object_id = %s AND c.name = %s",
        (oid, name))
    constraint_names = [record[0] for record in cur.fetchall()]
    for constraint in constraint_names:
        uLogging.debug("Dropping default constraint %s on column %s", constraint, name)
        cur.execute("ALTER TABLE %s DROP CONSTRAINT %s" % (tname, constraint))
def terminate(self):
    """Stop the slave-upgrade monitoring thread and wait (up to 30s) for it to exit."""
    uLogging.debug("Terminating monitoring for slave upgrade process")
    self.need_terminate.set()
    if self.is_alive():
        # Wake the monitoring loop so it can observe the terminate flag.
        upgrade_status_changed_event.set()
        self.join(30.0)
    uLogging.debug("Monitoring for slave upgrade process is terminated")
def _check_java_version(version_str):
    """Verify that `java -version` output describes a 64-bit OpenJDK 11; raise PrecheckFailed otherwise."""
    version_str = version_str.lower()
    is_openjdk = 'openjdk' in version_str
    is_64bit = '64-bit' in version_str
    is_version_11 = re.search('openjdk version \"11\.', version_str) is not None
    if not (is_openjdk and is_64bit and is_version_11):
        raise uPrecheck.PrecheckFailed(
            'incorrect JDK installed! OpenJDK 11 x64 required.',
            'install JDK shipped with Operation Automation and ensure java executable is in PATH (linux) or in JAVA_HOME/bin/ (windows)'
        )
    uLogging.debug('java version is correct')
def create_db(config, askYesNo, clean=False):
    """Create the POA database and its owner role on the Postgres server.

    Connects as the admin user, drops a pre-existing database (after
    confirmation or when config.reinstall is set), ensures the plpgsql
    language and the 'dual' helper table exist in the admin (template)
    database, creates the target database and the application role, and
    hands ownership of the database to that role.

    :param config: installer configuration (database/user names, passwords)
    :param askYesNo: callback asking the operator a yes/no question
    :param clean: when True, create from template0 (no 'dual'/plpgsql inherited)
    """
    # set flag 'clean' to True if you need to create 'clean' db based on template0,
    # else db based on template1 will be created with predefined table 'dual' and language 'plpgsql', that is ususal way
    # In Azure postgres, format user@host is used when connect, but in postgres DB there is short name, without '@host' part
    dsn_login_short = re.sub("@.*", "", config.dsn_login)
    admin_db_user_short = re.sub("@.*", "", config.admin_db_user)
    if config.admin_db_password is None:
        # connect locally
        admin_con = __db_connect(config.admin_db_user, config.admin_db)
    else:
        admin_con = __db_connect(config.admin_db_user, config.admin_db,
                                 config.database_host, config.admin_db_password,
                                 config.database_port)
    try:
        cur = admin_con.cursor()
        # NOTE(review): SQL throughout this function is built by string
        # interpolation; values come from the installer's own config, but
        # confirm they are trusted before reusing this code elsewhere.
        cur.execute("SELECT datname FROM pg_database WHERE datname='%s'" % config.database_name)
        if cur.fetchone():
            uLogging.debug("Database '%s' already exist" % config.database_name)
            if config.reinstall or askYesNo("Database '%s' already exist, drop it?"
                                            % config.database_name):
                cur.execute("DROP DATABASE %s" % config.database_name)
            else:
                raise Exception("Database %s already exists" % config.database_name)
        else:
            uLogging.debug("Database '%s' does not exist" % config.database_name)
        # Ensure plpgsql and the 'dual' helper table exist in the admin
        # (template) database, so non-clean databases inherit them.
        cur.execute("SELECT 1 FROM pg_proc WHERE proname = 'plpgsql_call_handler' AND pronargs=0")
        if not cur.fetchone():
            cur.execute(
                "CREATE OR REPLACE FUNCTION plpgsql_call_handler() RETURNS language_handler AS 'plpgsql.so' LANGUAGE 'c'")
        cur.execute("SELECT 1 FROM pg_language WHERE lanname='plpgsql'")
        if not cur.fetchone():
            cur.execute(
                "CREATE TRUSTED PROCEDURAL LANGUAGE 'plpgsql' HANDLER plpgsql_call_handler LANCOMPILER 'PL/pgSQL';")
        cur.execute("SELECT 1 FROM pg_class WHERE relname='dual'")
        if not cur.fetchone():
            cur.execute("CREATE TABLE dual(dummy CHAR(1))")
            cur.execute("TRUNCATE dual")
            cur.execute("INSERT INTO dual VALUES('X')")
            cur.execute("GRANT SELECT ON dual TO PUBLIC")
        cur = admin_con.cursor()
        cur.execute(getCreateDbSql(config.database_name, clean))
        # Create the application role when it does not exist yet.
        cur.execute("SELECT usesysid FROM pg_shadow WHERE usename = '%s'" % dsn_login_short)
        row = cur.fetchone()
        if not row:
            cur.execute("CREATE USER %s WITH PASSWORD '%s' NOCREATEDB; ALTER ROLE %s SET transform_null_equals TO on;"
                        % (dsn_login_short, config.dsn_passwd, dsn_login_short))
            cur.execute("SELECT usesysid FROM pg_shadow WHERE usename = '%s'" % dsn_login_short)
            row = cur.fetchone()
        # NOTE(review): dbuser_oid is assigned but not used below.
        dbuser_oid = row[0]
        # For cloud: admin user needs to be granted access to dns_login role
        cur.execute("GRANT %s TO %s" % (dsn_login_short, admin_db_user_short))
        cur.execute("ALTER DATABASE %s OWNER TO %s" % (config.database_name, dsn_login_short))
        if config.dsn_passwd:
            cur.execute("ALTER USER %s WITH ENCRYPTED PASSWORD '%s';" % (dsn_login_short, config.dsn_passwd))
    finally:
        __set_uid_by_user('root')
        admin_con.close()
def get_paagent_rpm_path(binfo, platform):
    """Return the pa-agent RPM file name for *platform*, or None when the build carries no agent for it."""
    key = ('pa-agent', platform.os, platform.osver)
    if key not in binfo.rpms_to_update:
        uLogging.debug("Agent for platform {platform} not found".format(platform=platform))
        return None
    uLogging.debug("Found agent for platform {platform}".format(platform=platform))
    rpm_info = binfo.rpms_to_update[key]
    return os.path.basename(rpm_info['path'])
def syncLocalesFromDB(cls):
    """Synchronize billing locales from the database by running configureLocale.py on the billing host."""
    sync_cmd = "python /usr/local/bm/tools_py/configureLocale.py syncLocalesFromDB"
    request = uHCL.Request(cls.get_host_id(), user='******', group='root')
    request.command(sync_cmd, stdout='stdout', stderr='stderr', valid_exit_codes=[0])
    output = request.perform()
    uLogging.debug('done, output \n%s' % output['stdout'])
def update_java_tzdata(jdk_tzupdater_path):
    """Refresh the JDK time-zone database via the tzupdater tool when it is present."""
    if not os.path.isfile(jdk_tzupdater_path):
        uLogging.debug('%s not found, skipping time zone update' % jdk_tzupdater_path)
        return
    # requested http://www.iana.org/time-zones/repository/tzdata-latest.tar.gz
    tzupdater_cmd = '%s -jar %s -l' % (get_java_binary(), jdk_tzupdater_path)
    uLogging.debug('Updating java time zones')
    # tolerate failure if no iana.org access
    uUtil.execCommand(tzupdater_cmd, valid_codes=1)
def slave_upgrade_paagent_and_repourl(binfo, config):
    """Upgrade pa-agent (and the repository URL) on every slave host that runs an agent.

    Parameters:
    :param binfo: uDLModel.BuildInfo
    :param config: uConfig.Config
    """
    uLogging.debug("Preparing to update agent on slaves")
    agent_hosts = uPEM.get_hosts_with_agent()
    upgrade_paagent_and_repourl(binfo, config, agent_hosts)
def syncStores(cls):
    """Run the store synchronization tool on the billing host and return the HCL output."""
    uLogging.info("Synchronization stores")
    sync_cmd = "python /usr/local/bm/tools_py/syncstores.py"
    request = uHCL.Request(cls.get_host_id(), user='******', group='root')
    request.command(sync_cmd, stdout='stdout', stderr='stderr', valid_exit_codes=[0])
    output = request.perform()
    uLogging.debug('done, output \n%s' % output['stdout'])
    return output
def do(self, what, *args):
    """Record a progress step and log it, with a '-' marker per recorded step and an optional phase prefix."""
    self.whats.append((time.time(), what, args))
    msg = what if what else ""
    phase_prefix = ('[' + self.phase + '] ') if self.phase is not None else ''
    # Depth marker length equals the number of steps recorded so far.
    uLogging.debug("%s%s> %s", phase_prefix, '-' * len(self.whats), msg % args)