class KvmRESTService(object):
    """REST service hosting the KVM agent's plugins behind a shared HttpServer.

    The HttpServer is a class-level singleton; plugins are discovered from a
    'plugins' directory next to this file unless overridden via config.
    """

    # Class-level server shared by all instances.
    http_server = http.HttpServer()
    http_server.logfile_path = log.get_logfile_path()

    # Config keys. NOTE(review): 'no_deamon' is a typo for 'no_daemon', but the
    # string value is an external contract — do not "fix" it without auditing callers.
    NO_DAEMON = 'no_deamon'
    PLUGIN_PATH = 'plugin_path'
    WORKSPACE = 'workspace'

    def __init__(self, config=None):
        """Create the service.

        :param config: optional dict of settings (see class-level keys).
            Defaults to an empty dict.  A None sentinel is used instead of a
            mutable ``{}`` default so instances never share config state.
        """
        self.config = config if config is not None else {}
        plugin_path = self._get_config(self.PLUGIN_PATH)
        if not plugin_path:
            # Fall back to the 'plugins' dir sitting next to this module.
            plugin_path = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), 'plugins')
        self.plugin_path = plugin_path
        self.plugin_rgty = plugin.PluginRegistry(self.plugin_path)

    def _get_config(self, name):
        """Return the config value for *name*, or None when absent.

        Uses dict.get() instead of the removed-in-Py3 dict.has_key().
        """
        return self.config.get(name)

    def start(self, in_thread=True):
        """Configure and start all plugins, then start serving HTTP.

        :param in_thread: when True (default) the server runs in a background
            thread; otherwise this call blocks.
        """
        config = {}
        self.plugin_rgty.configure_plugins(config)
        self.plugin_rgty.start_plugins()
        if in_thread:
            self.http_server.start_in_thread()
        else:
            self.http_server.start()

    def stop(self):
        """Stop plugins first, then shut the HTTP server down."""
        self.plugin_rgty.stop_plugins()
        self.http_server.stop()
def setUpClass(self):
    """Bring up one shared HttpServer with the test handlers registered.

    The short sleep gives the background server thread time to bind
    before any test issues a request.
    """
    server = http.HttpServer()
    handler = TestHttpServer()
    self.server = server
    self.test = handler
    # Wire the two synchronous test endpoints to the handler's methods.
    server.register_sync_uri("/sayhello/hi/", handler.say_hello)
    server.register_sync_uri("/returnsame/", handler.return_same)
    server.start_in_thread()
    time.sleep(2)
def __init__(self, options):
    """Initialize the interactive agent shell.

    :param options: parsed CLI options; must expose .ip, .port and .cip.
        # NOTE(review): semantics of .cip are not visible here — presumably
        # the callback/client IP; confirm against the option parser.
    """
    self.options = options
    self.agent_ip = options.ip
    self.agent_port = options.port
    self.cip = options.cip
    # Local HTTP server on fixed port 10086 that receives async results
    # via the '/result' callback endpoint.
    self.http_server = http.HttpServer(port=10086)
    self.http_server.register_sync_uri('/result', self.callback)
    self.http_server.start_in_thread()
    # Blank line before the interactive prompt (Python 2 print statement).
    print ""
    # Hook up readline tab-completion for the interactive shell.
    comp = Completer()
    readline.set_completer_delims(' \t\n;')
    readline.set_completer(comp.complete)
    readline.parse_and_bind("tab: complete")
class VirtualRouter(object):
    """Virtual-router agent: plugin host plus /init and /ping HTTP endpoints."""

    # Class-level server shared by all instances, fixed on port 7272.
    http_server = http.HttpServer(port=7272)
    http_server.logfile_path = log.get_logfile_path()

    PLUGIN_PATH = "plugin_path"
    INIT_PATH = "/init"
    PING_PATH = "/ping"

    def __init__(self, config=None):
        """Create the agent.

        :param config: optional dict of settings; may override PLUGIN_PATH.
            Defaults to an empty dict.  A None sentinel replaces the mutable
            ``{}`` default so instances never share config state.
        """
        self.config = config if config is not None else {}
        plugin_path = self.config.get(self.PLUGIN_PATH, None)
        if not plugin_path:
            # Fall back to the 'plugins' dir sitting next to this module.
            plugin_path = os.path.join(
                os.path.dirname(os.path.realpath(__file__)), 'plugins')
        self.plugin_path = plugin_path
        self.plugin_rgty = plugin.PluginRegistry(self.plugin_path)
        self.init_command = None
        self.uuid = None

    @replyerror
    def init(self, req):
        """Handle /init: remember the init command and the router uuid."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        self.init_command = cmd
        self.uuid = cmd.uuid
        return jsonobject.dumps(InitRsp())

    @replyerror
    def ping(self, req):
        """Handle /ping: echo back the uuid recorded by /init."""
        rsp = PingRsp()
        rsp.uuid = self.uuid
        return jsonobject.dumps(rsp)

    def start(self, in_thread=True):
        """Start plugins, register the async endpoints, then serve HTTP.

        :param in_thread: when True (default) the server runs in a background
            thread; otherwise this call blocks.
        """
        self.plugin_rgty.configure_plugins(self)
        self.plugin_rgty.start_plugins()
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        if in_thread:
            self.http_server.start_in_thread()
        else:
            self.http_server.start()

    def stop(self):
        """Stop plugins first, then shut the HTTP server down."""
        self.plugin_rgty.stop_plugins()
        self.http_server.stop()
class TestAgentServer(object):
    """Test agent: hosts its bundled plugins behind a shared HttpServer."""

    # One server per class, listening on the configured test-agent port.
    http_server = http.HttpServer(port=TESTAGENT_PORT)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Locate the 'plugins' directory next to this file and build the registry."""
        here = os.path.dirname(os.path.realpath(__file__))
        self.plugin_path = os.path.join(here, 'plugins')
        self.plugin_rgty = plugin.PluginRegistry(self.plugin_path)

    def start(self, in_thread=True):
        """Configure and launch the plugins, then serve HTTP.

        :param in_thread: serve from a background thread (default) or block.
        """
        self.plugin_rgty.configure_plugins({})
        self.plugin_rgty.start_plugins()
        launch = self.http_server.start_in_thread if in_thread else self.http_server.start
        launch()

    def stop(self):
        """Stop plugins first, then shut the HTTP server down."""
        self.plugin_rgty.stop_plugins()
        self.http_server.stop()
class FusionstorAgent(object):
    """Fusionstor primary-storage agent exposing its operations over HTTP.

    NOTE(review): many handlers registered in __init__ (init, delete, create,
    clone, commit_image, snapshot ops, sftp up/download, cp, delete_pool) and
    the helper _normalize_install_path are defined outside this excerpt.
    """

    # HTTP endpoint paths (external contract — do not rename values).
    INIT_PATH = "/fusionstor/primarystorage/init"
    CREATE_VOLUME_PATH = "/fusionstor/primarystorage/volume/createempty"
    DELETE_PATH = "/fusionstor/primarystorage/delete"
    CLONE_PATH = "/fusionstor/primarystorage/volume/clone"
    FLATTEN_PATH = "/fusionstor/primarystorage/volume/flatten"
    SFTP_DOWNLOAD_PATH = "/fusionstor/primarystorage/sftpbackupstorage/download"
    SFTP_UPLOAD_PATH = "/fusionstor/primarystorage/sftpbackupstorage/upload"
    ECHO_PATH = "/fusionstor/primarystorage/echo"
    CREATE_SNAPSHOT_PATH = "/fusionstor/primarystorage/snapshot/create"
    DELETE_SNAPSHOT_PATH = "/fusionstor/primarystorage/snapshot/delete"
    COMMIT_IMAGE_PATH = "/fusionstor/primarystorage/snapshot/commit"
    PROTECT_SNAPSHOT_PATH = "/fusionstor/primarystorage/snapshot/protect"
    ROLLBACK_SNAPSHOT_PATH = "/fusionstor/primarystorage/snapshot/rollback"
    UNPROTECT_SNAPSHOT_PATH = "/fusionstor/primarystorage/snapshot/unprotect"
    CP_PATH = "/fusionstor/primarystorage/volume/cp"
    DELETE_POOL_PATH = "/fusionstor/primarystorage/deletepool"
    GET_VOLUME_SIZE_PATH = "/fusionstor/primarystorage/getvolumesize"
    PING_PATH = "/fusionstor/primarystorage/ping"
    GET_FACTS = "/fusionstor/primarystorage/facts"

    # Class-level server shared by all instances, fixed on port 7764.
    http_server = http.HttpServer(port=7764)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Register every HTTP endpoint; all are async except the sync echo."""
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DELETE_PATH, self.delete)
        self.http_server.register_async_uri(self.CREATE_VOLUME_PATH, self.create)
        self.http_server.register_async_uri(self.CLONE_PATH, self.clone)
        self.http_server.register_async_uri(self.COMMIT_IMAGE_PATH, self.commit_image)
        self.http_server.register_async_uri(self.CREATE_SNAPSHOT_PATH, self.create_snapshot)
        self.http_server.register_async_uri(self.DELETE_SNAPSHOT_PATH, self.delete_snapshot)
        self.http_server.register_async_uri(self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot)
        self.http_server.register_async_uri(self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot)
        self.http_server.register_async_uri(self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot)
        self.http_server.register_async_uri(self.FLATTEN_PATH, self.flatten)
        self.http_server.register_async_uri(self.SFTP_DOWNLOAD_PATH, self.sftp_download)
        self.http_server.register_async_uri(self.SFTP_UPLOAD_PATH, self.sftp_upload)
        self.http_server.register_async_uri(self.CP_PATH, self.cp)
        self.http_server.register_async_uri(self.DELETE_POOL_PATH, self.delete_pool)
        self.http_server.register_async_uri(self.GET_VOLUME_SIZE_PATH, self.get_volume_size)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity/availableCapacity from lichbd capacity info."""
        total, used = lichbd.lichbd_get_capacity()
        rsp.totalCapacity = total
        rsp.availableCapacity = total - used

    def _get_file_size(self, path):
        """Provisioned (virtual) size of the lichbd file at *path*."""
        return lichbd.lichbd_file_size(path)

    def _get_file_actual_size(self, path):
        """Actually-consumed size of the lichbd file at *path*."""
        return lichbd.lichbd_file_actual_size(path)

    @replyerror
    def get_volume_size(self, req):
        """Report virtual and actual size of the volume named in the request."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        # NOTE(review): _normalize_install_path is defined outside this excerpt.
        path = self._normalize_install_path(cmd.installPath)
        rsp = GetVolumeSizeRsp()
        rsp.size = self._get_file_size(path)
        rsp.actualSize = self._get_file_actual_size(path)
        return jsonobject.dumps(rsp)

    @replyerror
    def get_facts(self, req):
        """Report the storage cluster fsid."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetFactsRsp()
        rsp.fsid = lichbd.lichbd_get_fsid()
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        """Health check: optionally verify the pool is writable.

        When cmd.testImagePath is set, probe (and if missing, create) a tiny
        test image in that pool; any failure is reported as operationFailure.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = PingRsp()
        if cmd.testImagePath:
            # Probe path lives in the same pool as the supplied test path.
            pool = cmd.testImagePath.split('/')[0]
            testImagePath = '%s/this-is-a-test-image-with-long-name' % pool
            shellcmd = lichbd.lichbd_file_info(testImagePath)
            if shellcmd.return_code == errno.ENOENT:
                # Image absent: creating a 1-byte image proves writability.
                try:
                    lichbd.lichbd_create_raw(testImagePath, '1b')
                # Python 2-only except syntax; use 'except ... as e' if porting.
                except Exception, e:
                    rsp.success = False
                    rsp.operationFailure = True
                    rsp.error = str(e)
                    logger.debug("%s" % rsp.error)
            elif shellcmd.return_code == 0:
                # Image already exists — pool is reachable; nothing to do.
                pass
            else:
                # Unexpected failure from the info command itself.
                rsp.success = False
                rsp.operationFailure = True
                rsp.error = "%s %s" % (shellcmd.cmd, shellcmd.stderr)
                logger.debug("%s: %s" % (shellcmd.cmd, shellcmd.stderr))
        return jsonobject.dumps(rsp)
class AppBuildSystemAgent(object):
    """App-center build-system agent: packages, exports and downloads apps
    on local disk, exposing each operation as an HTTP endpoint.
    """

    # HTTP endpoint paths (external contract — do not rename values).
    CHECK_BUILDSYSTEM_PATH = "/appcenter/buildsystem/checkpath"
    CONNECT_BUILDSYSTEM_PATH = "/appcenter/buildsystem/connect"
    ECHO_BUILDSYSTEM_PATH = "/appcenter/buildsystem/echo"
    PING_BUILDSYSTEM_PATH = "/appcenter/buildsystem/ping"
    CREATE_APPLICATION_PATH = "/appcenter/buildsystem/createapp"
    DELETE_APPLICATION_PATH = "/appcenter/buildsystem/deleteapp"
    EXPORT_APPLICATION_PATH = "/appcenter/buildsystem/exportapp"
    DELETE_EXPORT_APPLICATION_PATH = "/appcenter/buildsystem/deleteexportapp"
    UNZIP_BUILDAPP = "/appcenter/rawapp/unzip"
    CLEAN_UNZIP_BUILDAPP = "/appcenter/rawapp/cleanunzip"
    DOWNLOAD_BUILDAPP = "/appcenter/rawapp/download"
    DELETE_DOWNLOAD_BUILDAPP = "/appcenter/rawapp/deletedownload"
    LENGTH_OF_UUID = 32

    # Class-level server shared by all instances, fixed on port 7079.
    http_server = http.HttpServer(port=7079)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Register every HTTP endpoint; all are async except the sync echo."""
        self.http_server.register_sync_uri(self.ECHO_BUILDSYSTEM_PATH, self.echo)
        self.http_server.register_async_uri(self.CHECK_BUILDSYSTEM_PATH, self.checkpath)
        self.http_server.register_async_uri(self.CONNECT_BUILDSYSTEM_PATH, self.collect)
        self.http_server.register_async_uri(self.PING_BUILDSYSTEM_PATH, self.ping)
        self.http_server.register_async_uri(self.CREATE_APPLICATION_PATH, self.create)
        self.http_server.register_async_uri(self.DELETE_APPLICATION_PATH, self.delete)
        self.http_server.register_async_uri(self.EXPORT_APPLICATION_PATH, self.export)
        self.http_server.register_async_uri(self.DELETE_EXPORT_APPLICATION_PATH, self.delexport)
        self.http_server.register_async_uri(self.UNZIP_BUILDAPP, self.unzipapp)
        self.http_server.register_async_uri(self.CLEAN_UNZIP_BUILDAPP, self.deleteunzip)
        self.http_server.register_async_uri(self.DOWNLOAD_BUILDAPP, self.downloadapp)
        self.http_server.register_async_uri(self.DELETE_DOWNLOAD_BUILDAPP, self.deletedownload)

    @staticmethod
    def _get_disk_capacity(path):
        """Return (total, available) bytes for the filesystem holding *path*."""
        if not path:
            raise Exception('storage path cannot be None')
        return linux.get_disk_capacity_by_df(path)

    def echo(self, req):
        """Liveness probe: always returns an empty body."""
        return ''

    @replyerror
    def checkpath(self, req):
        """Validate a source app directory and prepare the destination dir."""

        def _get_image_meta(path):
            # Map each regular file under *path* to its md5 checksum.
            # NOTE(review): os.path.isfile(file)/md5sum(file) use the bare
            # filename, which resolves against the process CWD, not *path* —
            # looks like it should be os.path.join(path, file); confirm.
            info = {}
            files = os.listdir(path)
            for file in files:
                if os.path.isfile(file):
                    info[file] = linux.md5sum(file)
            return info

        def _clean_dst_dir(basename, path):
            # Remove any pre-existing file/dir with the same basename at dst.
            target = path + '/' + basename
            if os.path.isdir(target):
                shutil.rmtree(target)
            if os.path.isfile(target):
                os.remove(target)

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = CheckPathRsp()
        checkParam(srcPath=cmd.srcPath, dstPath=cmd.dstPath)
        imageDir = cmd.srcPath + "/images"
        if not os.path.isdir(imageDir):
            rsp.error = "cannot find image dir: [%s]" % imageDir
            rsp.success = False
            return jsonobject.dumps(rsp)
        rsp.srcSize = linux.get_folder_size(cmd.srcPath)
        rsp.imageInfos = _get_image_meta(imageDir)
        _clean_dst_dir(os.path.basename(cmd.srcPath), cmd.dstPath)
        if cmd.url is not None:
            rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.url)
        return jsonobject.dumps(rsp)

    @replyerror
    def collect(self, req):
        """Create the storage layout under cmd.url and report its capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        linux.mkdir(cmd.url, 0755)
        linux.mkdir(cmd.url + '/rawapps', 0755)
        linux.mkdir(cmd.url + '/builds', 0755)
        linux.mkdir(cmd.url + '/apps', 0755)
        linux.mkdir(cmd.url + '/exports', 0755)
        rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.url)
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        """Health check: reports current disk capacity for cmd.url."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.url)
        return jsonobject.dumps(rsp)

    @replyerror
    def create(self, req):
        """Build an app from a source dir: read metadata, encode logo/thumbs,
        and copy the tree (minus the image-meta file) to the destination.
        """

        def _read_info(srcDir, filename):
            # Read a required metadata file; raise when missing.
            meta = srcDir + "/" + filename
            if not os.path.exists(meta):
                raise Exception("cannot find meta file: %s" % meta)
            fd = open(meta, 'r')
            ctx = fd.read()
            fd.close()
            return ctx

        def _copy_app(srcDir, dstDir):
            # Copy the whole app tree, then drop the image-meta file from
            # the copy (it is returned separately in the response).
            basename = os.path.basename(srcDir)
            target = dstDir + '/' + basename
            imageMeta = target + "/application-image-meta.json"
            shutil.copytree(srcDir, target)
            os.remove(imageMeta)
            return target

        def _encode_thumbs(srcDir, regex):
            # Base64-encode every thumbnail whose name matches *regex*.
            files = os.listdir(srcDir)
            thumbs = []
            for f in files:
                if re.match(regex, f):
                    with open(srcDir + "/" + f, 'r') as thumb:
                        thumbs.append(pic_prefix + base64.b64encode(thumb.read()))
            return thumbs

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = CreateAppRsp()
        checkParam(srcPath=cmd.srcPath, dstPath=cmd.dstPath)
        rsp.imageInfos = _read_info(cmd.srcPath, "application-image-meta.json")
        rsp.dstInfo = _read_info(cmd.srcPath, "application-desc.json")
        rsp.template = _read_info(cmd.srcPath, "raw-cloudformation-template.json")
        with open(cmd.srcPath + "/logo.jpg", 'r') as logo:
            rsp.logo = pic_prefix + base64.b64encode(logo.read())
        rsp.thumbs = _encode_thumbs(cmd.srcPath, "thumbs.*.jpg")
        target = _copy_app(cmd.srcPath, cmd.dstPath)
        rsp.dstSize = linux.get_folder_size(target)
        rsp.dstPath = target
        if cmd.url is not None:
            rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.url)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """Remove an app directory (if it exists) and report capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        if cmd.appPath is not None and os.path.isdir(cmd.appPath):
            shutil.rmtree(cmd.appPath)
        if cmd.url is not None:
            rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.url)
        return jsonobject.dumps(rsp)

    @replyerror
    def export(self, req):
        """Tar+gzip an app dir (with an injected metadata file) into exportDir."""

        def _ready_dst_dir(path):
            # Ensure *path* is a fresh, empty directory.
            if os.path.isdir(path):
                shutil.rmtree(path)
            if os.path.isfile(path):
                os.remove(path)
            linux.mkdir(path, 0755)

        def _tar_export(srcDir, dstDir, ctx):
            # Write *ctx* to a uniquely named meta file inside srcDir, tar it
            # together with the 'images' dir, gzip, and move to dstDir.
            basename = os.path.basename(srcDir)
            metaPath = shell.call('mktemp XXXXXX-appmeta.json', True, srcDir).strip()  # metaPath is relative path
            tarPath = "/tmp/%s.tar" % basename
            dstPath = "%s/%s.tar.gz" % (dstDir, basename)
            if os.path.exists(tarPath):
                os.remove(tarPath)
            if os.path.exists(dstPath):
                os.remove(dstPath)
            fd = open("%s/%s" % (srcDir, metaPath), 'w')
            fd.write(ctx)
            fd.close()
            shell.call("tar cf %s images %s" % (tarPath, metaPath), True, srcDir)
            shell.call("gzip %s" % tarPath, True, srcDir)
            gzipPath = tarPath + ".gz"
            shutil.move(gzipPath, dstDir)
            # Remove the temporary metadata file from the source tree.
            os.remove("%s/%s" % (srcDir, metaPath))
            return dstPath

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        checkParam(exportDir=cmd.exportDir, appDir=cmd.appDir, exportCtx=cmd.exportCtx)
        rsp = ExportAppRsp()
        _ready_dst_dir(cmd.exportDir)
        rsp.exportPath = _tar_export(cmd.appDir, cmd.exportDir, cmd.exportCtx)
        # NOTE(review): md5sum is computed over exportDir (a directory), not
        # the produced tarball — verify this is what linux.md5sum expects.
        rsp.md5Sum = linux.md5sum(cmd.exportDir)
        if cmd.url is not None:
            rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.url)
        return jsonobject.dumps(rsp)

    @replyerror
    def delexport(self, req):
        """Remove an exported artifact (file or directory)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        checkParam(exportPath=cmd.exportPath)
        if os.path.isdir(cmd.exportPath):
            shutil.rmtree(cmd.exportPath)
        if os.path.isfile(cmd.exportPath):
            os.remove(cmd.exportPath)
        if cmd.url is not None:
            rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.url)
        return jsonobject.dumps(rsp)

    @replyerror
    def unzipapp(self, req):
        """Extract an app tarball into a fresh rawPath and read its appmeta."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        checkParam(srcUrl=cmd.srcUrl, rawPath=cmd.rawPath)
        if os.path.isdir(cmd.rawPath):
            shutil.rmtree(cmd.rawPath)
        linux.mkdir(cmd.rawPath, 0755)
        # .gz archives need 'z'; plain tars do not.
        if cmd.srcUrl.endswith(".gz"):
            shell.call("tar zxf %s -C %s" % (cmd.srcUrl, cmd.rawPath))
        else:
            shell.call("tar xf %s -C %s" % (cmd.srcUrl, cmd.rawPath))
        rsp = RawAppRsp()
        for file in os.listdir(cmd.rawPath):
            full_path = os.path.join(cmd.rawPath, file)
            if file.endswith("-appmeta.json"):
                f = open(full_path, 'r')
                rsp.appCtx = f.read()
                f.close()
        rsp.totalSize = linux.get_folder_size(cmd.rawPath)
        if cmd.url is not None:
            rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.url)
        return jsonobject.dumps(rsp)

    @replyerror
    def deleteunzip(self, req):
        """Remove the unpacked rawPath and any associated download artifact."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        checkParam(rawPath=cmd.rawPath)
        rsp = AgentResponse()
        if cmd.rawPath is not None and os.path.isdir(cmd.rawPath):
            shutil.rmtree(cmd.rawPath)
        if cmd.downloadPath:
            if os.path.isdir(cmd.downloadPath):
                shutil.rmtree(cmd.downloadPath)
            else:
                # downloadPath is a file: remove its containing directory.
                dir = os.path.dirname(cmd.downloadPath)
                shutil.rmtree(dir)
        if cmd.url is not None:
            rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.url)
        return jsonobject.dumps(rsp)

    @replyerror
    def downloadapp(self, req):
        """Fetch an app archive with wget into a freshly created directory."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        checkParam(srcUrl=cmd.srcUrl, downloadPath=cmd.downloadPath)
        rsp = AgentResponse()
        dir = os.path.dirname(cmd.downloadPath)
        if os.path.isdir(dir):
            shutil.rmtree(dir)
        linux.mkdir(dir, 0755)
        # NOTE(review): 'cmd' is rebound from the request object to the
        # ShellCmd here — intentional but easy to misread; the request
        # fields are consumed on the RHS before the rebinding takes effect.
        cmd = shell.ShellCmd("wget -c -t 5 --no-check-certificate %s -O %s" % (cmd.srcUrl, cmd.downloadPath))
        cmd(False)
        if cmd.return_code != 0:
            rsp.error = cmd.stderr
            rsp.success = False
        return jsonobject.dumps(rsp)

    @replyerror
    def deletedownload(self, req):
        """Remove the directory containing a previously downloaded archive."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        checkParam(downloadPath=cmd.downloadPath)
        rsp = AgentResponse()
        dir = os.path.dirname(cmd.downloadPath)
        if os.path.isdir(dir):
            shutil.rmtree(dir)
        return jsonobject.dumps(rsp)
class ConsoleProxyAgent(object):
    """Console proxy agent: manages websockify processes that proxy VNC
    consoles, tracks them in a file DB, and exposes control over HTTP.
    """

    PORT = 7758
    # Class-level server shared by all instances.
    http_server = http.HttpServer(PORT)
    http_server.logfile_path = log.get_logfile_path()

    # HTTP endpoint paths (external contract — do not rename values).
    CHECK_AVAILABILITY_PATH = "/console/check"
    ESTABLISH_PROXY_PATH = "/console/establish"
    DELETE_PROXY_PATH = "/console/delete"
    PING_PATH = "/console/ping"
    TOKEN_FILE_DIR = "/var/lib/zstack/consoleProxy/"
    PROXY_LOG_DIR = "/var/log/zstack/consoleProxy/"
    DB_NAME = "consoleProxy"

    #TODO: sync db status and current running processes
    def __init__(self):
        """Register endpoints, ensure work dirs exist, open DB/token control."""
        self.http_server.register_async_uri(self.CHECK_AVAILABILITY_PATH, self.check_proxy_availability)
        self.http_server.register_async_uri(self.ESTABLISH_PROXY_PATH, self.establish_new_proxy)
        self.http_server.register_async_uri(self.DELETE_PROXY_PATH, self.delete)
        self.http_server.register_sync_uri(self.PING_PATH, self.ping)
        if not os.path.exists(self.PROXY_LOG_DIR):
            os.makedirs(self.PROXY_LOG_DIR, 0755)
        if not os.path.exists(self.TOKEN_FILE_DIR):
            os.makedirs(self.TOKEN_FILE_DIR, 0755)
        self.db = filedb.FileDB(self.DB_NAME)
        self.token_ctrl = ConsoleTokenFileController()

    def _make_token_file_name(self, prefix, timeout):
        """Token filename encodes its absolute expiry time: '<prefix>_<epoch>'."""
        return '%s_%s' % (prefix, time.time() + timeout)

    def _get_token_name_prefix(self, cmd):
        """First two '_'-separated fields of the token form its prefix."""
        return '_'.join(cmd.token.split('_')[:2])

    def _get_pid_on_port(self, port):
        """Return the pid LISTENing on *port* via netstat, or None."""
        out = shell.ShellCmd('netstat -anp | grep ":%s" | grep LISTEN' % port)
        out(False)
        out = out.stdout.strip()
        if "" == out:
            return None
        # netstat's last column looks like 'pid/progname'.
        pid = out.split()[-1].split('/')[0]
        try:
            pid = int(pid)
            return pid
        except:
            return None

    def _check_proxy_availability(self, args):
        """True when a websockify owns the proxy port AND the stored metadata
        (token/targetPort/targetHostname) still matches *args*.

        :param args: dict with keys proxyPort, targetHostname, targetPort, token.
        """
        proxyPort = args['proxyPort']
        targetHostname = args['targetHostname']
        targetPort = args['targetPort']
        token = args['token']
        pid = self._get_pid_on_port(proxyPort)
        if not pid:
            logger.debug('no websockify on proxy port[%s], availability false' % proxyPort)
            return False
        # Confirm the listener really is websockify by inspecting its cmdline.
        with open(os.path.join('/proc', str(pid), 'cmdline'), 'r') as fd:
            process_cmdline = fd.read()
        if 'websockify' not in process_cmdline:
            logger.debug('process[pid:%s] on proxy port[%s] is not websockify process, availability false' % (pid, proxyPort))
            return False
        info_str = self.db.get(token)
        if not info_str:
            logger.debug('cannot find information for process[pid:%s] on proxy port[%s], availability false' % (pid, proxyPort))
            return False
        info = jsonobject.loads(info_str)
        if token != info['token']:
            logger.debug('metadata[token] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false' % (pid, proxyPort, token, info['token']))
            return False
        if targetPort != info['targetPort']:
            logger.debug('metadata[targetPort] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false' % (pid, proxyPort, targetPort, info['targetPort']))
            return False
        if targetHostname != info['targetHostname']:
            logger.debug('metadata[targetHostname] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false' % (pid, proxyPort, targetHostname, info['targetHostname']))
            return False
        return True

    @replyerror
    def ping(self, req):
        """Liveness probe: empty AgentResponse."""
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def check_proxy_availability(self, req):
        """HTTP wrapper over _check_proxy_availability."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        ret = self._check_proxy_availability({
            'proxyPort': cmd.proxyPort,
            'targetHostname': cmd.targetHostname,
            'targetPort': cmd.targetPort,
            'token': cmd.token
        })
        rsp = CheckAvailabilityRsp()
        rsp.available = ret
        return jsonobject.dumps(rsp)

    @replyerror
    @lock.lock('console-proxy')
    def delete(self, req):
        """Remove a proxy: delete its token files, then kill matching
        established python connections to the target host:port."""

        def kill_proxy_process():
            out = shell.ShellCmd("netstat -ntp | grep '%s:%s *ESTABLISHED.*python'" % (cmd.targetHostname, cmd.targetPort))
            out(False)
            pids = [line.strip().split(' ')[-1].split('/')[0] for line in out.stdout.splitlines()]
            for pid in pids:
                # SIGTERM; process may already be gone.
                try:
                    os.kill(int(pid), 15)
                except OSError:
                    continue

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        token_file = ConsoleTokenFile(cmd.token)
        self.token_ctrl.delete_by_prefix(token_file.prefix)
        kill_proxy_process()
        logger.debug('deleted a proxy by command: %s' % req[http.REQUEST_BODY])
        rsp = AgentResponse()
        return jsonobject.dumps(rsp)

    @replyerror
    @lock.lock('console-proxy')
    def establish_new_proxy(self, req):
        # check parameters, generate token file,set db,check process is alive,start process if not,
        """Create or refresh a console proxy: validate input, write the token
        file and DB record, reap stale websockify processes, and start a new
        one unless a matching process is already alive."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = EstablishProxyRsp()
        log_file = os.path.join(self.PROXY_LOG_DIR, cmd.proxyHostname)
        port_conflict_msg = None

        ##
        def check_parameters():
            # All four fields are mandatory.
            if not cmd.targetHostname:
                raise ConsoleProxyError('targetHostname cannot be null')
            if not cmd.targetPort:
                raise ConsoleProxyError('targetPort cannot be null')
            if not cmd.token:
                raise ConsoleProxyError('token cannot be null')
            if not cmd.proxyHostname:
                raise ConsoleProxyError('proxyHostname cannot be null')

        def check_port_conflict():
            # Warn when the proxy port falls inside the kernel's ephemeral range.
            if cmd.proxyPort is None or str(cmd.proxyPort).isdigit() is False:
                raise ConsoleProxyError('proxyPort is None or is not a Number')
            system_port_cmd = 'sysctl -n net.ipv4.ip_local_port_range'
            ret, out, err = bash_roe(system_port_cmd)
            if ret != 0:
                logger.warn(err)
            elif out.strip() is None:
                logger.warn("None is net.ipv4.ip_local_port_range in current system")
            else:
                port_range = out.strip().split()
                if len(port_range) == 2 and str(port_range[0]).isdigit() and str(port_range[1]).isdigit():
                    if int(port_range[0]) < int(cmd.proxyPort) < int(port_range[1]):
                        # NOTE(review): this assignment creates a LOCAL
                        # port_conflict_msg (Python 2 has no 'nonlocal'), so the
                        # outer variable stays None and start_proxy() never sees
                        # this message — likely a latent bug; confirm intent.
                        port_conflict_msg = "cmd.proxyPort [%s] is probably conflict with linux ip_local_port_range: %s" % (cmd.proxyPort, port_range)
                        logger.warn(port_conflict_msg)

        try:
            check_parameters()
            check_port_conflict()
        except ConsoleProxyError as e:
            err = linux.get_exception_stacktrace()
            logger.warn(err)
            rsp.error = str(e)
            rsp.success = False
            return jsonobject.dumps(rsp)

        token_file = ConsoleTokenFile(cmd.token)
        exist_token = self.token_ctrl.search_by_prefix(token_file.prefix)
        # this logic only execute when request from ZStack API
        if not exist_token or exist_token.is_stale():
            self.token_ctrl.delete_by_prefix(token_file.prefix)
            token_file = self.token_ctrl.create_token_file(token_file.prefix, cmd.vncTokenTimeout)
            self.token_ctrl.submit_delete_token_task(token_file)
        else:
            token_file = exist_token
        rsp.token = token_file.get_full_name()
        # websockify target-config line: '<token>: <host>:<port>'.
        token_file.flush_write('%s: %s:%s' % (token_file.get_full_name(), cmd.targetHostname, cmd.targetPort))

        info = {
            'proxyHostname': cmd.proxyHostname,
            'proxyPort': cmd.proxyPort,
            'targetHostname': cmd.targetHostname,
            'targetPort': cmd.targetPort,
            'token': cmd.token,
            'logFile': log_file,
            'tokenFile': token_file.get_absolute_path(),
        }
        info_str = jsonobject.dumps(info)
        self.db.set(cmd.token, info_str)
        rsp.proxyPort = cmd.proxyPort
        logger.debug('successfully add new proxy token file %s' % info_str)

        ## kill garbage websockify process: same proxyip:proxyport, different cert file
        if not cmd.sslCertFile:
            command = "ps aux | grep '[z]stack.*websockify_init' | grep '%s:%d' | grep 'cert=' | awk '{ print $2 }'" % (cmd.proxyHostname, cmd.proxyPort)
        else:
            command = "ps aux | grep '[z]stack.*websockify_init' | grep '%s:%d' | grep -v '%s' | awk '{ print $2 }'" % (cmd.proxyHostname, cmd.proxyPort, cmd.sslCertFile)
        ret, out, err = bash_roe(command)
        for pid in out.splitlines():
            try:
                os.kill(int(pid), 15)
            except OSError:
                continue

        ## if websockify process exists, then return
        alive = False
        ret, out, err = bash_roe("ps aux | grep '[z]stack.*websockify_init'")
        for o in out.splitlines():
            if o.find("%s:%d" % (cmd.proxyHostname, cmd.proxyPort)) != -1:
                alive = True
                break
        if alive:
            return jsonobject.dumps(rsp)

        ##start a new websockify process
        timeout = cmd.idleTimeout
        if not timeout:
            timeout = 600

        @in_bash
        def start_proxy():
            # UPPER_CASE locals are substituted into the {{...}} template by @in_bash.
            LOG_FILE = log_file
            PROXY_HOST_NAME = cmd.proxyHostname
            PROXY_PORT = cmd.proxyPort
            TOKEN_FILE_DIR = self.TOKEN_FILE_DIR
            TIMEOUT = timeout
            start_cmd = '''python -c "from zstacklib.utils import log; import websockify; log.configure_log('{{LOG_FILE}}'); websockify.websocketproxy.websockify_init()" {{PROXY_HOST_NAME}}:{{PROXY_PORT}} -D --target-config={{TOKEN_FILE_DIR}} --idle-timeout={{TIMEOUT}}'''
            if cmd.sslCertFile:
                start_cmd += ' --cert=%s' % cmd.sslCertFile
            ret, out, err = bash_roe(start_cmd)
            if ret != 0:
                # NOTE(review): 'err' (the stderr string from bash_roe) is
                # rebound to a list here, so the later "'stderr: %s' % err"
                # formats the partially built list instead of the real stderr
                # — looks like a shadowing bug; confirm.
                err = []
                if port_conflict_msg is not None:
                    err.append(port_conflict_msg)
                else:
                    err.append('failed to execute bash command: %s' % start_cmd)
                    err.append('return code: %s' % ret)
                    err.append('stdout: %s' % out)
                    err.append('stderr: %s' % err)
                raise ConsoleProxyError('\n'.join(err))

        start_proxy()
        logger.debug('successfully establish new proxy%s' % info_str)
        return jsonobject.dumps(rsp)
class CephAgent(object):
    """Ceph backup-storage agent: image download/delete, capacity reporting
    and image-metadata bookkeeping in a per-BS rados object.

    NOTE(review): this excerpt ends inside download() — the method (and
    possibly further members) continues beyond the visible source.
    """

    # HTTP endpoint paths (external contract — do not rename values).
    INIT_PATH = "/ceph/backupstorage/init"
    DOWNLOAD_IMAGE_PATH = "/ceph/backupstorage/image/download"
    DELETE_IMAGE_PATH = "/ceph/backupstorage/image/delete"
    PING_PATH = "/ceph/backupstorage/ping"
    ECHO_PATH = "/ceph/backupstorage/echo"
    GET_IMAGE_SIZE_PATH = "/ceph/backupstorage/image/getsize"
    GET_FACTS = "/ceph/backupstorage/facts"
    GET_IMAGES_METADATA = "/ceph/backupstorage/getimagesmetadata"
    DELETE_IMAGES_METADATA = "/ceph/backupstorage/deleteimagesmetadata"
    DUMP_IMAGE_METADATA_TO_FILE = "/ceph/backupstorage/dumpimagemetadatatofile"
    CHECK_IMAGE_METADATA_FILE_EXIST = "/ceph/backupstorage/checkimagemetadatafileexist"
    CEPH_METADATA_FILE = "bs_ceph_info.json"

    # Class-level server shared by all instances, fixed on port 7761.
    http_server = http.HttpServer(port=7761)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Register every HTTP endpoint; all are async except the sync echo.

        NOTE(review): delete/ping handlers are registered here but defined
        outside this excerpt.
        """
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH, self.download)
        self.http_server.register_async_uri(self.DELETE_IMAGE_PATH, self.delete)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_IMAGE_SIZE_PATH, self.get_image_size)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_async_uri(self.GET_IMAGES_METADATA, self.get_images_metadata)
        self.http_server.register_async_uri(self.CHECK_IMAGE_METADATA_FILE_EXIST, self.check_image_metadata_file_exist)
        self.http_server.register_async_uri(self.DUMP_IMAGE_METADATA_TO_FILE, self.dump_image_metadata_to_file)
        self.http_server.register_async_uri(self.DELETE_IMAGES_METADATA, self.delete_image_metadata_from_file)

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity/availableCapacity from 'ceph df -f json'.

        Newer ceph reports *_bytes fields; older releases report total_space /
        total_avail in KiB, hence the * 1024.
        NOTE(review): the trailing-underscore field names come from the
        jsonobject wrapper; the mixed single/double underscores (e.g. checking
        total_bytes__ but reading total_bytes_) look inconsistent — verify
        against the jsonobject accessor convention.
        """
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)
        if df.stats.total_bytes__ is not None:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__ is not None:
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)
        if df.stats.total_avail_bytes__ is not None:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__ is not None:
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)
        rsp.totalCapacity = total
        rsp.availableCapacity = avail

    @replyerror
    def echo(self, req):
        """Liveness probe: logs and returns an empty body."""
        logger.debug('get echoed')
        return ''

    def _normalize_install_path(self, path):
        """Strip the 'ceph://' scheme from an install path.

        NOTE(review): str.lstrip strips a CHARACTER SET, not a prefix — this
        happens to work for 'ceph://pool/image' but would also eat leading
        c/e/p/h/:/ chars of the pool name; consider a prefix check instead.
        """
        return path.lstrip('ceph:').lstrip('//')

    def _get_file_size(self, path):
        """Provisioned size of an rbd image, from 'rbd info' JSON."""
        o = shell.call('rbd --format json info %s' % path)
        o = jsonobject.loads(o)
        return long(o.size_)

    @replyerror
    def get_image_size(self, req):
        """Report the provisioned size of the image named in the request."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetImageSizeRsp()
        path = self._normalize_install_path(cmd.installPath)
        rsp.size = self._get_file_size(path)
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def get_images_metadata(self, req):
        """Return stored image metadata lines whose rbd paths still exist,
        de-duplicating consecutive identical install paths."""
        logger.debug("meilei: get images metadata")
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool_name = cmd.poolName
        # Pool names end with the backup-storage uuid after the last '-'.
        bs_uuid = pool_name.split("-")[-1]
        valid_images_info = ""
        self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        last_image_install_path = ""
        bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
        with open(bs_ceph_info_file) as fd:
            images_info = fd.read()
        for image_info in images_info.split('\n'):
            if image_info != '':
                image_json = jsonobject.loads(image_info)
                # todo support multiple bs
                image_uuid = image_json['uuid']
                image_install_path = image_json["backupStorageRefs"][0]["installPath"]
                # Keep only entries whose rbd image still exists.
                ret = bash_r("rbd info %s" % image_install_path.split("//")[1])
                if ret == 0:
                    logger.info("Check image %s install path %s successfully!" % (image_uuid, image_install_path))
                    if image_install_path != last_image_install_path:
                        valid_images_info = image_info + '\n' + valid_images_info
                        last_image_install_path = image_install_path
                else:
                    logger.warn("Image %s install path %s is invalid!" % (image_uuid, image_install_path))
        self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        rsp = GetImageMetaDataResponse()
        rsp.imagesMetadata = valid_images_info
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def check_image_metadata_file_exist(self, req):
        """Report whether the metadata object exists in the bak-t-<uuid> pool."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool_name = cmd.poolName
        bs_uuid = pool_name.split("-")[-1]
        rsp = CheckImageMetaDataFileExistResponse()
        rsp.backupStorageMetaFileName = self.CEPH_METADATA_FILE
        ret, output = bash_ro("rados -p bak-t-%s stat %s" % (bs_uuid, self.CEPH_METADATA_FILE))
        if ret == 0:
            rsp.exist = True
        else:
            rsp.exist = False
        return jsonobject.dumps(rsp)

    def get_metadata_file(self, bs_uuid, file_name):
        """Fetch the metadata object from rados into /tmp/<file_name>."""
        local_file_name = "/tmp/%s" % file_name
        bash_ro("rm -rf %s" % local_file_name)
        bash_ro("rados -p bak-t-%s get %s %s" % (bs_uuid, file_name, local_file_name))

    def put_metadata_file(self, bs_uuid, file_name):
        """Upload /tmp/<file_name> back into rados; clean up on success."""
        local_file_name = "/tmp/%s" % file_name
        ret, output = bash_ro("rados -p bak-t-%s put %s %s" % (bs_uuid, file_name, local_file_name))
        if ret == 0:
            bash_ro("rm -rf %s" % local_file_name)

    @in_bash
    @replyerror
    def dump_image_metadata_to_file(self, req):
        """Write image metadata (single object or JSON list) into the local
        metadata file — overwriting when dumping all, appending otherwise —
        then push it back to rados."""

        def _write_info_to_metadata_file(fd):
            # Split a JSON list body into one '{...}' record per line.
            strip_list_content = content[1:-1]
            data_list = strip_list_content.split('},')
            for item in data_list:
                if item.endswith("}") is not True:
                    item = item + "}"
                fd.write(item + '\n')

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool_name = cmd.poolName
        bs_uuid = pool_name.split("-")[-1]
        content = cmd.imageMetaData
        dump_all_metadata = cmd.dumpAllMetaData
        if dump_all_metadata is True:
            # this means no metadata exist in ceph
            bash_r("touch /tmp/%s" % self.CEPH_METADATA_FILE)
        else:
            self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
        if content is not None:
            if '[' == content[0] and ']' == content[-1]:
                if dump_all_metadata is True:
                    with open(bs_ceph_info_file, 'w') as fd:
                        _write_info_to_metadata_file(fd)
                else:
                    with open(bs_ceph_info_file, 'a') as fd:
                        _write_info_to_metadata_file(fd)
            else:
                # one image info
                if dump_all_metadata is True:
                    with open(bs_ceph_info_file, 'w') as fd:
                        fd.write(content + '\n')
                else:
                    with open(bs_ceph_info_file, 'a') as fd:
                        fd.write(content + '\n')
        self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        rsp = DumpImageMetaDataToFileResponse()
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def delete_image_metadata_from_file(self, req):
        """Delete all metadata lines mentioning an image uuid via sed, then
        push the file back to rados."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        image_uuid = cmd.imageUuid
        pool_name = cmd.poolName
        bs_uuid = pool_name.split("-")[-1]
        self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
        ret, output = bash_ro("sed -i.bak '/%s/d' %s" % (image_uuid, bs_ceph_info_file))
        self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        rsp = DeleteImageMetaDataResponse()
        rsp.ret = ret
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_facts(self, req):
        """Report the cluster fsid and the local monitor address (the mon
        whose address appears in the local routing table)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        o = bash_o('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_
        rsp = GetFactsRsp()
        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        for mon in mon_facts.monmap.mons:
            ADDR = mon.addr.split(':')[0]
            # ADDR is substituted into the {{...}} template by @in_bash.
            if bash_r('ip route | grep -w {{ADDR}} > /dev/null') == 0:
                rsp.monAddr = ADDR
                break
        if not rsp.monAddr:
            raise Exception('cannot find mon address of the mon server[%s]' % cmd.monUuid)
        rsp.fsid = fsid
        return jsonobject.dumps(rsp)

    @replyerror
    def init(self, req):
        """Create any non-predefined pools that are missing; refuse to run if
        a predefined pool is absent. Returns fsid + capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_
        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 100' % pool.name)
        rsp = InitRsp()
        rsp.fsid = fsid
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    def _parse_install_path(self, path):
        """Split 'ceph://pool/image' into [pool, image].

        NOTE(review): same lstrip char-set caveat as _normalize_install_path.
        """
        return path.lstrip('ceph:').lstrip('//').split('/')

    @replyerror
    @rollback
    def download(self, req):
        """Download an image into the ceph pool (method continues past the
        end of this excerpt)."""
        rsp = DownloadRsp()

        def isDerivedQcow2Image(path):
            # Read the first 72 bytes (qcow2 header) from a URL or local file.
            if path.startswith('http://') or path.startswith('https://'):
                resp = urllib2.urlopen(path)
                qhdr = resp.read(72)
                resp.close()
            else:
                resp = open(path)
                qhdr = resp.read(72)
                # NOTE(review): missing parentheses — 'resp.close' does not
                # call close(), leaking the file handle; likely a bug.
                resp.close
            if len(qhdr) != 72:
                return False
            # qcow2 magic 'QFI\xfb'.
            if qhdr[:4] != 'QFI\xfb':
                return False
            # Bytes 16..20 hold the backing-file offset; non-zero means a
            # backing file is present.  ('\00' is an octal escape == '\x00'.)
            return qhdr[16:20] != '\x00\x00\x00\00'

        def fail_if_has_backing_file(fpath):
            if isDerivedQcow2Image(fpath):
                raise Exception('image has backing file or %s is not exist!' % fpath)

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        @rollbackable
        def _1():
            # Registered rollback: remove the temporary rbd image on failure.
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))

        def _getRealSize(length):
            '''length looks like: 10245K'''
            logger.debug(length)
            if not length[-1].isalpha():
                return length
            units = {
                "g": lambda x: x * 1024 * 1024 * 1024,
                "m": lambda x: x * 1024 * 1024,
                "k": lambda x: x * 1024,
            }
            try:
                if not length[-1].isalpha():
                    return length
                return units[length[-1].lower()](int(length[:-1]))
            except:
                logger.warn(linux.get_exception_stacktrace())
                return length

        report = Report()
        report.processType = "AddImage"
        report.resourceUuid = cmd.imageUuid
        report.progress_report("0", "start")
        if cmd.url.startswith('http://') or cmd.url.startswith('https://'):
            fail_if_has_backing_file(cmd.url)
            # roll back tmp ceph file after import it
            _1()
            if cmd.sendCommandUrl:
                Report.url = cmd.sendCommandUrl
            PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()
            url = "'''" + cmd.url + "'''"
            content_length = shell.call('curl -sI %s|grep Content-Length' % url).strip().split()[1]
            total = _getRealSize(content_length)

            def _getProgress(synced):
                logger.debug("getProgress in ceph-bs-agent, synced: %s, total: %s" % (synced, total))
                last = shell.call('tail -1 %s' % PFILE).strip()
                # NOTE(review): this method continues beyond the visible source.
if not last or len(last.split()) < 1: return synced logger.debug("last synced: %s" % last) written = _getRealSize(last.split()[0]) if total > 0 and synced < written: synced = written if synced < total: percent = int(round(float(synced) / float(total) * 90)) report.progress_report(percent, "report") return synced logger.debug("content-length is: %s" % total) _, _, err = bash_progress_1( 'set -o pipefail;wget --no-check-certificate -O - %s 2>%s| rbd import --image-format 2 - %s/%s' % (cmd.url, PFILE, pool, tmp_image_name), _getProgress) if err: raise err actual_size = linux.get_file_size_by_http_head(cmd.url) if os.path.exists(PFILE): os.remove(PFILE) elif cmd.url.startswith('file://'): src_path = cmd.url.lstrip('file:') src_path = os.path.normpath(src_path) if not os.path.isfile(src_path): raise Exception('cannot find the file[%s]' % src_path) fail_if_has_backing_file(src_path) # roll back tmp ceph file after import it _1() shell.call("rbd import --image-format 2 %s %s/%s" % (src_path, pool, tmp_image_name)) actual_size = os.path.getsize(src_path) else: raise Exception('unknown url[%s]' % cmd.url) file_format = shell.call( "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2" % (pool, tmp_image_name)) file_format = file_format.strip() if file_format not in ['qcow2', 'raw']: raise Exception('unknown image format: %s' % file_format) if file_format == 'qcow2': conf_path = None try: with open('/etc/ceph/ceph.conf', 'r') as fd: conf = fd.read() conf = '%s\n%s\n' % (conf, 'rbd default format = 2') conf_path = linux.write_to_temp_file(conf) shell.call( 'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s' % (pool, tmp_image_name, pool, image_name, conf_path)) shell.call('rbd rm %s/%s' % (pool, tmp_image_name)) finally: if conf_path: os.remove(conf_path) else: shell.call('rbd mv %s/%s %s/%s' % (pool, tmp_image_name, pool, image_name)) report.progress_report("100", "finish") @rollbackable def _2(): shell.call('rbd rm %s/%s' % (pool, 
image_name)) _2() o = shell.call('rbd --format json info %s/%s' % (pool, image_name)) image_stats = jsonobject.loads(o) rsp.size = long(image_stats.size_) rsp.actualSize = actual_size self._set_capacity_to_response(rsp) return jsonobject.dumps(rsp) @replyerror def ping(self, req): cmd = jsonobject.loads(req[http.REQUEST_BODY]) rsp = PingRsp() facts = bash_o('ceph -s -f json') mon_facts = jsonobject.loads(facts) found = False for mon in mon_facts.monmap.mons: if cmd.monAddr in mon.addr: found = True break if not found: rsp.success = False rsp.failure = "MonAddrChanged" rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \ 'Reconnect the ceph primary storage' \ ' may solve this issue' % (cmd.monUuid, cmd.monAddr) return jsonobject.dumps(rsp) create_img = shell.ShellCmd('rbd create %s --image-format 2 --size 1' % cmd.testImagePath) create_img(False) if create_img.return_code != 0 and 'File exists' not in create_img.stderr and 'File exists' not in create_img.stdout: rsp.success = False rsp.failure = 'UnableToCreateFile' rsp.error = "%s %s" % (create_img.stderr, create_img.stdout) else: rm_img = shell.ShellCmd('rbd rm %s' % cmd.testImagePath) rm_img(False) return jsonobject.dumps(rsp) @replyerror def delete(self, req): cmd = jsonobject.loads(req[http.REQUEST_BODY]) pool, image_name = self._parse_install_path(cmd.installPath) def delete_image(_): shell.call('rbd rm %s/%s' % (pool, image_name)) return True # 'rbd rm' might fail due to client crash. We wait for 30 seconds as suggested by 'rbd'. # # rbd: error: image still has watchers # This means the image is still open or the client using it crashed. Try again after # closing/unmapping it or waiting 30s for the crashed client to timeout. linux.wait_callback_success(delete_image, interval=5, timeout=30, ignore_exception_in_callback=True) rsp = AgentResponse() self._set_capacity_to_response(rsp) return jsonobject.dumps(rsp)
class ApplianceVm(object):
    """Agent running inside a zstack appliance VM.

    Serves firewall-refresh, init/upgrade, and echo endpoints on port 7759
    and installs a default-deny iptables policy on start.
    """
    http_server = http.HttpServer(port=7759)
    http_server.logfile_path = log.get_logfile_path()

    REFRESH_FIREWALL_PATH = "/appliancevm/refreshfirewall"
    ECHO_PATH = "/appliancevm/echo"
    INIT_PATH = "/appliancevm/init"

    @lock.file_lock('/run/xtables.lock')
    def set_default_iptable_rules(self):
        """Install the default-deny firewall: DROP policy on INPUT/FORWARD
        plus baseline accepts (established, loopback, icmp) and a DHCP
        checksum-fill rule in the mangle table."""
        shell.call('iptables --policy INPUT DROP')
        shell.call('iptables --policy FORWARD DROP')
        # NOTE: 22 port of eth0 is opened in /etc/sysconfig/iptables by default
        ipt = iptables.from_iptables_save()
        ipt.add_rule('-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT')
        ipt.add_rule('-A INPUT -i lo -j ACCEPT')
        ipt.add_rule('-A INPUT -p icmp -j ACCEPT')
        ipt.add_rule('-A INPUT -j REJECT --reject-with icmp-host-prohibited')
        ipt.add_rule('-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT')
        ipt.add_rule('-A POSTROUTING -p udp --dport bootpc -j CHECKSUM --checksum-fill',
                     iptables.IPTables.MANGLE_TABLE_NAME)
        ipt.iptable_restore()

    @replyerror
    def init(self, req):
        """Run every upgrade script listed in upgradescripts/scriptlist, if present.

        Scripts are executed in file order with bash; a missing script or a
        failing script aborts the whole init with an exception.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        upgrade_script_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'upgradescripts')
        list_file = os.path.join(upgrade_script_path, 'scriptlist')

        def upgrade():
            script_names = []
            with open(list_file, 'r') as fd:
                ls = fd.readlines()
                for l in ls:
                    l = l.strip(' \t\r\n')
                    if l:
                        script_names.append(l)

            for s in script_names:
                script = os.path.join(upgrade_script_path, s)
                if not os.path.exists(script):
                    raise Exception('cannot find upgrade script[%s]' % script)

                try:
                    shell.call('bash %s' % script)
                except shell.ShellError as e:
                    # fix: the message was passed logger-style
                    # (Exception(fmt, a, b)), so the placeholders were never
                    # interpolated; format it explicitly instead
                    raise Exception('failed to execute upgrade script[%s], %s' % (script, str(e)))

        if os.path.exists(list_file):
            upgrade()

        return jsonobject.dumps(AgentResponse())

    @replyerror
    def echo(self, req):
        """Liveness probe; returns an empty body."""
        logger.debug('get echoed')
        return ''

    @replyerror
    @lock.file_lock('/run/xtables.lock')
    def refresh_rule(self, req):
        """Rebuild the 'appliancevm' iptables chain from cmd.rules.

        Each rule opens a tcp and/or udp port range on the nic resolved from
        the rule's destIp or nicMac; the bootstrap eth0:22 rule is replaced
        with one bound to eth0's current IP.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = RefreshFirewallRsp()

        ipt = iptables.from_iptables_save()
        # replace bootstrap 22 port rule with a more restricted one that binds to eth0's IP
        ipt.remove_rule('-A INPUT -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT')
        eth0_ip = linux.get_ip_by_nic_name('eth0')
        assert eth0_ip, 'cannot find IP of eth0'
        ipt.add_rule('-A INPUT -d %s/32 -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT' % eth0_ip)

        chain_name = 'appliancevm'
        # start from a clean chain so removed rules do not linger
        ipt.delete_chain(chain_name)
        ipt.add_rule('-A INPUT -j %s' % chain_name)

        for to in cmd.rules:
            if to.destIp:
                nic_name = linux.get_nic_name_by_ip(to.destIp)
            else:
                nic_name = linux.get_nic_name_from_alias(linux.get_nic_names_by_mac(to.nicMac))

            r = []
            if to.protocol == 'all' or to.protocol == 'udp':
                r.append('-A %s' % chain_name)
                if to.sourceIp:
                    r.append('-s %s' % to.sourceIp)
                if to.destIp:
                    r.append('-d %s' % to.destIp)
                r.append('-i %s -p udp -m state --state NEW -m udp --dport %s:%s -j ACCEPT'
                         % (nic_name, to.startPort, to.endPort))
                rule = ' '.join(r)
                ipt.add_rule(rule)

            r = []
            if to.protocol == 'all' or to.protocol == 'tcp':
                r.append('-A %s' % chain_name)
                if to.sourceIp:
                    r.append('-s %s' % to.sourceIp)
                if to.destIp:
                    r.append('-d %s' % to.destIp)
                r.append('-i %s -p tcp -m state --state NEW -m tcp --dport %s:%s -j ACCEPT'
                         % (nic_name, to.startPort, to.endPort))
                rule = ' '.join(r)
                ipt.add_rule(rule)

        ipt.iptable_restore()
        logger.debug('refreshed rules for appliance vm')
        return jsonobject.dumps(rsp)

    def start(self, in_thread=True):
        """Apply default firewall rules, register endpoints, and start the
        HTTP server (in a background thread unless in_thread is False)."""
        self.set_default_iptable_rules()

        self.http_server.register_async_uri(self.REFRESH_FIREWALL_PATH, self.refresh_rule)
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

        if in_thread:
            self.http_server.start_in_thread()
        else:
            self.http_server.start()

    def stop(self):
        """Stop the embedded HTTP server."""
        self.http_server.stop()
class SftpBackupStorageAgent(object):
    '''
    Agent serving a simple sftp-style backup storage over HTTP on port 7171.

    Stores images under a configured storage path, maintains a per-storage
    metadata file (bs_sftp_info.json), and supports download/delete/metadata
    endpoints registered in __init__.
    '''
    CONNECT_PATH = "/sftpbackupstorage/connect"
    DOWNLOAD_IMAGE_PATH = "/sftpbackupstorage/download"
    DELETE_IMAGE_PATH = "/sftpbackupstorage/delete"
    PING_PATH = "/sftpbackupstorage/ping"
    GET_SSHKEY_PATH = "/sftpbackupstorage/sshkey"
    ECHO_PATH = "/sftpbackupstorage/echo"
    WRITE_IMAGE_METADATA = "/sftpbackupstorage/writeimagemetadata"
    DELETE_IMAGES_METADATA = "/sftpbackupstorage/deleteimagesmetadata"
    DUMP_IMAGE_METADATA_TO_FILE = "/sftpbackupstorage/dumpimagemetadatatofile"
    GENERATE_IMAGE_METADATA_FILE = "/sftpbackupstorage/generateimagemetadatafile"
    CHECK_IMAGE_METADATA_FILE_EXIST = "/sftpbackupstorage/checkimagemetadatafileexist"
    GET_IMAGES_METADATA = "/sftpbackupstorage/getimagesmetadata"
    GET_IMAGE_SIZE = "/sftpbackupstorage/getimagesize"
    IMAGE_TEMPLATE = 'template'
    IMAGE_ISO = 'iso'
    URL_HTTP = 'http'
    URL_HTTPS = 'https'
    URL_FILE = 'file'
    URL_NFS = 'nfs'
    PORT = 7171
    SSHKEY_PATH = "~/.ssh/id_rsa.sftp"
    SFTP_METADATA_FILE = "bs_sftp_info.json"

    http_server = http.HttpServer(PORT)
    http_server.logfile_path = log.get_logfile_path()

    def get_capacity(self):
        # Returns (total, available) bytes of the filesystem backing
        # self.storage_path; available = total - used.
        total = linux.get_total_disk_size(self.storage_path)
        used = linux.get_used_disk_size(self.storage_path)
        return (total, total - used)

    @replyerror
    def ping(self, req):
        # Health check: echo back the uuid recorded at connect() time.
        rsp = PingResponse()
        rsp.uuid = self.uuid
        return jsonobject.dumps(rsp)

    @replyerror
    def echo(self, req):
        # Liveness probe; returns an empty body.
        logger.debug('get echoed')
        return ''

    @replyerror
    def get_image_size(self, req):
        # Report virtual and actual size of the qcow2 at cmd.installPath.
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetImageSizeRsp()
        rsp.size, rsp.actualSize = linux.qcow2_size_and_actual_size(cmd.installPath)
        return jsonobject.dumps(rsp)

    @replyerror
    def connect(self, req):
        # Record storage path and uuid, create the storage directory if
        # missing, and report total/available capacity.
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        self.storage_path = cmd.storagePath
        self.uuid = cmd.uuid

        if os.path.isfile(self.storage_path):
            raise Exception('storage path: %s is a file' % self.storage_path)

        if not os.path.exists(self.storage_path):
            os.makedirs(self.storage_path, 0777)

        (total, avail) = self.get_capacity()
        logger.debug(http.path_msg(self.CONNECT_PATH,
                                   'connected, [storage path:%s, total capacity: %s bytes, available capacity: %s size]'
                                   % (self.storage_path, total, avail)))
        rsp = ConnectResponse()
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        return jsonobject.dumps(rsp)

    def _write_image_metadata(self, image_install_path, meta_data):
        # Write meta_data (plus computed size/md5sum) as pretty JSON to
        # meta_data.json next to the image file. Returns (size, md5sum).
        image_dir = os.path.dirname(image_install_path)
        md5sum = linux.md5sum(image_install_path)
        size = os.path.getsize(image_install_path)
        meta = dict(meta_data.__dict__.items())
        meta['size'] = size
        meta['md5sum'] = md5sum
        metapath = os.path.join(image_dir, 'meta_data.json')
        with open(metapath, 'w') as fd:
            fd.write(jsonobject.dumps(meta, pretty=True))
        return (size, md5sum)

    @replyerror
    def write_image_metadata(self, req):
        # HTTP wrapper around _write_image_metadata for cmd.metaData.
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        meta_data = cmd.metaData
        self._write_image_metadata(meta_data.installPath, meta_data)
        rsp = WriteImageMetaDataResponse()
        return jsonobject.dumps(rsp)

    @in_bash
    def _generate_image_metadata_file(self, bs_path):
        # Ensure <bs_path>/bs_sftp_info.json exists (creating directories as
        # needed) and return its path; raises if `touch` fails.
        bs_meta_file = bs_path + '/' + self.SFTP_METADATA_FILE
        if os.path.isfile(bs_meta_file) is False:
            #dir = '/'.join(bs_path.split("/")[:-1])
            if os.path.exists(bs_path) is False:
                os.makedirs(bs_path)
            ret, output = bash_ro("touch %s" % bs_meta_file)
            if ret == 0:
                return bs_meta_file
            else:
                raise Exception('can not create image metadata file %s' % output)
        else:
            return bs_meta_file

    @replyerror
    def generate_image_metadata_file(self, req):
        # Create (if needed) the metadata file under cmd.backupStoragePath
        # and return its name.
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        bs_path = cmd.backupStoragePath
        file_name = self._generate_image_metadata_file(bs_path)
        rsp = GenerateImageMetaDataFileResponse()
        rsp.bsFileName = file_name
        return jsonobject.dumps(rsp)

    @replyerror
    def check_image_metadata_file_exist(self, req):
        # Report whether the metadata file exists under cmd.backupStoragePath.
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        bs_path = cmd.backupStoragePath
        # todo change bs_sftp_info.json to bs_image_info.json
        bs_sftp_info_file = bs_path + '/' + self.SFTP_METADATA_FILE
        rsp = CheckImageMetaDataFileExistResponse()
        rsp.backupStorageMetaFileName = bs_sftp_info_file
        if os.path.isfile(bs_sftp_info_file):
            rsp.exist = True
        else:
            rsp.exist = False
        return jsonobject.dumps(rsp)

    @replyerror
    def dump_image_metadata_to_file(self, req):
        # Write cmd.imageMetaData (a JSON list '[...]' or a single JSON
        # object) into the metadata file; cmd.dumpAllMetaData selects
        # overwrite ('w') versus append ('a') mode.
        def _write_info_to_metadata_file(fd):
            # split a JSON list into one JSON object per line; the split
            # on '},' eats the closing brace of all but the last item
            strip_list_content = content[1:-1]
            data_list = strip_list_content.split('},')
            for item in data_list:
                if item.endswith("}") is not True:
                    item = item + "}"
                fd.write(item + '\n')

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        bs_sftp_info_file = cmd.backupStoragePath + '/' + self.SFTP_METADATA_FILE
        content = cmd.imageMetaData
        dump_all_metadata = cmd.dumpAllMetaData
        if content is not None:
            if '[' == content[0] and ']' == content[-1]:
                if dump_all_metadata is True:
                    with open(bs_sftp_info_file, 'w') as fd:
                        _write_info_to_metadata_file(fd)
                else:
                    with open(bs_sftp_info_file, 'a') as fd:
                        _write_info_to_metadata_file(fd)
            else:
                #one image info
                if dump_all_metadata is True:
                    with open(bs_sftp_info_file, 'w') as fd:
                        fd.write(content + '\n')
                else:
                    with open(bs_sftp_info_file, 'a') as fd:
                        fd.write(content + '\n')
        rsp = DumpImageMetaDataToFileResponse()
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def delete_image_metadata_from_file(self, req):
        # Delete every metadata line mentioning cmd.imageUuid via
        # `sed -i.bak '/<uuid>/d'`; sed's exit status is returned in rsp.ret.
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        image_uuid = cmd.imageUuid
        bs_sftp_info_file = cmd.backupStoragePath + '/' + self.SFTP_METADATA_FILE
        ret, output = bash_ro("sed -i.bak '/%s/d' %s" % (image_uuid, bs_sftp_info_file))
        rsp = DeleteImageMetaDataResponse()
        rsp.ret = ret
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def get_images_metadata(self, req):
        # Return metadata lines whose install path still exists on disk,
        # de-duplicating by image uuid. Valid lines are prepended, so the
        # result is in reverse file order.
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        valid_images_info = ""
        bs_sftp_info_file = cmd.backupStoragePath + '/' + self.SFTP_METADATA_FILE
        image_uuid_list = []
        with open(bs_sftp_info_file) as fd:
            images_info = fd.read()
            for image_info in images_info.split('\n'):
                if image_info != '':
                    image_json = jsonobject.loads(image_info)
                    # todo support multiple bs
                    image_uuid = image_json['uuid']
                    image_install_path = image_json["backupStorageRefs"][0]["installPath"]
                    if image_uuid in image_uuid_list:
                        logger.debug("duplicate uuid %s, ignore" % image_json["uuid"])
                        continue
                    image_uuid_list.append(image_uuid)
                    ret = bash_r("ls %s" % image_install_path)
                    if ret == 0:
                        logger.info("Check image %s install path %s successfully!" % (image_uuid, image_install_path))
                        valid_images_info = image_info + '\n' + valid_images_info
                    else:
                        logger.warn("Image %s install path %s is invalid!" % (image_uuid, image_install_path))

        rsp = GetImageMetaDataResponse()
        rsp.imagesMetaData = valid_images_info
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def download_image(self, req):
        # Download an image from an http(s) or file URL into cmd.installPath
        # and report its size, format, and storage capacity.
        #TODO: report percentage to mgmt server
        def percentage_callback(percent, url):
            logger.debug('Downloading %s ... %s%%' % (url, percent))

        def use_wget(url, name, workdir, timeout):
            return linux.wget(url, workdir=workdir, rename=name, timeout=timeout,
                              interval=2, callback=percentage_callback, callback_data=url)

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = DownloadResponse()
        supported_schemes = [self.URL_HTTP, self.URL_HTTPS, self.URL_FILE]
        if cmd.urlScheme not in supported_schemes:
            rsp.success = False
            rsp.error = 'unsupported url scheme[%s], SimpleSftpBackupStorage only supports %s' % (
                cmd.urlScheme, supported_schemes)
            return jsonobject.dumps(rsp)

        path = os.path.dirname(cmd.installPath)
        if not os.path.exists(path):
            os.makedirs(path, 0777)
        image_name = os.path.basename(cmd.installPath)
        install_path = cmd.installPath

        timeout = cmd.timeout if cmd.timeout else 7200
        if cmd.urlScheme in [self.URL_HTTP, self.URL_HTTPS]:
            try:
                # NOTE(review): cmd.url is overwritten with its shell-quoted
                # form here; later logging reports the quoted value
                image_name = linux.shellquote(image_name)
                cmd.url = linux.shellquote(cmd.url)
                ret = use_wget(cmd.url, image_name, path, timeout)
                if ret != 0:
                    rsp.success = False
                    rsp.error = 'http/https download failed, [wget -O %s %s] returns value %s' % (
                        image_name, cmd.url, ret)
                    return jsonobject.dumps(rsp)
            except linux.LinuxError as e:
                traceback.format_exc()
                rsp.success = False
                rsp.error = str(e)
                return jsonobject.dumps(rsp)
        elif cmd.urlScheme == self.URL_FILE:
            # NOTE(review): lstrip strips a character set, not the 'file:'
            # prefix; it works for 'file://...' because stripping stops at
            # the first '/'
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            logger.debug("src_path is: %s" % src_path)
            # NOTE(review): src_path is not shell-quoted here, unlike
            # install_path — confirm src paths can never contain spaces
            shell.call('yes | cp %s %s' % (src_path, linux.shellquote(install_path)))

        os.chmod(cmd.installPath, stat.S_IRUSR + stat.S_IRGRP + stat.S_IROTH)

        image_format = bash_o("qemu-img info %s | grep -w '^file format' | awk '{print $3}'"
                              % linux.shellquote(install_path)).strip('\n')

        size = os.path.getsize(install_path)
        md5sum = 'not calculated'
        logger.debug('successfully downloaded %s to %s' % (cmd.url, install_path))
        (total, avail) = self.get_capacity()
        rsp.md5Sum = md5sum
        rsp.actualSize = size
        rsp.size = linux.qcow2_virtualsize(install_path)
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        rsp.format = image_format
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_image(self, req):
        # Delete the image's whole directory (metadata included) and report
        # the refreshed capacity.
        # NOTE(review): this removes the *parent directory* of installUrl —
        # safe only while each image lives in its own directory.
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = DeleteResponse()
        path = os.path.dirname(cmd.installUrl)
        shutil.rmtree(path)
        logger.debug('successfully deleted bits[%s]' % cmd.installUrl)
        (total, avail) = self.get_capacity()
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        return jsonobject.dumps(rsp)

    @replyerror
    def get_sshkey(self, req):
        # Return the agent's private sftp key (~/.ssh/id_rsa.sftp), or an
        # error response when the key file is missing.
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetSshKeyResponse()
        path = os.path.expanduser(self.SSHKEY_PATH)
        if not os.path.exists(path):
            err = "Cannot find private key of SftpBackupStorageAgent"
            rsp.error = err
            rsp.success = False
            logger.warn("%s at %s" % (err, self.SSHKEY_PATH))
            return jsonobject.dumps(rsp)

        with open(path) as fd:
            sshkey = fd.read()
            rsp.sshKey = sshkey
            logger.debug("Get sshkey as %s" % sshkey)
            return jsonobject.dumps(rsp)

    def __init__(self):
        '''
        Register all HTTP endpoints; storage_path/uuid stay None until
        connect() is called by the management server.
        '''
        self.http_server.register_sync_uri(self.CONNECT_PATH, self.connect)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH, self.download_image)
        self.http_server.register_async_uri(self.DELETE_IMAGE_PATH, self.delete_image)
        self.http_server.register_async_uri(self.GET_SSHKEY_PATH, self.get_sshkey)
        self.http_server.register_async_uri(self.WRITE_IMAGE_METADATA, self.write_image_metadata)
        self.http_server.register_async_uri(self.GENERATE_IMAGE_METADATA_FILE, self.generate_image_metadata_file)
        self.http_server.register_async_uri(self.CHECK_IMAGE_METADATA_FILE_EXIST, self.check_image_metadata_file_exist)
        self.http_server.register_async_uri(self.DUMP_IMAGE_METADATA_TO_FILE, self.dump_image_metadata_to_file)
        self.http_server.register_async_uri(self.DELETE_IMAGES_METADATA, self.delete_image_metadata_from_file)
        self.http_server.register_async_uri(self.GET_IMAGES_METADATA, self.get_images_metadata)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_IMAGE_SIZE, self.get_image_size)
        self.storage_path = None
        self.uuid = None
class ConsoleProxyAgent(object): PORT = 7758 http_server = http.HttpServer(PORT) http_server.logfile_path = log.get_logfile_path() CHECK_AVAILABILITY_PATH = "/check" ESTABLISH_PROXY_PATH = "/establish" DELETE_PROXY_PATH = "/delete" TOKEN_FILE_DIR = "/var/lib/zstack/consoleProxy/" PROXY_LOG_DIR = "/var/log/zstack/consoleProxy/" DB_NAME = "consoleProxy" #TODO: sync db status and current running processes def __init__(self): self.http_server.register_async_uri(self.CHECK_AVAILABILITY_PATH, self.check_proxy_availability) self.http_server.register_async_uri(self.ESTABLISH_PROXY_PATH, self.establish_new_proxy) self.http_server.register_async_uri(self.DELETE_PROXY_PATH, self.delete) if not os.path.exists(self.PROXY_LOG_DIR): os.makedirs(self.PROXY_LOG_DIR, 0755) if not os.path.exists(self.TOKEN_FILE_DIR): os.makedirs(self.TOKEN_FILE_DIR, 0755) self.db = filedb.FileDB(self.DB_NAME) def _make_token_file_name(self, cmd): target_ip_str = cmd.targetHostname.replace('.', '_') return '%s-%s' % (target_ip_str, cmd.targetPort) def _make_proxy_log_file_name(self, cmd): f = self._make_token_file_name(cmd) return '%s-%s' % (f, cmd.token) def _get_pid_on_port(self, port): out = shell.call('netstat -anp | grep ":%s" | grep LISTEN' % port, exception=False) out = out.strip(' \n\t\r') if "" == out: return None pid = out.split()[-1].split('/')[0] try: pid = int(pid) return pid except: return None def _check_proxy_availability(self, args): proxyPort = args['proxyPort'] targetHostname = args['targetHostname'] targetPort = args['targetPort'] token = args['token'] pid = self._get_pid_on_port(proxyPort) if not pid: logger.debug( 'no websockify on proxy port[%s], availability false' % proxyPort) return False with open(os.path.join('/proc', str(pid), 'cmdline'), 'r') as fd: process_cmdline = fd.read() if 'websockify' not in process_cmdline: logger.debug( 'process[pid:%s] on proxy port[%s] is not websockify process, availability false' % (pid, proxyPort)) return False info_str = self.db.get(token) 
if not info_str: logger.debug( 'cannot find information for process[pid:%s] on proxy port[%s], availability false' % (pid, proxyPort)) return False info = jsonobject.loads(info_str) if token != info['token']: logger.debug( 'metadata[token] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false' % (pid, proxyPort, token, info['token'])) return False if targetPort != info['targetPort']: logger.debug( 'metadata[targetPort] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false' % (pid, proxyPort, targetPort, info['targetPort'])) return False if targetHostname != info['targetHostname']: logger.debug( 'metadata[targetHostname] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false' % (pid, proxyPort, targetHostname, info['targetHostname'])) return False return True @replyerror def check_proxy_availability(self, req): cmd = jsonobject.loads(req[http.REQUEST_BODY]) ret = self._check_proxy_availability({ 'proxyPort': cmd.proxyPort, 'targetHostname': cmd.targetHostname, 'targetPort': cmd.targetPort, 'token': cmd.token }) rsp = CheckAvailabilityRsp() rsp.available = ret return jsonobject.dumps(rsp) @replyerror @lock.lock('console-proxy') def delete(self, req): cmd = jsonobject.loads(req[http.REQUEST_BODY]) keywords = [cmd.token, cmd.proxyHostname, str(cmd.proxyPort)] pid = linux.find_process_by_cmdline(keywords) if pid: shell.call("kill %s" % pid) log_file = self._make_proxy_log_file_name(cmd) shell.call("rm -f %s" % log_file) token_file = self._make_token_file_name(cmd) shell.call("rm -f %s" % token_file) shell.call( "iptables-save | grep -- '-A INPUT -p tcp -m tcp --dport %s' > /dev/null && iptables -D INPUT -p tcp -m tcp --dport %s -j ACCEPT" % (cmd.proxyPort, cmd.proxyPort)) logger.debug('deleted a proxy by command: %s' % req[http.REQUEST_BODY]) rsp = AgentResponse() return jsonobject.dumps(rsp) @replyerror @lock.lock('console-proxy') def establish_new_proxy(self, req): cmd = 
jsonobject.loads(req[http.REQUEST_BODY]) rsp = EstablishProxyRsp() def check_parameters(): if not cmd.targetHostname: raise ConsoleProxyError('targetHostname cannot be null') if not cmd.targetPort: raise ConsoleProxyError('targetPort cannot be null') if not cmd.token: raise ConsoleProxyError('token cannot be null') if not cmd.proxyHostname: raise ConsoleProxyError('proxyHostname cannot be null') try: check_parameters() except ConsoleProxyError as e: err = linux.get_exception_stacktrace() logger.warn(err) rsp.error = str(e) rsp.success = False return jsonobject.dumps(rsp) proxyPort = linux.get_free_port() token_file = os.path.join(self.TOKEN_FILE_DIR, self._make_token_file_name(cmd)) with open(token_file, 'w') as fd: fd.write('%s: %s:%s' % (cmd.token, cmd.targetHostname, cmd.targetPort)) timeout = cmd.idleTimeout if not timeout: timeout = 600 log_file = os.path.join(self.PROXY_LOG_DIR, self._make_proxy_log_file_name(cmd)) proxy_cmd = '''python -c "from zstacklib.utils import log; import websockify; log.configure_log('%s'); websockify.websocketproxy.websockify_init()" %s:%s -D --target-config=%s --idle-timeout=%s''' % ( log_file, cmd.proxyHostname, proxyPort, token_file, timeout) logger.debug(proxy_cmd) shell.call(proxy_cmd) shell.call( "iptables-save | grep -- '-A INPUT -p tcp -m tcp --dport %s' > /dev/null || iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT" % (proxyPort, proxyPort)) info = { 'proxyHostname': cmd.proxyHostname, 'proxyPort': cmd.proxyPort, 'targetHostname': cmd.targetHostname, 'targetPort': cmd.targetPort, 'token': cmd.token, 'logFile': log_file, 'tokenFile': token_file } info_str = jsonobject.dumps(info) self.db.set(cmd.token, info_str) rsp.proxyPort = proxyPort logger.debug('successfully establish new proxy%s' % info_str) return jsonobject.dumps(rsp)
class CephAgent(object):
    """HTTP agent driving a Ceph primary storage via `ceph`/`rbd` CLI calls.

    Each *_PATH constant is a URI registered on the class-level HttpServer
    (port 7762); handlers parse the JSON request body with jsonobject and
    answer with a JSON-serialized *Rsp object. Install paths arrive as
    'ceph://pool/image' and are normalized by stripping the scheme.
    """

    INIT_PATH = "/ceph/primarystorage/init"
    CREATE_VOLUME_PATH = "/ceph/primarystorage/volume/createempty"
    DELETE_PATH = "/ceph/primarystorage/delete"
    CLONE_PATH = "/ceph/primarystorage/volume/clone"
    FLATTEN_PATH = "/ceph/primarystorage/volume/flatten"
    SFTP_DOWNLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/download"
    SFTP_UPLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/upload"
    ECHO_PATH = "/ceph/primarystorage/echo"
    CREATE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/create"
    DELETE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/delete"
    PURGE_SNAPSHOT_PATH = "/ceph/primarystorage/volume/purgesnapshots"
    COMMIT_IMAGE_PATH = "/ceph/primarystorage/snapshot/commit"
    PROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/protect"
    ROLLBACK_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/rollback"
    UNPROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/unprotect"
    CHECK_BITS_PATH = "/ceph/primarystorage/snapshot/checkbits"
    CP_PATH = "/ceph/primarystorage/volume/cp"
    DELETE_POOL_PATH = "/ceph/primarystorage/deletepool"
    GET_VOLUME_SIZE_PATH = "/ceph/primarystorage/getvolumesize"
    PING_PATH = "/ceph/primarystorage/ping"
    GET_FACTS = "/ceph/primarystorage/facts"
    DELETE_IMAGE_CACHE = "/ceph/primarystorage/deleteimagecache"
    ADD_POOL_PATH = "/ceph/primarystorage/addpool"
    CHECK_POOL_PATH = "/ceph/primarystorage/checkpool"
    RESIZE_VOLUME_PATH = "/ceph/primarystorage/volume/resize"
    MIGRATE_VOLUME_PATH = "/ceph/primarystorage/volume/migrate"
    MIGRATE_VOLUME_SNAPSHOT_PATH = "/ceph/primarystorage/volume/snapshot/migrate"
    GET_VOLUME_SNAPINFOS_PATH = "/ceph/primarystorage/volume/getsnapinfos"
    UPLOAD_IMAGESTORE_PATH = "/ceph/primarystorage/imagestore/backupstorage/commit"
    DOWNLOAD_IMAGESTORE_PATH = "/ceph/primarystorage/imagestore/backupstorage/download"

    # Shared across instances: one HTTP server per agent process.
    http_server = http.HttpServer(port=7762)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Register every URI handler; ECHO is the only synchronous one."""
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.ADD_POOL_PATH, self.add_pool)
        self.http_server.register_async_uri(self.CHECK_POOL_PATH, self.check_pool)
        self.http_server.register_async_uri(self.DELETE_PATH, self.delete)
        self.http_server.register_async_uri(self.CREATE_VOLUME_PATH, self.create)
        self.http_server.register_async_uri(self.CLONE_PATH, self.clone)
        self.http_server.register_async_uri(self.COMMIT_IMAGE_PATH, self.commit_image)
        self.http_server.register_async_uri(self.CREATE_SNAPSHOT_PATH, self.create_snapshot)
        self.http_server.register_async_uri(self.DELETE_SNAPSHOT_PATH, self.delete_snapshot)
        self.http_server.register_async_uri(self.PURGE_SNAPSHOT_PATH, self.purge_snapshots)
        self.http_server.register_async_uri(self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot)
        self.http_server.register_async_uri(self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot)
        self.http_server.register_async_uri(self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot)
        self.http_server.register_async_uri(self.FLATTEN_PATH, self.flatten)
        self.http_server.register_async_uri(self.SFTP_DOWNLOAD_PATH, self.sftp_download)
        self.http_server.register_async_uri(self.SFTP_UPLOAD_PATH, self.sftp_upload)
        self.http_server.register_async_uri(self.CP_PATH, self.cp)
        self.http_server.register_async_uri(self.UPLOAD_IMAGESTORE_PATH, self.upload_imagestore)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGESTORE_PATH, self.download_imagestore)
        self.http_server.register_async_uri(self.DELETE_POOL_PATH, self.delete_pool)
        self.http_server.register_async_uri(self.GET_VOLUME_SIZE_PATH, self.get_volume_size)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_async_uri(self.DELETE_IMAGE_CACHE, self.delete_image_cache)
        self.http_server.register_async_uri(self.CHECK_BITS_PATH, self.check_bits)
        self.http_server.register_async_uri(self.RESIZE_VOLUME_PATH, self.resize_volume)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_async_uri(self.MIGRATE_VOLUME_PATH, self.migrate_volume)
        self.http_server.register_async_uri(self.MIGRATE_VOLUME_SNAPSHOT_PATH, self.migrate_volume_snapshot)
        self.http_server.register_async_uri(self.GET_VOLUME_SNAPINFOS_PATH, self.get_volume_snapinfos)
        self.imagestore_client = ImageStoreClient()

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity/availableCapacity (bytes) from `ceph df`.

        Handles two JSON schemas: newer output reports *_bytes fields; older
        output reports total_space/total_avail in KiB, hence the * 1024.
        The trailing '__' attribute form is the project's jsonobject
        missing-field probe (presumably returns None when absent — confirm
        against zstacklib.utils.jsonobject).
        """
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)

        if df.stats.total_bytes__ is not None:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__ is not None:
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        if df.stats.total_avail_bytes__ is not None:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__ is not None:
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        rsp.totalCapacity = total
        rsp.availableCapacity = avail

    def _get_file_size(self, path):
        """Return the provisioned size (bytes) of an rbd image at pool/image path."""
        o = shell.call('rbd --format json info %s' % path)
        o = jsonobject.loads(o)
        return long(o.size_)

    def _read_file_content(self, path):
        """Slurp a local file; used to read md5 scratch files under /tmp."""
        with open(path) as f:
            return f.read()

    @replyerror
    @in_bash
    def resize_volume(self, req):
        """Resize an rbd-backed volume with qemu-img and report the new size."""
        rsp = ResizeVolumeRsp()
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        path = self._normalize_install_path(cmd.installPath)

        shell.call("qemu-img resize -f raw rbd:%s/%s %s" % (pool, image_name, cmd.size))
        rsp.size = self._get_file_size(path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    @lock.lock('delete_image_cache')
    def delete_image_cache(self, req):
        """Remove a cached image and its base snapshot.

        No-op if the image is already gone; refuses when the snapshot still
        has children (clones). The {{VAR}} placeholders are expanded by the
        @in_bash helpers from same-named locals.
        """
        rsp = AgentResponse()

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        SP_PATH = self._normalize_install_path(cmd.snapshotPath)
        IMAGE_PATH = self._normalize_install_path(cmd.imagePath)

        if bash_r('rbd info {{IMAGE_PATH}}') != 0:
            # image already deleted; succeed idempotently
            return jsonobject.dumps(rsp)

        o = bash_o('rbd children {{SP_PATH}}')
        o = o.strip(' \t\r\n')
        if o:
            raise Exception('the image cache[%s] is still in used' % cmd.imagePath)

        bash_errorout('rbd snap unprotect {{SP_PATH}}')
        bash_errorout('rbd snap rm {{SP_PATH}}')
        bash_errorout('rbd rm {{IMAGE_PATH}}')

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_facts(self, req):
        """Report the cluster fsid and the mon address local to this host.

        The local mon is identified by matching each mon address against this
        host's routing table (`ip route | grep -w`).
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        o = bash_o('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        rsp = GetFactsRsp()

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        for mon in mon_facts.monmap.mons:
            ADDR = mon.addr.split(':')[0]
            if bash_r('ip route | grep -w {{ADDR}} > /dev/null') == 0:
                rsp.monAddr = ADDR
                break

        if not rsp.monAddr:
            raise Exception('cannot find mon address of the mon server[%s]' % cmd.monUuid)

        rsp.fsid = fsid
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def ping(self, req):
        """Health check: verify the mon address is unchanged, then prove the
        cluster is writable by creating (and removing) a 1 MB test image,
        retrying up to 3 times with 3s sleeps."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        found = False
        for mon in mon_facts.monmap.mons:
            if cmd.monAddr in mon.addr:
                found = True
                break

        rsp = PingRsp()

        if not found:
            rsp.success = False
            rsp.failure = "MonAddrChanged"
            rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \
                        'Reconnect the ceph primary storage' \
                        ' may solve this issue' % (cmd.monUuid, cmd.monAddr)
            return jsonobject.dumps(rsp)

        def retry(times=3, sleep_time=3):
            # Local retry decorator: re-invokes f up to `times` times; if every
            # attempt raises, records the last error on the enclosing rsp.
            def wrap(f):
                @functools.wraps(f)
                def inner(*args, **kwargs):
                    for i in range(0, times):
                        try:
                            return f(*args, **kwargs)
                        except Exception as e:
                            logger.error(e)
                            time.sleep(sleep_time)
                    # NOTE(review): `e` here relies on the except-variable
                    # surviving the loop — valid in Python 2 only; would be a
                    # NameError under Python 3 scoping.
                    rsp.error = ("Still failed after retry. Below is detail:\n %s" % e)
                return inner
            return wrap

        @retry()
        def doPing():
            # try to delete test file, ignore the result
            bash_r('rbd rm %s' % cmd.testImagePath)
            r, o, e = bash_roe('timeout 60 rbd create %s --image-format 2 --size 1' % cmd.testImagePath)

            if r != 0:
                rsp.success = False
                rsp.failure = "UnableToCreateFile"
                if r == 124:
                    # timeout happened
                    rsp.error = 'failed to create temporary file on ceph, timeout after 60s, %s %s' % (e, o)
                    raise Exception(rsp.error)
                else:
                    rsp.error = "%s %s" % (e, o)

        doPing()
        return jsonobject.dumps(rsp)

    @replyerror
    def get_volume_size(self, req):
        """Return the provisioned size of one volume."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)
        rsp = GetVolumeSizeRsp()
        rsp.size = self._get_file_size(path)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_pool(self, req):
        """Delete every named pool (destructive; relies on the double-confirm flag)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        for p in cmd.poolNames:
            shell.call('ceph osd pool delete %s %s --yes-i-really-really-mean-it' % (p, p))
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def rollback_snapshot(self, req):
        """Roll a volume back to a snapshot and report the resulting size."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rollback %s' % spath)
        rsp = RollbackSnapshotRsp()
        rsp.size = self._get_file_size(spath)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def cp(self, req):
        """Full copy of an rbd image (`rbd cp`), reporting the copy's size."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        src_path = self._normalize_install_path(cmd.srcPath)
        dst_path = self._normalize_install_path(cmd.dstPath)
        shell.call('rbd cp %s %s' % (src_path, dst_path))
        rsp = CpRsp()
        rsp.size = self._get_file_size(dst_path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def upload_imagestore(self, req):
        """Delegate image upload to the image-store backup storage client."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        return self.imagestore_client.upload_imagestore(cmd, req)

    @replyerror
    def commit_image(self, req):
        """Protect a snapshot (unless ignoreError suppresses the failure) and
        clone it to the destination path — the CoW 'commit' of a snapshot."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)
        dpath = self._normalize_install_path(cmd.dstPath)

        shell.call('rbd snap protect %s' % spath, exception=not cmd.ignoreError)
        shell.call('rbd clone %s %s' % (spath, dpath))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        rsp.size = self._get_file_size(dpath)
        return jsonobject.dumps(rsp)

    @replyerror
    def download_imagestore(self, req):
        """Delegate image download to the image-store backup storage client."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        return self.imagestore_client.download_imagestore(cmd)

    @replyerror
    def create_snapshot(self, req):
        """Create a snapshot; when skipOnExisting is set, first scan
        `rbd snap ls` and silently skip creation if the name already exists."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        do_create = True
        if cmd.skipOnExisting:
            # snapshot path has the form image@snapname
            image_name, sp_name = spath.split('@')
            o = shell.call('rbd --format json snap ls %s' % image_name)
            o = jsonobject.loads(o)
            for s in o:
                if s.name_ == sp_name:
                    do_create = False

        if do_create:
            shell.call('rbd snap create %s' % spath)

        rsp = CreateSnapshotRsp()
        rsp.size = self._get_file_size(spath)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_snapshot(self, req):
        """Delete one snapshot."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rm %s' % spath)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def purge_snapshots(self, req):
        """Delete all snapshots of a volume (`rbd snap purge`)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        vpath = self._normalize_install_path(cmd.volumePath)
        shell.call('rbd snap purge %s' % vpath)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def unprotect_snapshot(self, req):
        """Unprotect a snapshot so it can be removed."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap unprotect %s' % spath)
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def protect_snapshot(self, req):
        """Protect a snapshot against deletion; failure tolerated when
        cmd.ignoreError is set (exception= flag on shell.call)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap protect %s' % spath, exception=not cmd.ignoreError)

        rsp = AgentResponse()
        return jsonobject.dumps(rsp)

    @replyerror
    def check_bits(self, req):
        """Report whether an rbd image exists, distinguishing 'not found'
        from other `rbd info` failures."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)
        rsp = CheckIsBitsExistingRsp()
        try:
            shell.call('rbd info %s' % path)
        except Exception as e:
            if 'No such file or directory' in str(e):
                rsp.existing = False
                return jsonobject.dumps(rsp)
            else:
                # NOTE(review): `raise e` rather than bare `raise` loses the
                # original traceback in Python 2.
                raise e
        rsp.existing = True
        return jsonobject.dumps(rsp)

    @replyerror
    def clone(self, req):
        """CoW-clone a (protected) snapshot to a new image."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        src_path = self._normalize_install_path(cmd.srcPath)
        dst_path = self._normalize_install_path(cmd.dstPath)

        shell.call('rbd clone %s %s' % (src_path, dst_path))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def flatten(self, req):
        """Detach a cloned image from its parent by copying all data in."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.path)

        shell.call('rbd flatten %s' % path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def echo(self, req):
        """Liveness probe; returns an empty body."""
        logger.debug('get echoed')
        return ''

    @replyerror
    def add_pool(self, req):
        """Create or validate a pool.

        isCreate=True requires the pool to NOT exist yet; isCreate=False
        requires it to already exist. The pool name arrives with escaped
        unicode and is decoded to UTF-8 bytes for comparison with
        `ceph osd pool ls` output.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        existing_pools = shell.call('ceph osd pool ls')
        pool_names = existing_pools.split("\n")
        # SECURITY(review): eval() on a request-supplied pool name executes
        # arbitrary expressions if the name is attacker-controlled; consider
        # a safe unicode-escape decode instead.
        realname = eval('u"' + cmd.poolName + '"').encode('utf-8')
        if not cmd.isCreate and realname not in pool_names:
            raise Exception('cannot find the pool[%s] in the ceph cluster, you must create it manually' % realname)
        if cmd.isCreate and realname in pool_names:
            raise Exception('have pool named[%s] in the ceph cluster, can\'t create new pool with same name' % realname)
        if realname not in pool_names:
            # 100 is the placement-group count used for all agent-created pools
            shell.call('ceph osd pool create %s 100' % realname)
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def check_pool(self, req):
        """Verify all requested pools already exist in the cluster."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.name not in existing_pools:
                raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % pool.name)
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def init(self, req):
        """Initialize the primary storage: ensure pools exist (predefined ones
        must already be there; others are created), optionally provision the
        client.zstack cephx key, and report fsid plus capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 100' % pool.name)

        rsp = InitRsp()

        if cmd.nocephx is False:
            # cephx enabled: fetch (or create) the zstack client key
            o = shell.call("ceph -f json auth get-or-create client.zstack mon 'allow r' osd 'allow *' 2>/dev/null").strip(
                ' \n\r\t')
            o = jsonobject.loads(o)
            rsp.userKey = o[0].key_

        rsp.fsid = fsid
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    def _normalize_install_path(self, path):
        """Strip the 'ceph://' scheme, leaving 'pool/image[@snap]'."""
        return path.replace('ceph://', '')

    def _parse_install_path(self, path):
        """Split a normalized install path into [pool, image] components."""
        return self._normalize_install_path(path).split('/')

    @replyerror
    def create(self, req):
        """Create an empty format-2 rbd image; +1 MB guards against rounding
        the requested byte size down."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)
        size_M = sizeunit.Byte.toMegaByte(cmd.size) + 1
        call_string = 'rbd create --size %s --image-format 2 %s ' % (size_M, path)

        if cmd.shareable:
            call_string = call_string + " --image-shared"

        shell.call(call_string)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def sftp_upload(self, req):
        """Stream an rbd export over ssh into a file on sftp backup storage,
        creating the remote directory first; the temp private-key file is
        always removed."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        src_path = self._normalize_install_path(cmd.primaryStorageInstallPath)
        prikey_file = linux.write_to_temp_file(cmd.sshKey)

        bs_folder = os.path.dirname(cmd.backupStorageInstallPath)
        shell.call('ssh -p %d -o StrictHostKeyChecking=no -i %s root@%s "mkdir -p %s"' %
                   (cmd.sshPort, prikey_file, cmd.hostname, bs_folder))

        try:
            shell.call("set -o pipefail; rbd export %s - | ssh -o StrictHostKeyChecking=no -i %s root@%s 'cat > %s'" %
                       (src_path, prikey_file, cmd.hostname, cmd.backupStorageInstallPath))
        finally:
            os.remove(prikey_file)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    @rollback
    def sftp_download(self, req):
        """Import an image from sftp backup storage into ceph.

        Streams the remote file into a temporary rbd image; on any later
        failure the @rollbackable hooks delete the temp image. qcow2 payloads
        are converted to raw-on-rbd via qemu-img (with a temp ceph.conf forcing
        'rbd default format = 2'); raw payloads are just renamed into place.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        hostname = cmd.hostname
        prikey = cmd.sshKey
        port = cmd.sshPort

        pool, image_name = self._parse_install_path(cmd.primaryStorageInstallPath)
        tmp_image_name = 'tmp-%s' % image_name

        prikey_file = linux.write_to_temp_file(prikey)

        @rollbackable
        def _0():
            # clean up a pre-existing temp image left by an earlier failure
            tpath = "%s/%s" % (pool, tmp_image_name)
            shell.call('rbd info %s > /dev/null && rbd rm %s' % (tpath, tpath))
        _0()

        try:
            shell.call(
                'set -o pipefail; ssh -p %d -o StrictHostKeyChecking=no -i %s root@%s "cat %s" | rbd import --image-format 2 - %s/%s' %
                (port, prikey_file, hostname, cmd.backupStorageInstallPath, pool, tmp_image_name))
        finally:
            os.remove(prikey_file)

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
        _1()

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2" % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call('qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s' % (
                    pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.call('rbd mv %s/%s %s/%s' % (pool, tmp_image_name, pool, image_name))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """Delete a volume.

        Succeeds (idempotently) when the image is already gone; refuses when
        snapshots remain; the actual `rbd rm` is retried because watchers may
        briefly pin the image.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        try:
            o = shell.call('rbd snap ls --format json %s' % path)
        except Exception as e:
            if 'No such file or directory' not in str(e):
                raise
            logger.warn('delete %s;encounter %s' % (cmd.installPath, str(e)))
            return jsonobject.dumps(rsp)

        o = jsonobject.loads(o)
        if len(o) > 0:
            raise Exception('unable to delete %s; the volume still has snapshots' % cmd.installPath)

        @linux.retry(times=30, sleep_time=5)
        def do_deletion():
            shell.call('rbd rm %s' % path)
        do_deletion()

        return jsonobject.dumps(rsp)

    def _migrate_volume(self, volume_uuid, volume_size, src_install_path, dst_install_path, dst_mon_addr,
                        dst_mon_user, dst_mon_passwd, dst_mon_port):
        """Pipe an `rbd export` of one volume over ssh into `rbd import` on the
        destination cluster, computing md5 on both ends (tee into md5sum files
        under /tmp keyed by volume uuid) and comparing them afterwards.

        Returns the shell exit code of the transfer, -1 on checksum mismatch,
        0 on success.
        """
        src_install_path = self._normalize_install_path(src_install_path)
        dst_install_path = self._normalize_install_path(dst_install_path)

        ret = shell.run('rbd export %s - | tee >(md5sum >/tmp/%s_src_md5) | pv -n -s %s 2>/tmp/%s_progress | sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'tee >(md5sum >/tmp/%s_dst_md5) | rbd import - %s\'' % (
            src_install_path, volume_uuid, volume_size, volume_uuid, dst_mon_passwd, dst_mon_user, dst_mon_addr,
            dst_mon_port, volume_uuid, dst_install_path))
        if ret != 0:
            return ret

        src_md5 = self._read_file_content('/tmp/%s_src_md5' % volume_uuid)
        dst_md5 = shell.call('sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'cat /tmp/%s_dst_md5\'' % (
            dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port, volume_uuid))
        if src_md5 != dst_md5:
            return -1
        else:
            return 0

    @replyerror
    @in_bash
    def migrate_volume(self, req):
        """HTTP handler wrapping _migrate_volume; any nonzero result becomes a
        failed response with a generic error message."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        ret = self._migrate_volume(cmd.volumeUuid, cmd.volumeSize, cmd.srcInstallPath, cmd.dstInstallPath,
                                   cmd.dstMonHostname, cmd.dstMonSshUsername, cmd.dstMonSshPassword,
                                   cmd.dstMonSshPort)
        if ret != 0:
            rsp.success = False
            rsp.error = "Failed to migrate volume from one ceph primary storage to another."

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    def _migrate_volume_snapshot(self, parent_uuid, snapshot_uuid, snapshot_size, src_snapshot_path,
                                 dst_install_path, dst_mon_addr, dst_mon_user, dst_mon_passwd, dst_mon_port):
        """Like _migrate_volume, but transfers a snapshot diff.

        Empty parent_uuid means a full export-diff; otherwise only the delta
        from the parent snapshot (--from-snap) is sent. Same md5-compare
        protocol and return convention as _migrate_volume.
        """
        src_snapshot_path = self._normalize_install_path(src_snapshot_path)
        dst_install_path = self._normalize_install_path(dst_install_path)

        if parent_uuid == "":
            ret = shell.run('rbd export-diff %s - | tee >(md5sum >/tmp/%s_src_md5) | pv -n -s %s 2>/tmp/%s_progress | sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'tee >(md5sum >/tmp/%s_dst_md5) | rbd import-diff - %s\'' % (
                src_snapshot_path, snapshot_uuid, snapshot_size, snapshot_uuid, dst_mon_passwd, dst_mon_user,
                dst_mon_addr, dst_mon_port, snapshot_uuid, dst_install_path))
        else:
            ret = shell.run('rbd export-diff --from-snap %s %s - | tee >(md5sum >/tmp/%s_src_md5) | pv -n -s %s 2>/tmp/%s_progress | sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'tee >(md5sum >/tmp/%s_dst_md5) | rbd import-diff - %s\'' % (
                parent_uuid, src_snapshot_path, snapshot_uuid, snapshot_size, snapshot_uuid, dst_mon_passwd,
                dst_mon_user, dst_mon_addr, dst_mon_port, snapshot_uuid, dst_install_path))
        if ret != 0:
            return ret

        src_md5 = self._read_file_content('/tmp/%s_src_md5' % snapshot_uuid)
        dst_md5 = shell.call('sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'cat /tmp/%s_dst_md5\'' % (
            dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port, snapshot_uuid))
        if src_md5 != dst_md5:
            return -1
        else:
            return 0

    @replyerror
    @in_bash
    def migrate_volume_snapshot(self, req):
        """HTTP handler wrapping _migrate_volume_snapshot; mirrors migrate_volume."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        ret = self._migrate_volume_snapshot(cmd.parentUuid, cmd.snapshotUuid, cmd.snapshotSize,
                                            cmd.srcSnapshotPath, cmd.dstInstallPath, cmd.dstMonHostname,
                                            cmd.dstMonSshUsername, cmd.dstMonSshPassword, cmd.dstMonSshPort)
        if ret != 0:
            rsp.success = False
            rsp.error = "Failed to migrate volume snapshot from one ceph primary storage to another."

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_volume_snapinfos(self, req):
        """Return the JSON snapshot list of a volume plus current capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        vpath = self._normalize_install_path(cmd.volumePath)
        ret = shell.call('rbd --format=json snap ls %s' % vpath)
        rsp = GetVolumeSnapInfosRsp()
        rsp.snapInfos = jsonobject.loads(ret)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
# NOTE(review): this is a second, older declaration of CephAgent — it lacks the
# purge/checkbits/resize/migrate/imagestore handlers of the class above. If both
# live in the same module, this later definition shadows the earlier one;
# presumably the SOURCE concatenates two versions of the file — confirm intent.
class CephAgent(object):
    """HTTP agent driving a Ceph primary storage via `ceph`/`rbd` CLI calls
    (older variant with a smaller handler set)."""

    INIT_PATH = "/ceph/primarystorage/init"
    CREATE_VOLUME_PATH = "/ceph/primarystorage/volume/createempty"
    DELETE_PATH = "/ceph/primarystorage/delete"
    CLONE_PATH = "/ceph/primarystorage/volume/clone"
    FLATTEN_PATH = "/ceph/primarystorage/volume/flatten"
    SFTP_DOWNLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/download"
    SFTP_UPLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/upload"
    ECHO_PATH = "/ceph/primarystorage/echo"
    CREATE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/create"
    DELETE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/delete"
    COMMIT_IMAGE_PATH = "/ceph/primarystorage/snapshot/commit"
    PROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/protect"
    ROLLBACK_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/rollback"
    UNPROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/unprotect"
    CP_PATH = "/ceph/primarystorage/volume/cp"
    DELETE_POOL_PATH = "/ceph/primarystorage/deletepool"
    GET_VOLUME_SIZE_PATH = "/ceph/primarystorage/getvolumesize"
    PING_PATH = "/ceph/primarystorage/ping"
    GET_FACTS = "/ceph/primarystorage/facts"
    DELETE_IMAGE_CACHE = "/ceph/primarystorage/deleteimagecache"

    # Shared across instances: one HTTP server per agent process.
    http_server = http.HttpServer(port=7762)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Register every URI handler; ECHO is the only synchronous one."""
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DELETE_PATH, self.delete)
        self.http_server.register_async_uri(self.CREATE_VOLUME_PATH, self.create)
        self.http_server.register_async_uri(self.CLONE_PATH, self.clone)
        self.http_server.register_async_uri(self.COMMIT_IMAGE_PATH, self.commit_image)
        self.http_server.register_async_uri(self.CREATE_SNAPSHOT_PATH, self.create_snapshot)
        self.http_server.register_async_uri(self.DELETE_SNAPSHOT_PATH, self.delete_snapshot)
        self.http_server.register_async_uri(self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot)
        self.http_server.register_async_uri(self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot)
        self.http_server.register_async_uri(self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot)
        self.http_server.register_async_uri(self.FLATTEN_PATH, self.flatten)
        self.http_server.register_async_uri(self.SFTP_DOWNLOAD_PATH, self.sftp_download)
        self.http_server.register_async_uri(self.SFTP_UPLOAD_PATH, self.sftp_upload)
        self.http_server.register_async_uri(self.CP_PATH, self.cp)
        self.http_server.register_async_uri(self.DELETE_POOL_PATH, self.delete_pool)
        self.http_server.register_async_uri(self.GET_VOLUME_SIZE_PATH, self.get_volume_size)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_async_uri(self.DELETE_IMAGE_CACHE, self.delete_image_cache)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity/availableCapacity (bytes) from `ceph df`;
        handles both the *_bytes and the older KiB-based JSON schemas."""
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)

        if df.stats.total_bytes__ is not None:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__ is not None:
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        if df.stats.total_avail_bytes__ is not None:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__ is not None:
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        rsp.totalCapacity = total
        rsp.availableCapacity = avail

    def _get_file_size(self, path):
        """Return the provisioned size (bytes) of an rbd image."""
        o = shell.call('rbd --format json info %s' % path)
        o = jsonobject.loads(o)
        return long(o.size_)

    @replyerror
    @in_bash
    @lock.lock('delete_image_cache')
    def delete_image_cache(self, req):
        """Remove a cached image and its base snapshot; no-op when already
        gone, refused while clone children still exist."""
        rsp = AgentResponse()

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        SP_PATH = self._normalize_install_path(cmd.snapshotPath)
        IMAGE_PATH = self._normalize_install_path(cmd.imagePath)

        if bash_r('rbd info {{IMAGE_PATH}}') != 0:
            return jsonobject.dumps(rsp)

        o = bash_o('rbd children {{SP_PATH}}')
        o = o.strip(' \t\r\n')
        if o:
            raise Exception('the image cache[%s] is still in used' % cmd.imagePath)

        bash_errorout('rbd snap unprotect {{SP_PATH}}')
        bash_errorout('rbd snap rm {{SP_PATH}}')
        bash_errorout('rbd rm {{IMAGE_PATH}}')

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_facts(self, req):
        """Report the cluster fsid and the mon address local to this host
        (matched against the host routing table)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        o = bash_o('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        rsp = GetFactsRsp()

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        for mon in mon_facts.monmap.mons:
            ADDR = mon.addr.split(':')[0]
            if bash_r('ip route | grep -w {{ADDR}} > /dev/null') == 0:
                rsp.monAddr = ADDR
                break

        if not rsp.monAddr:
            raise Exception('cannot find mon address of the mon server[%s]' % cmd.monUuid)

        rsp.fsid = fsid
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def ping(self, req):
        """Health check: verify the mon address is unchanged, then prove the
        cluster is writable by creating a 1 MB test image (single attempt —
        the newer class variant adds retries)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        found = False
        for mon in mon_facts.monmap.mons:
            if cmd.monAddr in mon.addr:
                found = True
                break

        rsp = PingRsp()

        if not found:
            rsp.success = False
            rsp.failure = "MonAddrChanged"
            rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \
                        'Reconnect the ceph primary storage' \
                        ' may solve this issue' % (cmd.monUuid, cmd.monAddr)
            return jsonobject.dumps(rsp)

        r, o, e = bash_roe(
            'timeout 60 rbd create %s --image-format 2 --size 1' % cmd.testImagePath)
        if r != 0:
            rsp.success = False
            rsp.failure = "UnableToCreateFile"
            if r == 124:
                # timeout happened
                rsp.error = 'failed to create temporary file on ceph, timeout after 60s, %s %s' % (e, o)
            else:
                rsp.error = "%s %s" % (e, o)
        else:
            # clean up the probe image; result deliberately ignored
            bash_r('rbd rm %s' % cmd.testImagePath)

        return jsonobject.dumps(rsp)

    @replyerror
    def get_volume_size(self, req):
        """Return the provisioned size of one volume."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)
        rsp = GetVolumeSizeRsp()
        rsp.size = self._get_file_size(path)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_pool(self, req):
        """Delete every named pool (destructive; double-confirm flag supplied)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        for p in cmd.poolNames:
            shell.call(
                'ceph osd pool delete %s %s --yes-i-really-really-mean-it' % (p, p))
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def rollback_snapshot(self, req):
        """Roll a volume back to a snapshot."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rollback %s' % spath)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def cp(self, req):
        """Full copy of an rbd image, reporting the copy's size."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        src_path = self._normalize_install_path(cmd.srcPath)
        dst_path = self._normalize_install_path(cmd.dstPath)
        shell.call('rbd cp %s %s' % (src_path, dst_path))
        rsp = CpRsp()
        rsp.size = self._get_file_size(dst_path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def commit_image(self, req):
        """Protect a snapshot (failure tolerated via ignoreError) and clone it
        to the destination path."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)
        dpath = self._normalize_install_path(cmd.dstPath)

        shell.call('rbd snap protect %s' % spath, exception=not cmd.ignoreError)
        shell.call('rbd clone %s %s' % (spath, dpath))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        rsp.size = self._get_file_size(dpath)
        return jsonobject.dumps(rsp)

    @replyerror
    def create_snapshot(self, req):
        """Create a snapshot; skipOnExisting makes the call idempotent by
        scanning `rbd snap ls` first."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        do_create = True
        if cmd.skipOnExisting:
            # snapshot path has the form image@snapname
            image_name, sp_name = spath.split('@')
            o = shell.call('rbd --format json snap ls %s' % image_name)
            o = jsonobject.loads(o)
            for s in o:
                if s.name_ == sp_name:
                    do_create = False

        if do_create:
            shell.call('rbd snap create %s' % spath)

        rsp = CreateSnapshotRsp()
        rsp.size = self._get_file_size(spath)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_snapshot(self, req):
        """Delete one snapshot."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rm %s' % spath)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def unprotect_snapshot(self, req):
        """Unprotect a snapshot so it can be removed."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap unprotect %s' % spath)
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def protect_snapshot(self, req):
        """Protect a snapshot against deletion; failure tolerated when
        cmd.ignoreError is set."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap protect %s' % spath, exception=not cmd.ignoreError)

        rsp = AgentResponse()
        return jsonobject.dumps(rsp)

    @replyerror
    def clone(self, req):
        """CoW-clone a (protected) snapshot to a new image."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        src_path = self._normalize_install_path(cmd.srcPath)
        dst_path = self._normalize_install_path(cmd.dstPath)

        shell.call('rbd clone %s %s' % (src_path, dst_path))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def flatten(self, req):
        """Detach a cloned image from its parent."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.path)

        shell.call('rbd flatten %s' % path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def echo(self, req):
        """Liveness probe; returns an empty body."""
        logger.debug('get echoed')
        return ''

    @replyerror
    def init(self, req):
        """Initialize the primary storage: ensure pools exist, always provision
        the client.zstack cephx key (no nocephx switch in this variant), and
        report fsid plus capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception(
                    'cannot find pool[%s] in the ceph cluster, you must create it manually' % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 100' % pool.name)

        o = shell.call(
            "ceph -f json auth get-or-create client.zstack mon 'allow r' osd 'allow *' 2>/dev/null"
        ).strip(' \n\r\t')
        o = jsonobject.loads(o)

        rsp = InitRsp()
        rsp.fsid = fsid
        rsp.userKey = o[0].key_
        self._set_capacity_to_response(rsp)

        return jsonobject.dumps(rsp)

    def _normalize_install_path(self, path):
        """Strip the 'ceph://' scheme, leaving 'pool/image[@snap]'."""
        return path.replace('ceph://', '')

    def _parse_install_path(self, path):
        """Split a normalized install path into [pool, image] components."""
        return self._normalize_install_path(path).split('/')

    @replyerror
    def create(self, req):
        """Create an empty format-2 rbd image; +1 MB guards against rounding
        the requested byte size down."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)
        size_M = sizeunit.Byte.toMegaByte(cmd.size) + 1

        shell.call('rbd create --size %s --image-format 2 %s' % (size_M, path))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def sftp_upload(self, req):
        """Stream an rbd export over ssh into a file on sftp backup storage;
        the temp private-key file is always removed."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        src_path = self._normalize_install_path(cmd.primaryStorageInstallPath)
        prikey_file = linux.write_to_temp_file(cmd.sshKey)

        bs_folder = os.path.dirname(cmd.backupStorageInstallPath)
        shell.call(
            'ssh -p %d -o StrictHostKeyChecking=no -i %s root@%s "mkdir -p %s"' %
            (cmd.sshPort, prikey_file, cmd.hostname, bs_folder))

        try:
            shell.call(
                "set -o pipefail; rbd export %s - | ssh -o StrictHostKeyChecking=no -i %s root@%s 'cat > %s'" %
                (src_path, prikey_file, cmd.hostname, cmd.backupStorageInstallPath))
        finally:
            os.remove(prikey_file)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    @rollback
    def sftp_download(self, req):
        """Import an image from sftp backup storage into ceph via a temporary
        rbd image; qcow2 payloads are converted to raw-on-rbd with qemu-img,
        raw payloads are renamed into place. @rollbackable hooks delete the
        temp image on later failure."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        hostname = cmd.hostname
        prikey = cmd.sshKey
        port = cmd.sshPort

        pool, image_name = self._parse_install_path(
            cmd.primaryStorageInstallPath)
        tmp_image_name = 'tmp-%s' % image_name

        prikey_file = linux.write_to_temp_file(prikey)

        @rollbackable
        def _0():
            # clean up a pre-existing temp image left by an earlier failure
            tpath = "%s/%s" % (pool, tmp_image_name)
            shell.call('rbd info %s > /dev/null && rbd rm %s' % (tpath, tpath))
        _0()

        try:
            shell.call(
                'set -o pipefail; ssh -p %d -o StrictHostKeyChecking=no -i %s root@%s "cat %s" | rbd import --image-format 2 - %s/%s' %
                (port, prikey_file, hostname, cmd.backupStorageInstallPath, pool, tmp_image_name))
        finally:
            os.remove(prikey_file)

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
        _1()

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2" % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s' %
                    (pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.call('rbd mv %s/%s %s/%s' % (pool, tmp_image_name, pool, image_name))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """Delete a volume; refuses while snapshots remain. Unlike the newer
        variant above, a missing image raises instead of succeeding, and
        `rbd rm` is not retried."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)

        o = shell.call('rbd snap ls --format json %s' % path)
        o = jsonobject.loads(o)
        if len(o) > 0:
            raise Exception(
                'unable to delete %s; the volume still has snapshots' % cmd.installPath)

        shell.call('rbd rm %s' % path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
class PxeServerAgent(object):
    """Agent running on a baremetal PXE server host.

    Exposes an HTTP API (one URI per operation, see the *_PATH constants)
    to initialize/start/stop the PXE stack (dnsmasq + vsftpd + noVNC
    websockify + nginx) and to manage per-instance boot configuration.
    """

    AGENT_PORT = 7770
    NGINX_MN_PROXY_PORT = 7771
    NGINX_TERMINAL_PROXY_PORT = 7772
    WEBSOCKIFY_PORT = 6080

    ECHO_PATH = "/baremetal/pxeserver/echo"
    INIT_PATH = "/baremetal/pxeserver/init"
    PING_PATH = "/baremetal/pxeserver/ping"
    CONNECT_PATH = '/baremetal/pxeserver/connect'
    START_PATH = "/baremetal/pxeserver/start"
    STOP_PATH = "/baremetal/pxeserver/stop"
    CREATE_BM_CONFIGS_PATH = "/baremetal/pxeserver/createbmconfigs"
    DELETE_BM_CONFIGS_PATH = "/baremetal/pxeserver/deletebmconfigs"
    CREATE_BM_NGINX_PROXY_PATH = "/baremetal/pxeserver/createbmnginxproxy"
    DELETE_BM_NGINX_PROXY_PATH = "/baremetal/pxeserver/deletebmnginxproxy"
    CREATE_BM_NOVNC_PROXY_PATH = "/baremetal/pxeserver/createbmnovncproxy"
    DELETE_BM_NOVNC_PROXY_PATH = "/baremetal/pxeserver/deletebmnovncproxy"
    CREATE_BM_DHCP_CONFIG_PATH = "/baremetal/pxeserver/createdhcpconfig"
    DELETE_BM_DHCP_CONFIG_PATH = "/baremetal/pxeserver/deletedhcpconfig"
    DOWNLOAD_FROM_IMAGESTORE_PATH = "/baremetal/pxeserver/imagestore/download"
    DOWNLOAD_FROM_CEPHB_PATH = "/baremetal/pxeserver/cephb/download"
    DELETE_BM_IMAGE_CACHE_PATH = "/baremetal/pxeserver/deletecache"
    MOUNT_BM_IMAGE_CACHE_PATH = "/baremetal/pxeserver/mountcache"

    # one shared http server for all instances (created at class-definition time)
    http_server = http.HttpServer(port=AGENT_PORT)
    http_server.logfile_path = log.get_logfile_path()

    BAREMETAL_LIB_PATH = "/var/lib/zstack/baremetal/"
    BAREMETAL_LOG_PATH = "/var/log/zstack/baremetal/"
    DNSMASQ_CONF_PATH = BAREMETAL_LIB_PATH + "dnsmasq/dnsmasq.conf"
    DHCP_HOSTS_DIR = BAREMETAL_LIB_PATH + "dnsmasq/hosts"
    DNSMASQ_LOG_PATH = BAREMETAL_LOG_PATH + "dnsmasq.log"
    TFTPBOOT_PATH = BAREMETAL_LIB_PATH + "tftpboot/"
    VSFTPD_CONF_PATH = BAREMETAL_LIB_PATH + "vsftpd/vsftpd.conf"
    VSFTPD_ROOT_PATH = BAREMETAL_LIB_PATH + "ftp/"
    VSFTPD_LOG_PATH = BAREMETAL_LOG_PATH + "vsftpd.log"
    PXELINUX_CFG_PATH = TFTPBOOT_PATH + "pxelinux.cfg/"
    PXELINUX_DEFAULT_CFG = PXELINUX_CFG_PATH + "default"
    # we use `KS_CFG_PATH` to hold kickstart/preseed/autoyast preconfiguration files
    KS_CFG_PATH = VSFTPD_ROOT_PATH + "ks/"
    INSPECTOR_KS_CFG = KS_CFG_PATH + "inspector_ks.cfg"
    ZSTACK_SCRIPTS_PATH = VSFTPD_ROOT_PATH + "scripts/"
    NGINX_MN_PROXY_CONF_PATH = "/etc/nginx/conf.d/pxe_mn/"
    NGINX_TERMINAL_PROXY_CONF_PATH = "/etc/nginx/conf.d/terminal/"
    NOVNC_INSTALL_PATH = BAREMETAL_LIB_PATH + "noVNC/"
    NOVNC_TOKEN_PATH = NOVNC_INSTALL_PATH + "tokens/"
    NMAP_BROADCAST_DHCP_DISCOVER_PATH = "/usr/share/nmap/scripts/broadcast-dhcp-discover.nse"

    def __init__(self):
        # uuid/storage_path/dhcp_interface are filled in by init()/connect()
        self.uuid = None
        self.storage_path = None
        self.dhcp_interface = None
        # wire every API path to its handler
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_sync_uri(self.CONNECT_PATH, self.connect)
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.START_PATH, self.start)
        self.http_server.register_async_uri(self.STOP_PATH, self.stop)
        self.http_server.register_async_uri(self.CREATE_BM_CONFIGS_PATH, self.create_bm_configs)
        self.http_server.register_async_uri(self.DELETE_BM_CONFIGS_PATH, self.delete_bm_configs)
        self.http_server.register_async_uri(self.CREATE_BM_NGINX_PROXY_PATH, self.create_bm_nginx_proxy)
        self.http_server.register_async_uri(self.DELETE_BM_NGINX_PROXY_PATH, self.delete_bm_nginx_proxy)
        self.http_server.register_async_uri(self.CREATE_BM_NOVNC_PROXY_PATH, self.create_bm_novnc_proxy)
        self.http_server.register_async_uri(self.DELETE_BM_NOVNC_PROXY_PATH, self.delete_bm_novnc_proxy)
        self.http_server.register_async_uri(self.CREATE_BM_DHCP_CONFIG_PATH, self.create_bm_dhcp_config)
        self.http_server.register_async_uri(self.DELETE_BM_DHCP_CONFIG_PATH, self.delete_bm_dhcp_config)
        self.http_server.register_async_uri(self.DOWNLOAD_FROM_IMAGESTORE_PATH, self.download_imagestore)
        self.http_server.register_async_uri(self.DOWNLOAD_FROM_CEPHB_PATH, self.download_cephb)
        self.http_server.register_async_uri(self.DELETE_BM_IMAGE_CACHE_PATH, self.delete_bm_image_cache)
        self.http_server.register_async_uri(self.MOUNT_BM_IMAGE_CACHE_PATH, self.mount_bm_image_cache)
        self.imagestore_client = ImageStoreClient()

    def _set_capacity_to_response(self, rsp):
        # Fill the common capacity fields on an agent response.
        total, avail = self._get_capacity()
        rsp.totalCapacity = total
        rsp.availableCapacity = avail

    def _get_capacity(self):
        # Return (total, available) bytes of the image storage path.
        total = linux.get_total_disk_size(self.storage_path)
        used = linux.get_used_disk_size(self.storage_path)
        return total, total - used

    def _start_pxe_server(self):
        """Start dnsmasq, vsftpd, noVNC websockify and nginx (idempotent).

        Each `ps | grep ... || start` pattern only launches the service if a
        matching process is not already running.

        Raises:
            PxeServerError: if any of the services fails to start.
        """
        ret, _, err = bash_roe("ps -ef | grep -v 'grep' | grep 'dnsmasq -C {0}' || dnsmasq -C {0} -u root".format(self.DNSMASQ_CONF_PATH))
        if ret != 0:
            raise PxeServerError("failed to start dnsmasq on baremetal pxeserver[uuid:%s]: %s" % (self.uuid, err))

        ret, _, err = bash_roe("ps -ef | grep -v 'grep' | grep 'vsftpd {0}' || vsftpd {0}".format(self.VSFTPD_CONF_PATH))
        if ret != 0:
            raise PxeServerError("failed to start vsftpd on baremetal pxeserver[uuid:%s]: %s" % (self.uuid, err))

        # FIX: use WEBSOCKIFY_PORT instead of a hard-coded 6080 so the listen
        # port cannot silently diverge from the class constant
        ret, _, err = bash_roe("ps -ef | grep -v 'grep' | grep 'websockify' | grep 'baremetal' || "
                               "python %s/utils/websockify/run --web %s --token-plugin TokenFile --token-source=%s -D %d" % (self.NOVNC_INSTALL_PATH, self.NOVNC_INSTALL_PATH, self.NOVNC_TOKEN_PATH, self.WEBSOCKIFY_PORT))
        if ret != 0:
            raise PxeServerError("failed to start noVNC on baremetal pxeserver[uuid:%s]: %s" % (self.uuid, err))

        # in case nginx config is updated during nginx running
        ret, _, err = bash_roe("systemctl start nginx && systemctl reload nginx")
        if ret != 0:
            raise PxeServerError("failed to start nginx on baremetal pxeserver[uuid:%s]: %s" % (self.uuid, err))

    # we do not stop nginx on pxeserver because it may be needed by bm with terminal proxy
    # stop pxeserver means stop dnsmasq actually
    def _stop_pxe_server(self):
        # Best-effort kills; bash_r return codes are deliberately ignored.
        bash_r("kill -9 `ps -ef | grep -v grep | grep 'vsftpd %s' | awk '{ print $2 }'`" % self.VSFTPD_CONF_PATH)
        bash_r("kill -9 `ps -ef | grep -v grep | grep websockify | grep baremetal | awk '{ print $2 }'`")
        bash_r("kill -9 `ps -ef | grep -v grep | grep 'dnsmasq -C %s' | awk '{ print $2 }'`" % self.DNSMASQ_CONF_PATH)
        bash_r("systemctl stop nginx")
    @staticmethod
    def _get_mac_address(ifname):
        # Query the MAC of `ifname` via ioctl 0x8927 (SIOCGIFHWADDR) and
        # render it as aa:bb:cc:dd:ee:ff.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', ifname[:15]))
        return ':'.join(['%02x' % ord(char) for char in info[18:24]])

    @staticmethod
    def _get_ip_address(ifname):
        # Query the IPv4 address of `ifname` via ioctl.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        return socket.inet_ntoa(fcntl.ioctl(
            s.fileno(),
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', ifname[:15])
        )[20:24])

    @staticmethod
    def _is_belong_to_same_subnet(addr1, addr2, netmask):
        # True when addr1 falls inside the subnet addr2/netmask.
        return IPAddress(addr1) in IPNetwork("%s/%s" % (addr2, netmask))

    @reply_error
    def echo(self, req):
        # Liveness probe; returns an empty body.
        logger.debug('get echoed')
        return ''

    @reply_error
    def init(self, req):
        """Initialize the PXE server: validate the DHCP range, then write
        dnsmasq/vsftpd/pxelinux/nginx configuration, install noVNC and
        (re)start all PXE services."""
        cmd = json_object.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        self.uuid = cmd.uuid
        self.storage_path = cmd.storagePath

        # check dhcp interface and dhcp range
        pxeserver_dhcp_nic_ip = self._get_ip_address(cmd.dhcpInterface).strip()
        pxeserver_dhcp_nic_nm = linux.get_netmask_of_nic(cmd.dhcpInterface).strip()
        if not self._is_belong_to_same_subnet(cmd.dhcpRangeBegin, pxeserver_dhcp_nic_ip, pxeserver_dhcp_nic_nm) or \
                not self._is_belong_to_same_subnet(cmd.dhcpRangeEnd, pxeserver_dhcp_nic_ip, pxeserver_dhcp_nic_nm):
            raise PxeServerError("%s ~ %s cannot connect to dhcp interface %s" % (cmd.dhcpRangeBegin, cmd.dhcpRangeEnd, cmd.dhcpInterface))

        # get pxe server capacity
        self._set_capacity_to_response(rsp)

        # init dnsmasq.conf
        dhcp_conf = """interface={DHCP_INTERFACE}
port=0
bind-interfaces
dhcp-boot=pxelinux.0
enable-tftp
tftp-root={TFTPBOOT_PATH}
log-facility={DNSMASQ_LOG_PATH}
dhcp-range={DHCP_RANGE}
dhcp-option=1,{DHCP_NETMASK}
dhcp-option=6,223.5.5.5,8.8.8.8
dhcp-hostsdir={DHCP_HOSTS_DIR}
""".format(DHCP_INTERFACE=cmd.dhcpInterface,
           DHCP_RANGE="%s,%s,%s" % (cmd.dhcpRangeBegin, cmd.dhcpRangeEnd, cmd.dhcpRangeNetmask),
           DHCP_NETMASK=cmd.dhcpRangeNetmask,
           TFTPBOOT_PATH=self.TFTPBOOT_PATH,
           DHCP_HOSTS_DIR=self.DHCP_HOSTS_DIR,
           DNSMASQ_LOG_PATH=self.DNSMASQ_LOG_PATH)
        with open(self.DNSMASQ_CONF_PATH, 'w') as f:
            f.write(dhcp_conf)

        # init dhcp-hostdir: ignore the pxeserver's own MAC so dnsmasq never
        # answers itself
        mac_address = self._get_mac_address(cmd.dhcpInterface)
        dhcp_conf = "%s,ignore" % mac_address
        if not os.path.exists(self.DHCP_HOSTS_DIR):
            os.makedirs(self.DHCP_HOSTS_DIR)
        with open(os.path.join(self.DHCP_HOSTS_DIR, "ignore"), 'w') as f:
            f.write(dhcp_conf)

        # hack nmap script
        splited_mac_address = "0x" + mac_address.replace(":", ",0x")
        bash_r("sed -i '/local mac = string.char/s/0x..,0x..,0x..,0x..,0x..,0x../%s/g' %s" % \
               (splited_mac_address, self.NMAP_BROADCAST_DHCP_DISCOVER_PATH))

        # init vsftpd.conf
        vsftpd_conf = """anonymous_enable=YES
anon_root={VSFTPD_ANON_ROOT}
local_enable=YES
write_enable=YES
local_umask=022
dirmessage_enable=YES
connect_from_port_20=YES
listen=NO
listen_ipv6=YES
pam_service_name=vsftpd
userlist_enable=YES
tcp_wrappers=YES
xferlog_enable=YES
xferlog_std_format=YES
xferlog_file={VSFTPD_LOG_PATH}
""".format(VSFTPD_ANON_ROOT=self.VSFTPD_ROOT_PATH, VSFTPD_LOG_PATH=self.VSFTPD_LOG_PATH)
        with open(self.VSFTPD_CONF_PATH, 'w') as f:
            f.write(vsftpd_conf)

        # init pxelinux.cfg: default boot entry runs the hardware inspector
        pxelinux_cfg = """default zstack_baremetal
prompt 0
label zstack_baremetal
kernel zstack/vmlinuz
ipappend 2
append initrd=zstack/initrd.img devfs=nomount ksdevice=bootif ks=ftp://{PXESERVER_DHCP_NIC_IP}/ks/inspector_ks.cfg vnc
""".format(PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip)
        with open(self.PXELINUX_DEFAULT_CFG, 'w') as f:
            f.write(pxelinux_cfg)

        # init inspector_ks.cfg from the template shipped next to this file
        ks_tmpl_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ks_tmpl')
        with open("%s/inspector_ks_tmpl" % ks_tmpl_path, 'r') as fr:
            inspector_ks_cfg = fr.read() \
                .replace("PXESERVERUUID", cmd.uuid) \
                .replace("PXESERVER_DHCP_NIC_IP", pxeserver_dhcp_nic_ip)
        with open(self.INSPECTOR_KS_CFG, 'w') as fw:
            fw.write(inspector_ks_cfg)

        # config nginx
        if not os.path.exists(self.NGINX_MN_PROXY_CONF_PATH):
            os.makedirs(self.NGINX_MN_PROXY_CONF_PATH, 0777)
        if not os.path.exists(self.NGINX_TERMINAL_PROXY_CONF_PATH):
            os.makedirs(self.NGINX_TERMINAL_PROXY_CONF_PATH, 0777)
        nginx_conf = """user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log;
pid /run/nginx.pid;
include /usr/share/nginx/modules/*.conf;
events {
    worker_connections 1024;
}
http {
    access_log /var/log/nginx/access.log;
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 1000;
    types_hash_max_size 2048;
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    map $http_upgrade $connection_upgrade {
        default upgrade;
        '' close;
    }
    server {
        listen 8090;
        include /etc/nginx/conf.d/mn_pxe/*;
    }
    server {
        listen 7771;
        include /etc/nginx/conf.d/pxe_mn/*;
    }
    server {
        listen 7772;
        include /etc/nginx/conf.d/terminal/*;
    }
}
"""
        with open("/etc/nginx/nginx.conf", 'w') as fw:
            fw.write(nginx_conf)

        # create nginx proxy for http://MN_IP:MN_PORT/zstack/asyncrest/sendcommand
        content = "location / { proxy_pass http://%s:%s/; }" % (cmd.managementIp, cmd.managementPort)
        with open("/etc/nginx/conf.d/pxe_mn/zstack_mn.conf", 'w') as fw:
            fw.write(content)

        # install noVNC
        if not os.path.exists(self.NOVNC_INSTALL_PATH):
            ret = bash_r("tar -xf %s -C %s" % (os.path.join(self.BAREMETAL_LIB_PATH, "noVNC.tar.gz"), self.BAREMETAL_LIB_PATH))
            if ret != 0:
                raise PxeServerError("failed to install noVNC on baremetal pxeserver[uuid:%s]" % self.uuid)

        # restart pxe services
        self._stop_pxe_server()
        self._start_pxe_server()

        logger.info("successfully inited and started baremetal pxeserver[uuid:%s]" % self.uuid)
        return json_object.dumps(rsp)

    @reply_error
    def ping(self, req):
        """Health check; also detects rogue DHCP servers on the dhcp
        interface and re-starts the PXE services when they should be up."""
        rsp = PingResponse()
        rsp.uuid = self.uuid

        # DETECT ROGUE DHCP SERVER
        cmd = json_object.loads(req[http.REQUEST_BODY])
        if platform.machine() == "x86_64":
            # grep succeeding (ret == 0) means another server answered discover
            ret, output = bash_ro("nmap -sU -p67 --script broadcast-dhcp-discover -e %s | grep 'Server Identifier'" % cmd.dhcpInterface)
            if ret == 0:
                raise PxeServerError("rogue dhcp server[IP:%s] detected" % output.strip().split(' ')[-1])

        # make sure pxeserver is running if it's Enabled
        if cmd.enabled:
            self._start_pxe_server()

        return json_object.dumps(rsp)

    @reply_error
    def 
connect(self, req): cmd = json_object.loads(req[http.REQUEST_BODY]) rsp = AgentResponse() self.uuid = cmd.uuid self.storage_path = cmd.storagePath # check storage path if os.path.isfile(self.storage_path): raise PxeServerError('storage path: %s is a file' % self.storage_path) if not os.path.exists(self.storage_path): os.makedirs(self.storage_path, 0777) total, avail = self._get_capacity() logger.debug(http.path_msg(self.CONNECT_PATH, 'connected, [storage path:%s, total capacity: %s bytes, ' 'available capacity: %s size]' % (self.storage_path, total, avail))) rsp.totalCapacity = total rsp.availableCapacity = avail return json_object.dumps(rsp) @in_bash @reply_error def start(self, req): cmd = json_object.loads(req[http.REQUEST_BODY]) rsp = AgentResponse() self.uuid = cmd.uuid self._start_pxe_server() logger.info("successfully started baremetal pxeserver[uuid:%s]") return json_object.dumps(rsp) @in_bash @reply_error def stop(self, req): cmd = json_object.loads(req[http.REQUEST_BODY]) rsp = AgentResponse() self.uuid = cmd.uuid self._stop_pxe_server() logger.info("successfully stopped baremetal pxeserver[uuid:%s]" % self.uuid) return json_object.dumps(rsp) @reply_error def create_bm_configs(self, req): cmd = json_object.loads(req[http.REQUEST_BODY]) cmd.pxeNicMac = cmd.pxeNicMac.replace(":", "-") rsp = AgentResponse() # check preconfiguration md5sum if hashlib.md5(cmd.preconfigurationContent).hexdigest() != cmd.preconfigurationMd5sum: raise PxeServerError("preconfiguration content not complete") self.uuid = cmd.uuid self.dhcp_interface = cmd.dhcpInterface self._create_pxelinux_cfg(cmd) self._create_preconfiguration_file(cmd) logger.info("successfully created pxelinux.cfg and preconfiguration file for baremetal instance[uuid:%s] on pxeserver[uuid:%s]" % (cmd.bmUuid, self.uuid)) return json_object.dumps(rsp) def _create_pxelinux_cfg(self, cmd): ks_cfg_name = cmd.pxeNicMac pxe_cfg_file = os.path.join(self.PXELINUX_CFG_PATH, "01-" + ks_cfg_name) pxeserver_dhcp_nic_ip = 
self._get_ip_address(cmd.dhcpInterface).strip() append = "" if cmd.preconfigurationType == 'kickstart': append = 'devfs=nomount ksdevice=bootif ks=ftp://{PXESERVER_DHCP_NIC_IP}/ks/{KS_CFG_NAME} vnc' elif cmd.preconfigurationType == 'preseed': append = 'interface=auto auto=true priority=critical url=ftp://{PXESERVER_DHCP_NIC_IP}/ks/{KS_CFG_NAME}' elif cmd.preconfigurationType == 'autoyast': append = 'install=ftp://{PXESERVER_DHCP_NIC_IP}/{IMAGEUUID}/ autoyast=ftp://{PXESERVER_DHCP_NIC_IP}/ks/{KS_CFG_NAME} vnc=1 vncpassword=password' append = append.format(PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip, IMAGEUUID=cmd.imageUuid, KS_CFG_NAME=ks_cfg_name) pxelinux_cfg = ("default {IMAGEUUID}\n" "prompt 0\n" "ipappend 2\n" "label {IMAGEUUID}\n" "kernel {IMAGEUUID}/vmlinuz\n" "append initrd={IMAGEUUID}/initrd.img {APPEND}").format( PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip, IMAGEUUID=cmd.imageUuid, KS_CFG_NAME=ks_cfg_name, APPEND=append) with open(pxe_cfg_file, 'w') as f: f.write(pxelinux_cfg) def _create_preconfiguration_file(self, cmd): # in case user didn't seleted a preconfiguration template etc. 
cmd.preconfigurationContent = cmd.preconfigurationContent if cmd.preconfigurationContent != "" else """ {{ extra_repo }} {{ REPO_URL }} {{ SYS_USERNAME }} {{ SYS_PASSWORD }} {{ NETWORK_CFGS }} {{ FORCE_INSTALL }} {{ PRE_SCRIPTS }} {{ POST_SCRIPTS }} """ pxeserver_dhcp_nic_ip = self._get_ip_address(cmd.dhcpInterface).strip() if cmd.preconfigurationType == 'kickstart': rendered_content = self._render_kickstart_template(cmd, pxeserver_dhcp_nic_ip) elif cmd.preconfigurationType == 'preseed': rendered_content = self._render_preseed_template(cmd, pxeserver_dhcp_nic_ip) elif cmd.preconfigurationType == 'autoyast': rendered_content = self._render_autoyast_template(cmd, pxeserver_dhcp_nic_ip) else: raise PxeServerError("unkown preconfiguration type %s" % cmd.preconfigurationType) ks_cfg_name = cmd.pxeNicMac ks_cfg_file = os.path.join(self.KS_CFG_PATH, ks_cfg_name) with open(ks_cfg_file, 'w') as f: f.write(rendered_content) def _create_pre_scripts(self, cmd, pxeserver_dhcp_nic_ip, more_script = ""): # poweroff and abort the provisioning process if failed to send `deploybegin` command pre_script = """# notify deploy begin curl --fail -X POST -H "Content-Type:application/json" \ -H "commandpath:/baremetal/instance/deploybegin" \ -d {{"baremetalInstanceUuid":"{BMUUID}"}} \ --retry 3 \ http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand || \ wget -O- --header="Content-Type:application/json" \ --header="commandpath:/baremetal/instance/deploybegin" \ --post-data={{"baremetalInstanceUuid":"{BMUUID}"}} \ --tries=3 \ http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand || \ poweroff """.format(BMUUID=cmd.bmUuid, PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip) pre_script += more_script with open(os.path.join(self.ZSTACK_SCRIPTS_PATH, "pre_%s.sh" % cmd.pxeNicMac), 'w') as f: f.write(pre_script) logger.debug("create pre_%s.sh with content: %s" % (cmd.pxeNicMac, pre_script)) def _create_post_scripts(self, cmd, pxeserver_dhcp_nic_ip, more_script = ""): post_script = 
more_script
        # notify the MN, install shellinabox + zwatch agent, and register a
        # small systemd service that reports "os running" on every boot.
        # NOTE(review): `grep [s]hellinahoxd` below looks like a typo for
        # shellinaboxd — runtime string kept as-is; confirm before changing.
        post_script += """
bm_log='/tmp/zstack_bm.log'
curr_time=`date +"%Y-%m-%d %H:%M:%S"`
echo -e "Current time: \t$curr_time" >> $bm_log
# notify deploy complete
echo "\nnotify zstack that bm instance deploy completed:" >> $bm_log
curl -X POST -H "Content-Type:application/json" \
-H "commandpath:/baremetal/instance/deploycomplete" \
-d {{"baremetalInstanceUuid":"{BMUUID}"}} \
--retry 5 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand >>$bm_log 2>&1 || \
wget -O- --header="Content-Type:application/json" \
--header="commandpath:/baremetal/instance/deploycomplete" \
--post-data={{"baremetalInstanceUuid":"{BMUUID}"}} \
--tries=5 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand >>$bm_log 2>&1
# install shellinaboxd
wget -P /usr/bin ftp://{PXESERVER_DHCP_NIC_IP}/shellinaboxd || curl -o /usr/bin/shellinaboxd ftp://{PXESERVER_DHCP_NIC_IP}/shellinaboxd
chmod a+x /usr/bin/shellinaboxd
# install zstack zwatch-vm-agent
wget -P /usr/bin ftp://{PXESERVER_DHCP_NIC_IP}/zwatch-vm-agent || curl -o /usr/bin/zwatch-vm-agent ftp://{PXESERVER_DHCP_NIC_IP}/zwatch-vm-agent
chmod a+x /usr/bin/zwatch-vm-agent
/usr/bin/zwatch-vm-agent -i
echo "\npushGatewayUrl: http://{PXESERVER_DHCP_NIC_IP}:9093" >> /usr/local/zstack/zwatch-vm-agent/conf.yaml
echo "vmInstanceUuid: {BMUUID}" >> /usr/local/zstack/zwatch-vm-agent/conf.yaml
echo "versionFileUrl: ftp://{PXESERVER_DHCP_NIC_IP}/agent_version" >> /usr/local/zstack/zwatch-vm-agent/conf.yaml
echo "vmInstanceUuid: {BMUUID}" > /usr/local/zstack/baremetalInstanceUuid
systemctl start zwatch-vm-agent.service
# baby agent
cat > /usr/local/bin/zstack_bm_agent.sh << EOF
#!/bin/bash
iptables -C INPUT -p tcp -m tcp --dport 4200 -j ACCEPT
if [ \$? -ne 0 ]; then
iptables -I INPUT -p tcp -m tcp --dport 4200 -j ACCEPT || true
service iptables save || true
fi
firewall-cmd --query-port=4200/tcp
if [ \$? -ne 0 ]; then
firewall-cmd --zone=public --add-port=4200/tcp --permanent || true
systemctl is-enabled firewalld.service && systemctl restart firewalld.service || true
fi
ps -ef | grep [s]hellinahoxd || shellinaboxd -b -t -s /:SSH:127.0.0.1
echo "\nnotify zstack that bm instance is running:" >> $bm_log
curl -X POST -H "Content-Type:application/json" \
-H "commandpath:/baremetal/instance/osrunning" \
-d {{"baremetalInstanceUuid":"{BMUUID}"}} \
--retry 5 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand >>$bm_log 2>&1 || \
wget -O- --header="Content-Type:application/json" \
--header="commandpath:/baremetal/instance/osrunning" \
--post-data={{"baremetalInstanceUuid":"{BMUUID}"}} \
--tries=5 \
http://{PXESERVER_DHCP_NIC_IP}:7771/zstack/asyncrest/sendcommand >>$bm_log 2>&1
EOF
cat > /etc/systemd/system/zstack-bm-agent.service << EOF
[Unit]
Description=ZStack Baremetal Instance Agent
After=network-online.target NetworkManager.service iptables.service firewalld.service
[Service]
Restart=on-failure
RestartSec=10
RemainAfterExit=yes
ExecStart=/bin/bash /usr/local/bin/zstack_bm_agent.sh
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable zstack-bm-agent.service
""".format(BMUUID=cmd.bmUuid, PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip)
        with open(os.path.join(self.ZSTACK_SCRIPTS_PATH, "post_%s.sh" % cmd.pxeNicMac), 'w') as f:
            f.write(post_script)
        logger.debug("create post_%s.sh with content: %s" % (cmd.pxeNicMac, post_script))

    def _render_kickstart_template(self, cmd, pxeserver_dhcp_nic_ip):
        # Render the kickstart (RHEL/CentOS) answer file from
        # cmd.preconfigurationContent with Jinja2.
        context = dict()
        context['REPO_URL'] = "ftp://%s/%s/" % (pxeserver_dhcp_nic_ip, cmd.imageUuid)
        # kickstart refuses a duplicate root user entry
        context['USERNAME'] = "" if cmd.username == 'root' else cmd.username
        context['PASSWORD'] = cmd.password
        context['PRE_SCRIPTS'] = 'sh -c "$(curl -fsSL ftp://%s/scripts/pre_%s.sh)"' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['POST_SCRIPTS'] = 'sh -c "$(curl -fsSL ftp://%s/scripts/post_%s.sh)"' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['FORCE_INSTALL'] = "clearpart --all --initlabel" if cmd.forceInstall else ""
        context['IMAGE_UUID'] = cmd.imageUuid

        niccfgs = json_object.loads(cmd.nicCfgs) if cmd.nicCfgs is not None else []
        # kickstart `network` directives for the PXE nics
        pxe_niccfg_content = """
{% for cfg in niccfgs if cfg.pxe %}
network --bootproto=static --onboot=yes --noipv6 --activate --device {{ cfg.mac }} --ip={{ cfg.ip }} --netmask={{ cfg.netmask }} --gateway={{ cfg.gateway }} --nameserver={{ cfg.nameserver }}
{% endfor %}
"""
        nic_cfg_tmpl = Template(pxe_niccfg_content)
        context['NETWORK_CFGS'] = nic_cfg_tmpl.render(niccfgs=niccfgs)

        # post script snippet for network configuration
        niccfg_post_script = """
{% for cfg in niccfgs if not cfg.pxe %}
{% if cfg.vlanid %}
{% if cfg.bondName %}
DEVNAME={{ cfg.bondName }}
IFCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}
VLANCFGNAME=${DEVNAME}.{{ cfg.vlanid }}
VLANCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}.{{ cfg.vlanid }}
echo "BOOTPROTO=none" > $IFCFGFILE
echo "DEVICE=${DEVNAME}" >> $IFCFGFILE
echo "PEERDNS=no" >> $IFCFGFILE
echo "PEERROUTES=no" >> $IFCFGFILE
echo "ONBOOT=yes" >> $IFCFGFILE
echo "TYPE=Bond" >> $IFCFGFILE
echo "BONDING_MASTER=yes" >> $IFCFGFILE
echo "BONDING_OPTS='mode={{ cfg.bondMode }} {{ cfg.bondOpts }}'" >> $IFCFGFILE
{% for slave in cfg.bondSlaves %}
SLAVENAME=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'`
SLAVECFG=/etc/sysconfig/network-scripts/ifcfg-${SLAVENAME}
echo "BOOTPROTO=none" > $SLAVECFG
echo "DEVICE=${SLAVENAME}" >> $SLAVECFG
echo "MASTER={{ cfg.bondName }}" >> $SLAVECFG
echo "SLAVE=yes" >> $SLAVECFG
echo "PEERDNS=no" >> $SLAVECFG
echo "PEERROUTES=no" >> $SLAVECFG
echo "ONBOOT=yes" >> $SLAVECFG
{% endfor %}
{% else %}
DEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'`
IFCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}
VLANCFGNAME=${DEVNAME}.{{ cfg.vlanid }}
VLANCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}.{{ cfg.vlanid }}
echo "BOOTPROTO=none" > $IFCFGFILE
echo "DEVICE=${DEVNAME}" >> $IFCFGFILE
echo "PEERDNS=no" >> $IFCFGFILE
echo "PEERROUTES=no" >> $IFCFGFILE
echo "ONBOOT=yes" >> $IFCFGFILE
{% endif %}
echo "BOOTPROTO=static" > $VLANCFGFILE
echo "DEVICE=${VLANCFGNAME}" >> $VLANCFGFILE
echo "IPADDR={{ cfg.ip }}" >> $VLANCFGFILE
echo "NETMASK={{ cfg.netmask }}" >> $VLANCFGFILE
echo "GATEWAY={{ cfg.gateway }}" >> $VLANCFGFILE
echo "VLAN=yes" >> $VLANCFGFILE
echo "PEERDNS=no" >> $VLANCFGFILE
echo "PEERROUTES=no" >> $VLANCFGFILE
echo "ONBOOT=yes" >> $VLANCFGFILE
{% else %}
{% if cfg.bondName %}
DEVNAME={{ cfg.bondName }}
IFCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}
echo "BOOTPROTO=static" > $IFCFGFILE
echo "DEVICE=${DEVNAME}" >> $IFCFGFILE
echo "IPADDR={{ cfg.ip }}" >> $IFCFGFILE
echo "NETMASK={{ cfg.netmask }}" >> $IFCFGFILE
echo "GATEWAY={{ cfg.gateway }}" >> $IFCFGFILE
echo "PEERDNS=no" >> $IFCFGFILE
echo "PEERROUTES=no" >> $IFCFGFILE
echo "ONBOOT=yes" >> $IFCFGFILE
echo "TYPE=Bond" >> $IFCFGFILE
echo "BONDING_MASTER=yes" >> $IFCFGFILE
echo "BONDING_OPTS='mode={{ cfg.bondMode }} {{ cfg.bondOpts }}'" >> $IFCFGFILE
{% for slave in cfg.bondSlaves %}
SLAVENAME=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'`
SLAVECFG=/etc/sysconfig/network-scripts/ifcfg-${SLAVENAME}
echo "BOOTPROTO=none" > $SLAVECFG
echo "DEVICE=${SLAVENAME}" >> $SLAVECFG
echo "MASTER={{ cfg.bondName }}" >> $SLAVECFG
echo "SLAVE=yes" >> $SLAVECFG
echo "PEERDNS=no" >> $SLAVECFG
echo "PEERROUTES=no" >> $SLAVECFG
echo "ONBOOT=yes" >> $SLAVECFG
{% endfor %}
{% else %}
DEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'`
IFCFGFILE=/etc/sysconfig/network-scripts/ifcfg-${DEVNAME}
echo "BOOTPROTO=static" > $IFCFGFILE
echo "DEVICE=${DEVNAME}" >> $IFCFGFILE
echo "IPADDR={{ cfg.ip }}" >> $IFCFGFILE
echo "NETMASK={{ cfg.netmask }}" >> $IFCFGFILE
echo "GATEWAY={{ cfg.gateway }}" >> $IFCFGFILE
echo "PEERDNS=no" >> $IFCFGFILE
echo "PEERROUTES=no" >> $IFCFGFILE
echo "ONBOOT=yes" >> $IFCFGFILE
{% endif %}
{% endif %}
{% 
endfor %}
"""
        niccfg_post_tmpl = Template(niccfg_post_script)
        # bondSlaves arrives as a comma separated string; the template wants a list
        for cfg in niccfgs:
            if cfg.bondName:
                cfg.bondSlaves = cfg.bondSlaves.split(',')
        self._create_pre_scripts(cmd, pxeserver_dhcp_nic_ip)
        self._create_post_scripts(cmd, pxeserver_dhcp_nic_ip, niccfg_post_tmpl.render(niccfgs=niccfgs))

        # optional extra repo shipped inside the image tree
        if os.path.exists(os.path.join(self.VSFTPD_ROOT_PATH, cmd.imageUuid, "Extra", "qemu-kvm-ev")):
            context['extra_repo'] = "repo --name=qemu-kvm-ev --baseurl=ftp://%s/%s/Extra/qemu-kvm-ev" % (pxeserver_dhcp_nic_ip, cmd.imageUuid)

        context['pxeserver_dhcp_nic_ip'] = pxeserver_dhcp_nic_ip
        # user supplied values override the computed defaults
        custom = simplejson.loads(cmd.customPreconfigurations) if cmd.customPreconfigurations is not None else {}
        context.update(custom)
        tmpl = Template(cmd.preconfigurationContent)
        return tmpl.render(context)

    def _render_preseed_template(self, cmd, pxeserver_dhcp_nic_ip):
        # Render the preseed (Debian/Ubuntu) answer file from
        # cmd.preconfigurationContent with Jinja2.
        context = dict()
        context['REPO_URL'] = ("d-i mirror/protocol string ftp\n"
                               "d-i mirror/ftp/hostname string {PXESERVER_DHCP_NIC_IP}\n"
                               "d-i mirror/ftp/directory string /{IMAGEUUID}")\
            .format(PXESERVER_DHCP_NIC_IP=pxeserver_dhcp_nic_ip, IMAGEUUID=cmd.imageUuid)
        context['USERNAME'] = cmd.username
        context['PASSWORD'] = cmd.password
        context['PRE_SCRIPTS'] = 'wget -O- ftp://%s/scripts/pre_%s.sh | /bin/sh -s' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        # post script must run inside the installed system, hence chroot /target
        context['POST_SCRIPTS'] = 'wget -O- ftp://%s/scripts/post_%s.sh | chroot /target /bin/sh -s' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['FORCE_INSTALL'] = 'd-i partman-partitioning/confirm_write_new_label boolean true\n' \
                                   'd-i partman/choose_partition select finish\n' \
                                   'd-i partman/confirm boolean true\n' \
                                   'd-i partman/confirm_nooverwrite boolean true\n' \
                                   'd-i partman-md/confirm_nooverwrite boolean true\n' \
                                   'd-i partman-lvm/confirm_nooverwrite boolean true' if cmd.forceInstall else ""

        niccfgs = json_object.loads(cmd.nicCfgs) if cmd.nicCfgs is not None else []
        # post script snippet for network configuration
        niccfg_post_script = """
echo 'loop' >> /etc/modules
echo 'lp' >> /etc/modules
echo 'rtc' >> /etc/modules
echo 'bonding' >> /etc/modules
echo '8021q' >> /etc/modules
{% set count = 0 %}
{% for cfg in niccfgs %}
{% if cfg.bondName %}
{% set count = count + 1 %}
echo "options bonding max_bonds={{ count }}" > /etc/modprobe.d/bonding.conf
{% endif %}
{% endfor %}
INTERFACES_FILE=/etc/network/interfaces
{% for cfg in niccfgs %}
{% if cfg.bondName %}
RAWDEVNAME={{ cfg.bondName }}
{% else %}
RAWDEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'`
{% endif %}
DEVNAME=${RAWDEVNAME}{%- if cfg.vlanid -%}.{{ cfg.vlanid }}{%- endif -%}
{% if cfg.vlanid %}
echo "auto ${DEVNAME}" >> ${INTERFACES_FILE}
echo "iface ${DEVNAME} inet static" >> ${INTERFACES_FILE}
echo "address {{ cfg.ip }}" >> ${INTERFACES_FILE}
echo "netmask {{ cfg.netmask }}" >> ${INTERFACES_FILE}
echo "gateway {{ cfg.gateway }}" >> ${INTERFACES_FILE}
echo "vlan-raw-device ${RAWDEVNAME}" >> ${INTERFACES_FILE}
echo '' >> ${INTERFACES_FILE}
{% endif %}
{% if cfg.bondName %}
echo "auto ${RAWDEVNAME}" >> ${INTERFACES_FILE}
{% if cfg.vlanid %}
echo "iface ${RAWDEVNAME} inet manual" >> ${INTERFACES_FILE}
{% else %}
echo "iface ${RAWDEVNAME} inet static" >> ${INTERFACES_FILE}
echo "address {{ cfg.ip }}" >> ${INTERFACES_FILE}
echo "netmask {{ cfg.netmask }}" >> ${INTERFACES_FILE}
echo "gateway {{ cfg.gateway }}" >> ${INTERFACES_FILE}
{% endif %}
echo "bond-mode {{ cfg.bondMode }}" >> ${INTERFACES_FILE}
{% if cfg.bondOpts %}
echo "{{ cfg.bondOpts }}" >> ${INTERFACES_FILE}
{% else %}
echo "bond-miimon 100" >> ${INTERFACES_FILE}
{% endif %}
echo "bond-slaves none" >> ${INTERFACES_FILE}
echo '' >> ${INTERFACES_FILE}
{% for slave in cfg.bondSlaves %}
slave_nic=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'`
echo "auto ${slave_nic}" >> ${INTERFACES_FILE}
echo "iface ${slave_nic} inet manual" >> ${INTERFACES_FILE}
echo "bond-master {{ cfg.bondName }}" >> ${INTERFACES_FILE}
echo '' >> ${INTERFACES_FILE}
{% endfor %}
{% endif %}
{% if not cfg.bondName and not cfg.vlanid %}
echo "auto ${DEVNAME}" >> ${INTERFACES_FILE}
echo "iface ${DEVNAME} inet static" >> ${INTERFACES_FILE}
echo "address {{ cfg.ip }}" >> ${INTERFACES_FILE}
echo "netmask {{ cfg.netmask }}" >> ${INTERFACES_FILE}
echo "gateway {{ cfg.gateway }}" >> ${INTERFACES_FILE}
echo '' >> ${INTERFACES_FILE}
{% endif %}
{% endfor %}
"""
        niccfg_post_tmpl = Template(niccfg_post_script)
        # bondSlaves arrives as a comma separated string; the template wants a list
        for cfg in niccfgs:
            if cfg.bondName:
                cfg.bondSlaves = cfg.bondSlaves.split(',')
        self._create_pre_scripts(cmd, pxeserver_dhcp_nic_ip)
        self._create_post_scripts(cmd, pxeserver_dhcp_nic_ip, niccfg_post_tmpl.render(niccfgs=niccfgs))
        custom = simplejson.loads(cmd.customPreconfigurations) if cmd.customPreconfigurations is not None else {}
        context.update(custom)
        tmpl = Template(cmd.preconfigurationContent)
        return tmpl.render(context)

    def _render_autoyast_template(self, cmd, pxeserver_dhcp_nic_ip):
        # Render the autoyast (SUSE) answer file from
        # cmd.preconfigurationContent with Jinja2.
        context = dict()
        context['USERNAME'] = cmd.username
        context['PASSWORD'] = cmd.password
        context['PRE_SCRIPTS'] = 'sh -c "$(curl -fsSL ftp://%s/scripts/pre_%s.sh)"' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['POST_SCRIPTS'] = 'sh -c "$(curl -fsSL ftp://%s/scripts/post_%s.sh)"' % (pxeserver_dhcp_nic_ip, cmd.pxeNicMac)
        context['FORCE_INSTALL'] = 'false' if cmd.forceInstall else 'true'

        niccfgs = json_object.loads(cmd.nicCfgs) if cmd.nicCfgs is not None else []
        # post script snippet for network configuration
        niccfg_post_script = """echo -e 'loop\nlp\nrtc\nbonding\n8021q' >> /etc/modules-load.d/ifcfg.conf
{% set count = 0 %}
{% for cfg in niccfgs %}
{% if cfg.bondName %}
{% set count = count + 1 %}
echo "options bonding max_bonds={{ count }}" > /etc/modprobe.d/bonding.conf
{% endif %}
{% endfor %}
{% for cfg in niccfgs %}
{% if cfg.vlanid %}
{% if cfg.bondName %}
DEVNAME={{ cfg.bondName }}
IFCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME}
VLANCFGNAME=${DEVNAME}.{{ cfg.vlanid }}
VLANCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME}.{{ cfg.vlanid }}
echo "BOOTPROTO='none'" > $IFCFGFILE
echo "STARTMODE='auto'" >> 
$IFCFGFILE echo "BONDING_MASTER='yes'" >> $IFCFGFILE echo "BONDING_MODULE_OPTS='mode={{ cfg.bondMode }} miimon=100 {% if cfg.bondOpts %}{{ cfg.bondOpts }}{% endif %}'" >> $IFCFGFILE {% for slave in cfg.bondSlaves %} SLAVENAME=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'` SLAVECFG=/etc/sysconfig/network/ifcfg-${SLAVENAME} echo "BONDING_SLAVE{{ loop.index0 }}='${SLAVENAME}'" >> $IFCFGFILE echo "BOOTPROTO='none'" > $SLAVECFG echo "STARTMODE='hotplug'" >> $SLAVECFG {% endfor %} {% else %} DEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'` IFCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME} VLANCFGNAME=${DEVNAME}.{{ cfg.vlanid }} VLANCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME}.{{ cfg.vlanid }} echo "BOOTPROTO='none'" > $IFCFGFILE echo "STARTMODE='auto'" >> $IFCFGFILE {% endif %} echo "BOOTPROTO='static'" > $VLANCFGFILE echo "IPADDR={{ cfg.ip }}" >> $VLANCFGFILE echo "NETMASK={{ cfg.netmask }}" >> $VLANCFGFILE echo "STARTMODE='auto'" >> $VLANCFGFILE echo "ETHERDEVICE=${DEVNAME}" >> $VLANCFGFILE echo "VLAN_ID={{ cfg.vlanid }}" >> $VLANCFGFILE {% else %} {% if cfg.bondName %} DEVNAME={{ cfg.bondName }} IFCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME} echo "BOOTPROTO='static'" > $IFCFGFILE echo "IPADDR='{{ cfg.ip }}'" >> $IFCFGFILE echo "NETMASK='{{ cfg.netmask }}'" >> $IFCFGFILE echo "STARTMODE='auto'" >> $IFCFGFILE echo "BONDING_MASTER='yes'" >> $IFCFGFILE echo "BONDING_MODULE_OPTS='mode={{ cfg.bondMode }} miimon=100 {% if cfg.bondOpts %}{{ cfg.bondOpts }}{% endif %}'" >> $IFCFGFILE {% for slave in cfg.bondSlaves %} SLAVENAME=`ip -o link show | grep {{ slave }} | awk -F ': ' '{ print $2 }'` SLAVECFG=/etc/sysconfig/network/ifcfg-${SLAVENAME} echo "BONDING_SLAVE{{ loop.index0 }}='${SLAVENAME}'" >> $IFCFGFILE echo "BOOTPROTO='none'" > $SLAVECFG echo "STARTMODE='hotplug'" >> $SLAVECFG {% endfor %} {% else %} DEVNAME=`ip -o link show | grep {{ cfg.mac }} | awk -F ': ' '{ print $2 }'` 
IFCFGFILE=/etc/sysconfig/network/ifcfg-${DEVNAME} echo "BOOTPROTO='static'" > $IFCFGFILE echo "IPADDR='{{ cfg.ip }}'" >> $IFCFGFILE echo "NETMASK='{{ cfg.netmask }}'" >> $IFCFGFILE echo "STARTMODE='auto'" >> $IFCFGFILE {% endif %} {% endif %} {% endfor %} """ niccfg_post_tmpl = Template(niccfg_post_script) for cfg in niccfgs: if cfg.bondName: cfg.bondSlaves = cfg.bondSlaves.split(',') self._create_pre_scripts(cmd, pxeserver_dhcp_nic_ip) self._create_post_scripts(cmd, pxeserver_dhcp_nic_ip, niccfg_post_tmpl.render(niccfgs=niccfgs)) custom = simplejson.loads(cmd.customPreconfigurations) if cmd.customPreconfigurations is not None else {} context.update(custom) tmpl = Template(cmd.preconfigurationContent) return tmpl.render(context) @reply_error def delete_bm_configs(self, req): cmd = json_object.loads(req[http.REQUEST_BODY]) rsp = AgentResponse() # clean up pxeserver bm configs if cmd.pxeNicMac == "*": if os.path.exists(self.PXELINUX_CFG_PATH): bash_r("rm -f %s/*" % self.PXELINUX_CFG_PATH) if os.path.exists(self.KS_CFG_PATH): bash_r("rm -f %s/*" % self.KS_CFG_PATH) if os.path.exists(self.NGINX_MN_PROXY_CONF_PATH): bash_r("rm -f %s/*" % self.NGINX_MN_PROXY_CONF_PATH) if os.path.exists(self.NGINX_TERMINAL_PROXY_CONF_PATH): bash_r("rm -f %s/*" % self.NGINX_TERMINAL_PROXY_CONF_PATH) if os.path.exists(self.NOVNC_TOKEN_PATH): bash_r("rm -f %s/*" % self.NOVNC_TOKEN_PATH) else: mac_as_name = cmd.pxeNicMac.replace(":", "-") pxe_cfg_file = os.path.join(self.PXELINUX_CFG_PATH, "01-" + mac_as_name) if os.path.exists(pxe_cfg_file): os.remove(pxe_cfg_file) ks_cfg_file = os.path.join(self.KS_CFG_PATH, mac_as_name) if os.path.exists(ks_cfg_file): os.remove(ks_cfg_file) pre_script_file = os.path.join(self.ZSTACK_SCRIPTS_PATH, "pre_%s.sh" % mac_as_name) if os.path.exists(pre_script_file): os.remove(pre_script_file) post_script_file = os.path.join(self.ZSTACK_SCRIPTS_PATH, "post_%s.sh" % mac_as_name) if os.path.exists(post_script_file): os.remove(post_script_file) 
logger.info("successfully deleted pxelinux.cfg and ks.cfg %s" % cmd.pxeNicMac if cmd.pxeNicMac != '*' else 'all') return json_object.dumps(rsp) @reply_error def create_bm_nginx_proxy(self, req): cmd = json_object.loads(req[http.REQUEST_BODY]) rsp = AgentResponse() nginx_proxy_file = os.path.join(self.NGINX_TERMINAL_PROXY_CONF_PATH, cmd.bmUuid) with open(nginx_proxy_file, 'w') as f: f.write(cmd.upstream) ret, _, err = bash_roe("systemctl reload nginx || systemctl reload nginx") if ret != 0: logger.debug("failed to reload nginx.service: " + err) logger.info("successfully create terminal nginx proxy for baremetal instance[uuid:%s] on pxeserver[uuid:%s]" % (cmd.bmUuid, self.uuid)) return json_object.dumps(rsp) @reply_error def delete_bm_nginx_proxy(self, req): cmd = json_object.loads(req[http.REQUEST_BODY]) rsp = AgentResponse() nginx_proxy_file = os.path.join(self.NGINX_TERMINAL_PROXY_CONF_PATH, cmd.bmUuid) if os.path.exists(nginx_proxy_file): os.remove(nginx_proxy_file) ret, _, err = bash_roe("systemctl reload nginx || systemctl reload nginx") if ret != 0: logger.debug("failed to reload nginx.service: " + err) logger.info("successfully deleted terminal nginx proxy for baremetal instance[uuid:%s] on pxeserver[uuid:%s]" % (cmd.bmUuid, self.uuid)) return json_object.dumps(rsp) @reply_error def create_bm_novnc_proxy(self, req): cmd = json_object.loads(req[http.REQUEST_BODY]) rsp = AgentResponse() novnc_proxy_file = os.path.join(self.NOVNC_TOKEN_PATH, cmd.bmUuid) with open(novnc_proxy_file, 'w') as f: f.write(cmd.upstream) logger.info("successfully created novnc proxy for baremetal instance[uuid:%s] on pxeserver[uuid:%s]" % (cmd.bmUuid, self.uuid)) return json_object.dumps(rsp) @reply_error def delete_bm_novnc_proxy(self, req): cmd = json_object.loads(req[http.REQUEST_BODY]) rsp = AgentResponse() novnc_proxy_file = os.path.join(self.NOVNC_TOKEN_PATH, cmd.bmUuid) if os.path.exists(novnc_proxy_file): os.remove(novnc_proxy_file) logger.info("successfully deleted novnc proxy 
for baremetal instance[uuid:%s] on pxeserver[uuid:%s]" % (cmd.bmUuid, self.uuid)) return json_object.dumps(rsp) @reply_error def create_bm_dhcp_config(self, req): cmd = json_object.loads(req[http.REQUEST_BODY]) rsp = AgentResponse() host_file = os.path.join(self.DHCP_HOSTS_DIR, cmd.chassisUuid) with open(host_file, 'w') as f: f.write("%s,%s" % (cmd.pxeNicMac, cmd.pxeNicIp)) logger.info("successfully created dhcp config for baremetal chassis[uuid:%s] on pxeserver[uuid:%s]" % (cmd.chassisUuid, self.uuid)) return json_object.dumps(rsp) @reply_error def delete_bm_dhcp_config(self, req): cmd = json_object.loads(req[http.REQUEST_BODY]) rsp = AgentResponse() host_file = os.path.join(self.DHCP_HOSTS_DIR, cmd.chassisUuid) if os.path.exists(host_file): os.remove(host_file) logger.info("successfully deleted dhcp config for baremetal chassis[uuid:%s] on pxeserver[uuid:%s]" % (cmd.chassisUuid, self.uuid)) return json_object.dumps(rsp) @in_bash @reply_error def download_imagestore(self, req): cmd = json_object.loads(req[http.REQUEST_BODY]) # download rsp = self.imagestore_client.download_image_from_imagestore(cmd) if not rsp.success: raise PxeServerError("failed to download image[uuid:%s] from imagestore to baremetal image cache" % cmd.imageUuid) # mount cache_path = cmd.cacheInstallPath mount_path = os.path.join(self.VSFTPD_ROOT_PATH, cmd.imageUuid) if not os.path.exists(mount_path): os.makedirs(mount_path) ret = bash_r("mount | grep %s || mount %s %s" % (mount_path, cache_path, mount_path)) if ret != 0: raise PxeServerError("failed to mount image[uuid:%s] to baremetal ftp server %s" % (cmd.imageUuid, cache_path)) # copy vmlinuz etc. 
vmlinuz_path = os.path.join(self.TFTPBOOT_PATH, cmd.imageUuid) if not os.path.exists(vmlinuz_path): os.makedirs(vmlinuz_path) # RHEL ret1 = bash_r("cp %s %s" % (os.path.join(mount_path, "isolinux/vmlinuz*"), os.path.join(vmlinuz_path, "vmlinuz"))) ret2 = bash_r("cp %s %s" % (os.path.join(mount_path, "isolinux/initrd*.img"), os.path.join(vmlinuz_path, "initrd.img"))) # DEBIAN SERVER ret3 = bash_r("cp %s %s" % (os.path.join(mount_path, "install/netboot/*-installer/amd64/linux"), os.path.join(vmlinuz_path, "vmlinuz"))) ret4 = bash_r("cp %s %s" % (os.path.join(mount_path, "install/netboot/*-installer/amd64/initrd.gz"), os.path.join(vmlinuz_path, "initrd.img"))) # SUSE ret5 = bash_r("cp %s %s" % (os.path.join(mount_path, "boot/*/loader/linux"), os.path.join(vmlinuz_path, "vmlinuz"))) ret6 = bash_r("cp %s %s" % (os.path.join(mount_path, "boot/*/loader/initrd"), os.path.join(vmlinuz_path, "initrd.img"))) if (ret1 != 0 or ret2 != 0) and (ret3 != 0 or ret4 != 0) and (ret5 != 0 or ret6 != 0): raise PxeServerError("failed to copy vmlinuz and initrd.img from image[uuid:%s] to baremetal tftp server" % cmd.imageUuid) logger.info("successfully downloaded image[uuid:%s] and mounted it" % cmd.imageUuid) self._set_capacity_to_response(rsp) return json_object.dumps(rsp) @reply_error def download_cephb(self, req): # TODO cmd = json_object.loads(req[http.REQUEST_BODY]) rsp = AgentResponse() return json_object.dumps(rsp) @in_bash @reply_error def delete_bm_image_cache(self, req): cmd = json_object.loads(req[http.REQUEST_BODY]) rsp = AgentResponse() # rm vmlinuz etc. 
vmlinuz_path = os.path.join(self.TFTPBOOT_PATH, cmd.imageUuid) if os.path.exists(vmlinuz_path): shutil.rmtree(vmlinuz_path) # umount mount_path = os.path.join(self.VSFTPD_ROOT_PATH, cmd.imageUuid) bash_r("umount {0}; rm -rf {0}".format(mount_path)) # rm image cache if os.path.exists(cmd.cacheInstallPath): shutil.rmtree(os.path.dirname(cmd.cacheInstallPath)) logger.info("successfully umounted and deleted cache of image[uuid:%s]" % cmd.imageUuid) self._set_capacity_to_response(rsp) return json_object.dumps(rsp) @in_bash @reply_error def mount_bm_image_cache(self, req): cmd = json_object.loads(req[http.REQUEST_BODY]) rsp = AgentResponse() cache_path = cmd.cacheInstallPath mount_path = os.path.join(self.VSFTPD_ROOT_PATH, cmd.imageUuid) ret = bash_r("mount | grep %s || mount %s %s" % (mount_path, cache_path, mount_path)) if ret != 0: raise PxeServerError("failed to mount baremetal cache of image[uuid:%s]" % cmd.imageUuid) return json_object.dumps(rsp)
class CephAgent(object):
    """HTTP agent for the Ceph backup storage.

    Each *_PATH constant is an HTTP endpoint; __init__ wires it to the
    matching handler. Handlers receive the raw request dict and return a
    JSON-encoded response object.
    """

    INIT_PATH = "/ceph/backupstorage/init"
    DOWNLOAD_IMAGE_PATH = "/ceph/backupstorage/image/download"
    DELETE_IMAGE_PATH = "/ceph/backupstorage/image/delete"
    PING_PATH = "/ceph/backupstorage/ping"
    ECHO_PATH = "/ceph/backupstorage/echo"

    # class-level server: one shared instance for the whole agent process
    http_server = http.HttpServer(port=7761)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH, self.download)
        self.http_server.register_async_uri(self.DELETE_IMAGE_PATH, self.delete)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity / rsp.availableCapacity from `ceph df`.

        Handles both the newer output (total_bytes/total_avail_bytes, in
        bytes) and the older one (total_space/total_avail, in KB).
        """
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)

        # Fix: test field presence with `is not None` instead of truthiness,
        # so a legitimate value of 0 is not mistaken for a missing field
        # (consistent with the newer ceph agents in this codebase).
        if df.stats.total_bytes__ is not None:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__ is not None:
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        if df.stats.total_avail_bytes__ is not None:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__ is not None:
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        rsp.totalCapacity = total
        rsp.availableCapacity = avail

    @replyerror
    def echo(self, req):
        # liveness probe; the body is irrelevant
        logger.debug('get echoed')
        return ''

    @replyerror
    def init(self, req):
        """Verify/create the configured pools; report fsid and capacity.

        Predefined pools must already exist; others are created on demand.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 100' % pool.name)

        rsp = InitRsp()
        rsp.fsid = fsid
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    def _parse_install_path(self, path):
        # NOTE: lstrip() removes a *character set*, not a prefix; this works
        # for "ceph://pool/image" only because '/' terminates the first strip.
        # Do not reuse this idiom for arbitrary prefixes.
        return path.lstrip('ceph:').lstrip('//').split('/')

    @replyerror
    @rollback
    def download(self, req):
        """Stream cmd.url into ceph as pool/image via `rbd import`.

        qcow2 images are converted to raw with qemu-img before being moved
        to their final name; the temporary image is removed on rollback.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        # Fix: without pipefail a failed wget is masked by `rbd import`
        # exiting 0, leaving a truncated image behind (the other ceph
        # agents in this file already use pipefail here).
        shell.call(
            'set -o pipefail; wget --no-check-certificate -q -O - %s | rbd import --image-format 2 - %s/%s' % (cmd.url, pool, tmp_image_name))

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
        _1()

        # Fix: pipefail here too, so a qemu-img failure is not hidden by grep/cut.
        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2" % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                # force format 2 for the conversion target via a temp ceph.conf
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s' % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.call('rbd mv %s/%s %s/%s' % (pool, tmp_image_name, pool, image_name))

        o = shell.call('rbd --format json info %s/%s' % (pool, image_name))
        image_stats = jsonobject.loads(o)

        rsp = DownloadRsp()
        rsp.size = long(image_stats.size_)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        # health check; a successful empty response is enough
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def delete(self, req):
        """Remove the image referenced by cmd.installPath, report capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        shell.call('rbd rm %s/%s' % (pool, image_name))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
class CephAgent(object):
    """HTTP agent for the Ceph primary storage.

    Exposes volume/snapshot lifecycle operations (create, clone, snapshot,
    flatten, cp, sftp transfer, delete) as HTTP endpoints; each *_PATH
    constant maps to one handler method.
    """

    INIT_PATH = "/ceph/primarystorage/init"
    CREATE_VOLUME_PATH = "/ceph/primarystorage/volume/createempty"
    DELETE_PATH = "/ceph/primarystorage/delete"
    CLONE_PATH = "/ceph/primarystorage/volume/clone"
    FLATTEN_PATH = "/ceph/primarystorage/volume/flatten"
    SFTP_DOWNLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/download"
    SFTP_UPLOAD_PATH = "/ceph/primarystorage/sftpbackupstorage/upload"
    ECHO_PATH = "/ceph/primarystorage/echo"
    CREATE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/create"
    DELETE_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/delete"
    PROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/protect"
    ROLLBACK_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/rollback"
    UNPROTECT_SNAPSHOT_PATH = "/ceph/primarystorage/snapshot/unprotect"
    CP_PATH = "/ceph/primarystorage/volume/cp"
    DELETE_POOL_PATH = "/ceph/primarystorage/deletepool"

    # class-level server: one shared instance for the whole agent process
    http_server = http.HttpServer(port=7762)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DELETE_PATH, self.delete)
        self.http_server.register_async_uri(self.CREATE_VOLUME_PATH, self.create)
        self.http_server.register_async_uri(self.CLONE_PATH, self.clone)
        self.http_server.register_async_uri(self.CREATE_SNAPSHOT_PATH, self.create_snapshot)
        self.http_server.register_async_uri(self.DELETE_SNAPSHOT_PATH, self.delete_snapshot)
        self.http_server.register_async_uri(self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot)
        self.http_server.register_async_uri(self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot)
        self.http_server.register_async_uri(self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot)
        self.http_server.register_async_uri(self.FLATTEN_PATH, self.flatten)
        self.http_server.register_async_uri(self.SFTP_DOWNLOAD_PATH, self.sftp_download)
        self.http_server.register_async_uri(self.SFTP_UPLOAD_PATH, self.sftp_upload)
        self.http_server.register_async_uri(self.CP_PATH, self.cp)
        self.http_server.register_async_uri(self.DELETE_POOL_PATH, self.delete_pool)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity / rsp.availableCapacity from `ceph df`.

        Handles both the newer output (total_bytes/total_avail_bytes, in
        bytes) and the older one (total_space/total_avail, in KB).
        """
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)

        # Fix: test field presence with `is not None` instead of truthiness,
        # so a legitimate value of 0 is not mistaken for a missing field
        # (consistent with the newer ceph agents in this codebase).
        if df.stats.total_bytes__ is not None:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__ is not None:
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        if df.stats.total_avail_bytes__ is not None:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__ is not None:
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        rsp.totalCapacity = total
        rsp.availableCapacity = avail

    def _get_file_size(self, path):
        """Return the virtual size in bytes of the rbd image at pool/image."""
        o = shell.call('rbd --format json info %s' % path)
        o = jsonobject.loads(o)
        return long(o.size_)

    @replyerror
    def delete_pool(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        for p in cmd.poolNames:
            # the pool name must be given twice as a ceph safety measure
            shell.call('ceph osd pool delete %s %s --yes-i-really-really-mean-it' % (p, p))
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def rollback_snapshot(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rollback %s' % spath)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def cp(self, req):
        """Copy a volume from cmd.srcPath to cmd.dstPath, report its size."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        src_path = self._normalize_install_path(cmd.srcPath)
        dst_path = self._normalize_install_path(cmd.dstPath)

        shell.call('rbd cp %s %s' % (src_path, dst_path))

        rsp = CpRsp()
        rsp.size = self._get_file_size(dst_path)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def create_snapshot(self, req):
        """Create the snapshot at cmd.snapshotPath (pool/image@snap).

        With cmd.skipOnExisting, creation is skipped when a snapshot of
        the same name already exists on the image.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        do_create = True
        if cmd.skipOnExisting:
            image_name, sp_name = spath.split('@')
            o = shell.call('rbd --format json snap ls %s' % image_name)
            o = jsonobject.loads(o)
            for s in o:
                if s.name_ == sp_name:
                    do_create = False

        if do_create:
            shell.call('rbd snap create %s' % spath)

        rsp = CreateSnapshotRsp()
        rsp.size = self._get_file_size(spath)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_snapshot(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rm %s' % spath)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def unprotect_snapshot(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap unprotect %s' % spath)
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def protect_snapshot(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        # cmd.ignoreError turns a failed protect into a best-effort no-op
        shell.call('rbd snap protect %s' % spath, exception=not cmd.ignoreError)
        rsp = AgentResponse()
        return jsonobject.dumps(rsp)

    @replyerror
    def clone(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        src_path = self._normalize_install_path(cmd.srcPath)
        dst_path = self._normalize_install_path(cmd.dstPath)

        shell.call('rbd clone %s %s' % (src_path, dst_path))
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def flatten(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.path)

        shell.call('rbd flatten %s' % path)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def echo(self, req):
        # liveness probe; the body is irrelevant
        logger.debug('get echoed')
        return ''

    @replyerror
    def init(self, req):
        """Verify/create pools, create the zstack cephx user, report fsid/key."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 100' % pool.name)

        o = shell.call("ceph -f json auth get-or-create client.zstack mon 'allow r' osd 'allow *' 2>/dev/null").strip(' \n\r\t')
        o = jsonobject.loads(o)

        rsp = InitRsp()
        rsp.fsid = fsid
        rsp.userKey = o[0].key_
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    def _normalize_install_path(self, path):
        # NOTE: lstrip() removes a *character set*, not a prefix; this works
        # for "ceph://pool/image" only because '/' terminates the first strip.
        return path.lstrip('ceph:').lstrip('//')

    def _parse_install_path(self, path):
        return self._normalize_install_path(path).split('/')

    @replyerror
    def create(self, req):
        """Create an empty format-2 rbd image of at least cmd.size bytes."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)

        # +1 MB so a size that does not divide evenly is rounded up, not down
        size_M = sizeunit.Byte.toMegaByte(cmd.size) + 1
        shell.call('rbd create --size %s --image-format 2 %s' % (size_M, path))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def sftp_upload(self, req):
        """Export an rbd image and stream it over ssh to the backup storage."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        src_path = self._normalize_install_path(cmd.primaryStorageInstallPath)
        prikey_file = linux.write_to_temp_file(cmd.sshKey)

        bs_folder = os.path.dirname(cmd.backupStorageInstallPath)
        shell.call('ssh -o StrictHostKeyChecking=no -i %s root@%s "mkdir -p %s"' % (prikey_file, cmd.hostname, bs_folder))

        try:
            shell.call("set -o pipefail; rbd export %s - | ssh -o StrictHostKeyChecking=no -i %s root@%s 'cat > %s'" %
                       (src_path, prikey_file, cmd.hostname, cmd.backupStorageInstallPath))
        finally:
            # the private key must never be left on disk
            os.remove(prikey_file)

        return jsonobject.dumps(AgentResponse())

    @replyerror
    @rollback
    def sftp_download(self, req):
        """Stream an image from the backup storage over ssh into rbd.

        qcow2 images are converted to raw with qemu-img before taking the
        final name; the temporary image is removed on rollback.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        hostname = cmd.hostname
        prikey = cmd.sshKey
        pool, image_name = self._parse_install_path(cmd.primaryStorageInstallPath)
        tmp_image_name = 'tmp-%s' % image_name

        prikey_file = linux.write_to_temp_file(prikey)

        @rollbackable
        def _0():
            tpath = "%s/%s" % (pool, tmp_image_name)
            shell.call('rbd info %s > /dev/null && rbd rm %s' % (tpath, tpath))
        _0()

        try:
            shell.call('set -o pipefail; ssh -o StrictHostKeyChecking=no -i %s root@%s "cat %s" | rbd import --image-format 2 - %s/%s' %
                       (prikey_file, hostname, cmd.backupStorageInstallPath, pool, tmp_image_name))
        finally:
            # the private key must never be left on disk
            os.remove(prikey_file)

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
        _1()

        file_format = shell.call("set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2" % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                # force format 2 for the conversion target via a temp ceph.conf
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call('qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s' % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.call('rbd mv %s/%s %s/%s' % (pool, tmp_image_name, pool, image_name))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        # health check; a successful empty response is enough
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def delete(self, req):
        """Delete a volume; refuses when it still has snapshots."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)

        o = shell.call('rbd snap ls --format json %s' % path)
        o = jsonobject.loads(o)
        if len(o) > 0:
            raise Exception('unable to delete %s; the volume still has snapshots' % cmd.installPath)

        shell.call('rbd rm %s' % path)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
class CephAgent(object):
    """HTTP agent for the Ceph backup storage (variant with size/facts APIs).

    Each *_PATH constant is an HTTP endpoint; __init__ wires it to the
    matching handler method.
    """

    INIT_PATH = "/ceph/backupstorage/init"
    DOWNLOAD_IMAGE_PATH = "/ceph/backupstorage/image/download"
    DELETE_IMAGE_PATH = "/ceph/backupstorage/image/delete"
    PING_PATH = "/ceph/backupstorage/ping"
    ECHO_PATH = "/ceph/backupstorage/echo"
    GET_IMAGE_SIZE_PATH = "/ceph/backupstorage/image/getsize"
    GET_FACTS = "/ceph/backupstorage/facts"

    # class-level server: one shared instance for the whole agent process
    http_server = http.HttpServer(port=7761)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH, self.download)
        self.http_server.register_async_uri(self.DELETE_IMAGE_PATH, self.delete)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_IMAGE_SIZE_PATH, self.get_image_size)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

    def _set_capacity_to_response(self, rsp):
        """Fill rsp.totalCapacity / rsp.availableCapacity from `ceph df`.

        Handles both the newer output (total_bytes/total_avail_bytes, in
        bytes) and the older one (total_space/total_avail, in KB).
        """
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)

        # Fix: test field presence with `is not None` instead of truthiness,
        # so a legitimate value of 0 is not mistaken for a missing field
        # (consistent with the newest ceph agent in this codebase).
        if df.stats.total_bytes__ is not None:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__ is not None:
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        if df.stats.total_avail_bytes__ is not None:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__ is not None:
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)

        rsp.totalCapacity = total
        rsp.availableCapacity = avail

    @replyerror
    def echo(self, req):
        # liveness probe; the body is irrelevant
        logger.debug('get echoed')
        return ''

    def _normalize_install_path(self, path):
        # NOTE: lstrip() removes a *character set*, not a prefix; this works
        # for "ceph://pool/image" only because '/' terminates the first strip.
        return path.lstrip('ceph:').lstrip('//')

    def _get_file_size(self, path):
        """Return the virtual size in bytes of the rbd image at pool/image."""
        o = shell.call('rbd --format json info %s' % path)
        o = jsonobject.loads(o)
        return long(o.size_)

    @replyerror
    def get_image_size(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetImageSizeRsp()
        path = self._normalize_install_path(cmd.installPath)
        rsp.size = self._get_file_size(path)
        return jsonobject.dumps(rsp)

    @replyerror
    def get_facts(self, req):
        """Report the cluster fsid so the manager can identify this ceph."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        rsp = GetFactsRsp()
        rsp.fsid = fsid
        return jsonobject.dumps(rsp)

    @replyerror
    def init(self, req):
        """Verify/create the configured pools; report fsid and capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 100' % pool.name)

        rsp = InitRsp()
        rsp.fsid = fsid
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    def _parse_install_path(self, path):
        # see _normalize_install_path for the lstrip caveat
        return path.lstrip('ceph:').lstrip('//').split('/')

    @replyerror
    @rollback
    def download(self, req):
        """Import cmd.url (http(s):// or file://) into ceph as pool/image.

        qcow2 images are converted to raw with qemu-img before taking the
        final name; the temporary image is removed on rollback.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        if cmd.url.startswith('http://') or cmd.url.startswith('https://'):
            # pipefail so a failed wget is not masked by `rbd import` exiting 0
            shell.call(
                'set -o pipefail; wget --no-check-certificate -q -O - %s | rbd import --image-format 2 - %s/%s' % (cmd.url, pool, tmp_image_name))
            actual_size = linux.get_file_size_by_http_head(cmd.url)
        elif cmd.url.startswith('file://'):
            # NOTE: lstrip('file:') strips a char set, not the literal prefix;
            # safe here only because the next char is always '/'
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            shell.call("rbd import --image-format 2 %s %s/%s" % (src_path, pool, tmp_image_name))
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
        _1()

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2" % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                # force format 2 for the conversion target via a temp ceph.conf
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s' % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.call('rbd mv %s/%s %s/%s' % (pool, tmp_image_name, pool, image_name))

        o = shell.call('rbd --format json info %s/%s' % (pool, image_name))
        image_stats = jsonobject.loads(o)

        rsp = DownloadRsp()
        rsp.size = long(image_stats.size_)
        rsp.actualSize = actual_size
        rsp.format = file_format
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        """Probe writability by creating and removing a tiny test image.

        A failed create is reported as an operation failure; the cleanup
        `rbd rm` is deliberately best-effort.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = PingRsp()

        create_img = shell.ShellCmd('rbd create %s --image-format 2 --size 1' % cmd.testImagePath)
        create_img(False)
        if create_img.return_code != 0:
            rsp.success = False
            rsp.operationFailure = True
            rsp.error = "%s %s" % (create_img.stderr, create_img.stdout)
        else:
            rm_img = shell.ShellCmd('rbd rm %s' % cmd.testImagePath)
            rm_img(False)

        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """Remove the image referenced by cmd.installPath, report capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        shell.call('rbd rm %s/%s' % (pool, image_name))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
class CephAgent(object):
    """HTTP agent that manages a Ceph-backed backup storage.

    Registers a set of REST endpoints on an embedded http server (port 7761)
    and implements them by shelling out to the `ceph`, `rbd`, `rados` and
    `qemu-img` command-line tools.  Responses are serialized with
    `jsonobject.dumps`.

    NOTE(review): fields read from `jsonobject.loads(...)` use the trailing
    underscore convention (e.g. `size_`, `total_bytes_`) — presumably the
    project's jsonobject accessor style; verify against the jsonobject module.
    """

    # REST paths served by this agent.
    INIT_PATH = "/ceph/backupstorage/init"
    DOWNLOAD_IMAGE_PATH = "/ceph/backupstorage/image/download"
    UPLOAD_IMAGE_PATH = "/ceph/backupstorage/image/upload"
    UPLOAD_PROGRESS_PATH = "/ceph/backupstorage/image/progress"
    DELETE_IMAGE_PATH = "/ceph/backupstorage/image/delete"
    PING_PATH = "/ceph/backupstorage/ping"
    ECHO_PATH = "/ceph/backupstorage/echo"
    GET_IMAGE_SIZE_PATH = "/ceph/backupstorage/image/getsize"
    GET_FACTS = "/ceph/backupstorage/facts"
    GET_IMAGES_METADATA = "/ceph/backupstorage/getimagesmetadata"
    DELETE_IMAGES_METADATA = "/ceph/backupstorage/deleteimagesmetadata"
    DUMP_IMAGE_METADATA_TO_FILE = "/ceph/backupstorage/dumpimagemetadatatofile"
    CHECK_IMAGE_METADATA_FILE_EXIST = "/ceph/backupstorage/checkimagemetadatafileexist"
    CHECK_POOL_PATH = "/ceph/backupstorage/checkpool"
    GET_LOCAL_FILE_SIZE = "/ceph/backupstorage/getlocalfilesize/"
    MIGRATE_IMAGE_PATH = "/ceph/backupstorage/image/migrate"

    # Name of the image-metadata object kept in the backup-storage pool.
    CEPH_METADATA_FILE = "bs_ceph_info.json"
    # URL scheme marking a client-push upload instead of an agent-pull download.
    UPLOAD_PROTO = "upload://"
    # An image UUID embedded in an upload:// URL is exactly this long.
    LENGTH_OF_UUID = 32

    # Class-level singletons shared by all instances (module runs one agent).
    http_server = http.HttpServer(port=7761)
    http_server.logfile_path = log.get_logfile_path()
    upload_tasks = UploadTasks()

    def __init__(self):
        """Bind every REST path to its handler on the shared http server."""
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH, self.download)
        # upload is a raw handler: it streams the multipart body itself.
        self.http_server.register_raw_uri(self.UPLOAD_IMAGE_PATH, self.upload)
        self.http_server.register_async_uri(self.UPLOAD_PROGRESS_PATH, self.get_upload_progress)
        self.http_server.register_async_uri(self.DELETE_IMAGE_PATH, self.delete)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_IMAGE_SIZE_PATH, self.get_image_size)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_async_uri(self.GET_IMAGES_METADATA, self.get_images_metadata)
        self.http_server.register_async_uri(
            self.CHECK_IMAGE_METADATA_FILE_EXIST,
            self.check_image_metadata_file_exist)
        self.http_server.register_async_uri(self.DUMP_IMAGE_METADATA_TO_FILE,
                                            self.dump_image_metadata_to_file)
        self.http_server.register_async_uri(
            self.DELETE_IMAGES_METADATA, self.delete_image_metadata_from_file)
        self.http_server.register_async_uri(self.CHECK_POOL_PATH, self.check_pool)
        self.http_server.register_async_uri(self.GET_LOCAL_FILE_SIZE, self.get_local_file_size)
        self.http_server.register_async_uri(self.MIGRATE_IMAGE_PATH, self.migrate_image)

    def _get_capacity(self):
        """Query `ceph df` and return (total_bytes, avail_bytes, [CephPoolCapacity]).

        Handles two generations of `ceph df -f json` output: newer releases
        report *_bytes fields, older ones report total_space/total_avail in KiB
        (hence the `* 1024`).  Raises if neither field set is present.
        """
        o = shell.call('ceph df -f json')
        df = jsonobject.loads(o)
        if df.stats.total_bytes__ is not None:
            total = long(df.stats.total_bytes_)
        elif df.stats.total_space__ is not None:
            # older ceph reports KiB
            total = long(df.stats.total_space__) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)
        if df.stats.total_avail_bytes__ is not None:
            avail = long(df.stats.total_avail_bytes_)
        elif df.stats.total_avail__ is not None:
            # older ceph reports KiB
            avail = long(df.stats.total_avail_) * 1024
        else:
            raise Exception('unknown ceph df output: %s' % o)
        poolCapacities = []
        if not df.pools:
            return total, avail, poolCapacities
        for pool in df.pools:
            poolAvailable = pool.stats.max_avail_
            poolUsed = pool.stats.bytes_used_
            # replica count of the pool, fetched per pool via 'ceph osd pool get'
            poolSize = jsonobject.loads(
                shell.call('ceph osd pool get %s size -f json' % pool.name)).size
            poolCapacity = CephPoolCapacity(pool.name, poolAvailable, poolSize, poolUsed)
            poolCapacities.append(poolCapacity)
        return total, avail, poolCapacities

    def _set_capacity_to_response(self, rsp):
        """Fill the common capacity fields of an agent response."""
        total, avail, poolCapacities = self._get_capacity()
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        rsp.poolCapacities = poolCapacities

    @replyerror
    def echo(self, req):
        """Liveness probe; returns an empty body."""
        logger.debug('get echoed')
        return ''

    def _normalize_install_path(self, path):
        """Strip the 'ceph://' prefix, yielding 'pool/image'.

        NOTE(review): str.lstrip strips a *character set*, not a prefix — a
        path whose pool name starts with c/e/p/h/: directly after the slashes
        would be over-stripped; confirm install paths always look like
        'ceph://pool/image'.
        """
        return path.lstrip('ceph:').lstrip('//')

    def _get_file_size(self, path):
        """Return the virtual size (bytes) of an rbd image at 'pool/image'."""
        o = shell.call('rbd --format json info %s' % path)
        o = jsonobject.loads(o)
        return long(o.size_)

    @replyerror
    def get_image_size(self, req):
        """REST: report the rbd virtual size of cmd.installPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetImageSizeRsp()
        path = self._normalize_install_path(cmd.installPath)
        rsp.size = self._get_file_size(path)
        return jsonobject.dumps(rsp)

    def _read_file_content(self, path):
        """Return the whole content of a local file."""
        with open(path) as f:
            return f.read()

    @in_bash
    @replyerror
    def get_images_metadata(self, req):
        """REST: pull the metadata file from ceph, keep only entries whose
        rbd image still exists, write the filtered file back, and return the
        valid entries (newline-separated JSON lines, newest first).
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool_name = cmd.poolName
        # backup-storage uuid is the last '-' token of the pool name
        bs_uuid = pool_name.split("-")[-1]
        valid_images_info = ""
        self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        last_image_install_path = ""
        bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
        with open(bs_ceph_info_file) as fd:
            images_info = fd.read()
            for image_info in images_info.split('\n'):
                if image_info != '':
                    image_json = jsonobject.loads(image_info)
                    # todo support multiple bs
                    image_uuid = image_json['uuid']
                    image_install_path = image_json["backupStorageRefs"][0][
                        "installPath"]
                    # keep the entry only if the rbd image still exists
                    ret = bash_r("rbd info %s" % image_install_path.split("//")[1])
                    if ret == 0:
                        logger.info(
                            "Check image %s install path %s successfully!"
                            % (image_uuid, image_install_path))
                        # skip consecutive duplicates of the same install path
                        if image_install_path != last_image_install_path:
                            valid_images_info = image_info + '\n' + valid_images_info
                            last_image_install_path = image_install_path
                    else:
                        logger.warn("Image %s install path %s is invalid!"
                                    % (image_uuid, image_install_path))
        self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        rsp = GetImageMetaDataResponse()
        rsp.imagesMetadata = valid_images_info
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def check_image_metadata_file_exist(self, req):
        """REST: check via `rados stat` whether the metadata object exists."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool_name = cmd.poolName
        bs_uuid = pool_name.split("-")[-1]
        rsp = CheckImageMetaDataFileExistResponse()
        rsp.backupStorageMetaFileName = self.CEPH_METADATA_FILE
        ret, output = bash_ro("rados -p bak-t-%s stat %s"
                              % (bs_uuid, self.CEPH_METADATA_FILE))
        if ret == 0:
            rsp.exist = True
        else:
            rsp.exist = False
        return jsonobject.dumps(rsp)

    def get_metadata_file(self, bs_uuid, file_name):
        """Fetch the metadata object from pool 'bak-t-<uuid>' into /tmp.

        Best-effort: the rados return code is ignored, so a missing object
        simply leaves no local file.
        """
        local_file_name = "/tmp/%s" % file_name
        bash_ro("rm -rf %s" % local_file_name)
        bash_ro("rados -p bak-t-%s get %s %s"
                % (bs_uuid, file_name, local_file_name))

    def put_metadata_file(self, bs_uuid, file_name):
        """Push /tmp/<file_name> back into pool 'bak-t-<uuid>'; on success
        remove the local copy."""
        local_file_name = "/tmp/%s" % file_name
        ret, output = bash_ro("rados -p bak-t-%s put %s %s"
                              % (bs_uuid, file_name, local_file_name))
        if ret == 0:
            bash_ro("rm -rf %s" % local_file_name)

    @in_bash
    @replyerror
    def dump_image_metadata_to_file(self, req):
        """REST: append (or rewrite, when dumpAllMetaData is set) image
        metadata into the local metadata file, then push it back to ceph.

        cmd.imageMetaData may be a single JSON object or a JSON list; a list
        is split into one JSON object per line.
        """
        def _write_info_to_metadata_file(fd):
            # content is a JSON list: strip '[' ']' and split on '},' so each
            # element lands on its own line; restore the chopped '}'.
            strip_list_content = content[1:-1]
            data_list = strip_list_content.split('},')
            for item in data_list:
                if item.endswith("}") is not True:
                    item = item + "}"
                fd.write(item + '\n')

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool_name = cmd.poolName
        bs_uuid = pool_name.split("-")[-1]
        content = cmd.imageMetaData
        dump_all_metadata = cmd.dumpAllMetaData
        if dump_all_metadata is True:
            # this means no metadata exist in ceph
            bash_r("touch /tmp/%s" % self.CEPH_METADATA_FILE)
        else:
            self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
        if content is not None:
            if '[' == content[0] and ']' == content[-1]:
                # a JSON list of image entries
                if dump_all_metadata is True:
                    with open(bs_ceph_info_file, 'w') as fd:
                        _write_info_to_metadata_file(fd)
                else:
                    with open(bs_ceph_info_file, 'a') as fd:
                        _write_info_to_metadata_file(fd)
            else:
                # one image info
                if dump_all_metadata is True:
                    with open(bs_ceph_info_file, 'w') as fd:
                        fd.write(content + '\n')
                else:
                    with open(bs_ceph_info_file, 'a') as fd:
                        fd.write(content + '\n')
        self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        rsp = DumpImageMetaDataToFileResponse()
        return jsonobject.dumps(rsp)

    @in_bash
    @replyerror
    def delete_image_metadata_from_file(self, req):
        """REST: remove every metadata line mentioning cmd.imageUuid
        (sed in-place delete) and push the file back to ceph."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        image_uuid = cmd.imageUuid
        pool_name = cmd.poolName
        bs_uuid = pool_name.split("-")[-1]
        self.get_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        bs_ceph_info_file = "/tmp/%s" % self.CEPH_METADATA_FILE
        ret, output = bash_ro("sed -i.bak '/%s/d' %s"
                              % (image_uuid, bs_ceph_info_file))
        self.put_metadata_file(bs_uuid, self.CEPH_METADATA_FILE)
        rsp = DeleteImageMetaDataResponse()
        rsp.ret = ret
        return jsonobject.dumps(rsp)

    @replyerror
    @in_bash
    def get_facts(self, req):
        """REST: report the cluster fsid and the monitor address that is
        routable from this host (matched against `ip route`)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        o = bash_o('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_
        rsp = GetFactsRsp()
        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        for mon in mon_facts.monmap.mons:
            ADDR = mon.addr.split(':')[0]
            # {{ADDR}} is bash_r's template substitution (in_bash decorator)
            if bash_r('ip route | grep -w {{ADDR}} > /dev/null') == 0:
                rsp.monAddr = ADDR
                break
        if not rsp.monAddr:
            raise Exception('cannot find mon address of the mon server[%s]'
                            % cmd.monUuid)
        rsp.fsid = fsid
        return jsonobject.dumps(rsp)

    @replyerror
    def init(self, req):
        """REST: verify predefined pools exist, create missing non-predefined
        ones (pg_num 128), and return fsid plus capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_
        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception(
                    'cannot find pool[%s] in the ceph cluster, you must create it manually'
                    % pool.name)
            elif pool.name not in existing_pools:
                shell.check_run('ceph osd pool create %s 128' % pool.name)
        rsp = InitRsp()
        rsp.fsid = fsid
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    def _parse_install_path(self, path):
        """Split 'ceph://pool/image' into [pool, image].

        NOTE(review): same lstrip character-set caveat as
        _normalize_install_path.
        """
        return path.lstrip('ceph:').lstrip('//').split('/')

    def _fail_task(self, task, reason):
        """Mark an upload task failed and abort the request."""
        task.fail(reason)
        raise Exception(reason)

    def _get_fifopath(self, uu):
        """Path of the named pipe used to stream an upload, under tmpdir."""
        import tempfile
        d = tempfile.gettempdir()
        return os.path.join(d, uu)

    # handler for multipart upload, requires:
    # - header X-IMAGE-UUID
    # - header X-IMAGE-SIZE
    def upload(self, req):
        """Raw REST handler: stream a multipart image upload into a fifo that
        feeds the rbd import (driven by stream_body).  The task must already
        exist — created by download() for an upload:// URL."""
        imageUuid = req.headers['X-IMAGE-UUID']
        imageSize = req.headers['X-IMAGE-SIZE']
        task = self.upload_tasks.get_task(imageUuid)
        if task is None:
            raise Exception('image not found %s' % imageUuid)
        task.expectedSize = long(imageSize)
        total, avail, poolCapacities = self._get_capacity()
        if avail <= task.expectedSize:
            self._fail_task(task, 'capacity not enough for size: ' + imageSize)
        entity = req.body
        boundary = get_boundary(entity)
        if not boundary:
            self._fail_task(task, 'unexpected post form')
        try:
            # prepare the fifo to save image upload
            fpath = self._get_fifopath(imageUuid)
            linux.rm_file_force(fpath)
            os.mkfifo(fpath)
            stream_body(task, fpath, entity, boundary)
        except Exception as e:
            self._fail_task(task, str(e))
        finally:
            linux.rm_file_force(fpath)

    def _prepare_upload(self, cmd):
        """Register an UploadTask for an upload:// URL; the image UUID is the
        32 chars right after the scheme."""
        start = len(self.UPLOAD_PROTO)
        imageUuid = cmd.url[start:start + self.LENGTH_OF_UUID]
        dstPath = self._normalize_install_path(cmd.installPath)
        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name
        tmpPath = '%s/%s' % (pool, tmp_image_name)
        task = UploadTask(imageUuid, cmd.installPath, dstPath, tmpPath)
        self.upload_tasks.add_task(task)

    def _get_upload_path(self, req):
        """Build the URL a client should POST the image to, reusing the Host
        header of the incoming request."""
        host = req[http.REQUEST_HEADER]['Host']
        return 'http://' + host + self.UPLOAD_IMAGE_PATH

    @replyerror
    def get_upload_progress(self, req):
        """REST: report progress of an in-flight upload task (0-90% while
        streaming, 100 when completed)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        task = self.upload_tasks.get_task(cmd.imageUuid)
        if task is None:
            raise Exception('image not found %s' % cmd.imageUuid)
        rsp = UploadProgressRsp()
        rsp.completed = task.completed
        rsp.installPath = task.installPath
        rsp.size = task.expectedSize
        rsp.actualSize = task.expectedSize
        if task.expectedSize == 0:
            rsp.progress = 0
        elif task.completed:
            rsp.progress = 100
        else:
            # cap streaming progress at 90%; the remaining 10% is conversion
            rsp.progress = task.downloadedSize * 90 / task.expectedSize
        if task.lastError is not None:
            rsp.success = False
            rsp.error = task.lastError
        rsp.format = task.image_format
        return jsonobject.dumps(rsp)

    @replyerror
    @rollback
    def download(self, req):
        """REST: import an image into ceph from http(s)/ftp, sftp, file, or
        register an upload:// task.

        The image is first imported as 'tmp-<name>'; qcow2 images are then
        converted to rbd-native (raw), raw images are renamed in place.
        @rollbackable hooks remove partial images if a later step fails.
        """
        rsp = DownloadRsp()

        def _get_origin_format(path):
            # Sniff the source format by reading the first 0x9007 bytes.
            qcow2_length = 0x9007
            if path.startswith('http://') or path.startswith(
                    'https://') or path.startswith('ftp://'):
                resp = urllib2.urlopen(path)
                qhdr = resp.read(qcow2_length)
                resp.close()
            elif path.startswith('sftp://'):
                # NOTE(review): relies on pipe_path/scp_to_pipe_cmd from the
                # enclosing sftp branch being set before this closure runs —
                # only valid when called from that branch.
                fd, tmp_file = tempfile.mkstemp()
                get_header_from_pipe_cmd = "timeout 60 head --bytes=%d %s > %s" % (
                    qcow2_length, pipe_path, tmp_file)
                clean_cmd = "pkill -f %s" % pipe_path
                shell.run(
                    '%s & %s && %s'
                    % (scp_to_pipe_cmd, get_header_from_pipe_cmd, clean_cmd))
                qhdr = os.read(fd, qcow2_length)
                if os.path.exists(tmp_file):
                    os.remove(tmp_file)
            else:
                resp = open(path)
                qhdr = resp.read(qcow2_length)
                resp.close()
            if len(qhdr) < qcow2_length:
                # too short to be qcow2; treat as raw
                return "raw"
            return get_image_format_from_buf(qhdr)

        def get_origin_format(fpath, fail_if_has_backing_file=True):
            # Reject qcow2 images that carry a backing file: importing them
            # standalone would lose data.
            image_format = _get_origin_format(fpath)
            if image_format == "derivedQcow2" and fail_if_has_backing_file:
                raise Exception('image has backing file or %s is not exist!'
                                % fpath)
            return image_format

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        @rollbackable
        def _1():
            # rollback: drop the temporary image on failure
            shell.check_run('rbd rm %s/%s' % (pool, tmp_image_name))

        def _getRealSize(length):
            '''length looks like: 10245K'''
            # NOTE(review): returns an int for suffixed values but the
            # original *string* for unsuffixed or unparsable ones; callers
            # compare the result with '<' / '>' which in Python 2 silently
            # mixes types — confirm intended.
            logger.debug(length)
            if not length[-1].isalpha():
                return length
            units = {
                "g": lambda x: x * 1024 * 1024 * 1024,
                "m": lambda x: x * 1024 * 1024,
                "k": lambda x: x * 1024,
            }
            try:
                if not length[-1].isalpha():
                    return length
                return units[length[-1].lower()](int(length[:-1]))
            except:
                logger.warn(linux.get_exception_stacktrace())
            return length

        # whether we have an upload request
        if cmd.url.startswith(self.UPLOAD_PROTO):
            self._prepare_upload(cmd)
            rsp.size = 0
            rsp.uploadPath = self._get_upload_path(req)
            self._set_capacity_to_response(rsp)
            return jsonobject.dumps(rsp)

        if cmd.sendCommandUrl:
            Report.url = cmd.sendCommandUrl

        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "AddImage"
        report.resourceUuid = cmd.imageUuid
        report.progress_report("0", "start")

        url = urlparse.urlparse(cmd.url)
        if url.scheme in ('http', 'https', 'ftp'):
            image_format = get_origin_format(cmd.url, True)
            cmd.url = linux.shellquote(cmd.url)
            # roll back tmp ceph file after import it
            _1()
            _, PFILE = tempfile.mkstemp()
            content_length = shell.call(
                'curl -sI %s|grep Content-Length' % cmd.url).strip().split()[1]
            total = _getRealSize(content_length)

            def _getProgress(synced):
                # parse wget's progress dump (PFILE) into a 0-90% report
                logger.debug(
                    "getProgress in ceph-bs-agent, synced: %s, total: %s"
                    % (synced, total))
                last = shell.call('tail -1 %s' % PFILE).strip()
                if not last or len(last.split()) < 1:
                    return synced
                logger.debug("last synced: %s" % last)
                written = _getRealSize(last.split()[0])
                if total > 0 and synced < written:
                    synced = written
                    if synced < total:
                        percent = int(round(float(synced) / float(total) * 90))
                        report.progress_report(percent, "report")
                return synced

            logger.debug("content-length is: %s" % total)
            _, _, err = bash_progress_1(
                'set -o pipefail;wget --no-check-certificate -O - %s 2>%s| rbd import --image-format 2 - %s/%s'
                % (cmd.url, PFILE, pool, tmp_image_name), _getProgress)
            if err:
                raise err
            actual_size = linux.get_file_size_by_http_head(cmd.url)
            if os.path.exists(PFILE):
                os.remove(PFILE)
        elif url.scheme == 'sftp':
            port = (url.port, 22)[url.port is None]
            _, PFILE = tempfile.mkstemp()
            pipe_path = PFILE + "fifo"
            scp_to_pipe_cmd = "scp -P %d -o StrictHostKeyChecking=no %s@%s:%s %s" % (
                port, url.username, url.hostname, url.path, pipe_path)
            sftp_command = "sftp -o StrictHostKeyChecking=no -o BatchMode=no -P %s -b /dev/stdin %s@%s" % (
                port, url.username, url.hostname) + " <<EOF\n%s\nEOF\n"
            if url.password is not None:
                scp_to_pipe_cmd = 'sshpass -p "%s" %s' % (url.password,
                                                          scp_to_pipe_cmd)
                sftp_command = 'sshpass -p "%s" %s' % (url.password,
                                                       sftp_command)
            # 5th column of 'ls -l' over sftp = file size in bytes
            actual_size = shell.call(
                sftp_command
                % ("ls -l " + url.path)).splitlines()[1].strip().split()[4]
            os.mkfifo(pipe_path)
            image_format = get_origin_format(cmd.url, True)
            cmd.url = linux.shellquote(cmd.url)
            # roll back tmp ceph file after import it
            _1()

            def _get_progress(synced):
                # pv -n writes a bare percentage per line into PFILE
                logger.debug("getProgress in add image")
                if not os.path.exists(PFILE):
                    return synced
                last = shell.call('tail -1 %s' % PFILE).strip()
                if not last or not last.isdigit():
                    return synced
                report.progress_report(int(last) * 90 / 100, "report")
                return synced

            get_content_from_pipe_cmd = "pv -s %s -n %s 2>%s" % (
                actual_size, pipe_path, PFILE)
            import_from_pipe_cmd = "rbd import --image-format 2 - %s/%s" % (
                pool, tmp_image_name)
            _, _, err = bash_progress_1(
                'set -o pipefail; %s & %s | %s'
                % (scp_to_pipe_cmd, get_content_from_pipe_cmd,
                   import_from_pipe_cmd), _get_progress)
            if os.path.exists(PFILE):
                os.remove(PFILE)
            if os.path.exists(pipe_path):
                os.remove(pipe_path)
            if err:
                raise err
        elif url.scheme == 'file':
            # NOTE(review): lstrip('file:') strips a character set, not the
            # literal prefix — fine for 'file://' URLs but worth confirming.
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            image_format = get_origin_format(src_path, True)
            # roll back tmp ceph file after import it
            _1()
            shell.check_run("rbd import --image-format 2 %s %s/%s"
                            % (src_path, pool, tmp_image_name))
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2"
            % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            # convert qcow2 to rbd-native; force image-format 2 via a
            # temporary ceph.conf copy with 'rbd default format = 2'
            conf_path = None
            try:
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)
                shell.check_run(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s'
                    % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.check_run('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.check_run('rbd mv %s/%s %s/%s'
                            % (pool, tmp_image_name, pool, image_name))
        report.progress_report("100", "finish")

        @rollbackable
        def _2():
            # rollback: drop the final image if a later step fails
            shell.check_run('rbd rm %s/%s' % (pool, image_name))
        _2()

        o = shell.call('rbd --format json info %s/%s' % (pool, image_name))
        image_stats = jsonobject.loads(o)
        rsp.size = long(image_stats.size_)
        rsp.actualSize = actual_size
        # qcow2 sources were converted, so the stored image is raw
        if image_format == "qcow2":
            rsp.format = "raw"
        else:
            rsp.format = image_format
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        """REST: health check — verify the expected mon address is still in
        the monmap, then prove write access by putting/removing a test
        object via rados."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = PingRsp()
        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        found = False
        for mon in mon_facts.monmap.mons:
            if cmd.monAddr in mon.addr:
                found = True
                break
        if not found:
            rsp.success = False
            rsp.failure = "MonAddrChanged"
            rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \
                        'Reconnect the ceph primary storage' \
                        ' may solve this issue' % (cmd.monUuid, cmd.monAddr)
            return jsonobject.dumps(rsp)
        pool, objname = cmd.testImagePath.split('/')
        create_img = shell.ShellCmd("echo zstack | rados -p '%s' put '%s' -"
                                    % (pool, objname))
        create_img(False)
        if create_img.return_code != 0:
            rsp.success = False
            rsp.failure = 'UnableToCreateFile'
            rsp.error = "%s %s" % (create_img.stderr, create_img.stdout)
        else:
            shell.run("rados -p '%s' rm '%s'" % (pool, objname))
        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """REST: delete an rbd image, retrying for up to 30s because a
        crashed client may still hold a watcher on it."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)

        def delete_image(_):
            # in case image is deleted, we don't have to wait for timeout
            img = "%s/%s" % (pool, image_name)
            shell.check_run('rbd info %s && rbd rm %s' % (img, img))
            return True

        # 'rbd rm' might fail due to client crash. We wait for 30 seconds as suggested by 'rbd'.
        #
        # rbd: error: image still has watchers
        # This means the image is still open or the client using it crashed. Try again after
        # closing/unmapping it or waiting 30s for the crashed client to timeout.
        linux.wait_callback_success(delete_image, interval=5, timeout=30,
                                    ignore_exception_in_callback=True)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def check_pool(self, req):
        """REST: fail if any requested pool is missing from the cluster."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.name not in existing_pools:
                raise Exception(
                    'cannot find pool[%s] in the ceph cluster, you must create it manually'
                    % pool.name)
        return jsonobject.dumps(AgentResponse())

    @replyerror
    def get_local_file_size(self, req):
        """REST: size of a plain local file (not an rbd image)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetLocalFileSizeRsp()
        rsp.size = linux.get_local_file_size(cmd.path)
        return jsonobject.dumps(rsp)

    def _migrate_image(self, image_uuid, image_size, src_install_path,
                       dst_install_path, dst_mon_addr, dst_mon_user,
                       dst_mon_passwd, dst_mon_port):
        """Stream an rbd image to another ceph cluster over ssh and verify it
        with md5 sums computed on both ends.

        Returns 0 on success, the shell return code on transfer failure, or
        -1 on checksum mismatch.  Uses bash process substitution (tee >(...)),
        so it must run under bash (callers are @in_bash).
        """
        src_install_path = self._normalize_install_path(src_install_path)
        dst_install_path = self._normalize_install_path(dst_install_path)
        rst = shell.run(
            'rbd export %s - | tee >(md5sum >/tmp/%s_src_md5) | sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'tee >(md5sum >/tmp/%s_dst_md5) | rbd import - %s\''
            % (src_install_path, image_uuid, dst_mon_passwd, dst_mon_user,
               dst_mon_addr, dst_mon_port, image_uuid, dst_install_path))
        if rst != 0:
            return rst
        src_md5 = self._read_file_content('/tmp/%s_src_md5' % image_uuid)
        dst_md5 = shell.call(
            'sshpass -p "%s" ssh -o StrictHostKeyChecking=no %s@%s -p %s \'cat /tmp/%s_dst_md5\''
            % (dst_mon_passwd, dst_mon_user, dst_mon_addr, dst_mon_port,
               image_uuid))
        if src_md5 != dst_md5:
            return -1
        else:
            return 0

    @replyerror
    @in_bash
    def migrate_image(self, req):
        """REST: migrate an image to another ceph backup storage."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        rst = self._migrate_image(cmd.imageUuid, cmd.imageSize,
                                  cmd.srcInstallPath, cmd.dstInstallPath,
                                  cmd.dstMonHostname, cmd.dstMonSshUsername,
                                  cmd.dstMonSshPassword, cmd.dstMonSshPort)
        if rst != 0:
            rsp.success = False
            rsp.error = "Failed to migrate image from one ceph backup storage to another."
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
class FusionstorAgent(object):
    """HTTP agent that manages a Fusionstor-backed backup storage.

    Mirrors CephAgent's endpoint layout but delegates all storage operations
    to the `lichbd` helper module instead of the ceph CLI tools.
    """

    # REST paths served by this agent.
    INIT_PATH = "/fusionstor/backupstorage/init"
    DOWNLOAD_IMAGE_PATH = "/fusionstor/backupstorage/image/download"
    DELETE_IMAGE_PATH = "/fusionstor/backupstorage/image/delete"
    PING_PATH = "/fusionstor/backupstorage/ping"
    ECHO_PATH = "/fusionstor/backupstorage/echo"
    GET_IMAGE_SIZE_PATH = "/fusionstor/backupstorage/image/getsize"
    GET_FACTS = "/fusionstor/backupstorage/facts"

    # Class-level singletons shared by all instances.
    http_server = http.HttpServer(port=7763)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Bind every REST path to its handler on the shared http server."""
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH, self.download)
        self.http_server.register_async_uri(self.DELETE_IMAGE_PATH, self.delete)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_IMAGE_SIZE_PATH, self.get_image_size)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

    def _set_capacity_to_response(self, rsp):
        """Fill capacity fields from lichbd's (total, used) report."""
        total, used = lichbd.lichbd_get_capacity()
        rsp.totalCapacity = total
        rsp.availableCapacity = total - used

    @replyerror
    def echo(self, req):
        """Liveness probe; returns an empty body."""
        logger.debug('get echoed')
        return ''

    def _normalize_install_path(self, path):
        """Strip the 'fusionstor://' prefix, yielding 'pool/image'.

        NOTE(review): str.lstrip strips a character set, not a prefix —
        assumes install paths always start with 'fusionstor://'.
        """
        return path.lstrip('fusionstor:').lstrip('//')

    def _get_file_size(self, path):
        """Return the size of a lichbd file at 'pool/image'."""
        return lichbd.lichbd_file_size(path)

    @replyerror
    def get_image_size(self, req):
        """REST: report the size of cmd.installPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetImageSizeRsp()
        path = self._normalize_install_path(cmd.installPath)
        rsp.size = self._get_file_size(path)
        return jsonobject.dumps(rsp)

    @replyerror
    def get_facts(self, req):
        """REST: report the cluster fsid."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetFactsRsp()
        rsp.fsid = lichbd.lichbd_get_fsid()
        return jsonobject.dumps(rsp)

    @replyerror
    def init(self, req):
        """REST: verify predefined pools exist, create missing non-predefined
        ones, and return fsid plus capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        existing_pools = lichbd.lichbd_lspools()
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception(
                    'cannot find pool[%s] in the fusionstor cluster, you must create it manually'
                    % pool.name)
            elif pool.name not in existing_pools:
                lichbd.lichbd_mkpool(pool.name)
        rsp = InitRsp()
        rsp.fsid = lichbd.lichbd_get_fsid()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    def _parse_install_path(self, path):
        """Split 'fusionstor://pool/image' into [pool, image] (same lstrip
        caveat as _normalize_install_path)."""
        return path.lstrip('fusionstor:').lstrip('//').split('/')

    @replyerror
    @rollback
    def download(self, req):
        """REST: import an image from an http(s) or file URL into lichbd.

        The image lands as 'tmp-<name>' first; @rollbackable removes partial
        state if a later step fails.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name
        lichbd_file = os.path.join(pool, image_name)
        tmp_lichbd_file = os.path.join(pool, tmp_image_name)
        protocol = lichbd.get_protocol()
        lichbd.lichbd_mkpool(os.path.dirname(lichbd_file))
        if cmd.url.startswith('http://') or cmd.url.startswith('https://'):
            shell.call(
                'set -o pipefail; wget --no-check-certificate -q -O - %s | lichbd import - %s -p %s'
                % (cmd.url, tmp_lichbd_file, protocol))
            actual_size = linux.get_file_size_by_http_head(cmd.url)
        elif cmd.url.startswith('file://'):
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            lichbd.lichbd_import(src_path, tmp_lichbd_file)
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        @rollbackable
        def _1():
            # rollback: clean up both temporary and final files
            if lichbd.lichbd_file_exist(tmp_lichbd_file):
                lichbd.lichbd_rm(tmp_lichbd_file)
            lichbd.lichbd_rm(lichbd_file)
        _1()

        file_format = lichbd.lichbd_get_format(tmp_lichbd_file)
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)
        # NOTE(review): argument order is (lichbd_file, tmp_lichbd_file);
        # whether lichbd_mv is (dst, src) or (src, dst) cannot be confirmed
        # from here — verify against the lichbd module, since the size is
        # read from lichbd_file right after.
        lichbd.lichbd_mv(lichbd_file, tmp_lichbd_file)
        size = lichbd.lichbd_file_size(lichbd_file)
        rsp = DownloadRsp()
        rsp.size = size
        rsp.actualSize = actual_size
        rsp.format = file_format.strip('\n')
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        """REST: health check — ensure a small test image exists (create a 1b
        raw image if absent); any other stat error marks the ping failed."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = PingRsp()
        if cmd.testImagePath:
            pool = cmd.testImagePath.split('/')[0]
            testImagePath = '%s/this-is-a-test-image-with-long-name' % pool
            shellcmd = lichbd.lichbd_file_info(testImagePath)
            if shellcmd.return_code == errno.ENOENT:
                # test image missing: try to create it to prove write access
                try:
                    lichbd.lichbd_create_raw(testImagePath, '1b')
                except Exception, e:
                    rsp.success = False
                    rsp.operationFailure = True
                    rsp.error = str(e)
                    logger.debug("%s" % rsp.error)
            elif shellcmd.return_code == 0:
                # already exists: storage is reachable
                pass
            else:
                rsp.success = False
                rsp.operationFailure = True
                rsp.error = "%s %s" % (shellcmd.cmd, shellcmd.stderr)
                logger.debug("%s: %s" % (shellcmd.cmd, shellcmd.stderr))
        return jsonobject.dumps(rsp)
class SftpBackupStorageAgent(object):
    '''HTTP agent that manages a plain-filesystem (sftp-accessed) backup
    storage.

    Stores images as regular files under a storage path supplied by the
    connect command, and exposes download/delete/metadata/sshkey endpoints
    on an embedded http server (port 7171).
    '''
    # REST paths served by this agent.
    CONNECT_PATH = "/sftpbackupstorage/connect"
    DOWNLOAD_IMAGE_PATH = "/sftpbackupstorage/download"
    DELETE_IMAGE_PATH = "/sftpbackupstorage/delete"
    PING_PATH = "/sftpbackupstorage/ping"
    GET_SSHKEY_PATH = "/sftpbackupstorage/sshkey"
    ECHO_PATH = "/sftpbackupstorage/echo"
    WRITE_IMAGE_METADATA = "/sftpbackupstorage/writeimagemetadata"

    # Image media types.
    IMAGE_TEMPLATE = 'template'
    IMAGE_ISO = 'iso'
    # Supported download URL schemes.
    URL_HTTP = 'http'
    URL_HTTPS = 'https'
    URL_FILE = 'file'
    URL_NFS = 'nfs'

    PORT = 7171
    # Private key handed to peers so they can scp images from this host.
    SSHKEY_PATH = "~/.ssh/id_rsa.sftp"

    # Class-level singletons shared by all instances.
    http_server = http.HttpServer(PORT)
    http_server.logfile_path = log.get_logfile_path()

    def get_capacity(self):
        """Return (total, available) bytes of the filesystem holding
        storage_path; requires connect() to have set storage_path."""
        total = linux.get_total_disk_size(self.storage_path)
        used = linux.get_used_disk_size(self.storage_path)
        return (total, total - used)

    @replyerror
    def ping(self, req):
        """REST: report this agent's uuid as a liveness signal."""
        rsp = PingResponse()
        rsp.uuid = self.uuid
        return jsonobject.dumps(rsp)

    @replyerror
    def echo(self, req):
        """Liveness probe; returns an empty body."""
        logger.debug('get echoed')
        return ''

    @replyerror
    def connect(self, req):
        """REST: adopt storagePath/uuid from the command, create the storage
        directory if needed, and report capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        self.storage_path = cmd.storagePath
        self.uuid = cmd.uuid
        if os.path.isfile(self.storage_path):
            raise Exception('storage path: %s is a file' % self.storage_path)
        if not os.path.exists(self.storage_path):
            os.makedirs(self.storage_path, 0755)
        (total, avail) = self.get_capacity()
        logger.debug(
            http.path_msg(
                self.CONNECT_PATH,
                'connected, [storage path:%s, total capacity: %s bytes, available capacity: %s size]'
                % (self.storage_path, total, avail)))
        rsp = ConnectResponse()
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        return jsonobject.dumps(rsp)

    def _write_image_metadata(self, image_install_path, meta_data):
        """Write meta_data (plus computed size/md5sum) as meta_data.json next
        to the image file; returns (size, md5sum)."""
        image_dir = os.path.dirname(image_install_path)
        md5sum = linux.md5sum(image_install_path)
        size = os.path.getsize(image_install_path)
        # copy the jsonobject's attribute dict so we can add computed fields
        meta = dict(meta_data.__dict__.items())
        meta['size'] = size
        meta['md5sum'] = md5sum
        metapath = os.path.join(image_dir, 'meta_data.json')
        with open(metapath, 'w') as fd:
            fd.write(jsonobject.dumps(meta, pretty=True))
        return (size, md5sum)

    @replyerror
    def write_image_metadata(self, req):
        """REST: persist metadata for an already-installed image."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        meta_data = cmd.metaData
        self._write_image_metadata(meta_data.installPath, meta_data)
        rsp = WriteImageMetaDataResponse()
        return jsonobject.dumps(rsp)

    @replyerror
    def download_image(self, req):
        """REST: fetch an image (http/https via wget, file via cp) into
        cmd.installPath and report its size and capacity.

        NOTE(review): md5Sum is deliberately reported as 'not calculated' —
        computing it for large images would be expensive.
        """
        #TODO: report percentage to mgmt server
        def percentage_callback(percent, url):
            logger.debug('Downloading %s ... %s%%' % (url, percent))

        def use_wget(url, name, workdir, timeout):
            # download into workdir and rename to the final image name
            return linux.wget(url, workdir=workdir, rename=name,
                              timeout=timeout, interval=2,
                              callback=percentage_callback,
                              callback_data=url)

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = DownloadResponse()
        supported_schemes = [self.URL_HTTP, self.URL_HTTPS, self.URL_FILE]
        if cmd.urlScheme not in supported_schemes:
            rsp.success = False
            rsp.error = 'unsupported url scheme[%s], SimpleSftpBackupStorage only supports %s' % (
                cmd.urlScheme, supported_schemes)
            return jsonobject.dumps(rsp)
        path = os.path.dirname(cmd.installPath)
        if not os.path.exists(path):
            os.makedirs(path, 0755)
        image_name = os.path.basename(cmd.installPath)
        install_path = cmd.installPath
        timeout = cmd.timeout if cmd.timeout else 7200
        if cmd.urlScheme in [self.URL_HTTP, self.URL_HTTPS]:
            try:
                ret = use_wget(cmd.url, image_name, path, timeout)
                if ret != 0:
                    rsp.success = False
                    rsp.error = 'http/https download failed, [wget -O %s %s] returns value %s' % (
                        image_name, cmd.url, ret)
                    return jsonobject.dumps(rsp)
            except linux.LinuxError as e:
                traceback.format_exc()
                rsp.success = False
                rsp.error = str(e)
                return jsonobject.dumps(rsp)
        elif cmd.urlScheme == self.URL_FILE:
            # NOTE(review): lstrip('file:') strips a character set, not the
            # literal prefix — fine for 'file://' URLs but worth confirming.
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            # 'yes |' auto-confirms any interactive cp overwrite prompt
            shell.call('yes | cp %s %s' % (src_path, install_path))
        size = os.path.getsize(install_path)
        md5sum = 'not calculated'
        logger.debug('successfully downloaded %s to %s' % (cmd.url, install_path))
        (total, avail) = self.get_capacity()
        rsp.md5Sum = md5sum
        rsp.size = size
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_image(self, req):
        """REST: delete an image by removing its whole parent directory
        (each image lives in its own directory)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = DeleteResponse()
        path = os.path.dirname(cmd.installUrl)
        shutil.rmtree(path)
        logger.debug('successfully deleted bits[%s]' % cmd.installUrl)
        (total, avail) = self.get_capacity()
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        return jsonobject.dumps(rsp)

    @replyerror
    def get_sshkey(self, req):
        """REST: return the agent's private sftp key so peers can pull
        images from this host."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetSshKeyResponse()
        path = os.path.expanduser(self.SSHKEY_PATH)
        if not os.path.exists(path):
            err = "Cannot find private key of SftpBackupStorageAgent"
            rsp.error = err
            rsp.success = False
            logger.warn("%s at %s" % (err, self.SSHKEY_PATH))
            return jsonobject.dumps(rsp)
        with open(path) as fd:
            sshkey = fd.read()
            rsp.sshKey = sshkey
            logger.debug("Get sshkey as %s" % sshkey)
            return jsonobject.dumps(rsp)

    def __init__(self):
        '''
        Register all REST handlers; storage_path/uuid stay None until a
        connect command arrives.
        '''
        self.http_server.register_sync_uri(self.CONNECT_PATH, self.connect)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH, self.download_image)
        self.http_server.register_async_uri(self.DELETE_IMAGE_PATH, self.delete_image)
        self.http_server.register_async_uri(self.GET_SSHKEY_PATH, self.get_sshkey)
        self.http_server.register_async_uri(self.WRITE_IMAGE_METADATA, self.write_image_metadata)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.storage_path = None
        self.uuid = None
class ConsoleProxyAgent(object): PORT = 7758 http_server = http.HttpServer(PORT) http_server.logfile_path = log.get_logfile_path() CHECK_AVAILABILITY_PATH = "/console/check" ESTABLISH_PROXY_PATH = "/console/establish" DELETE_PROXY_PATH = "/console/delete" PING_PATH = "/console/ping" TOKEN_FILE_DIR = "/var/lib/zstack/consoleProxy/" PROXY_LOG_DIR = "/var/log/zstack/consoleProxy/" DB_NAME = "consoleProxy" #TODO: sync db status and current running processes def __init__(self): self.http_server.register_async_uri(self.CHECK_AVAILABILITY_PATH, self.check_proxy_availability) self.http_server.register_async_uri(self.ESTABLISH_PROXY_PATH, self.establish_new_proxy) self.http_server.register_async_uri(self.DELETE_PROXY_PATH, self.delete) self.http_server.register_sync_uri(self.PING_PATH, self.ping) if not os.path.exists(self.PROXY_LOG_DIR): os.makedirs(self.PROXY_LOG_DIR, 0755) if not os.path.exists(self.TOKEN_FILE_DIR): os.makedirs(self.TOKEN_FILE_DIR, 0755) self.db = filedb.FileDB(self.DB_NAME) def _make_token_file_name(self, cmd): target_ip_str = cmd.targetHostname.replace('.', '_') return '%s' % cmd.token def _get_pid_on_port(self, port): out = shell.call('netstat -anp | grep ":%s" | grep LISTEN' % port, exception=False) out = out.strip(' \n\t\r') if "" == out: return None pid = out.split()[-1].split('/')[0] try: pid = int(pid) return pid except: return None def _check_proxy_availability(self, args): proxyPort = args['proxyPort'] targetHostname = args['targetHostname'] targetPort = args['targetPort'] token = args['token'] pid = self._get_pid_on_port(proxyPort) if not pid: logger.debug('no websockify on proxy port[%s], availability false' % proxyPort) return False with open(os.path.join('/proc', str(pid), 'cmdline'), 'r') as fd: process_cmdline = fd.read() if 'websockify' not in process_cmdline: logger.debug('process[pid:%s] on proxy port[%s] is not websockify process, availability false' % (pid, proxyPort)) return False info_str = self.db.get(token) if not info_str: 
logger.debug('cannot find information for process[pid:%s] on proxy port[%s], availability false' % (pid, proxyPort)) return False info = jsonobject.loads(info_str) if token != info['token']: logger.debug('metadata[token] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false' % (pid, proxyPort, token, info['token'])) return False if targetPort != info['targetPort']: logger.debug('metadata[targetPort] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false' % (pid, proxyPort, targetPort, info['targetPort'])) return False if targetHostname != info['targetHostname']: logger.debug('metadata[targetHostname] for process[pid:%s] on proxy port[%s] are changed[%s --> %s], availability false' % (pid, proxyPort, targetHostname, info['targetHostname'])) return False return True @replyerror def ping(self, req): return jsonobject.dumps(AgentResponse()) @replyerror def check_proxy_availability(self, req): cmd = jsonobject.loads(req[http.REQUEST_BODY]) ret = self._check_proxy_availability({'proxyPort':cmd.proxyPort, 'targetHostname':cmd.targetHostname, 'targetPort':cmd.targetPort, 'token':cmd.token}) rsp = CheckAvailabilityRsp() rsp.available = ret return jsonobject.dumps(rsp) @replyerror @lock.lock('console-proxy') def delete(self, req): cmd = jsonobject.loads(req[http.REQUEST_BODY]) token_file = os.path.join(self.TOKEN_FILE_DIR, self._make_token_file_name(cmd)) shell.call("rm -f %s" % token_file) logger.debug('deleted a proxy by command: %s' % req[http.REQUEST_BODY]) rsp = AgentResponse() return jsonobject.dumps(rsp) @replyerror @lock.lock('console-proxy') def establish_new_proxy(self, req): #check parameters, generate token file,set db,check process is alive,start process if not, cmd = jsonobject.loads(req[http.REQUEST_BODY]) rsp = EstablishProxyRsp() log_file = os.path.join(self.PROXY_LOG_DIR, cmd.proxyHostname) ## def check_parameters(): if not cmd.targetHostname: raise ConsoleProxyError('targetHostname cannot be null') if 
not cmd.targetPort: raise ConsoleProxyError('targetPort cannot be null') if not cmd.token: raise ConsoleProxyError('token cannot be null') if not cmd.proxyHostname: raise ConsoleProxyError('proxyHostname cannot be null') try: check_parameters() except ConsoleProxyError as e: err = linux.get_exception_stacktrace() logger.warn(err) rsp.error = str(e) rsp.success = False return jsonobject.dumps(rsp) ## token_file = os.path.join(self.TOKEN_FILE_DIR, self._make_token_file_name(cmd)) with open(token_file, 'w') as fd: fd.write('%s: %s:%s' % (cmd.token, cmd.targetHostname, cmd.targetPort)) info = { 'proxyHostname': cmd.proxyHostname, 'proxyPort': cmd.proxyPort, 'targetHostname': cmd.targetHostname, 'targetPort': cmd.targetPort, 'token': cmd.token, 'logFile': log_file, 'tokenFile': token_file } info_str = jsonobject.dumps(info) self.db.set(cmd.token, info_str) rsp.proxyPort = cmd.proxyPort logger.debug('successfully add new proxy token file %s' % info_str) ##if process exists,return out = shell.call("ps aux | grep websockify") alive = False for o in out.split('\n'): if o.find(cmd.proxyHostname) != -1: alive = True break if alive: return jsonobject.dumps(rsp) ##start a new websockify process timeout = cmd.idleTimeout if not timeout: timeout = 600 @lock.file_lock('/run/xtables.lock') def enable_proxy_port(): bash_errorout("iptables-save | grep -- '-A INPUT -p tcp -m tcp --dport {{PROXY_PORT}}' > /dev/null || iptables -I INPUT -p tcp -m tcp --dport {{PROXY_PORT}} -j ACCEPT") @in_bash def start_proxy(): LOG_FILE = log_file PROXY_HOST_NAME = cmd.proxyHostname PROXY_PORT = cmd.proxyPort TOKEN_FILE_DIR = self.TOKEN_FILE_DIR TIMEOUT = timeout start_cmd = '''python -c "from zstacklib.utils import log; import websockify; log.configure_log('{{LOG_FILE}}'); websockify.websocketproxy.websockify_init()" {{PROXY_HOST_NAME}}:{{PROXY_PORT}} -D --target-config={{TOKEN_FILE_DIR}} --idle-timeout={{TIMEOUT}} ''' ret,out,err = bash_roe(start_cmd) if ret != 0: err = [] err.append('failed to 
execute bash command: %s' % start_cmd) err.append('return code: %s' % ret) err.append('stdout: %s' % out) err.append('stderr: %s' % err) raise ConsoleProxyError('\n'.join(err)) else: enable_proxy_port() start_proxy() logger.debug('successfully establish new proxy%s' % info_str) return jsonobject.dumps(rsp)
class ApplianceVm(object):
    """Agent running inside a ZStack appliance VM.

    Provides an echo endpoint and a firewall-refresh endpoint, and
    installs restrictive default iptables policies at startup.
    """

    http_server = http.HttpServer(port=7759)
    http_server.logfile_path = log.get_logfile_path()

    REFRESH_FIREWALL_PATH = "/appliancevm/refreshfirewall"
    ECHO_PATH = "/appliancevm/echo"

    @lock.file_lock('iptables')
    def set_default_iptable_rules(self):
        """Install default-deny policies plus a minimal set of ACCEPT rules."""
        shell.call('iptables --policy INPUT DROP')
        shell.call('iptables --policy FORWARD DROP')

        # NOTE: 22 port of eth0 is opened in /etc/sysconfig/iptables by default
        ipt = iptables.from_iptables_save()
        ipt.add_rule('-A INPUT -m state --state RELATED,ESTABLISHED -j ACCEPT')
        ipt.add_rule('-A INPUT -i lo -j ACCEPT')
        ipt.add_rule('-A INPUT -p icmp -j ACCEPT')
        ipt.add_rule('-A INPUT -j REJECT --reject-with icmp-host-prohibited')
        ipt.add_rule('-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT')
        # fill UDP checksums for DHCP replies (mangle table)
        ipt.add_rule('-A POSTROUTING -p udp --dport bootpc -j CHECKSUM --checksum-fill',
                     iptables.IPTables.MANGLE_TABLE_NAME)
        ipt.iptable_restore()

    @replyerror
    def echo(self, req):
        """Liveness probe endpoint; returns an empty body."""
        logger.debug('get echoed')
        return ''

    def _build_accept_rule(self, chain_name, to, nic_name, proto):
        """Build one ACCEPT rule string for `proto` ('udp' or 'tcp') from rule spec `to`.

        Optional source/destination IP filters are included when present
        on the rule spec.
        """
        r = ['-A %s' % chain_name]
        if to.sourceIp:
            r.append('-s %s' % to.sourceIp)
        if to.destIp:
            r.append('-d %s' % to.destIp)
        r.append('-i %s -p %s -m state --state NEW -m %s --dport %s:%s -j ACCEPT' % (
            nic_name, proto, proto, to.startPort, to.endPort))
        return ' '.join(r)

    @replyerror
    @lock.file_lock('iptables')
    def refresh_rule(self, req):
        """Rebuild the 'appliancevm' chain from the rules in the request body."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = RefreshFirewallRsp()

        ipt = iptables.from_iptables_save()
        # replace bootstrap 22 port rule with a more restricted one that binds to eth0's IP
        ipt.remove_rule('-A INPUT -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT')
        eth0_ip = linux.get_ip_by_nic_name('eth0')
        assert eth0_ip, 'cannot find IP of eth0'
        ipt.add_rule('-A INPUT -d %s/32 -i eth0 -p tcp -m tcp --dport 22 -j ACCEPT' % eth0_ip)

        chain_name = 'appliancevm'
        # recreate the chain from scratch so stale rules disappear
        ipt.delete_chain(chain_name)
        ipt.add_rule('-A INPUT -j %s' % chain_name)

        for to in cmd.rules:
            if to.destIp:
                nic_name = linux.get_nic_name_by_ip(to.destIp)
            else:
                nic_name = linux.get_nic_name_from_alias(
                    linux.get_nic_names_by_mac(to.nicMac))

            # protocol 'all' expands to one udp and one tcp rule
            if to.protocol == 'all' or to.protocol == 'udp':
                ipt.add_rule(self._build_accept_rule(chain_name, to, nic_name, 'udp'))
            if to.protocol == 'all' or to.protocol == 'tcp':
                ipt.add_rule(self._build_accept_rule(chain_name, to, nic_name, 'tcp'))

        ipt.iptable_restore()
        logger.debug('refreshed rules for appliance vm')
        return jsonobject.dumps(rsp)

    def start(self, in_thread=True):
        """Apply default firewall rules, register endpoints and start the HTTP server."""
        self.set_default_iptable_rules()

        self.http_server.register_async_uri(self.REFRESH_FIREWALL_PATH, self.refresh_rule)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        if in_thread:
            self.http_server.start_in_thread()
        else:
            self.http_server.start()

    def stop(self):
        """Stop the embedded HTTP server."""
        self.http_server.stop()
class SurfsAgent(object):
    """Backup-storage agent backed by a Surfs cluster."""

    INIT_PATH = "/surfs/backupstorage/init"
    DOWNLOAD_IMAGE_PATH = "/surfs/backupstorage/image/download"
    DELETE_IMAGE_PATH = "/surfs/backupstorage/image/delete"
    PING_PATH = "/surfs/backupstorage/ping"
    ECHO_PATH = "/surfs/backupstorage/echo"
    GET_IMAGE_SIZE_PATH = "/surfs/backupstorage/image/getsize"
    GET_FACTS = "/surfs/backupstorage/facts"
    GET_LOCAL_FILE_SIZE = "/surfs/backupstorage/getlocalfilesize"

    http_server = http.HttpServer(port=6732)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DOWNLOAD_IMAGE_PATH, self.download)
        self.http_server.register_async_uri(self.DELETE_IMAGE_PATH, self.delete)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_IMAGE_SIZE_PATH, self.get_image_size)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_async_uri(self.GET_LOCAL_FILE_SIZE, self.get_local_file_size)

        self.fsid = 'surfsc48-2cef-454c-b0d0-b6e6b467c022'
        self.tmp_image_path = '/usr/lib/surfstmpimages'
        if os.path.exists(self.tmp_image_path) is False:
            shell.call('mkdir %s' % self.tmp_image_path)
        self.surfs_mgr = SurfsCmdManage()

    def _normalize_install_path(self, path):
        """Strip the surfs:// scheme, returning 'pool/...'-style path.

        NOTE: lstrip strips a character *set*, not a literal prefix; this
        relies on install paths being canonical 'surfs://...' strings.
        """
        return path.lstrip('surfs:').lstrip('//')

    def _set_capacity_to_response(self, rsp):
        """Fill total/available capacity on `rsp`, summed over healthy pools only."""
        # removed an unused 'surfs connect' command-string local
        total = 0
        used = 0
        rmsg = self.surfs_mgr.get_pool_msg()
        for pl in rmsg:
            if pl["success"] is True:
                total = total + pl["total"]
                used = used + pl["used"]
        rsp.totalCapacity = total
        rsp.availableCapacity = total - used

    def _parse_install_path(self, path):
        """Split a surfs:// install path into its components."""
        return path.lstrip('surfs:').lstrip('//').split('/')

    @replyerror
    def echo(self, req):
        """Liveness probe endpoint; returns an empty body."""
        logger.debug('get echoed')
        return ''

    @replyerror
    def get_facts(self, req):
        """Report the cluster fsid so the mgmt server can identify this storage."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetFactsRsp()
        rsp.fsid = self.fsid
        return jsonobject.dumps(rsp)

    @replyerror
    def init(self, req):
        """Report fsid and capacity on initial handshake."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = InitRsp()
        rsp.fsid = self.fsid
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    @rollback
    def download(self, req):
        """Import an image into Surfs from an http(s) or file URL.

        Determines the expected (actual) size up front, validates the
        image format, then delegates the transfer to
        SurfsCmdManage.download_image_to_surfs.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        # removed dead locals that computed unused temp-file paths

        if cmd.url.startswith('http://') or cmd.url.startswith('https://'):
            cmd.url = linux.shellquote(cmd.url)
            # size reported by the remote server via HTTP HEAD
            actual_size = linux.get_file_size_by_http_head(cmd.url)
        elif cmd.url.startswith('file://'):
            # NOTE: lstrip strips a character set, not the literal 'file://' prefix
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        file_format = ''
        if "raw" in cmd.imageFormat:
            file_format = 'raw'
        if "qcow2" in cmd.imageFormat:
            file_format = 'qcow2'
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if self.surfs_mgr.download_image_to_surfs(cmd.url, image_name, file_format) is False:
            raise Exception('Can not download image from %s' % cmd.url)

        size = self.surfs_mgr.get_iamge_size(image_name)
        rsp = DownloadRsp()
        rsp.size = size
        rsp.actualSize = actual_size
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        """Health check: verify the Surfs pools are reachable and not broken."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = PingRsp()
        if cmd.testImagePath:
            rmsg = self.surfs_mgr.get_pool_msg()
            if rmsg is None:
                rsp.success = False
                rsp.operationFailure = True
                rsp.error = "can not to do surfs connect"
                logger.debug("%s" % rsp.error)
            else:
                if len(rmsg) > 0:
                    # any single broken pool fails the ping
                    for rsg in rmsg:
                        if rsg['success'] is False:
                            rsp.success = False
                            rsp.operationFailure = True
                            rsp.error = "Surfs is ready,but pool is breaken"
                            logger.debug("Surfs is ready,but pool is breaken")
                            break
                else:
                    rsp.success = False
                    rsp.operationFailure = True
                    rsp.error = "Surfs is ready,but pool is Null"
                    logger.debug("Surfs is ready,but pool is Null")
        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """Delete an image from Surfs and report updated capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        self.surfs_mgr.delete_image(image_name)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def get_local_file_size(self, req):
        """Return the size of a local file named by cmd.path."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetLocalFileSizeRsp()
        # drops a 7-character scheme prefix -- presumably 'file://'; verify
        # against the caller
        filedir = cmd.path[7:]
        if os.path.exists(filedir):
            rsp.size = linux.get_local_file_size(filedir)
        else:
            rsp.size = 0
            rsp.success = False
            rsp.error = "The file is not exist"
        return jsonobject.dumps(rsp)

    def _get_file_size(self, pool, image_name):
        """Return the size of `image_name`; `pool` is accepted but unused by Surfs."""
        return self.surfs_mgr.get_iamge_size(image_name)

    @replyerror
    def get_image_size(self, req):
        """Report the stored size of the image named by cmd.installPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetImageSizeRsp()
        pool, image_name = self._parse_install_path(cmd.installPath)
        rsp.size = self._get_file_size(pool, image_name)
        return jsonobject.dumps(rsp)
class SurfsAgent(object):
    """Primary-storage agent backed by a Surfs cluster."""

    INIT_PATH = "/surfs/primarystorage/init"
    CREATE_VOLUME_PATH = "/surfs/primarystorage/volume/createempty"
    DELETE_PATH = "/surfs/primarystorage/delete"
    CLONE_PATH = "/surfs/primarystorage/volume/clone"
    FLATTEN_PATH = "/surfs/primarystorage/volume/flatten"
    SFTP_DOWNLOAD_PATH = "/surfs/primarystorage/sftpbackupstorage/download"
    SFTP_UPLOAD_PATH = "/surfs/primarystorage/sftpbackupstorage/upload"
    ECHO_PATH = "/surfs/primarystorage/echo"
    CREATE_SNAPSHOT_PATH = "/surfs/primarystorage/snapshot/create"
    DELETE_SNAPSHOT_PATH = "/surfs/primarystorage/snapshot/delete"
    COMMIT_IMAGE_PATH = "/surfs/primarystorage/snapshot/commit"
    PROTECT_SNAPSHOT_PATH = "/surfs/primarystorage/snapshot/protect"
    ROLLBACK_SNAPSHOT_PATH = "/surfs/primarystorage/snapshot/rollback"
    UNPROTECT_SNAPSHOT_PATH = "/surfs/primarystorage/snapshot/unprotect"
    CP_PATH = "/surfs/primarystorage/volume/cp"
    DELETE_POOL_PATH = "/surfs/primarystorage/deletepool"
    GET_VOLUME_SIZE_PATH = "/surfs/primarystorage/getvolumesize"
    PING_PATH = "/surfs/primarystorage/ping"
    GET_FACTS = "/surfs/primarystorage/facts"
    ATTACH_VOLUME_PREPARE = "/surfs/primarystorage/attachprepare"
    DETACH_VOLUME_AFTER = "/surfs/primarystorage/detachafter"
    START_VM_BEFORE = "/surfs/primarystorage/startvmbefore"
    SURFS_MIGRATE_PREPARE = "/surfs/primarystorage/migrateprepare"
    SURFS_MIGRATE_AFTER = "/surfs/primarystorage/migrateafter"

    http_server = http.HttpServer(port=6731)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        # wire every primary-storage endpoint to its handler; some handlers
        # are defined further down in this file
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DELETE_PATH, self.delete)
        self.http_server.register_async_uri(self.CREATE_VOLUME_PATH, self.create)
        self.http_server.register_async_uri(self.CLONE_PATH, self.clone)
        self.http_server.register_async_uri(self.COMMIT_IMAGE_PATH, self.commit_image)
        self.http_server.register_async_uri(self.CREATE_SNAPSHOT_PATH, self.create_snapshot)
        self.http_server.register_async_uri(self.DELETE_SNAPSHOT_PATH, self.delete_snapshot)
        self.http_server.register_async_uri(self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot)
        self.http_server.register_async_uri(self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot)
        self.http_server.register_async_uri(self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot)
        self.http_server.register_async_uri(self.FLATTEN_PATH, self.flatten)
        self.http_server.register_async_uri(self.SFTP_DOWNLOAD_PATH, self.sftp_download)
        self.http_server.register_async_uri(self.SFTP_UPLOAD_PATH, self.sftp_upload)
        self.http_server.register_async_uri(self.CP_PATH, self.cp)
        self.http_server.register_async_uri(self.DELETE_POOL_PATH, self.delete_pool)
        self.http_server.register_async_uri(self.GET_VOLUME_SIZE_PATH, self.get_volume_size)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)
        self.http_server.register_async_uri(self.ATTACH_VOLUME_PREPARE, self.attach_datavol_prepare)
        self.http_server.register_async_uri(self.DETACH_VOLUME_AFTER, self.detach_datavol_after)
        self.http_server.register_async_uri(self.START_VM_BEFORE, self.start_vm_before)
        self.http_server.register_async_uri(self.SURFS_MIGRATE_PREPARE, self.migrate_vm_before)
        self.http_server.register_async_uri(self.SURFS_MIGRATE_AFTER, self.migrate_vm_after)

        self.fsid = 'surfsc48-2cef-454c-b0d0-b6e6b467c022'
        self.surfs_mgr = SurfsCmdManage()

    @replyerror
    def init(self, req):
        """Report fsid, capacity and the (hard-coded) user key on handshake."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = InitRsp()
        rsp.fsid = self.fsid
        self._set_capacity_to_response(rsp)
        rsp.userKey = "AQDVyu9VXrozIhAAuT2yMARKBndq9g3W8KUQvw=="
        return jsonobject.dumps(rsp)

    @replyerror
    def delete(self, req):
        """Delete the volume named by cmd.installPath.

        The parsed path has either 2 or 3 components; the volume name is
        always the last one.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        xmsg = self._parse_install_path(cmd.installPath)
        pool = ''
        vol_name = ''
        if len(xmsg) == 2:
            pool = xmsg[0]
            vol_name = xmsg[1]
        if len(xmsg) == 3:
            pool = xmsg[1]
            vol_name = xmsg[2]
        self.surfs_mgr.delete_volume(vol_name)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def migrate_vm_after(self, req):
        """Post-migration handling for the root volume and any data volumes.

        cmd.datainstallPath is a comma-separated list of 'pool:volume'
        entries; malformed entries are skipped.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rootmsg = cmd.rootinstallPath.split("/")
        datamsg = cmd.datainstallPath
        rsp = AgentResponse()
        if self.surfs_mgr.init_name is None:
            rsp.success = False
            rsp.error = 'can not get local initorname'
        self.surfs_mgr.migrate_vm_after(rootmsg[2], rootmsg[3])
        if datamsg is None or len(datamsg) == 0:
            return jsonobject.dumps(rsp)
        # bug fix: the original iterated over an undefined name 'vmsg',
        # raising NameError whenever data volumes were present
        for x in datamsg.split(','):
            dvl = x.split(':')
            if len(dvl) != 2:
                continue
            self.surfs_mgr.migrate_vm_after(dvl[0], dvl[1])
        return jsonobject.dumps(rsp)

    @replyerror
    def migrate_vm_before(self, req):
        """Pre-migration preparation for the root volume and any data volumes."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rootmsg = cmd.rootinstallPath.split("/")
        datamsg = cmd.datainstallPath
        rsp = AgentResponse()
        if self.surfs_mgr.init_name is None:
            rsp.success = False
            rsp.error = 'can not get local initorname'
        # return value of migrate_vm_prepare was never used; dropped the binding
        self.surfs_mgr.migrate_vm_prepare(rootmsg[2], rootmsg[3])
        if datamsg is None or len(datamsg) == 0:
            return jsonobject.dumps(rsp)
        # bug fix: same undefined-'vmsg' NameError as in migrate_vm_after
        for x in datamsg.split(','):
            dvl = x.split(':')
            if len(dvl) != 2:
                continue
            self.surfs_mgr.migrate_vm_prepare(dvl[0], dvl[1])
        return jsonobject.dumps(rsp)

    @replyerror
    def start_vm_before(self, req):
        """Resume/attach volumes before a VM starts.

        The root volume is always resumed; each 'pool:volume' data-volume
        entry is either resumed remotely or linked as a local file device
        when a local fileio directory exists.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        xmsg = cmd.installPath.split('/')
        vmsg = cmd.volinstallPath
        rsp = AgentResponse()
        if self.surfs_mgr.init_name is None:
            rsp.success = False
            rsp.error = 'can not get local initorname'
        self.surfs_mgr.start_vm_vol_resume(xmsg[2], xmsg[3])
        if vmsg is None or len(vmsg) == 0:
            return jsonobject.dumps(rsp)
        for x in vmsg.split(','):
            dvl = x.split(':')
            if len(dvl) != 2:
                continue
            if self.surfs_mgr.check_nodeip_result(dvl[1]) is True:
                poolmsg = self.surfs_mgr.get_vol_info(dvl[1])
                if poolmsg is None:
                    self.surfs_mgr.start_vm_vol_resume(dvl[0], dvl[1])
                else:
                    fileio_dir = '/' + poolmsg['pool'] + '/' + dvl[1] + '/fileio'
                    if os.path.exists(fileio_dir) is False:
                        self.surfs_mgr.start_vm_vol_resume(dvl[0], dvl[1])
                    else:
                        self.surfs_mgr.local_disk_link(fileio_dir, dvl[1], dvl[0])
            else:
                self.surfs_mgr.start_vm_vol_resume(dvl[0], dvl[1])
        return jsonobject.dumps(rsp)

    @replyerror
    def detach_datavol_after(self, req):
        """Clean up the target after a data volume is detached."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AttachDataVolRsp()
        xmsg = cmd.installPath.split('/')
        if len(xmsg) != 4:
            rsp.success = False
            rsp.error = 'installPath[' + cmd.installPath + '] is error'
            return jsonobject.dumps(rsp)
        self.surfs_mgr.clean_target_after_detach(xmsg[2], xmsg[3])
        return jsonobject.dumps(rsp)

    @replyerror
    def attach_datavol_prepare(self, req):
        """Prepare a data volume for attachment.

        If the volume's pool lives on this node and has a fileio
        directory, link it as a local file device; otherwise export it as
        an iSCSI-style target.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AttachDataVolRsp()
        xmsg = cmd.installPath.split('/')
        if len(xmsg) != 4:
            rsp.success = False
            rsp.error = 'installPath[' + cmd.installPath + '] is error'
            return jsonobject.dumps(rsp)
        self.surfs_mgr.check_vol_before_attach(xmsg[3], cmd.volsize, cmd.voltype, cmd.mgip)
        vol_ip = self.surfs_mgr.get_volume_pool_ip(xmsg[3])
        bsign = False
        if vol_ip == cmd.mgip:
            poolmsg = self.surfs_mgr.get_vol_info(xmsg[3])
            if poolmsg is None:
                bsign = True
            else:
                fileio_dir = '/' + poolmsg['pool'] + '/' + xmsg[3] + '/fileio'
                if os.path.exists(fileio_dir) is False:
                    bsign = True
                else:
                    self.surfs_mgr.local_disk_link(fileio_dir, xmsg[3], xmsg[2])
                    rsp.devicetype = 'file'
        else:
            bsign = True
        if bsign is True:
            self.surfs_mgr.export_root_target(self.surfs_mgr.init_name, xmsg[3], xmsg[2])
            self.surfs_mgr._find_target_path(xmsg[3])
        return jsonobject.dumps(rsp)

    @replyerror
    def create(self, req):
        """Handle an empty-volume creation request (actual creation is disabled)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        _, pool, image_name = self._parse_install_path(cmd.installPath)
        # round up to the next whole GB
        size_G = sizeunit.Byte.toGigaByte(cmd.size) + 1
        size = "%dG" % (size_G)
        v_type = self.surfs_mgr.back_type
        try:
            v_type = getattr(cmd, 'poolcls')
        except Exception:
            logger.warn('Can not get attribute:poolcls')
        # NOTE(review): the actual volume creation is commented out in the
        # original -- only capacity is reported; confirm this is intended
        #self.surfs_mgr.create_data_volume(image_name,size,v_type)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def clone(self, req):
        """Clone a volume or image to a new destination volume."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        _, src_vol_id = self._parse_install_path(cmd.srcPath)
        _, pname, dst_vol_id = self._parse_install_path(cmd.dstPath)
        # source id is encoded as '<x>@<id>@<type>'
        _, src_id, src_type = src_vol_id.split('@')
        if src_type == 'image':
            self.surfs_mgr.clone_image(src_id, dst_vol_id)
        else:
            self.surfs_mgr.clone_vol(src_vol_id, dst_vol_id)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def commit_image(self, req):
        """Create a volume from a snapshot (commit a snapshot as an image)."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        _, _, s_id = self._parse_install_path(cmd.snapshotPath)
        _, _, v_id = self._parse_install_path(cmd.dstPath)
        self.surfs_mgr.create_vol_from_snap(s_id, v_id)
        rsp = CpRsp()
        # FIXME(review): 'dpath' is undefined here -- this line raises
        # NameError (swallowed by @replyerror, so commit always errors);
        # it likely should size the destination volume. Kept as-is pending
        # confirmation of the intended source of the size.
        rsp.size = self._get_file_size(dpath)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def create_snapshot(self, req):
        """Create a snapshot of a volume (or report size for image snapshots).

        Snapshot path format: '<pool-path>@<snapshot-name>@<pool-type>'.
        With cmd.skipOnExisting, creation is skipped for images and for
        volumes that already have snapshots.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)
        do_create = True
        imagename, sp_name, pooltype = spath.split('@')
        xmsg = imagename.split('/')
        image_name = ''
        if len(xmsg) == 2:
            image_name = xmsg[1]
        if len(xmsg) == 3:
            image_name = xmsg[2]
        rsp = CreateSnapshotRsp()
        if pooltype == 'image':
            pass
        else:
            # a volume must have been attached at least once to be snapshotted
            if self.surfs_mgr.get_vol_info(image_name) is None:
                rsp.success = False
                rsp.error = 'The volume has never be attached to any vm'
                return jsonobject.dumps(rsp)
        if cmd.skipOnExisting:
            if pooltype == 'image':
                do_create = False
            else:
                snaps = self.surfs_mgr.get_snap_exist_byvol(image_name)
                for s in snaps:
                    do_create = False
        if do_create:
            self.surfs_mgr.create_snapshot(image_name, sp_name)
        if pooltype == 'image':
            rsp.size = self.surfs_mgr.get_iamge_size(sp_name)
        else:
            rsp.size = self.surfs_mgr.get_vol_size(image_name)
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)

    @replyerror
    def delete_snapshot(self, req):
        """Delete a snapshot; on failure the error is recorded and re-raised."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)
        xmsg = spath.split('@')
        sp_name = xmsg[1]
        rsp = AgentResponse()
        try:
            self.surfs_mgr.delete_snapshot(sp_name)
            self._set_capacity_to_response(rsp)
        except Exception as e:
            logger.debug('%s' % str(e))
            rsp.success = False
            rsp.error = str(e)
            # re-raise so @replyerror reports the failure
            raise
        return jsonobject.dumps(rsp)