def create_nic_job(obj, guest, name, mac, bridge, network, options=None):
    """Register a pysilhouette job that adds a NIC to a guest.

    @param obj: web handler object providing orm / pysilhouette / me
    @param guest: machine row the job group is associated with
    @param name: guest (domain) name
    @param mac: MAC address for the new NIC
    @param bridge: bridge name, or None to omit the option
    @param network: network name, or None to omit the option
    @param options: extra options for the add-nic command (mutated in place
        when supplied by the caller, as before)
    """
    # BUGFIX: the original used a mutable default (options={}) and mutated
    # it, leaking 'name'/'mac'/... keys between calls. Use None instead.
    if options is None:
        options = {}
    options['name'] = name
    options['mac'] = mac
    if bridge is not None:
        options['bridge'] = bridge
    if network is not None:
        options['network'] = network
    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   VIRT_COMMAND_ADD_NIC), options)
    cmdname = u"Create NIC"
    _jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    _jobgroup.jobs.append(Job('%s command' % cmdname, 0, _cmd))
    _machine2jobgroup = m2j_new(
        machine=guest,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=obj.me,
        modified_user=obj.me,
        )
    save_job_collaboration(
        obj.orm,
        obj.pysilhouette.orm,
        _machine2jobgroup,
        _jobgroup,
        )
def firewall_save(obj, model, options={}):
    """Queue a job group that saves (initializes) the firewall settings.

    The save command is registered against *model* through pysilhouette.
    Always returns True.
    """
    command_path = "%s/%s" % (karesansui.config['application.bin.dir'],
                              FIREWALL_COMMAND_SAVE_FIREWALL)
    _cmd = dict2command(command_path, options)
    cmdname = u"Initialize Firewall"
    jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    jobgroup.jobs.append(Job("%s command" % cmdname, 0, _cmd))
    m2j = m2j_new(machine=model,
                  jobgroup_id=-1,
                  uniq_key=karesansui.sheconf['env.uniqkey'],
                  created_user=obj.me,
                  modified_user=obj.me,
                  )
    save_job_collaboration(obj.orm,
                           obj.pysilhouette.orm,
                           m2j,
                           jobgroup,
                           )
    return True
def exec_disk_job(obj, guest, disk_job, volume_job=None, order=0):
    """Register the job(s) that attach a disk to a guest OS.

    If *volume_job* is given it is queued ahead of *disk_job*.
    Always returns True.
    """
    cmdname = u"Add Disk"
    jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    if volume_job is not None:
        jobgroup.jobs.append(volume_job)
    jobgroup.jobs.append(disk_job)
    m2j = m2j_new(machine=guest,
                  jobgroup_id=-1,
                  uniq_key=karesansui.sheconf['env.uniqkey'],
                  created_user=obj.me,
                  modified_user=obj.me,
                  )
    save_job_collaboration(obj.orm,
                           obj.pysilhouette.orm,
                           m2j,
                           jobgroup,
                           )
    return True
def change_mac_job(obj, guest, name, old_mac, new_mac, options=None):
    """Register a job that changes a guest NIC's MAC address.

    @param name: guest (domain) name
    @param old_mac: current MAC address ('from' option)
    @param new_mac: new MAC address ('to' option)
    @param options: extra options for the set-mac-address command
    @return: True
    """
    # BUGFIX: mutable default argument was mutated (options={}); use None.
    if options is None:
        options = {}
    options['name'] = name
    options['from'] = old_mac
    options['to'] = new_mac
    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   VIRT_COMMAND_SET_MAC_ADDRESS), options)
    cmdname = u"Change MAC Address"
    _jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    _jobgroup.jobs.append(Job("%s command" % cmdname, 0, _cmd))
    _machine2jobgroup = m2j_new(machine=guest,
                                jobgroup_id=-1,
                                uniq_key=karesansui.sheconf['env.uniqkey'],
                                created_user=obj.me,
                                modified_user=obj.me,
                                )
    save_job_collaboration(obj.orm,
                           obj.pysilhouette.orm,
                           _machine2jobgroup,
                           _jobgroup,
                           )
    return True
def delete_storage_pool_job(obj, host, name, options=None):
    """Register a job that deletes the named storage pool.

    @param host: host machine row the job is associated with
    @param name: storage pool name
    @param options: extra options for the delete-storage-pool command
    @return: True
    """
    # TODO (carried over from original)
    # BUGFIX: mutable default argument was mutated (options={}); use None.
    if options is None:
        options = {}
    options['name'] = name
    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   VIRT_COMMAND_DELETE_STORAGE_POOL), options)
    cmdname = u"Delete Storage Pool"
    _jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    _jobgroup.jobs.append(Job("%s command" % cmdname, 0, _cmd))
    _machine2jobgroup = m2j_new(
        machine=host,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=obj.me,
        modified_user=obj.me,
        )
    save_job_collaboration(
        obj.orm,
        obj.pysilhouette.orm,
        _machine2jobgroup,
        _jobgroup,
        )
    return True
def network_start_stop_job(obj, host_id, network_name, action):
    """
    Register start/stop network job into pysilhouette

    @param obj: Rest object
    @param network_name: Name of network to start or stop
    @type network_name: string
    @param action: 'start' or 'stop'
    @type action: string
    @raise KaresansuiException: if the network name is empty, the
        pysilhouette unique key is missing/blank, or *action* is invalid
    """
    if not network_name:
        raise KaresansuiException
    # BUGFIX: the original tested .strip('') which strips no characters,
    # so a whitespace-only uniqkey passed this guard; .strip() trims it.
    if (karesansui.sheconf.has_key('env.uniqkey') is False) \
           or (karesansui.sheconf['env.uniqkey'].strip() == ''):
        raise KaresansuiException
    if not (action == 'start' or action == 'stop'):
        raise KaresansuiException
    host = findbyhost1(obj.orm, host_id)
    _cmd = None
    _jobgroup = None
    if action == 'start':
        # restart_network.py with force=None actually (re)starts the network
        cmdname = ["Start Network", "start network"]
        _cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       'restart_network.py'),
            dict(name=network_name, force=None))
    else:
        cmdname = ["Stop Network", "stop network"]
        _cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       'stop_network.py'),
            dict(name=network_name))
    # Job Register
    _jobgroup = JobGroup(cmdname[0], karesansui.sheconf['env.uniqkey'])
    _jobgroup.jobs.append(Job('%s command' % cmdname[1], 0, _cmd))
    _machine2jobgroup = m2j_new(
        machine=host,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=obj.me,
        modified_user=obj.me,
        )
    # INSERT
    save_job_collaboration(
        obj.orm,
        obj.pysilhouette.orm,
        _machine2jobgroup,
        _jobgroup,
        )
def service_job(obj, host, name, status):
    """Register a service control job.

    @param name: service name passed to the control command
    @param status: one of 'start', 'stop', 'restart', 'enable', 'disable'
    @raise ValueError: if *status* is not a supported action (the original
        used a bare ``raise`` with no active exception, which itself blew
        up with an unrelated error message)
    """
    if status == 'start':
        _cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       SERVICE_COMMAND_START), {"name": name})
        cmdname = "Start Service"
    elif status == 'stop':
        _cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       SERVICE_COMMAND_STOP), {"name": name})
        cmdname = "Stop Service"
    elif status == 'restart':
        _cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       SERVICE_COMMAND_RESTART), {"name": name})
        cmdname = "Restart Service"
    elif status == 'enable':
        # autostart command with the bare 'enable' flag (value None)
        _cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       SERVICE_COMMAND_AUTOSTART),
            { "name": name, "enable": None })
        cmdname = "Enable Autostart Service"
    elif status == 'disable':
        _cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       SERVICE_COMMAND_AUTOSTART),
            { "name": name, "disable": None })
        cmdname = "Disable Autostart Service"
    else:
        raise ValueError("unknown service status: %s" % status)
    _jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    _job = Job(cmdname, 0, _cmd)
    _jobgroup.jobs.append(_job)
    _machine2jobgroup = m2j_new(
        machine=host,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=obj.me,
        modified_user=obj.me,
        )
    save_job_collaboration(
        obj.orm,
        obj.pysilhouette.orm,
        _machine2jobgroup,
        _jobgroup,
        )
def _POST(self, *param, **params):
    """Take a snapshot of the guest identified by the URL parameters.

    Validates input, refuses when the guest is running, records the
    snapshot row, then queues a "take snapshot" job through pysilhouette.
    Returns 202 Accepted on success.
    """
    (host_id, guest_id) = self.chk_guestby1(param)
    if guest_id is None:
        return web.notfound()
    if not validates_snapshot(self):
        return web.badrequest(self.view.alert)
    guest = findbyguest1(self.orm, guest_id)
    kvs = KaresansuiVirtSnapshot(readonly=False)
    try:
        domname = kvs.kvc.uuid_to_domname(guest.uniq_key)
        if not domname:
            return web.conflict(web.ctx.path)
        virt = kvs.kvc.search_kvg_guests(domname)[0]
        # Snapshots may only be taken while the guest is shut down.
        if virt.is_active() is True:
            return web.badrequest(
                _("Guest is running. Please stop and try again. name=%s" %
                  domname))
    finally:
        kvs.finish()
    # Current epoch time serves as the snapshot id.
    # NOTE(review): this local shadows the builtin id().
    id = int(time.time())
    notebook = new_notebook(self.input.title, self.input.value)
    snapshot = new_snapshot(guest, id, self.me, self.me, notebook)
    save_snapshot(self.orm, snapshot)
    options = {}
    options['name'] = domname
    options['id'] = id
    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   VIRT_COMMAND_TAKE_SNAPSHOT), options)
    cmdname = 'Take Snapshot'
    _jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    _jobgroup.jobs.append(Job('%s command' % cmdname, 0, _cmd))
    _machine2jobgroup = m2j_new(
        machine=guest,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=self.me,
        modified_user=self.me,
        )
    save_job_collaboration(
        self.orm,
        self.pysilhouette.orm,
        _machine2jobgroup,
        _jobgroup,
        )
    return web.accepted()
def get_iscsi_cmd(obj, host_id):
    """Run the iSCSI-list command through pysilhouette and return the
    parsed network storage entries, or False on timeout/failure."""
    cmd_name = u'Get iSCSI List'
    jobgroup = JobGroup(cmd_name, karesansui.sheconf['env.uniqkey'])
    command_line = "%s/%s" % (karesansui.config['application.bin.dir'],
                              ISCSI_COMMAND_GET)
    jobgroup.jobs.append(Job('%s command' % cmd_name, 0, command_line))
    jobgroup.type = JOBGROUP_TYPE['PARALLEL']
    host = findbyhost1(obj.orm, host_id)
    m2j = m2j_new(machine=host,
                  jobgroup_id=-1,
                  uniq_key=karesansui.sheconf['env.uniqkey'],
                  created_user=obj.me,
                  modified_user=obj.me,
                  )
    # corp() waits for the job to complete (False on timeout/failure)
    if corp(obj.orm, obj.pysilhouette.orm, m2j, jobgroup) is False:
        return False
    ret = jobgroup.jobs[0].action_stdout
    return get_network_storages(ret)
def _POST(self, *param, **params):
    """Validate bonding form input and queue an "add bonding" job.

    Collects every checked target device (bonding_target_dev_select_ethN),
    plus the primary device and bonding mode, then registers the add
    command. Returns 202 Accepted.
    """
    host_id = self.chk_hostby1(param)
    if host_id is None:
        return web.notfound()
    target_regex = re.compile(
        r"^bonding_target_dev_select_(?P<dev>eth[0-9]+)")
    if not validates_bonding(self, target_regex):
        self.logger.debug("Add bonding failed. Did not validate.")
        return web.badrequest(self.view.alert)
    target_dev = []
    for field_name in self.input:
        matched = target_regex.match(field_name)
        if matched:
            target_dev.append(matched.group('dev'))
    primary = self.input.bonding_target_dev_primary
    mode = self.input.bonding_mode
    cmdname = u"Add Bonding Setting"
    options = {}
    options['dev'] = ','.join(target_dev)
    options["primary"] = primary
    options["mode"] = mode
    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   BONDING_COMMAND_ADD), options)
    jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    jobgroup.jobs.append(Job('%s command' % cmdname, 0, _cmd))
    host = findbyhost1(self.orm, host_id)
    m2j = m2j_new(
        machine=host,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=self.me,
        modified_user=self.me,
        )
    save_job_collaboration(
        self.orm,
        self.pysilhouette.orm,
        m2j,
        jobgroup,
        )
    return web.accepted()
def get_iscsi_cmd(obj, host_id):
    """Synchronously fetch the iSCSI target list for a host.

    Registers a parallel job group, waits for completion via corp(), and
    parses the command's stdout. Returns the parsed network storage list,
    or False on timeout/failure.
    """
    cmd_name = u'Get iSCSI List'
    jobgroup = JobGroup(cmd_name, karesansui.sheconf['env.uniqkey'])
    jobgroup.jobs.append(
        Job('%s command' % cmd_name, 0,
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       ISCSI_COMMAND_GET)))
    jobgroup.type = JOBGROUP_TYPE['PARALLEL']
    host = findbyhost1(obj.orm, host_id)
    machine2jobgroup = m2j_new(
        machine=host,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=obj.me,
        modified_user=obj.me,
        )
    ok = corp(obj.orm, obj.pysilhouette.orm, machine2jobgroup, jobgroup)
    if ok is False:
        return False
    network_storages = get_network_storages(jobgroup.jobs[0].action_stdout)
    return network_storages
def storagepool_start_stop_job(obj, host, pool_obj, status):
    """Queue the job(s) that start or stop a storage pool.

    When starting an iSCSI-backed pool, an extra "start iSCSI" job is
    queued ahead of the pool-start job (pool job gets order 1).
    """
    _iscsi_job = None
    if status == 'start':
        pool_info = pool_obj.get_info()
        if pool_info['type'].lower() == "iscsi":
            iscsi_cmdname = "Start iSCSI"
            iscsi_cmd = dict2command(
                "%s/%s" % (karesansui.config['application.bin.dir'],
                           ISCSI_COMMAND_START),
                {"iqn": pool_info['source']['dev_path']})
            _iscsi_job = Job('%s command' % iscsi_cmdname, 0, iscsi_cmd)
        cmdname = "Start Storage Pool"
        pool_cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       VIRT_COMMAND_START_STORAGE_POOL),
            {"name": pool_obj.get_storage_name()})
    else:
        cmdname = "Stop Storage Pool"
        pool_cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       VIRT_COMMAND_DESTROY_STORAGE_POOL),
            {"name": pool_obj.get_storage_name()})
    jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    if _iscsi_job:
        jobgroup.jobs.append(_iscsi_job)
        pool_order = 1
    else:
        pool_order = 0
    jobgroup.jobs.append(Job('%s command' % cmdname, pool_order, pool_cmd))
    machine2jobgroup = m2j_new(
        machine=host,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=obj.me,
        modified_user=obj.me,
        )
    save_job_collaboration(
        obj.orm,
        obj.pysilhouette.orm,
        machine2jobgroup,
        jobgroup,
        )
def _DELETE(self, *param, **params):
    """Delete a libvirt network (except "default") via a queued job.

    Returns 202 Accepted after the delete-network job is registered.
    """
    host_id = self.chk_hostby1(param)
    if host_id is None:
        return web.notfound()
    network_name = param[1]
    if not network_name:
        self.logger.debug("Network delete failed. Network not found.")
        return web.notfound("Network not found.")
    # The built-in "default" network must never be removed.
    if network_name == 'default':
        self.logger.debug(
            'Network delete failed. Target network is "default".')
        return web.badrequest('Target network "default" can not deleted.')
    host = findbyhost1(self.orm, host_id)
    options = {}
    options['name'] = network_name
    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   VIRT_COMMAND_DELETE_NETWORK), options)
    # Job Registration
    _jobgroup = JobGroup('Delete network: %s' % network_name,
                         karesansui.sheconf['env.uniqkey'])
    _jobgroup.jobs.append(Job('Delete network', 0, _cmd))
    _machine2jobgroup = m2j_new(
        machine=host,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=self.me,
        modified_user=self.me,
        )
    save_job_collaboration(
        self.orm,
        self.pysilhouette.orm,
        _machine2jobgroup,
        _jobgroup,
        )
    # NOTE(review): _jobgroup.id is presumably populated when
    # save_job_collaboration flushes the session — confirm.
    self.logger.debug('(Delete network) Job group id==%s', _jobgroup.id)
    url = '%s/job/%s.part' % (web.ctx.home, _jobgroup.id)
    self.logger.debug('Returning Location: %s' % url)
    return web.accepted()
def create_pool_dir_job(obj, machine, name, type_, target_path,
                        options=None, rollback_options=None):
    """Register a "create storage pool" job with a delete-pool rollback.

    @param name: storage pool name
    @param type_: pool type string passed to the command
    @param target_path: directory backing the pool
    @param options: extra options for the create command
    @param rollback_options: extra options for the rollback delete command
    @return: True
    """
    # BUGFIX: both option dicts were mutable defaults that got mutated,
    # leaking keys between calls; use None sentinels instead.
    if options is None:
        options = {}
    if rollback_options is None:
        rollback_options = {}
    cmdname = u"Create Storage Pool"
    cmd = VIRT_COMMAND_CREATE_STORAGE_POOL
    options['name'] = name
    options["type"] = type_
    options["target_path"] = target_path
    options["mode"] = STORAGE_POOL_PWD["MODE"]
    # NOTE(review): both group and owner are resolved through pwd.getpwnam
    # (index 2 is pw_uid); the group probably should use grp.getgrnam —
    # kept as-is to preserve behavior, confirm intent.
    options["group"] = pwd.getpwnam(STORAGE_POOL_PWD["GROUP"])[2]
    options["owner"] = pwd.getpwnam(STORAGE_POOL_PWD["OWNER"])[2]
    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'], cmd), options)
    rollback_options["name"] = name
    rollback_cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   VIRT_COMMAND_DELETE_STORAGE_POOL), rollback_options)
    _jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    _job = Job('%s command' % cmdname, 0, _cmd)
    # On failure, pysilhouette runs the rollback (delete pool) command.
    _job.rollback_command = rollback_cmd
    _jobgroup.jobs.append(_job)
    _machine2jobgroup = m2j_new(machine=machine,
                                jobgroup_id=-1,
                                uniq_key=karesansui.sheconf['env.uniqkey'],
                                created_user=obj.me,
                                modified_user=obj.me,
                                )
    save_job_collaboration(obj.orm,
                           obj.pysilhouette.orm,
                           _machine2jobgroup,
                           _jobgroup,
                           )
    return True
def _PUT(self, *param, **params):
    """Queue a "restart network" job when the requested status is
    NETWORK_RESTART.

    Returns 202 Accepted after the job group is registered, 400 for any
    other status, 404 for an unknown host.
    """
    host_id = self.chk_hostby1(param)
    if host_id is None:
        return web.notfound()
    self.view.host_id = host_id
    host = findbyhost1(self.orm, host_id)
    status = int(self.input.status)
    if status != NETWORK_RESTART:
        return web.badrequest()
    cmdname = u"Restart Network"
    cmd = NETWORK_COMMAND_RESTART
    options = {}
    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'], cmd), options)
    _jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    _job = Job('%s command' % cmdname, 0, _cmd)
    _jobgroup.jobs.append(_job)
    # The host row was already fetched above; the original redundantly
    # queried findbyhost1 a second time here.
    _machine2jobgroup = m2j_new(
        machine=host,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=self.me,
        modified_user=self.me,
        )
    save_job_collaboration(
        self.orm,
        self.pysilhouette.orm,
        _machine2jobgroup,
        _jobgroup,
        )
    return web.accepted()
def setexec_delete_disk(obj, guest, disk_job, volume_job):
    """Queue the job(s) that delete a disk from a guest.

    If *volume_job* is not None it is queued ahead of *disk_job*.
    @return: True
    """
    jobgroup = JobGroup('Delete disk', karesansui.sheconf['env.uniqkey'])
    # (removed: dead local ``order = 0`` that was never used)
    if volume_job is not None:
        jobgroup.jobs.append(volume_job)
    jobgroup.jobs.append(disk_job)
    _machine2jobgroup = m2j_new(
        machine=guest,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=obj.me,
        modified_user=obj.me,
        )
    save_job_collaboration(
        obj.orm,
        obj.pysilhouette.orm,
        _machine2jobgroup,
        jobgroup,
        )
    return True
def _DELETE(self, *param, **params):
    """Queue a job that removes the bonding setting for the device named
    in the URL. Returns 202 Accepted once the job group is registered.
    """
    host_id = self.chk_hostby1(param)
    if host_id is None:
        return web.notfound()
    device = param[1]
    if device is None:
        return web.notfound()
    cmdname = u"Delete Bonding Setting"
    options = {}
    options['dev'] = device
    # bare flag: present with no value
    options["succession"] = None
    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   BONDING_COMMAND_DELETE), options)
    jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    jobgroup.jobs.append(Job('%s command' % cmdname, 0, _cmd))
    host = findbyhost1(self.orm, host_id)
    m2j = m2j_new(
        machine=host,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=self.me,
        modified_user=self.me,
        )
    save_job_collaboration(
        self.orm,
        self.pysilhouette.orm,
        m2j,
        jobgroup,
        )
    return web.accepted()
def firewall_restore(obj, model, action='', options=None):
    """Register a firewall restore/start/stop/restart job.

    @param action: '', 'restart', 'start' or 'stop'; empty means plain
        restore. Any other non-empty value also falls back to restore.
    @param options: extra options for the restore command
    @return: True
    """
    # BUGFIX: mutable default argument was mutated (options={}); use None.
    if options is None:
        options = {}
    if action != "":
        options['action'] = action
    if action == "restart":
        action_msg = "Restart Firewall"
    elif action == "start":
        action_msg = "Start Firewall"
    elif action == "stop":
        action_msg = "Stop Firewall"
    else:
        action_msg = "Restore Firewall"
    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   FIREWALL_COMMAND_RESTORE_FIREWALL), options)
    _jobgroup = JobGroup(action_msg, karesansui.sheconf['env.uniqkey'])
    _jobgroup.jobs.append(Job("%s command" % action_msg, 0, _cmd))
    _machine2jobgroup = m2j_new(
        machine=model,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=obj.me,
        modified_user=obj.me,
        )
    save_job_collaboration(
        obj.orm,
        obj.pysilhouette.orm,
        _machine2jobgroup,
        _jobgroup,
        )
    return True
def iptables_control(obj, model, action='', options=None):
    """Register an iptables control job (start/stop/restart).

    @param action: 'restart', 'start', 'stop', or '' / other for a generic
        control invocation
    @param options: extra options for the iptables control command
    @return: True
    """
    # BUGFIX: mutable default argument was mutated (options={}); use None.
    if options is None:
        options = {}
    if action != "":
        options['action'] = action
    if action == "restart":
        action_msg = "Restart iptables"
    elif action == "start":
        action_msg = "Start iptables"
    elif action == "stop":
        action_msg = "Stop iptables"
    else:
        # BUGFIX: previously action_msg was left unset for '' or unknown
        # actions, raising NameError below; fall back to a generic label.
        action_msg = "Control iptables"
    # (removed: dead local ``msg`` assigned in the 'start' branch)
    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   IPTABLES_COMMAND_CONTROL), options)
    _jobgroup = JobGroup(action_msg, karesansui.sheconf['env.uniqkey'])
    _jobgroup.jobs.append(Job("%s command" % action_msg, 0, _cmd))
    _machine2jobgroup = m2j_new(
        machine=model,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=obj.me,
        modified_user=obj.me,
        )
    save_job_collaboration(
        obj.orm,
        obj.pysilhouette.orm,
        _machine2jobgroup,
        _jobgroup,
        )
    return True
def delete_nic_job(obj, guest, name, mac, options=None):
    """Register a job that removes a NIC (by MAC) from a guest.

    @param name: guest (domain) name
    @param mac: MAC address of the NIC to delete
    @param options: extra options for the delete-nic command
    @return: True
    """
    # BUGFIX: mutable default argument was mutated (options={}); use None.
    if options is None:
        options = {}
    options['name'] = name
    options['mac'] = mac
    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   VIRT_COMMAND_DELETE_NIC), options)
    cmdname = u"Delete NIC"
    _jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    _jobgroup.jobs.append(Job("%s command" % cmdname, 0, _cmd))
    _machine2jobgroup = m2j_new(machine=guest,
                                jobgroup_id=-1,
                                uniq_key=karesansui.sheconf['env.uniqkey'],
                                created_user=obj.me,
                                modified_user=obj.me,
                                )
    save_job_collaboration(obj.orm,
                           obj.pysilhouette.orm,
                           _machine2jobgroup,
                           _jobgroup,
                           )
    return True
def read_conf(modules, webobj=None, machine=None, extra_args={}):
    """Read configuration through the given parser module(s) and return a
    DictOp (dictionary-operation) object loaded with the result.

    @param modules: module name or list of module names
    @param webobj: web handler (old-style instance) to route through
        pysilhouette; otherwise the command runs directly
    @param machine: machine row for the pysilhouette job association
    @type modules: list
    @rtype: object dict_op
    @return: DictOp object on success, False on failure
    """
    if type(modules) == str:
        modules = [modules]
    options = {"module" : ":".join(modules)}
    try:
        options['include'] = extra_args['include']
    except:
        pass
    #cmd_name = u"Get Settings - %s" % ":".join(modules)
    cmd_name = u"Get Settings"
    # Old-style instance => run through pysilhouette and wait via corp().
    if type(webobj) == types.InstanceType:
        from karesansui.db.model._2pysilhouette import Job, JobGroup, \
             JOBGROUP_TYPE
        from karesansui.db.access._2pysilhouette import jg_findby1, jg_save,corp
        from karesansui.db.access._2pysilhouette import save_job_collaboration
        from karesansui.db.access.machine2jobgroup import new as m2j_new
        from pysilhouette.command import dict2command
        _cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'], CONFIGURE_COMMAND_READ),
            options)
        jobgroup = JobGroup(cmd_name, karesansui.sheconf['env.uniqkey'])
        jobgroup.jobs.append(Job('%s command' % cmd_name, 0, _cmd))
        jobgroup.type = JOBGROUP_TYPE['PARALLEL']
        _machine2jobgroup = m2j_new(machine=machine,
                                    jobgroup_id=-1,
                                    uniq_key=karesansui.sheconf['env.uniqkey'],
                                    created_user=webobj.me,
                                    modified_user=webobj.me,
                                    )
        if corp(webobj.orm, webobj.pysilhouette.orm,_machine2jobgroup, jobgroup) is False:
            webobj.logger.debug("%s command failed. Return to timeout" % (cmd_name))
            return False
        cmd_res = jobgroup.jobs[0].action_stdout
    else:
        # No web context: execute the read command synchronously.
        from karesansui.lib.utils import execute_command
        opts_str = ""
        for x in options.keys():
            if options[x] is None:
                opts_str += "--%s " % x
            else:
                opts_str += "--%s=%s " % (x, options[x])
        _cmd = "%s/bin/%s %s" % (KARESANSUI_PREFIX, CONFIGURE_COMMAND_READ, opts_str.strip(),)
        command_args = _cmd.strip().split(" ")
        (rc,res) = execute_command(command_args)
        if rc != 0:
            return False
        cmd_res = "\n".join(res)
    dop = DictOp()
    # NOTE(review): the command's stdout is exec'd as Python source that is
    # expected to define Config_Dict_<module> variables. This trusts the
    # command output completely — do not feed it untrusted data.
    try:
        exec(cmd_res)
    except Exception:
        return False
    for module in modules:
        try:
            exec("dop.addconf('%s',Config_Dict_%s)" % (module,module,))
        except:
            pass
    return dop
def write_conf(dop, webobj=None, machine=None, modules=[], extra_args={}):
    """Write configuration held in a DictOp object back to disk.

    Each selected module's dictionary is dumped to a temp file under
    CONF_TMP_DIR and the configure-write command is invoked on those files
    (through pysilhouette when *webobj* is an old-style instance, directly
    otherwise).

    @param dop: dictionary-operation object
    @param webobj:
    @param machine:
    @type dop: object dict_op
    @rtype: boolean
    @return: True or False
    """
    from karesansui.lib.file.configfile import ConfigFile
    if isinstance(dop, karesansui.lib.dict_op.DictOp) is False:
        return False
    if not os.path.exists(CONF_TMP_DIR):
        os.makedirs(CONF_TMP_DIR)
        r_chmod(CONF_TMP_DIR, 0770)
        r_chown(CONF_TMP_DIR, KARESANSUI_USER)
        r_chgrp(CONF_TMP_DIR, KARESANSUI_GROUP)
    # Timestamp suffix keeps temp file names unique per write.
    serial = time.strftime("%Y%m%d%H%M%S", time.localtime())
    if len(modules) == 0:
        modules = dop.ModuleNames
    w_modules = []
    w_files = []
    for _module in modules:
        if _module in dop.ModuleNames:
            filename = "%s/%s.%s" % (CONF_TMP_DIR, _module, serial,)
            data = preprint_r(dop.getconf(_module), return_var=True)
            ConfigFile(filename).write(data + "\n")
            r_chmod(filename, 0660)
            r_chown(filename, KARESANSUI_USER)
            r_chgrp(filename, KARESANSUI_GROUP)
            w_modules.append(_module)
            w_files.append(filename)
    if len(w_modules) == 0:
        return False
    options = {
        "module" : ":".join(w_modules),
        "input-file" : ":".join(w_files),
    }
    # bare flag: tells the command to delete the temp files afterwards
    options["delete"] = None
    # Indexing first (raising KeyError) guards the optional extras.
    try:
        extra_args['pre-command']
        options['pre-command'] = "b64:" + base64_encode(extra_args['pre-command'])
    except:
        pass
    try:
        extra_args['post-command']
        options['post-command'] = "b64:" + base64_encode(extra_args['post-command'])
    except:
        pass
    try:
        options['include'] = extra_args['include']
    except:
        pass
    #cmd_name = u"Write Settings - %s" % ":".join(w_modules)
    cmd_name = u"Write Settings"
    if type(webobj) == types.InstanceType:
        from karesansui.db.model._2pysilhouette import Job, JobGroup, \
             JOBGROUP_TYPE
        from karesansui.db.access._2pysilhouette import jg_findby1, jg_save,corp
        from karesansui.db.access._2pysilhouette import save_job_collaboration
        from karesansui.db.access.machine2jobgroup import new as m2j_new
        from pysilhouette.command import dict2command
        _cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'], CONFIGURE_COMMAND_WRITE),
            options)
        _jobgroup = JobGroup(cmd_name, karesansui.sheconf['env.uniqkey'])
        _jobgroup.jobs.append(Job('%s command' % cmd_name, 0, _cmd))
        _machine2jobgroup = m2j_new(machine=machine,
                                    jobgroup_id=-1,
                                    uniq_key=karesansui.sheconf['env.uniqkey'],
                                    created_user=webobj.me,
                                    modified_user=webobj.me,
                                    )
        save_job_collaboration(webobj.orm,
                               webobj.pysilhouette.orm,
                               _machine2jobgroup,
                               _jobgroup,
                               )
        # The synchronous-wait variant below is intentionally disabled
        # (kept as a string literal in the original source).
        """
        _jobgroup.type = JOBGROUP_TYPE['PARALLEL']
        if corp(webobj.orm, webobj.pysilhouette.orm,_machine2jobgroup, _jobgroup) is False:
            webobj.logger.debug("%s command failed. Return to timeout" % (cmd_name))
            for filename in w_files:
                if os.path.exists(filename):
                    os.unlink(filename)
            return False
        cmd_res = jobgroup.jobs[0].action_stdout
        """
    else:
        # No web context: execute the write command synchronously.
        from karesansui.lib.utils import execute_command
        opts_str = ""
        for x in options.keys():
            if options[x] is None:
                opts_str += "--%s " % x
            else:
                opts_str += "--%s=%s " % (x, options[x])
        _cmd = "%s/bin/%s %s" % (KARESANSUI_PREFIX, CONFIGURE_COMMAND_WRITE, opts_str.strip(),)
        command_args = _cmd.strip().split(" ")
        (rc,res) = execute_command(command_args)
        if rc != 0:
            for filename in w_files:
                if os.path.exists(filename):
                    os.unlink(filename)
            return False
        cmd_res = "\n".join(res)
    # Temp-file cleanup is also intentionally disabled in the original.
    """
    for filename in w_files:
        if os.path.exists(filename):
            os.unlink(filename)
    """
    return True
def _DELETE(self, *param, **params):
    """Delete a guest: queue storage-volume/disk deletion jobs followed by
    the delete-guest command, and logically delete the DB row.

    Returns 202 Accepted once the job group is registered, 204 when the
    guest no longer exists in libvirt, or an error response.
    """
    (host_id, guest_id) = self.chk_guestby1(param)
    if guest_id is None:
        return web.notfound()
    model = findbyguest1(self.orm, guest_id)
    self.kvc = KaresansuiVirtConnection()
    try:
        domname = self.kvc.uuid_to_domname(model.uniq_key)
        if not domname:
            self.logger.info("Did not exist in libvirt. - guestid=%s" % model.id)
            logical_delete(self.orm, model)
            # TODO: garbage remains on the filesystem, so delete_guest.py
            # still needs to be executed.
            self.orm.commit()
            return web.nocontent()
        kvg_guest = self.kvc.search_kvg_guests(domname)
        if not kvg_guest:
            return web.badrequest(_("Guest not found. - name=%s") % domname)
        else:
            kvg_guest = kvg_guest[0]
        if kvg_guest.is_active():
            return web.badrequest(_("Can not delete a running guest OS. - name=%s") % domname)
        # jobs order
        order = 0
        jobs = []
        os_storage = {}
        for disk in kvg_guest.get_disk_info():
            if disk['type'] == 'file':  # type="dir"
                # delete_storage_volume
                pool = self.kvc.get_storage_pool_name_byimage(disk['source']['file'])
                if not pool:
                    return web.badrequest(_("Can not find the storage pool image. - target=%s") % (disk['source']['file']))
                else:
                    pool = pool[0]
                disk_volumes = self.kvc.get_storage_volume_bydomain(domname, 'disk', 'key')
                volume = None
                for key in disk_volumes.keys():
                    if disk['source']['file'] == os.path.realpath(disk_volumes[key]):
                        volume = key  # disk image
                use = DISK_USES['DISK']
                if volume is None:
                    # os image
                    os_volume = self.kvc.get_storage_volume_bydomain(domname, 'os', 'key')
                    if not os_volume:
                        return web.badrequest(_("OS storage volume for guest could not be found. domname=%s") % domname)
                    if disk['source']['file'] == os.path.realpath(os_volume.values()[0]):
                        use = DISK_USES['IMAGES']
                        volume = os_volume.keys()[0]
                        os_storage["pool"] = pool
                        os_storage["volume"] = volume
                        # The OS image is removed by VIRT_COMMAND_DELETE_GUEST.
                        continue
                jobs.append(delete_storage_volume(self, volume, pool, order, use))
                order += 1
                # delete_disk
                jobs.append(delete_disk_job(self, domname, disk["target"]["dev"], order))
                order += 1
            elif disk['type'] == 'block':  # type="iscsi"
                pool = self.kvc.get_storage_pool_name_byimage(disk['source']['dev'])
                if not pool:
                    return web.badrequest(_("Can not find the storage pool image. - target=%s") % disk['source']['dev'])
                else:
                    pool = pool[0]
                os_volume = self.kvc.get_storage_volume_bydomain(domname, 'os', 'key')
                if not os_volume:
                    return web.badrequest(_("OS storage volume for guest could not be found. domname=%s") % domname)
                else:
                    volume = os_volume.values()[0]
                if disk['source']['dev'] == volume:
                    os_storage["pool"] = pool
                    os_storage["volume"] = volume
                    # The OS image is removed by VIRT_COMMAND_DELETE_GUEST.
                    continue
                # delete_disk
                jobs.append(delete_disk_job(self, domname, disk["target"]["dev"], order))
                order += 1
            else:
                # BUGFIX: the format string was "type=%", which raises
                # ValueError at format time instead of reporting the type.
                return web.internalerror(
                    _("Not expected storage type. type=%s") % disk['type'])
        # DELETE GUEST
        cmdname = "Delete Guest"
        _jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
        _jobgroup.jobs = jobs  # Set Disk jobs queued above
        action_cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       VIRT_COMMAND_DELETE_GUEST),
            {"name" : domname,
             "pool" : os_storage["pool"],
             "volume" : os_storage["volume"],
             })
        _job = Job('%s command' % cmdname, order, action_cmd)
        _jobgroup.jobs.append(_job)
        logical_delete(self.orm, model)
        _machine2jobgroup = m2j_new(machine=model,
                                    jobgroup_id=-1,
                                    uniq_key=karesansui.sheconf['env.uniqkey'],
                                    created_user=self.me,
                                    modified_user=self.me,
                                    )
        save_job_collaboration(self.orm,
                               self.pysilhouette.orm,
                               _machine2jobgroup,
                               _jobgroup,
                               )
        return web.accepted(url = web.ctx.path)
    finally:
        #self.kvc.close() GuestBy1#_post
        pass
def _PUT(self, *param, **params):
    """Start or stop an iSCSI network storage target plus its pools.

    Reads iqn/status/host/port from the request, works out which storage
    pools use the target (directly for iscsi pools, via device symlinks
    for fs pools), refuses to stop a target still used by a guest, and
    queues the iSCSI start/stop command together with start/stop jobs for
    the affected pools. Returns 202 Accepted.
    """
    host_id = self.chk_hostby1(param)
    if host_id is None:
        return web.notfound()
    if not validates_network_storage(self):
        self.logger.debug(
            "Network storage change status failed. Did not validate.")
        return web.badrequest(self.view.alert)
    host = findbyhost1(self.orm, host_id)
    if is_param(self.input, "iqn"):
        iqn = self.input.iqn
    else:
        self.logger.debug(
            "Network storage change status failed. Target IQN not found.")
        return web.badrequest()
    options = {'iqn': iqn}
    job_order = 0
    if is_param(self.input, "status"):
        status = self.input.status
    else:
        self.logger.debug(
            "Network storage change status failed. Status type not found.")
        return web.badrequest()
    if is_param(self.input, "host") and is_param(self.input, "port"):
        # NOTE(review): this rebinds ``host`` (previously the host model)
        # to the request's host string; the model is re-fetched below
        # before it is used again.
        host = self.input.host
        port = self.input.port
    else:
        self.logger.debug(
            "Network storage change status failed. Target host and port not found."
        )
        return web.badrequest()
    active_used_pool = []
    inactive_used_pool = []
    kvc = KaresansuiVirtConnection()
    try:
        dev_symlink_list = get_filelist(ISCSI_DEVICE_DIR)
        dev_symlink_list.sort()
        # Matches fs pools whose source device lives under the iSCSI
        # device directory for this host/port/iqn.
        symlink_regexp = re.compile("^%s/%s" % (re.escape(ISCSI_DEVICE_DIR),
                                                re.escape(ISCSI_DEVICE_NAME_TPL % (host, port, iqn))))
        active_pools = kvc.list_active_storage_pool()
        inactive_pools = kvc.list_inactive_storage_pool()
        now_pools = active_pools + inactive_pools
        for pool in now_pools:
            pool_type = kvc.get_storage_pool_type(pool)
            if pool_type == "iscsi":
                if iqn == kvc.get_storage_pool_sourcedevicepath(pool):
                    if pool in active_pools:
                        active_used_pool.append(pool)
                    if pool in inactive_pools:
                        inactive_used_pool.append(pool)
            elif pool_type == "fs":
                if symlink_regexp.match(
                        kvc.get_storage_pool_sourcedevicepath(pool)):
                    if pool in active_pools:
                        active_used_pool.append(pool)
                    if pool in inactive_pools:
                        inactive_used_pool.append(pool)
        if status == NETWORK_STORAGE_STOP:
            # Never stop a target whose pool still backs a running guest.
            for pool in active_used_pool:
                if kvc.is_used_storage_pool(name=pool,
                                            active_only=True) is True:
                    self.logger.debug(
                        "Stop iSCSI failed. Target iSCSI is used by guest."
                    )
                    return web.badrequest("Target iSCSI is used by guest.")
    finally:
        kvc.close()
    if status == NETWORK_STORAGE_START:
        # iSCSI login first (order 0), then start pools (order 1).
        network_storage_cmd = ISCSI_COMMAND_START
        cmd_name = u'Start iSCSI'
        jobgroup = JobGroup(cmd_name, karesansui.sheconf['env.uniqkey'])
        for pool in inactive_used_pool:
            pool_cmd = dict2command(
                "%s/%s" % (karesansui.config['application.bin.dir'],
                           VIRT_COMMAND_START_STORAGE_POOL),
                {"name": pool})
            pool_cmdname = "Start Storage Pool"
            jobgroup.jobs.append(
                Job('%s command' % pool_cmdname, 1, pool_cmd))
        job_order = 0
    elif status == NETWORK_STORAGE_STOP:
        # Stop pools first (order 0), then iSCSI logout (order 1).
        network_storage_cmd = ISCSI_COMMAND_STOP
        cmd_name = u'Stop iSCSI'
        jobgroup = JobGroup(cmd_name, karesansui.sheconf['env.uniqkey'])
        for pool in active_used_pool:
            pool_cmd = dict2command(
                "%s/%s" % (karesansui.config['application.bin.dir'],
                           VIRT_COMMAND_DESTROY_STORAGE_POOL),
                {"name": pool})
            pool_cmdname = "Stop Storage Pool"
            jobgroup.jobs.append(
                Job('%s command' % pool_cmdname, 0, pool_cmd))
        job_order = 1
    else:
        return web.internalerror('Internal Server Error. (Param)')
    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   network_storage_cmd), options)
    jobgroup.jobs.append(Job('%s command' % cmd_name, job_order, _cmd))
    host = findbyhost1(self.orm, host_id)
    _machine2jobgroup = m2j_new(
        machine=host,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=self.me,
        modified_user=self.me,
        )
    save_job_collaboration(
        self.orm,
        self.pysilhouette.orm,
        _machine2jobgroup,
        jobgroup,
        )
    return web.accepted()
def iptables_lint(filepath, webobj=None, machine=None, delete=False):
    """Lint an iptables settings file via the iptables control command.

    When *webobj* is an old-style instance the check runs through a
    pysilhouette job and waits for the result; otherwise the command is
    executed directly. Returns the command's stdout on success, False on
    failure. When *delete* is True the file is removed afterwards in
    every code path.
    """
    from karesansui.lib.const import IPTABLES_COMMAND_CONTROL
    # bare 'lint' flag: present with no value
    options = {"config": filepath, "lint": None}
    cmd_name = u"Check iptables settings - %s" % filepath
    if type(webobj) == types.InstanceType:
        from karesansui.db.model._2pysilhouette import Job, JobGroup, \
            JOBGROUP_TYPE
        from karesansui.db.access._2pysilhouette import jg_findby1, jg_save, corp
        from karesansui.db.access._2pysilhouette import save_job_collaboration
        from karesansui.db.access.machine2jobgroup import new as m2j_new
        from pysilhouette.command import dict2command
        _cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       IPTABLES_COMMAND_CONTROL), options)
        jobgroup = JobGroup(cmd_name, karesansui.sheconf['env.uniqkey'])
        jobgroup.jobs.append(Job('%s command' % cmd_name, 0, _cmd))
        jobgroup.type = JOBGROUP_TYPE['PARALLEL']
        _machine2jobgroup = m2j_new(
            machine=machine,
            jobgroup_id=-1,
            uniq_key=karesansui.sheconf['env.uniqkey'],
            created_user=webobj.me,
            modified_user=webobj.me,
            )
        # corp() blocks until the job finishes (False on timeout/failure).
        if corp(webobj.orm, webobj.pysilhouette.orm, _machine2jobgroup,
                jobgroup) is False:
            webobj.logger.debug("%s command failed. Return to timeout" % (cmd_name))
            if delete is True and os.path.exists(filepath):
                os.unlink(filepath)
            return False
        cmd_res = jobgroup.jobs[0].action_stdout
    else:
        from karesansui.lib.const import KARESANSUI_PREFIX
        from karesansui.lib.utils import execute_command
        opts_str = ""
        for x in options.keys():
            if options[x] is None:
                opts_str += "--%s " % x
            else:
                opts_str += "--%s=%s " % (x, options[x])
        _cmd = "%s/bin/%s %s" % (
            KARESANSUI_PREFIX,
            IPTABLES_COMMAND_CONTROL,
            opts_str.strip(),
        )
        command_args = _cmd.strip().split(" ")
        (rc, res) = execute_command(command_args)
        if rc != 0:
            if delete is True and os.path.exists(filepath):
                os.unlink(filepath)
            return False
        cmd_res = "\n".join(res)
    if delete is True and os.path.exists(filepath):
        os.unlink(filepath)
    return cmd_res
def _DELETE(self, *param, **params):
    """Delete exported guest data identified by UUID.

    param[0] is the host id, param[1] the export UUID.  Scans every
    active storage pool for an info.dat matching the UUID, validates the
    export's own XML config, then queues a "delete export data" job.
    Returns 202 Accepted with the job-status URL.
    """
    host_id = self.chk_hostby1(param)
    if host_id is None:
        return web.notfound()
    # valid
    self.view.uuid = param[1]
    kvc = KaresansuiVirtConnection()
    try:
        # Storage Pool
        #inactive_pool = kvc.list_inactive_storage_pool()
        inactive_pool = []
        active_pool = kvc.list_active_storage_pool()
        pools = inactive_pool + active_pool
        pools.sort()

        export = []
        for pool_name in pools:
            pool = kvc.search_kvn_storage_pools(pool_name)
            path = pool[0].get_info()["target"]["path"]
            if os.path.exists(path):
                for _afile in glob.glob("%s/*/info.dat" % (path,)):
                    e_param = ExportConfigParam()
                    e_param.load_xml_config(_afile)
                    if e_param.get_uuid() != self.view.uuid:
                        continue
                    e_name = e_param.get_domain()
                    _dir = os.path.dirname(_afile)

                    # Cross-check the export's domain XML against info.dat.
                    param = ConfigParam(e_name)
                    path = "%s/%s.xml" % (_dir, e_name)
                    if os.path.isfile(path) is False:
                        self.logger.error(
                            'Export corrupt data.(file not found) - path=%s' % path)
                        return web.internalerror()
                    param.load_xml_config(path)
                    if e_name != param.get_domain_name():
                        self.logger.error(
                            'Export corrupt data.(The name does not match) - info=%s, xml=%s'
                            % (e_name, param.get_name()))
                        return web.internalerror()

                    _dir = os.path.dirname(_afile)
                    export.append({
                        "dir": _dir,
                        "pool": pool_name,
                        "uuid": e_param.get_uuid(),
                        "name": e_name,
                    })
        # Exactly one export must match the requested UUID.
        if len(export) != 1:
            self.logger.info("Export does not exist. - uuid=%s" % self.view.uuid)
            return web.badrequest()
    finally:
        kvc.close()

    export = export[0]
    if os.path.exists(export['dir']) is False or os.path.isdir(export['dir']) is False:
        # BUGFIX: this log line referenced an undefined name `export_dir`,
        # raising NameError on this error path instead of logging.
        self.logger.error('Export data is not valid. [%s]' % export['dir'])
        return web.badrequest('Export data is not valid.')

    host = findbyhost1(self.orm, host_id)

    options = {}
    options['uuid'] = export["uuid"]
    _cmd = dict2command("%s/%s" % (karesansui.config['application.bin.dir'],
                                   VIRT_COMMAND_DELETE_EXPORT_DATA), options)

    # Job Registration
    _jobgroup = JobGroup('Delete Export Data', karesansui.sheconf['env.uniqkey'])
    _jobgroup.jobs.append(Job('Delete Export Data', 0, _cmd))
    _machine2jobgroup = m2j_new(
        machine=host,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=self.me,
        modified_user=self.me,
    )
    save_job_collaboration(
        self.orm,
        self.pysilhouette.orm,
        _machine2jobgroup,
        _jobgroup,
    )
    self.logger.debug('(Delete export data) Job group id==%s', _jobgroup.id)
    url = '%s/job/%s.part' % (web.ctx.home, _jobgroup.id)
    self.logger.debug('Returning Location: %s' % url)
    # BUGFIX: the original returned web.accepted() with no url despite
    # computing and logging one; sibling handlers return the job URL.
    return web.accepted(url=url)
def _PUT(self, *param, **params):
    """Update an existing virtual network by queueing an "update network"
    job for the background job runner.

    param[0] is the host id, param[1] the network name.  Returns 202
    Accepted carrying the job-status URL on success.
    """
    host_id = self.chk_hostby1(param)
    if host_id is None:
        return web.notfound()

    network_name = param[1]
    if not network_name:
        self.logger.debug("Network update failed. Network not found.")
        return web.notfound("Network not found.")

    if not validates_network(self, network_name=network_name):
        self.logger.debug("Network update failed. Did not validate.")
        return web.badrequest(self.view.alert)

    cidr = self.input.cidr
    dhcp_start = self.input.dhcp_start
    dhcp_end = self.input.dhcp_end
    bridge = self.input.bridge
    # forward_mode is optional in the request; default to '' when absent.
    forward_mode = getattr(self.input, 'forward_mode', '')
    try:
        autostart = self.input.autostart
    except:
        # field not submitted -> treat as "no"
        autostart = "no"

    #
    # spin off update job
    #
    options = {
        'name': network_name,
        'cidr': cidr,
        'dhcp-start': dhcp_start,
        'dhcp-end': dhcp_end,
        'bridge-name': bridge,
        'forward-mode': forward_mode,
        'autostart': autostart,
    }
    self.logger.debug('spinning off network_update_job options=%s' % (options))

    host = findbyhost1(self.orm, host_id)
    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   VIRT_COMMAND_UPDATE_NETWORK),
        options)

    # Job Registration
    _jobgroup = JobGroup('Update network: %s' % network_name,
                         karesansui.sheconf['env.uniqkey'])
    _jobgroup.jobs.append(Job('Update network', 0, _cmd))
    _machine2jobgroup = m2j_new(
        machine=host,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=self.me,
        modified_user=self.me,
    )
    save_job_collaboration(
        self.orm,
        self.pysilhouette.orm,
        _machine2jobgroup,
        _jobgroup,
    )
    self.logger.debug('(Update network) Job group id==%s', _jobgroup.id)
    url = '%s/job/%s.part' % (web.ctx.home, _jobgroup.id)
    self.logger.debug('Returning Location: %s' % url)
    return web.accepted(url=url)
def _DELETE(self, *param, **params):
    """Delete a guest: queue jobs removing its storage volumes/disks and
    finally the guest itself, then logically delete the DB record.

    Refuses to delete a running guest.  Returns 202 Accepted, or
    204 No Content when the domain no longer exists in libvirt.
    """
    (host_id, guest_id) = self.chk_guestby1(param)
    if guest_id is None:
        return web.notfound()

    model = findbyguest1(self.orm, guest_id)

    self.kvc = KaresansuiVirtConnection()
    try:
        domname = self.kvc.uuid_to_domname(model.uniq_key)
        if not domname:
            self.logger.info("Did not exist in libvirt. - guestid=%s" % model.id)
            logical_delete(self.orm, model)
            # TODO: garbage remains on the filesystem, so delete_guest.py
            # still needs to be executed.
            self.orm.commit()
            return web.nocontent()

        kvg_guest = self.kvc.search_kvg_guests(domname)
        if not kvg_guest:
            return web.badrequest(_("Guest not found. - name=%s") % domname)
        else:
            kvg_guest = kvg_guest[0]

        if kvg_guest.is_active():
            return web.badrequest(
                _("Can not delete a running guest OS. - name=%s") % domname)

        # jobs order
        order = 0
        jobs = []
        os_storage = {}
        for disk in kvg_guest.get_disk_info():
            if disk['type'] == 'file':  # type="dir"
                # delete_storage_volume
                pool = self.kvc.get_storage_pool_name_byimage(
                    disk['source']['file'])
                if not pool:
                    return web.badrequest(
                        _("Can not find the storage pool image. - target=%s")
                        % (disk['source']['file']))
                else:
                    pool = pool[0]
                disk_volumes = self.kvc.get_storage_volume_bydomain(
                    domname, 'disk', 'key')
                volume = None
                for key in disk_volumes.keys():
                    if disk['source']['file'] == os.path.realpath(
                            disk_volumes[key]):
                        volume = key
                        # disk image
                        use = DISK_USES['DISK']
                if volume is None:
                    # os image
                    os_volume = self.kvc.get_storage_volume_bydomain(
                        domname, 'os', 'key')
                    if not os_volume:
                        return web.badrequest(
                            _("OS storage volume for guest could not be found. domname=%s")
                            % domname)
                    if disk['source']['file'] == os.path.realpath(
                            os_volume.values()[0]):
                        use = DISK_USES['IMAGES']
                        volume = os_volume.keys()[0]
                        os_storage["pool"] = pool
                        os_storage["volume"] = volume
                        # OS delete command to do "VIRT_COMMAND_DELETE_GUEST" image.
                        continue
                # NOTE(review): if the file matched neither a disk volume nor
                # the OS volume, `use` is unbound here — confirm upstream
                # guarantees one of the two matches.
                jobs.append(
                    delete_storage_volume(self, volume, pool, order, use))
                order += 1
                # delete_disk
                jobs.append(
                    delete_disk_job(self, domname, disk["target"]["dev"], order))
                order += 1
            elif disk['type'] == 'block':  # type="iscsi"
                pool = self.kvc.get_storage_pool_name_byimage(
                    disk['source']['dev'])
                if not pool:
                    return web.badrequest(
                        _("Can not find the storage pool image. - target=%s")
                        % disk['source']['dev'])
                else:
                    pool = pool[0]
                os_volume = self.kvc.get_storage_volume_bydomain(
                    domname, 'os', 'key')
                if not os_volume:
                    return web.badrequest(
                        _("OS storage volume for guest could not be found. domname=%s")
                        % domname)
                else:
                    volume = os_volume.values()[0]
                if disk['source']['dev'] == volume:
                    os_storage["pool"] = pool
                    os_storage["volume"] = volume
                    # OS delete command to do "VIRT_COMMAND_DELETE_GUEST" image.
                    continue
                # delete_disk
                jobs.append(
                    delete_disk_job(self, domname, disk["target"]["dev"], order))
                order += 1
            else:
                # BUGFIX: format string was "type=%" (bare %, no conversion),
                # which raises ValueError when formatted; now "type=%s".
                return web.internalerror(
                    _("Not expected storage type. type=%s") % disk['type'])

        # DELETE GUEST
        cmdname = "Delete Guest"
        _jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
        _jobgroup.jobs = jobs

        # Set Disk
        action_cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       VIRT_COMMAND_DELETE_GUEST),
            {
                "name": domname,
                "pool": os_storage["pool"],
                "volume": os_storage["volume"],
            })
        _job = Job('%s command' % cmdname, order, action_cmd)
        _jobgroup.jobs.append(_job)

        logical_delete(self.orm, model)

        _machine2jobgroup = m2j_new(
            machine=model,
            jobgroup_id=-1,
            uniq_key=karesansui.sheconf['env.uniqkey'],
            created_user=self.me,
            modified_user=self.me,
        )
        save_job_collaboration(
            self.orm,
            self.pysilhouette.orm,
            _machine2jobgroup,
            _jobgroup,
        )
        return web.accepted(url=web.ctx.path)
    finally:
        #self.kvc.close() GuestBy1#_post
        pass
def iptables_lint(filepath, webobj=None, machine=None, delete=False):
    """Check an iptables settings file with the control command's --lint
    option; see the parameter notes below.

    NOTE(review): this function is defined twice in this file with
    identical bodies; at import time the later definition wins.

    filepath -- path of the iptables configuration file to check.
    webobj   -- when an (old-style) instance is passed, runs through the
                pysilhouette job machinery; otherwise executes directly.
    machine  -- machine record for the job group (job-scheduler path only).
    delete   -- when True, *filepath* is unlinked before returning.

    Returns the command's stdout, or False on failure/timeout.
    """
    from karesansui.lib.const import IPTABLES_COMMAND_CONTROL

    # None value -> bare "--lint" flag when the command line is built.
    options = {"config" : filepath, "lint" : None}
    cmd_name = u"Check iptables settings - %s" % filepath

    # Python 2: old-style instances are types.InstanceType.
    if type(webobj) == types.InstanceType:
        # Run through the pysilhouette job scheduler.
        from karesansui.db.model._2pysilhouette import Job, JobGroup, \
            JOBGROUP_TYPE
        from karesansui.db.access._2pysilhouette import jg_findby1, jg_save,corp
        from karesansui.db.access._2pysilhouette import save_job_collaboration
        from karesansui.db.access.machine2jobgroup import new as m2j_new
        from pysilhouette.command import dict2command

        _cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       IPTABLES_COMMAND_CONTROL),
            options)
        jobgroup = JobGroup(cmd_name, karesansui.sheconf['env.uniqkey'])
        jobgroup.jobs.append(Job('%s command' % cmd_name, 0, _cmd))
        jobgroup.type = JOBGROUP_TYPE['PARALLEL']
        _machine2jobgroup = m2j_new(machine=machine,
                                    jobgroup_id=-1,
                                    uniq_key=karesansui.sheconf['env.uniqkey'],
                                    created_user=webobj.me,
                                    modified_user=webobj.me,
                                    )
        # corp() registers the jobs and waits; False means timeout.
        if corp(webobj.orm, webobj.pysilhouette.orm,_machine2jobgroup, jobgroup) is False:
            webobj.logger.debug("%s command failed. Return to timeout" % (cmd_name))
            if delete is True and os.path.exists(filepath):
                os.unlink(filepath)
            return False
        cmd_res = jobgroup.jobs[0].action_stdout
    else:
        # No web context: build the command line and run synchronously.
        from karesansui.lib.const import KARESANSUI_PREFIX
        from karesansui.lib.utils import execute_command

        opts_str = ""
        for x in options.keys():
            if options[x] is None:
                opts_str += "--%s " % x  # valueless flag
            else:
                opts_str += "--%s=%s " % (x, options[x])
        _cmd = "%s/bin/%s %s" % (KARESANSUI_PREFIX,
                                 IPTABLES_COMMAND_CONTROL,
                                 opts_str.strip(),)
        command_args = _cmd.strip().split(" ")
        (rc,res) = execute_command(command_args)
        if rc != 0:
            if delete is True and os.path.exists(filepath):
                os.unlink(filepath)
            return False
        cmd_res = "\n".join(res)

    if delete is True and os.path.exists(filepath):
        os.unlink(filepath)
    return cmd_res
def _PUT(self, *param, **params):
    """<comment-ja>
    ゲストOSのメモリ設定を変更するジョブを登録します。
    </comment-ja>
    <comment-en>
    Queue a job that changes a guest's memory / max-memory settings.
    When no 'memory' input is given, memory is set equal to max_memory.
    Returns 202 Accepted, or 409 Conflict when the domain is unknown.
    </comment-en>
    """
    (host_id, guest_id) = self.chk_guestby1(param)
    if guest_id is None:
        return web.notfound()

    if is_param(self.input, 'memory'):
        memory = int(self.input.memory)
    else:
        memory = None
    max_memory = int(self.input.max_memory)

    model = findbyguest1(self.orm, guest_id)

    # virt
    kvc = KaresansuiVirtConnection()
    try:
        domname = kvc.uuid_to_domname(model.uniq_key)
        if not domname:
            return web.conflict(web.ctx.path)
        virt = kvc.search_kvg_guests(domname)[0]
        info = virt.get_info()
        #maxMem = info["maxMem"]
        now_memory = info["memory"]
        mem_info = kvc.get_mem_info()
        nodeinfo = kvc.get_nodeinfo()
    finally:
        kvc.close()

    # valid
    #if (mem_info["host_free_mem"] + (now_memory / 1024)) < memory:
    #    return web.badrequest("Memory value is greater than the maximum memory value. - memory=%s" % self.input.memory)

    options = {}
    options["name"] = domname
    options["maxmem"] = max_memory
    # Fall back to max_memory when no explicit memory value was supplied.
    if memory is None:
        options["memory"] = max_memory
    else:
        options["memory"] = memory

    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   VIRT_COMMAND_SET_MEMORY),
        options)

    cmdname = "Set Memory"
    _jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    _jobgroup.jobs.append(Job('%s command' % cmdname, 0, _cmd))
    _machine2jobgroup = m2j_new(
        machine=model,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=self.me,
        modified_user=self.me,
    )
    save_job_collaboration(
        self.orm,
        self.pysilhouette.orm,
        _machine2jobgroup,
        _jobgroup,
    )
    return web.accepted(url=web.ctx.path)
def create_pool_iscsi_job(obj, machine, name, type_, host_name, device_path,
                          automount_list, options=None, rollback_options=None):
    """Register jobs that create an iSCSI storage pool (with a delete-pool
    rollback command), plus one ready-mount/automount job group per disk
    in *automount_list*.

    obj              -- web handler-like object providing orm/pysilhouette/me.
    machine          -- machine record the job groups are associated with.
    name, type_, host_name, device_path -- pool parameters passed to the
                        create-storage-pool command.
    automount_list   -- list of dicts with 'symlink_name' (and optionally
                        'is_format' to request formatting).
    options / rollback_options -- extra command options; mutated in place
                        when supplied by the caller.

    Returns True.
    """
    # BUGFIX: the signature previously used mutable defaults
    # (options={}, rollback_options={}) which this function mutates,
    # leaking 'name'/'type'/... entries across calls.
    if options is None:
        options = {}
    if rollback_options is None:
        rollback_options = {}

    cmdname = u"Create Storage Pool"
    cmd = VIRT_COMMAND_CREATE_STORAGE_POOL

    options['name'] = name
    options["type"] = type_
    options["host_name"] = host_name
    options["device_path"] = device_path
    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'], cmd), options)

    # Rollback: force-delete the pool if creation fails.
    rollback_options["name"] = name
    rollback_options["force"] = None
    rollback_cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   VIRT_COMMAND_DELETE_STORAGE_POOL),
        rollback_options)

    _jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    _job = Job('%s command' % cmdname, 0, _cmd)
    _job.rollback_command = rollback_cmd
    _jobgroup.jobs.append(_job)
    _machine2jobgroup = m2j_new(
        machine=machine,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=obj.me,
        modified_user=obj.me,
    )
    save_job_collaboration(
        obj.orm,
        obj.pysilhouette.orm,
        _machine2jobgroup,
        _jobgroup,
    )

    # One additional job group per disk: check/format the mount (order 0),
    # then create a filesystem pool on it (order 1).
    automount_options = {}
    automount_options["type"] = STORAGE_POOL_TYPE['TYPE_FS']
    for disk in automount_list:
        readymount_options = {}
        readymount_options["dev"] = "%s/%s" % (ISCSI_DEVICE_DIR,
                                               disk['symlink_name'])
        if "is_format" in disk:
            readymount_options["format"] = None  # bare --format flag

        automount_options["name"] = disk['symlink_name']
        automount_options["device_path"] = "%s/%s" % (ISCSI_DEVICE_DIR,
                                                      disk['symlink_name'])
        automount_options["target_path"] = "%s/%s" % (VENDOR_DATA_ISCSI_MOUNT_DIR,
                                                      disk['symlink_name'])

        readymount_cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       ISCSI_COMMAND_READY_MOUNT),
            readymount_options)
        readymount_job = Job('Check mount command', 0, readymount_cmd)

        automount_cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'], cmd),
            automount_options)
        automount_job = Job('%s command' % cmdname, 1, automount_cmd)

        jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
        jobgroup.jobs.append(readymount_job)
        jobgroup.jobs.append(automount_job)
        machine2jobgroup = m2j_new(
            machine=machine,
            jobgroup_id=-1,
            uniq_key=karesansui.sheconf['env.uniqkey'],
            created_user=obj.me,
            modified_user=obj.me,
        )
        save_job_collaboration(
            obj.orm,
            obj.pysilhouette.orm,
            machine2jobgroup,
            jobgroup,
        )
    return True
def _PUT(self, *param, **params): """<comment-ja> ステータス更新 - param - create = 0 - shutdown = 1 - destroy = 2 - suspend = 3 - resume = 4 - reboot = 5 </comment-ja> <comment-en> TODO: English Comment </comment-en> """ host_id = param[0] host_id = self.chk_hostby1(param) if host_id is None: return web.notfound() uri_id = param[1] if uri_id is None: return web.notfound() if not validates_uriguest_status(self): return web.badrequest(self.view.alert) status = int(self.input.status) model = findbyhost1(self.orm, host_id) if model.attribute == 2: info = {} segs = uri_split(model.hostname) uri = uri_join(segs, without_auth=True) creds = '' if segs["user"] is not None: creds += segs["user"] if segs["passwd"] is not None: creds += ':' + segs["passwd"] self.kvc = KaresansuiVirtConnectionAuth(uri,creds) try: host = MergeHost(self.kvc, model) for guest in host.guests: _virt = self.kvc.search_kvg_guests(guest.info["model"].name) if 0 < len(_virt): for _v in _virt: info = _v.get_info() #uri = _v._conn.getURI() if info["uuid"] == uri_id or (uri[0:5] == "test:"): esc_name = "'%s'" % guest.info["model"].name opts = {"name":esc_name,"connection":uri} if creds != '': passwd_file = KARESANSUI_TMP_DIR + "/" + segs['host'] + ".auth" open(passwd_file, "w").write(creds) os.chmod(passwd_file, 0600) opts["passwd-file"] = passwd_file if status == GUEST_ACTION_CREATE: # -- Create cmdname = ["Start Guest", "start guest"] if _v.is_creatable() is True: _cmd = dict2command( "%s/%s" % (karesansui.config['application.bin.dir'],VIRT_COMMAND_START_GUEST), opts) self.view.status = VIRT_COMMAND_START_GUEST else: self.logger.error("Create Action:The state can not run. 
- %d" % _v.status()) elif status == GUEST_ACTION_SHUTDOWN: cmdname = ["Shutdown Guest", "shutdown guest"] if _v.is_shutdownable() is True: # -- Shutdown _cmd = dict2command( "%s/%s" % (karesansui.config['application.bin.dir'],VIRT_COMMAND_SHUTDOWN_GUEST), opts) self.view.status = VIRT_COMMAND_SHUTDOWN_GUEST else: self.logger.error("Shutdown Action:The state can not run. - %d" % _v.status()) elif status == GUEST_ACTION_DESTROY: cmdname = ["Destroy Guest", "Destroy guest"] if _v.is_destroyable() is True: # -- destroy _cmd = dict2command( "%s/%s" % (karesansui.config['application.bin.dir'],VIRT_COMMAND_DESTROY_GUEST), opts) self.view.status = VIRT_COMMAND_DESTROY_GUEST else: self.logger.error("Destroy Action:The state can not run. - %d" % _v.status()) elif status == GUEST_ACTION_SUSPEND: cmdname = ["Suspend Guest", "suspend guest"] if _v.is_suspendable() is True: # -- Suspend _cmd = dict2command( "%s/%s" % (karesansui.config['application.bin.dir'],VIRT_COMMAND_SUSPEND_GUEST), opts) self.view.status = VIRT_COMMAND_SUSPEND_GUEST else: self.logger.error("Destroy Action:The state can not run. - %d" % _v.status()) elif status == GUEST_ACTION_RESUME: cmdname = ["Resume Guest", "resume guest"] if _v.is_resumable() is True: # -- Resume _cmd = dict2command( "%s/%s" % (karesansui.config['application.bin.dir'],VIRT_COMMAND_RESUME_GUEST), opts) self.view.status = VIRT_COMMAND_RESUME_GUEST else: self.logger.error("Resume Action:The state can not run. - %d" % _v.status()) elif status == GUEST_ACTION_REBOOT: cmdname = ["Reboot Guest", "reboot guest"] if _v.is_shutdownable() is True: # -- Reboot _cmd = dict2command( "%s/%s" % (karesansui.config['application.bin.dir'],VIRT_COMMAND_REBOOT_GUEST), opts) self.view.status = VIRT_COMMAND_REBOOT_GUEST else: self.logger.error("Reboot Action:The state can not run. 
- %d" % _v.status()) elif status == GUEST_ACTION_ENABLE_AUTOSTART: opts["enable"] = None cmdname = ["Enable Autostart Guest", "enable autostart guest"] # -- Enable autostart guest _cmd = dict2command( "%s/%s" % (karesansui.config['application.bin.dir'],VIRT_COMMAND_AUTOSTART_GUEST), opts) self.view.status = VIRT_COMMAND_AUTOSTART_GUEST elif status == GUEST_ACTION_DISABLE_AUTOSTART: opts["disable"] = None cmdname = ["Disable Autostart Guest", "disable autostart guest"] # -- Disable autostart guest _cmd = dict2command( "%s/%s" % (karesansui.config['application.bin.dir'],VIRT_COMMAND_AUTOSTART_GUEST), opts) self.view.status = VIRT_COMMAND_AUTOSTART_GUEST else: self.logger.error("Action:Bad Request. - request status=%d" % status) return web.badrequest() break finally: self.kvc.close() # Job Register _jobgroup = JobGroup(cmdname[0], karesansui.sheconf['env.uniqkey']) _jobgroup.jobs.append(Job('%s command' % cmdname[1], 0, _cmd)) _machine2jobgroup = m2j_new(machine=model, jobgroup_id=-1, uniq_key=karesansui.sheconf['env.uniqkey'], created_user=self.me, modified_user=self.me, ) # INSERT save_job_collaboration(self.orm, self.pysilhouette.orm, _machine2jobgroup, _jobgroup, ) return web.accepted(url="/host/%d/uriguest/%s.part" % (host_id, uri_id))
def _PUT(self, *param, **params):
    """<comment-ja>
    ゲストOSのグラフィックス設定を変更するジョブを登録します。
    </comment-ja>
    <comment-en>
    Queue a "set graphics" job that changes a guest's graphics
    (port/listen/keymap/type/password) settings.  The requested port is
    rejected when it differs from the current one and is already used by
    another domain.  Returns 201 Created on success.
    </comment-en>
    """
    (host_id, guest_id) = self.chk_guestby1(param)
    if guest_id is None:
        return web.notfound()

    if not validates_graphics(self):
        return web.badrequest(self.view.alert)

    model = findbyguest1(self.orm, guest_id)

    # virt
    kvc = KaresansuiVirtConnection()
    try:
        domname = kvc.uuid_to_domname(model.uniq_key)
        if not domname:
            return web.conflict(web.ctx.path)
        virt = kvc.search_kvg_guests(domname)[0]
        info = virt.get_graphics_info()["setting"]
        used_ports = kvc.list_used_graphics_port()
        origin_port = info["port"]
    finally:
        kvc.close()

    options = {}
    options["name"] = domname
    if self.input.change_passwd == "random":
        # valueless flag -> the command generates a random password
        options["random-passwd"] = None
    elif self.input.change_passwd == "empty":
        options["passwd"] = ""
    options["port"] = self.input.port
    options["listen"] = self.input.listen
    options["keymap"] = self.input.keymap
    options["type"] = self.input.graphics_type

    # Reject a port change to a port already in use elsewhere.
    if int(self.input.port) != origin_port and int(
            self.input.port) in used_ports:
        return web.badrequest(
            "Graphics port number has been already used by other service. - port=%s"
            % (self.input.port, ))

    _cmd = dict2command(
        "%s/%s" % (karesansui.config['application.bin.dir'],
                   VIRT_COMMAND_SET_GRAPHICS),
        options)

    cmdname = "Set Graphics"
    _jobgroup = JobGroup(cmdname, karesansui.sheconf['env.uniqkey'])
    _jobgroup.jobs.append(Job('%s command' % cmdname, 0, _cmd))
    _machine2jobgroup = m2j_new(
        machine=model,
        jobgroup_id=-1,
        uniq_key=karesansui.sheconf['env.uniqkey'],
        created_user=self.me,
        modified_user=self.me,
    )
    save_job_collaboration(
        self.orm,
        self.pysilhouette.orm,
        _machine2jobgroup,
        _jobgroup,
    )
    return web.created(None)
def _GET(self, *param, **params):
    """Fetch detail for one iSCSI target (synchronous, via a PARALLEL
    job group) and expose it on self.view.info.

    The detail command's stdout is one whitespace-separated record:
    host port tpgt iqn activity autostart auth user.
    """
    host_id = self.chk_hostby1(param)
    if host_id is None:
        return web.notfound()

    # NOTE(review): the whole body below is treated as input-mode handling;
    # no non-input branch is visible in this chunk — confirm.
    if self.is_mode_input() is True:
        self.view.host_id = host_id
        iqn = self.input.iqn
        options = {'iqn' : iqn}
        _cmd = dict2command(
            "%s/%s" % (karesansui.config['application.bin.dir'],
                       ISCSI_COMMAND_GET),
            options)

        cmd_name = u'Get iSCSI Detail'
        jobgroup = JobGroup(cmd_name, karesansui.sheconf['env.uniqkey'])
        jobgroup.jobs.append(Job('%s command' % cmd_name, 0, _cmd))
        jobgroup.type = JOBGROUP_TYPE['PARALLEL']

        host = findbyhost1(self.orm, host_id)
        _machine2jobgroup = m2j_new(machine=host,
                                    jobgroup_id=-1,
                                    uniq_key=karesansui.sheconf['env.uniqkey'],
                                    created_user=self.me,
                                    modified_user=self.me,
                                    )
        # corp() runs the job group and waits; False means timeout.
        if corp(self.orm, self.pysilhouette.orm,_machine2jobgroup, jobgroup) is False:
            self.logger.debug("%s command failed. Return to timeout" % (cmd_name))
            return web.internalerror('Internal Server Error. (Timeout)')

        cmd_res = jobgroup.jobs[0].action_stdout
        if not cmd_res:
            # No output from the command: present an empty detail record.
            self.view.info = {
                'type'      : "iSCSI",
                'hostname'  : "",
                'port'      : "",
                'tpgt'      : "",
                'iqn'       : "",
                'activity'  : "",
                'autostart' : "",
                'auth'      : "",
                'user'      : "",
                'disk_list' : [],
            }
            return True

        (host, port, tpgt, iqn, activity, autostart, auth, user) = cmd_res.strip("\n").split(' ', 8)
        info = {
            'type'      : "iSCSI",
            'hostname'  : host,
            'port'      : port,
            'tpgt'      : tpgt,
            'iqn'       : iqn,
            'activity'  : string.atoi(activity),   # Python 2 string.atoi
            'autostart' : string.atoi(autostart),
            'auth'      : auth,
            'user'      : user,
            'disk_list' : [],
        }

        dev_symlink_list = get_filelist(ISCSI_DEVICE_DIR)
        # Only an active ('1') target has device symlinks to enumerate.
        if activity == '1':
            disk_list = []
            symlink_regexp = re.compile("^%s" % (re.escape(ISCSI_DEVICE_NAME_TPL % (host, port, iqn))))
            for sym_link in dev_symlink_list:
                if symlink_regexp.match(sym_link):
                    real_path = symlink2real("%s/%s" % (ISCSI_DEVICE_DIR, sym_link))
                    disk_list.append({'symlink_name'  : sym_link,
                                      'realpath_list' : real_path,
                                      })
            info['disk_list'] = disk_list

        self.view.info = info
        return True