def get_template_info(self): """ returns template name, template's current version and vm & template version match for imported vms template name alone is available from config file """ template_info = {} template_info["template_name"] = self._config['image_name'] template_info["template_version"] = '0.0' template_info["version_comment"] = '' try: if self.image_id is not None: from convirt.model.ImageStore import Image img = DBSession.query(Image).filter( Image.id == self.image_id).one() template_info["template_name"] = img.name template_info["template_version"] = to_str( self.template_version) template_info["version_comment"] = '' if self.template_version != img.version: template_info["version_comment"]="*Current version of the Template is "+\ to_str(img.version) except Exception, e: LOGGER.error(e) pass
def get_server_def_list(self,site_id, group_id, def_id): try: server_def_list=[] node_defns = self.sync_manager.get_node_defns(def_id, to_unicode(constants.NETWORK)) if node_defns: for eachdefn in node_defns: temp_dic={} if eachdefn: node = DBSession.query(ManagedNode).filter_by(id=eachdefn.server_id).first() temp_dic['id']=eachdefn.server_id if node: temp_dic['name']=node.hostname else: temp_dic['name']=None temp_dic['status']=eachdefn.status if eachdefn.details: temp_dic['details']=eachdefn.details else: temp_dic['details']=None server_def_list.append(temp_dic) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
def exec_cmd(self, cmd, exec_path=None, timeout=-1, params=None, cd=False,
             env=None):
    """Execute *cmd* locally or remotely.

    Optionally prefixes ``export VAR=...;`` statements from *env*, wraps
    the command in ``bash_timeout.sh`` when ``self.use_bash_timeout`` is
    set, extends PATH with *exec_path* (and ``cd``s there when *cd* is
    true).  ``timeout == -1`` means "use the default"; ``None`` means no
    timeout for the transport layer.
    """
    evn_str = ""
    if env is not None:
        for env_var in env.keys():
            evn_str += "export " + env_var + "=" + env.get(env_var) + ";"
    if self.use_bash_timeout:
        bash_script = os.path.join(self.local_bash_dir, "bash_timeout.sh")
        if self.isRemote:
            bash_script = os.path.join(self.bash_dir, "bash_timeout.sh")
        bash_cmd = ""
        if timeout == -1:
            bash_cmd = bash_script + " -t " + \
                to_str(self.default_bash_timeout) + " "
        elif timeout is not None:
            bash_cmd = bash_script + " -t " + to_str(timeout) + " "
        cmd = bash_cmd + cmd
    if timeout == -1:
        timeout = None
    # BUG FIX: `exec_path is not ""` compared object identity with a
    # literal (implementation-dependent); use inequality instead.
    if exec_path is not None and exec_path != "":
        exec_cmd = "PATH=$PATH:%s; %s" % (exec_path, cmd)
        if cd:
            exec_cmd = "cd " + exec_path + ";" + exec_cmd
    else:
        exec_cmd = cmd
    exec_cmd = evn_str + exec_cmd
    if self.isRemote:
        return self.remote_exec_cmd(exec_cmd, timeout, params)
    return self.local_exec_cmd(exec_cmd, timeout, params)
def get_vmdevice_map(self,platform): try: result=[] dic = {"hda": "hda", "hdb": "hdb", "hdc": "hdc", "hdc:cdrom": "hdc:cdrom", "hdd": "hdd" } if platform=='xen': dic['xvda']='xvda' dic['xvdb']='xvdb' dic['xvdc']='xvdc' if platform=='kvm': dic['vda']='vda' dic['vdb']='vdb' dic['vdc']='vdc' dic['vdd']='vdd' for key in dic.keys(): result.append(dict(id=dic[key],value=key)) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
def remove_storage_def(self, auth, storage_id, site_id, groupId, op_level=None): try: site = self.manager.getSite(site_id) group = self.manager.getGroup(auth, groupId) group_list = self.manager.getGroupList(auth, site_id) sd_to_remove = self.storage_manager.get_sd( storage_id, site_id, groupId, to_unicode(constants.STORAGE)) node = None add_mode = False warning_msg = self.sync_manager.remove_defn( sd_to_remove, site, group, node, auth, to_unicode(constants.STORAGE), constants.DETACH, "REMOVE_STORAGE_DEF", self.storage_manager, self.manager, add_mode, group_list, op_level) if warning_msg: return "{success: true,msg: '" + warning_msg + "'}" return "{success: true,msg: 'Storage Removed'}" except Exception, ex: print_traceback() err_desc = to_str(ex).replace("'", "") err_desc = err_desc.strip() LOGGER.error(to_str(err_desc)) return "{success: false,msg: '" + err_desc + "'}"
def send_test_email(self, desc, servername, port, useremail, password, secure): self.sender = useremail Record = DBSession.query(User.email_address).filter(User.user_name == "admin").first() self.receivers = Record.email_address self.mail_server = servername if port: self.port = int(port) self.secure_type = int(secure) self.password = password self.subject = "Test Email" self.content = "\Test message Sent on " + to_str(ct_time()) self.msg = MIMEText(self.content, self.text_subtype) self.msg["Subject"] = "ConVirt Test Email" # SendSuccess = False try: if self.secure_type == NONSECURE: EmailManager().send_nonsecure( servername, self.port, useremail, Record.email_address, self.msg.as_string() ) elif self.secure_type == TLS: EmailManager().send_tls( servername, self.port, useremail, password, Record.email_address, self.msg.as_string() ) else: EmailManager().send_ssl( servername, self.port, useremail, password, Record.email_address, self.msg.as_string() ) except Exception, ex: # traceback.print_exc() LOGGER.error("Error sending mails:" + to_str(ex).replace("'", "")) raise ex
def get_new_updates(self, guid, update_checked_date, edition=None):
    """Return (new_updates, latest_pub_date_string).

    Filters retrieved updates to those published after
    *update_checked_date* whose platform list matches *edition* (or
    'ALL'); the returned string is the newest pubDate seen, formatted
    "%Y-%m-%d %H:%M:%S".
    """
    new_updates = []
    updates = self.retrieve_updates(guid)
    r_date = update_checked_date
    max_dt = r_date
    for update in updates:
        str_p_dt = to_str(update["pubDate"])
        if str_p_dt:
            p_dt = time.strptime(str_p_dt, "%Y-%m-%d %H:%M:%S")
            # BUG FIX: [0:5] dropped the seconds field parsed by %S, so
            # pubDates were truncated to the minute; use all six fields.
            dt = datetime(*p_dt[0:6])
            if dt > r_date:
                if edition:
                    pltfom = to_str(update["platform"])
                    platforms = pltfom.split(",")
                    if edition in platforms or 'ALL' in platforms:
                        new_updates.append(update)
                if dt > max_dt:
                    max_dt = dt
    str_max_dt = r_date.strftime("%Y-%m-%d %H:%M:%S")
    if max_dt > r_date:
        str_max_dt = max_dt.strftime("%Y-%m-%d %H:%M:%S")
    return (new_updates, str_max_dt)
def get_server_def_list(self,site_id, group_id, def_id): try: server_def_list=[] node_defns = self.sync_manager.get_node_defns(def_id, to_unicode(constants.STORAGE)) if node_defns: for eachdefn in node_defns: temp_dic={} if eachdefn: node = DBSession.query(ManagedNode).filter_by(id=eachdefn.server_id).first() temp_dic['id']=eachdefn.server_id if node: temp_dic['name']=node.hostname else: temp_dic['name']=None temp_dic['status']=eachdefn.status if eachdefn.details: temp_dic['details']=eachdefn.details else: temp_dic['details']=None server_def_list.append(temp_dic) except Exception, ex: LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
def associate_defns(self, site_id, group_id, def_type, def_ids, auth, op_level=None):
    """Associate existing storage definitions (comma-separated ids in
    *def_ids*) with a server pool.

    Each definition is synced to the pool's nodes; if a sync fails the
    definition is removed again (rollback) and the failure is re-raised
    after all definitions have been attempted.
    """
    error_desc=""
    site = self.manager.getSite(site_id)
    group=self.manager.getGroup(auth,group_id)
    group_list = self.manager.getGroupList(auth, site_id)
    def_id_list = def_ids.split(",")
    for def_id in def_id_list:
        new_sd = DBSession.query(StorageDef).filter_by(id=def_id).first()
        node = None
        try:
            associate=True
            self.sync_manager.add_defn(new_sd, site, group, node, auth,
                                       to_unicode(constants.STORAGE),
                                       constants.ATTACH, "ADD_STORAGE_DEF",
                                       self.storage_manager, self.manager,
                                       op_level, associate)
            #matching disks on association of storage.
            vm_disks = self.manager.get_vm_disks_from_pool(auth, group_id)
            storage_disks = DBSession.query(StorageDisks).filter_by(storage_id=def_id)
            if storage_disks:
                for eachdisk in storage_disks:
                    self.manager.matching_disk_on_discover_storage(vm_disks, eachdisk.id)
        except Exception, ex:
            # Remember the failure but keep processing the remaining ids.
            error_desc = to_str(ex)
            print_traceback()
            LOGGER.error(to_str(ex).replace("'",""))
            #if we get any exception while adding/ sync definition then are removing the definition.
            add_mode=True
            try:
                self.sync_manager.remove_defn(new_sd, site, group, node, auth,
                                              to_unicode(constants.STORAGE),
                                              constants.DETACH,
                                              "REMOVE_STORAGE_DEF",
                                              self.storage_manager,
                                              self.manager, add_mode,
                                              group_list, op_level)
            except Exception, ex1:
                # Rollback itself failed: surface that error immediately.
                print_traceback()
                LOGGER.error(to_str(ex1).replace("'",""))
                raise Exception(to_str(ex1))
    # Surface the (last) association failure to the caller.
    if error_desc:
        raise Exception(error_desc)
def add_storage_def(self,auth, site_id, group_id, node_id, type, opts, op_level=None, sp_ids=None, scan_result=None):
    """Create a new storage definition from *opts*, reject duplicate
    names, and sync it to the target scope.

    On any validation error or exception the freshly added definition is
    removed again (rollback); rollback failures are re-raised.
    """
    new_sd = self.get_valid_sd(type,opts, op_level)
    site = self.manager.getSite(site_id)
    group=self.manager.getGroup(auth,group_id)
    node = None
    group_list = self.manager.getGroupList(auth, site_id)
    try:
        # Duplicate-name check against all definitions visible in scope.
        sdlist = self.storage_manager.get_sds(site_id, group_id)
        for sd in sdlist:
            if new_sd.name==sd.name:
                raise Exception("Storage share with same name already exists.")
        errs=[]
        errs = self.update_storage_def(auth, new_sd, None, None, None, site,
                                       group, op_level, True, sp_ids, errs,
                                       scan_result)
        if errs:
            if len(errs) > 0:
                # Validation failed: roll the new definition back out.
                add_mode=True
                self.sync_manager.remove_defn(new_sd, site, group, node, auth,
                                              to_unicode(constants.STORAGE),
                                              constants.DETACH,
                                              "REMOVE_STORAGE_DEF",
                                              self.storage_manager,
                                              self.manager, add_mode,
                                              group_list, op_level)
                return {'success':False,'msg':to_str(errs).replace("'","")}
    except Exception, ex:
        print_traceback()
        err_desc = to_str(ex).replace("'","")
        err_desc = err_desc.strip()
        LOGGER.error(err_desc)
        try:
            # Best-effort rollback: only if the definition was persisted.
            add_mode=True
            defn_temp = self.storage_manager.get_sd(new_sd.id, None, None, None)
            if defn_temp:
                self.sync_manager.remove_defn(defn_temp, site, group, node,
                                              auth,
                                              to_unicode(constants.STORAGE),
                                              constants.DETACH,
                                              "REMOVE_STORAGE_DEF",
                                              self.storage_manager,
                                              self.manager, add_mode,
                                              group_list, op_level)
        except Exception, ex1:
            print_traceback()
            LOGGER.error(to_str(ex1).replace("'",""))
            raise Exception(to_str(ex1))
def get_new_updates(self, guid, update_checked_date, edition=None):
    """Return (new_updates, latest_pub_date_string).

    Filters retrieved updates to those published after
    *update_checked_date* whose platform list matches *edition* (or
    'ALL'); the returned string is the newest pubDate seen.
    """
    new_updates = []
    updates = self.retrieve_updates(guid)
    r_date = update_checked_date
    max_dt = r_date
    for update in updates:
        str_p_dt = to_str(update["pubDate"])
        if str_p_dt:
            p_dt = time.strptime(str_p_dt, "%Y-%m-%d %H:%M:%S")
            # BUG FIX: [0:5] dropped the seconds field parsed by %S, so
            # pubDates were truncated to the minute; use all six fields.
            dt = datetime(*p_dt[0:6])
            if dt > r_date:
                if edition:
                    pltfom = to_str(update["platform"])
                    platforms = pltfom.split(",")
                    if edition in platforms or 'ALL' in platforms:
                        new_updates.append(update)
                if dt > max_dt:
                    max_dt = dt
    str_max_dt = r_date.strftime("%Y-%m-%d %H:%M:%S")
    if max_dt > r_date:
        str_max_dt = max_dt.strftime("%Y-%m-%d %H:%M:%S")
    return (new_updates, str_max_dt)
def get_ref_disk_format_map(self,format_type): print "-----",format_type try: result=[] if format_type=="disk_image": dic= { "Raw": "raw", "dir-gzipped-chunks" : "dir-gzipped-chunks", ".bz2": "bzip", ".gz" : "gzip", ".zip": "zip", ".tar": "tar", ".tar.gzip": "tar_gzip", ".tar.bz2" : "tar_bzip", } for key in dic.keys(): result.append(dict(id=dic[key],value=key)) elif format_type=="disk_content": dic1={ # ".bz2": "bzip", (Need to add to provision.sh) # ".gz" : "gzip", (Need to add to provision.sh) ".zip": "zip", ".tar": "tar", ".tar.gzip": "tar_gzip", ".tar.bz2" : "tar_bzip", "directory":"dir" } for key in dic1.keys(): result.append(dict(id=dic1[key],value=key)) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
def get_command(self, auth, node_id, dom_id, cmd): command = None try: command = tg.config.get(cmd) info = {} value_map = {} if cmd in [constants.VNC, constants.TIGHTVNC]: host = pylons.request.headers['Host'] if host.find(":") != -1: (address, port) = host.split(':') else: address = host info = self.manager.get_vnc_info(auth, node_id, dom_id, address) value_map[constants.APPLET_IP] = info["hostname"] value_map[constants.PORT] = info["port"] if command is not None: if type(command) in [types.StringType, types.UnicodeType]: template_str = string.Template(command) command = to_str(template_str.safe_substitute(value_map)) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'", "")) return "{success: false,msg: " 'Command not found' "}"
def get_command(self,auth,node_id,dom_id,cmd): command=None try: command=tg.config.get(cmd) info={} value_map={} if cmd in [constants.VNC,constants.TIGHTVNC]: host=pylons.request.headers['Host'] if host.find(":") != -1: (address,port)=host.split(':') else: address = host info=self.manager.get_vnc_info(auth, node_id, dom_id, address) value_map[constants.APPLET_IP] = info["hostname"] value_map[constants.PORT] = info["port"] if command is not None: if type(command) in [types.StringType,types.UnicodeType]: template_str = string.Template(command) command = to_str(template_str.safe_substitute(value_map)) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: "'Command not found'"}"
def _init_vmm(self):
    """Lazily build and cache the XML-RPC proxy used to talk to the Xen
    daemon, choosing the transport from ``self.protocol``."""
    if self.xen_proxy is not None:
        return self.xen_proxy
    # share the transport from the node_proxy
    if not self.is_remote:
        # Local node: talk over the xend unix socket.
        self.xen_proxy = ServerProxy('httpu:///var/run/xend/xmlrpc.sock')
        return self.xen_proxy
    proto = self.protocol
    if proto == "tcp":
        self.xen_proxy = ServerProxy('http://' + self.hostname + ':' +
                                     to_str(self.tcp_port) +
                                     self.DEFAULT_PATH)
    elif proto == "ssl":
        self.xen_proxy = ServerProxy('https://' + self.hostname + ':' +
                                     to_str(self.tcp_port) +
                                     self.DEFAULT_PATH)
    elif proto == "ssh":
        self.xen_proxy = ServerProxy('ssh://' + self.username + '@' +
                                     self.hostname + self.DEFAULT_PATH)
    elif proto == "ssh_tunnel":
        self.xen_proxy = ServerProxy('ssh_tunnel://' + self.username + '@' +
                                     self.hostname + ":" +
                                     to_str(self.tcp_port) +
                                     self.DEFAULT_PATH,
                                     ssh_transport=self.ssh_transport,
                                     user=self.username,
                                     password=self.password,
                                     use_keys=self.use_keys)
    return self.xen_proxy
def format_task_result_details(self, task_results):
    """Flatten task-result rows into display dicts (taskid, entity name
    and type, status text, start/end timestamps in CMS timezone, error
    message, cancellable flag) for the task-list UI."""
    result=[]
    ent_type_txt_map = self.get_entity_type_id_text_map()
    LOGGER.debug("start format_task_result_details : "+to_str(datetime.utcnow()))
    for tpl in task_results:
        tid=tpl.task_id
        task_name = tpl.name
        username=tpl.user_name
        entityName=tpl.entity_name
        cancellable=tpl.cancellable
        startime=tpl.timestamp
        endtime=""
        stat=tpl.status
        # Map the numeric entity type to display text; blank if unknown.
        enttype = ent_type_txt_map.get(to_str(tpl.entity_type),"")
        short_desc = tpl.short_desc
        if short_desc:
            # Prefer the short description as the display name when present.
            task_name = short_desc
        err=''
        if stat in [Task.FAILED,Task.SUCCEEDED,Task.CANCELED]:
            # Only finished tasks have an end time and result text.
            endtime=convert_to_CMS_TZ(tpl.endtime)
            err=tpl.results
        status=Task.TASK_STATUS[stat]
        startime=convert_to_CMS_TZ(startime)
        result.append(dict(taskid=tid,entname=entityName,enttype=enttype,\
                           name=task_name,username=username,status=status,\
                           errmsg=err,timestamp=startime,cancellable=cancellable,\
                           endtime=endtime))
    LOGGER.debug("end format_task_result_details : "+to_str(datetime.utcnow()))
    return result
def get_ref_disk_format_map(self, format_type): print "-----", format_type try: result = [] if format_type == "disk_image": dic = { "Raw": "raw", "dir-gzipped-chunks": "dir-gzipped-chunks", ".bz2": "bzip", ".gz": "gzip", ".zip": "zip", ".tar": "tar", ".tar.gzip": "tar_gzip", ".tar.bz2": "tar_bzip", } for key in dic.keys(): result.append(dict(id=dic[key], value=key)) elif format_type == "disk_content": dic1 = { # ".bz2": "bzip", (Need to add to provision.sh) # ".gz" : "gzip", (Need to add to provision.sh) ".zip": "zip", ".tar": "tar", ".tar.gzip": "tar_gzip", ".tar.bz2": "tar_bzip", "directory": "dir" } for key in dic1.keys(): result.append(dict(id=dic1[key], value=key)) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'", "")) return "{success: false,msg: '", to_str(ex).replace("'", ""), "'}"
def get_vmdevice_map(self, platform): try: result = [] dic = { "hda": "hda", "hdb": "hdb", "hdc": "hdc", "hdc:cdrom": "hdc:cdrom", "hdd": "hdd" } if platform == 'xen': dic['xvda'] = 'xvda' dic['xvdb'] = 'xvdb' dic['xvdc'] = 'xvdc' if platform == 'kvm': dic['vda'] = 'vda' dic['vdb'] = 'vdb' dic['vdc'] = 'vdc' dic['vdd'] = 'vdd' for key in dic.keys(): result.append(dict(id=dic[key], value=key)) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'", "")) return "{success: false,msg: '", to_str(ex).replace("'", ""), "'}"
def send_test_email(self, desc, servername, port, useremail, password, secure): self.sender = useremail Record = DBSession.query(User.email_address).filter(User.user_name =='admin').first() self.receivers =Record.email_address self.mail_server = servername if port: self.port = int(port) self.secure_type = int(secure) self.password = password self.subject = "Test Email" self.content="\Test message Sent on " + to_str(ct_time()) self.msg = MIMEText(self.content, self.text_subtype) self.msg['Subject']= "WishCloud Test Email" # SendSuccess = False try: if (self.secure_type== NONSECURE): EmailManager().send_nonsecure(servername,self.port,useremail,Record.email_address,self.msg.as_string()) elif (self.secure_type== TLS): EmailManager().send_tls(servername,self.port,useremail,password,Record.email_address,self.msg.as_string()) else: EmailManager().send_ssl(servername,self.port,useremail,password,Record.email_address,self.msg.as_string()) except Exception, ex: # traceback.print_exc() LOGGER.error("Error sending mails:"+to_str(ex).replace("'","")) raise ex
def get_task_details(self, task_ids):
    """Load the Task rows for *task_ids* (results eager-loaded) and
    return them formatted for display."""
    LOGGER.debug("get_task_details query start : " +
                 to_str(datetime.utcnow()))
    tasks = (DBSession.query(Task)
             .filter(Task.task_id.in_(task_ids))
             .options(eagerload("result"))
             .all())
    LOGGER.debug("get_task_details query end : " +
                 to_str(datetime.utcnow()))
    return self.format_task_details(tasks)
def clone_image(self,auth, image_id, image_name, group_id): """ clone image """ try: self.image_store.clone_image(auth,group_id, image_id, image_name) except Exception , ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
def save_image_desc(self,auth, image_id, content): try: mgd_node=Basic.local_node self.image_store.save_image_desc(auth,mgd_node,image_id, content) except Exception , ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
def transfer_image(self,auth, image_id,source_group_id,dest_group_id): """ transfer image """ try: self.image_store.transfer_image(auth,image_id,source_group_id,dest_group_id) except Exception , ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
def rename_image_group(self, auth,group_id, group_name): """ rename image group """ try: self.image_store.rename_image_group(auth,group_id, group_name) except Exception , ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
def add_image_group(self,auth, group_name,store_id): """ add image group """ try: group = self.image_store.new_group(group_name) self.image_store.add_group(auth,group,store_id) except Exception , ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
def add_storage_def(self, auth, site_id, group_id, node_id, type, opts,
                    op_level=None, sp_ids=None, scan_result=None):
    """Create a new storage definition from *opts*, reject duplicate
    names, and sync it to the target scope.

    On any validation error or exception the freshly added definition is
    removed again (rollback); rollback failures are re-raised.
    """
    new_sd = self.get_valid_sd(type, opts, op_level)
    site = self.manager.getSite(site_id)
    group = self.manager.getGroup(auth, group_id)
    node = None
    group_list = self.manager.getGroupList(auth, site_id)
    try:
        # Duplicate-name check against all definitions visible in scope.
        sdlist = self.storage_manager.get_sds(site_id, group_id)
        for sd in sdlist:
            if new_sd.name == sd.name:
                raise Exception(
                    "Storage share with same name already exists.")
        errs = []
        errs = self.update_storage_def(auth, new_sd, None, None, None,
                                       site, group, op_level, True, sp_ids,
                                       errs, scan_result)
        if errs:
            if len(errs) > 0:
                # Validation failed: roll the new definition back out.
                add_mode = True
                self.sync_manager.remove_defn(
                    new_sd, site, group, node, auth,
                    to_unicode(constants.STORAGE), constants.DETACH,
                    "REMOVE_STORAGE_DEF", self.storage_manager,
                    self.manager, add_mode, group_list, op_level)
                return {
                    'success': False,
                    'msg': to_str(errs).replace("'", "")
                }
    except Exception, ex:
        print_traceback()
        err_desc = to_str(ex).replace("'", "")
        err_desc = err_desc.strip()
        LOGGER.error(err_desc)
        try:
            # Best-effort rollback: only if the definition was persisted.
            add_mode = True
            defn_temp = self.storage_manager.get_sd(
                new_sd.id, None, None, None)
            if defn_temp:
                self.sync_manager.remove_defn(
                    defn_temp, site, group, node, auth,
                    to_unicode(constants.STORAGE), constants.DETACH,
                    "REMOVE_STORAGE_DEF", self.storage_manager,
                    self.manager, add_mode, group_list, op_level)
        except Exception, ex1:
            print_traceback()
            LOGGER.error(to_str(ex1).replace("'", ""))
            raise Exception(to_str(ex1))
def get_ref_disk_type_map(self): try: result = [] dic = {"Disk Image": "disk_image", "Disk Content": "disk_content"} for key in dic.keys(): result.append(dict(id=dic[key], value=_(key))) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'", "")) return "{success: false,msg: '", to_str(ex).replace("'", ""), "'}"
def _compare_node_info(self, dest_node, key, msg_list):
    """Append a (KEY, message) entry to *msg_list* when the platform-info
    value for *key* differs between this node and *dest_node*."""
    # src_val = self[key]
    # dest_val = dest_node[key]
    src_val = self.get_platform_info().get(key)
    dest_val = dest_node.get_platform_info().get(key)
    if src_val != dest_val:
        # BUG FIX: the two literals ran together as "...bothsource server";
        # add the missing space.
        msg_list.append((key.upper(),
                         key + " version is not the same on both " +
                         "source server and destination server. Source : " +
                         to_str(src_val) +
                         ", Destination " + to_str(dest_val)))
def check_image_exists(self,auth,image_name): try: #for image_group in self.image_store.get_image_groups(auth).values(): if self.image_store.image_exists_by_name(image_name): return "{success: true,exists:true,msg: 'Image with the same name exists.'}" except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '"+to_str(ex).replace("'","")+"'}"
def get_task_details(self, task_ids):
    """Load the Task rows for *task_ids* (results eager-loaded) and
    return them formatted for display."""
    LOGGER.debug("get_task_details query start : " +
                 to_str(datetime.utcnow()))
    tasks = (DBSession.query(Task)
             .filter(Task.task_id.in_(task_ids))
             .options(eagerload("result"))
             .all())
    LOGGER.debug("get_task_details query end : " +
                 to_str(datetime.utcnow()))
    return self.format_task_details(tasks)
def get_command_list(self): try: result = [] dic = {"tightvnc": constants.TIGHTVNC, "vncviewer": constants.VNC} for key in dic.keys(): result.append(dict(id=key, value=dic[key])) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'", "")) return "{success: false,msg: '", to_str(ex).replace("'", ""), "'}"
def get_ref_disk_type_map(self): try: result=[] dic={ "Disk Image": "disk_image", "Disk Content" : "disk_content" } for key in dic.keys(): result.append(dict(id=dic[key],value=key)) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
def _compare_node_info(self, dest_node, key, msg_list):
    """Append a (KEY, message) entry to *msg_list* when the platform-info
    value for *key* differs between this node and *dest_node*."""
    # src_val = self[key]
    # dest_val = dest_node[key]
    src_val = self.get_platform_info().get(key)
    dest_val = dest_node.get_platform_info().get(key)
    if src_val != dest_val:
        # BUG FIX: the two literals ran together as "...bothsource server";
        # add the missing space.
        msg_list.append(
            (key.upper(),
             key + " version is not the same on both " +
             "source server and destination server. Source : " +
             to_str(src_val) + ", Destination " + to_str(dest_val)))
def get_disk_fs_map(self): try: result = [] dic = {"None": "", "ext3": "ext3", "ext2": "ext2", "swap": "swap"} for key in dic.keys(): result.append(dict(id=dic[key], value=key)) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'", "")) return "{success: false,msg: '", to_str(ex).replace("'", ""), "'}"
def list_dir_contents(self, node_id=None, directory=None, _dc=None):
    """List the contents of *directory* on a managed node.

    On failure returns a dict with success='false', the error text and a
    short err code ('NoDirectory' for a missing path).
    """
    result = None
    self.authenticate()
    try:
        result=self.node_service.get_dir_contents(session['auth'],node_id,directory)
    except Exception , ex:
        print_traceback()
        x=to_str(ex)
        err=''
        # Translate the errno-2 message into a code the UI recognizes.
        if x.startswith('[Errno 2] No such file or directory:'):
            err='NoDirectory'
        return {'success':'false','msg':to_str(ex).replace("'",""),'err':err}
    # NOTE(review): the success path falls through and returns None even
    # though `result` holds the directory listing — confirm whether a
    # `return result` was intended here.
def is_storage_allocated(self, storage_id): returnVal = False msg = "NOT_IN_USE" try: returnVal = self.storage_manager.is_storage_allocated(storage_id) if returnVal: msg = "IN_USE" return "{success: true,msg: '" + msg + "'}" except Exception, ex: print_traceback() LOGGER.error(to_str(ex)) return "{success: false,msg: '" + to_str(ex) + "'}"
def get_vm_status(self, image_id):
    """Return name/value rows with the provisioned and running VM counts
    for the given image."""
    vms = DBSession.query(VM).filter(VM.image_id == image_id).all()
    running = 0
    for vm in vms:
        # "Blocked" guests are counted as running too.
        if vm.get_state_string() in ("Running", "Blocked"):
            running += 1
    return [
        dict(name='Provisioned VMs:', value=to_str(len(vms))),
        dict(name='Running VMs:', value=to_str(running)),
    ]
def edit_nw_defn(self,nw_id,nw_name,nw_desc):
    """Rename / re-describe a network definition.

    Validates that both fields are non-empty and that the new name is
    unique within the definition's scope (server, pool or data center),
    then pushes the change through the sync manager.
    """
    nw_name=(nw_name)
    nw_desc=(nw_desc)
    try:
        errmsgs=[]
        common_desc = { "Network name":nw_name, "Network description":nw_desc}
        for key in common_desc:
            v = common_desc.get(key)
            if not v:
                errmsgs.append("%s is required." % (key,))
        if errmsgs:
            if len(errmsgs)>0:
                return {'success':False,'msg':to_str(errmsgs).replace("'","")}
        #Identify definition scope here. Since we do not have node here. We are checking the definition in spdeflinks table. If definition is present in the table then the definition is at pool level else it is at server level.
        # going ahead we could think of adding scope in the defintion tables so that we can directly take the scope from definition
        row = DBSession.query(SPDefLink).filter_by(def_id = nw_id).first()
        if row:
            scope = constants.SCOPE_SP
        else:
            scope = constants.SCOPE_S
        #Validation for duplicate name
        alldefns=None
        if scope == constants.SCOPE_S:
            node_defn = DBSession.query(ServerDefLink).filter_by(def_id = nw_id).first()
            if node_defn:
                alldefns = DBSession.query(ServerDefLink).filter_by(server_id = node_defn.server_id, def_type = to_unicode(constants.NETWORK))
        elif scope == constants.SCOPE_SP:
            group_defn = DBSession.query(SPDefLink).filter_by(def_id = nw_id).first()
            if group_defn:
                alldefns = DBSession.query(SPDefLink).filter_by(group_id = group_defn.group_id, def_type = to_unicode(constants.NETWORK))
        elif scope == constants.SCOPE_DC:
            group_defn = DBSession.query(DCDefLink).filter_by(def_id = nw_id).first()
            if group_defn:
                alldefns = DBSession.query(DCDefLink).filter_by(site_id = group_defn.site_id, def_type = to_unicode(constants.NETWORK))
        if alldefns:
            for eachdefn in alldefns:
                # A sibling definition in the same scope that already has the
                # requested name (but a different id) is a duplicate.
                defnTemp = DBSession.query(NwDef).filter_by(id=eachdefn.def_id, name=nw_name).first()
                if defnTemp and defnTemp.id != nw_id:
                    raise Exception("Network definition with the same name already exists")
        defn = DBSession.query(NwDef).filter_by(id=nw_id).first()
        group = None
        auth = None
        self.sync_manager.update_defn(defn, nw_name, nw_desc,
                                      None, group, auth, constants.NETWORK,
                                      constants.ATTACH, self.nw_manager,
                                      'UPDATE_NETWORK_DEF')
        # NOTE(review): the success path falls through returning None —
        # confirm whether a success dict was intended here.
    except Exception, ex:
        print_traceback()
        LOGGER.error(to_str(ex).replace("'",""))
        return {'success':False,'msg':to_str(ex).replace("'","")}
def get_props_for_repos(self, node): print "Hostname %s" % (node.hostname) props = { prop_hostname : node.hostname, prop_login : node.username, prop_ssh_port : to_str(node.ssh_port), prop_migration_port : to_str(node.migration_port), prop_isRemote : to_str(node.isRemote), prop_use_keys : to_str(node.use_keys), prop_address : node.address, prop_platform : node.platform,} if props is not None: props.update(VNodeFactory.get_props_for_repos(self,node)) return props
def get_device_mode_map(self): try: result=[] dic={ "Read-Only": "r", "Read-Write" : "w", "Read-ForceWrite" : "w!"} for key in dic.keys(): result.append(dict(id=dic[key],value=key)) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
def get_command_list(self): try: result=[] dic= { "tightvnc": constants.TIGHTVNC, "vncviewer": constants.VNC } for key in dic.keys(): result.append(dict(id=key,value=dic[key])) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
def edit_storage_def(self, auth, storage_id, site_id, groupId, type, op_level, sp_ids, opts): try: site = self.manager.getSite(site_id) group=self.manager.getGroup(auth,groupId) #new_sd = self.get_valid_sd(type,opts) new_name = opts.get("name") new_desc = opts.get("description") self.update_storage_def(auth, None, new_name, new_desc, storage_id, site, group, op_level, False, sp_ids) self.SaveScanResult(storage_id, site_id) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'","").strip(),"'}"
def get_disk_fs_map(self): try: result=[] dic= {"None": "", "ext3": "ext3", "ext2" : "ext2", "swap" : "swap"} for key in dic.keys(): result.append(dict(id=dic[key],value=key)) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
def prepare_scripts(self, dest_node, type, defType):
    """Copy the network helper scripts and the common helper scripts to
    their configured locations on *dest_node*."""
    nw_src = os.path.abspath(tg.config.get("nw_script"))
    common_src = os.path.abspath(tg.config.get("common_script"))
    LOGGER.info("Source script location= " + to_str(nw_src))
    LOGGER.info("Destination script location= " +
                to_str(self.s_scripts_location))
    copyToRemote(nw_src, dest_node, self.s_scripts_location)
    LOGGER.info("Common source script location= " + to_str(common_src))
    LOGGER.info("Common destination script location= " +
                to_str(self.s_common_scripts_location))
    copyToRemote(common_src, dest_node, self.s_common_scripts_location)
def associate_defns(self, site_id, group_id, def_type, def_ids, auth,
                    op_level=None):
    """Associate existing storage definitions (comma-separated ids in
    *def_ids*) with a server pool.

    Each definition is synced to the pool's nodes; if a sync fails the
    definition is removed again (rollback) and the failure is re-raised
    after all definitions have been attempted.
    """
    error_desc = ""
    site = self.manager.getSite(site_id)
    group = self.manager.getGroup(auth, group_id)
    group_list = self.manager.getGroupList(auth, site_id)
    def_id_list = def_ids.split(",")
    for def_id in def_id_list:
        new_sd = DBSession.query(StorageDef).filter_by(id=def_id).first()
        node = None
        try:
            associate = True
            self.sync_manager.add_defn(new_sd, site, group, node, auth,
                                       to_unicode(constants.STORAGE),
                                       constants.ATTACH, "ADD_STORAGE_DEF",
                                       self.storage_manager, self.manager,
                                       op_level, associate)
            #matching disks on association of storage.
            vm_disks = self.manager.get_vm_disks_from_pool(auth, group_id)
            storage_disks = DBSession.query(StorageDisks).filter_by(
                storage_id=def_id)
            if storage_disks:
                for eachdisk in storage_disks:
                    self.manager.matching_disk_on_discover_storage(
                        vm_disks, eachdisk.id)
        except Exception, ex:
            # Remember the failure but keep processing the remaining ids.
            error_desc = to_str(ex)
            print_traceback()
            LOGGER.error(to_str(ex).replace("'", ""))
            #if we get any exception while adding/ sync definition then are removing the definition.
            add_mode = True
            try:
                self.sync_manager.remove_defn(
                    new_sd, site, group, node, auth,
                    to_unicode(constants.STORAGE), constants.DETACH,
                    "REMOVE_STORAGE_DEF", self.storage_manager,
                    self.manager, add_mode, group_list, op_level)
            except Exception, ex1:
                # Rollback itself failed: surface that error immediately.
                print_traceback()
                LOGGER.error(to_str(ex1).replace("'", ""))
                raise Exception(to_str(ex1))
    # Surface the (last) association failure to the caller.
    if error_desc:
        raise Exception(error_desc)
def get_disks_type_map(self, option, mode): try: if mode in [ "edit_image_settings", "provision_image", "provision_vm" ]: if option == "CREATE_DISK": value_map = self.get_disk_type_map() elif option == "USE_DEVICE": value_map = self.get_disk_type_map_4_existing_disk() elif option == "USE_ISO": value_map = self.get_disk_type_map_4_iso() elif option == "USE_REF_DISK": value_map = self.get_disk_type_map_4_ref_disk() else: value_map = self.get_disk_type_map_4_vm_config() result = [] # if option=="USE_REF_DISK": # dic={ "File (VBD)": "file*VBD", # "Logical Volume" : "phy*LVM", # "Select Existing Device": "phy*", # "QCOW": "tap:qcow*qcow2", # "VMDK": "tap:vmdk*vmdk" # } # elif option=="CREATE_DISK": # dic={ "File (VBD)": "file*VBD", # "QCOW": "tap:qcow*qcow2", # "VMDK": "tap:vmdk*vmdk", # "Logical Volume" : "phy*LVM" # } # elif option=="USE_ISO": # dic={ "Select ISO ": "file*ISO"} # elif option=="USE_DEVICE": # dic={"Select Existing Device": "phy*"} # else: # dic={ "File (VBD)": "file", # "QCOW": "tap:qcow", # "VMDK": "tap:vmdk", # "Physical Device" : "phy" # } for key in value_map.keys(): (type, disk_type) = value_map[key] result.append(dict(id=type, value=_(key), disk_type=disk_type)) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'", "")) return "{success: false,msg: '", to_str(ex).replace("'", ""), "'}"
def populate_platform_info(self):
    """Collect the Xen version string and capabilities from the VMM into
    a platform-info dict."""
    vmm_info = self.get_vmm_info()
    version = "%s.%s%s" % (to_str(vmm_info['xen_major']),
                           to_str(vmm_info['xen_minor']),
                           to_str(vmm_info['xen_extra']))
    platform_dict = {'xen_version': version}
    caps_value = vmm_info['xen_caps']
    if caps_value:
        # Turn the space-separated caps list into a comma-separated one.
        caps_value = caps_value.strip().replace(" ", ", ")
    platform_dict['xen_caps'] = caps_value
    return platform_dict
def get_device_mode_map(self): try: result = [] dic = { "Read-Only": "r", "Read-Write": "w", "Read-ForceWrite": "w!" } for key in dic.keys(): result.append(dict(id=dic[key], value=key)) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'", "")) return "{success: false,msg: '", to_str(ex).replace("'", ""), "'}"
def get_props_for_repos(self, node): print "Hostname %s" % (node.hostname) props = { prop_hostname: node.hostname, prop_login: node.username, prop_ssh_port: to_str(node.ssh_port), prop_migration_port: to_str(node.migration_port), prop_isRemote: to_str(node.isRemote), prop_use_keys: to_str(node.use_keys), prop_address: node.address, prop_platform: node.platform, } if props is not None: props.update(VNodeFactory.get_props_for_repos(self, node)) return props
def get_disks_options_map(self): try: result = [] dic = { "Create New Disk": "CREATE_DISK", "Use Physical Device": "USE_DEVICE", "Use ISO File": "USE_ISO", "Clone Reference Disk": "USE_REF_DISK", } for key in dic.keys(): result.append(dict(id=dic[key], value=_(key))) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'", "")) return "{success: false,msg: '", to_str(ex).replace("'", ""), "'}"