def update_disks_size(self, auth):
    """Walk every site -> server pool -> managed node and re-run
    self.test_storage_def for each storage definition linked to a node.

    NOTE(review): DBSession.query(Site) returns a Query object, which is
    always truthy, so `if sites:` never skips -- confirm intent.
    """
    sites = DBSession.query(Site)
    if sites:
        for eachsite in sites:
            #site = DBSession.query(Sites).filter_by(id=eachsite.id).first()
            site_entity = auth.get_entity(eachsite.id)
            #get all groups in the site.
            group_entities = auth.get_entities(
                to_unicode(constants.SERVER_POOL), site_entity)
            #loop through each group in the site
            for eachgroup in group_entities:
                group = DBSession.query(ServerGroup).filter_by(
                    id=eachgroup.entity_id).first()
                if group:
                    group_entity = auth.get_entity(group.id)
                    #get all nodes in the group
                    node_entities = auth.get_entities(
                        to_unicode(constants.MANAGED_NODE), group_entity)
                    #loop through each node in the group
                    for eachnode in node_entities:
                        node = DBSession.query(ManagedNode).filter_by(
                            id=eachnode.entity_id).first()
                        server_def_link = DBSession.query(
                            ServerDefLink).filter_by(server_id=node.id)
                        if server_def_link:
                            for each_link in server_def_link:
                                defn = DBSession.query(
                                    StorageDef).filter_by(
                                    id=each_link.def_id).first()
                                if defn:
                                    # Re-test the definition against this
                                    # node/group/site combination.
                                    self.test_storage_def(
                                        auth, node, group, eachsite, defn)
def post_login(self,userid,came_from=url('/')):
    """
    Redirect the user to the initially requested page on successful
    authentication or redirect her back to the login page if login failed.
    """
    result=''
    if not userid:
        # No authenticated user id: report an expired session to the client.
        result = "{success:false,msg:'session expired'}"
        return result
    u=User.by_user_name(to_unicode(userid))
    g=Group.by_group_name(to_unicode('adminGroup'))
    auth=AuthorizationService()
    auth.user=u
    # Cache user details and UI config flags in the web session.
    session['username']=u.user_name
    session['user_firstname']=u.firstname
    session['has_adv_priv']=tg.config.get(constants.ADVANCED_PRIVILEGES)
    session['PAGEREFRESHINTERVAL']=tg.config.get(constants.PAGEREFRESHINTERVAL)
    session['TASKPANEREFRESH']=tg.config.get(constants.TASKPANEREFRESH)
    session['userid']=userid
    session['auth']=auth
    session['edition_string']=get_edition_string()
    session['version']=get_version()
    # Admin flag depends on membership in 'adminGroup'.
    is_admin = u.has_group(g)
    session['is_admin']=is_admin
    session.save()
    # Drop any cached UI tree for this user so it is rebuilt fresh.
    TopCache().delete_usercache(auth)
    result = "{success:true}"
    return result
def remove_storage_def(self, auth, storage_id, site_id, groupId,
                       op_level=None):
    """Detach and remove a storage definition from a site/group.

    Returns a JSON-style string: success plus any warning text from the
    sync manager, or failure with the (quote-stripped) error message.
    """
    try:
        site = self.manager.getSite(site_id)
        group = self.manager.getGroup(auth, groupId)
        group_list = self.manager.getGroupList(auth, site_id)
        sd_to_remove = self.storage_manager.get_sd(
            storage_id, site_id, groupId, to_unicode(constants.STORAGE))
        node = None
        add_mode = False
        warning_msg = self.sync_manager.remove_defn(
            sd_to_remove, site, group, node, auth,
            to_unicode(constants.STORAGE), constants.DETACH,
            "REMOVE_STORAGE_DEF", self.storage_manager, self.manager,
            add_mode, group_list, op_level)
        if warning_msg:
            return "{success: true,msg: '" + warning_msg + "'}"
        return "{success: true,msg: 'Storage Removed'}"
    except Exception, ex:
        print_traceback()
        # Strip single quotes so the message can be embedded in the
        # hand-built JSON string below.
        err_desc = to_str(ex).replace("'", "")
        err_desc = err_desc.strip()
        LOGGER.error(to_str(err_desc))
        return "{success: false,msg: '" + err_desc + "'}"
def associate_defns(self, site_id, group_id, def_type, def_ids, auth,
                    op_level=None):
    """Associate each storage definition in the comma-separated `def_ids`
    with the given site/group, then re-match VM disks in the pool.

    On failure of any definition, the partially-added definition is
    removed again and the first error is re-raised after the loop.
    """
    error_desc=""
    site = self.manager.getSite(site_id)
    group=self.manager.getGroup(auth,group_id)
    group_list = self.manager.getGroupList(auth, site_id)
    def_id_list = def_ids.split(",")
    for def_id in def_id_list:
        new_sd = DBSession.query(StorageDef).filter_by(id=def_id).first()
        node = None
        try:
            associate=True
            self.sync_manager.add_defn(new_sd, site, group, node, auth,
                                       to_unicode(constants.STORAGE),
                                       constants.ATTACH, "ADD_STORAGE_DEF",
                                       self.storage_manager, self.manager,
                                       op_level, associate)
            #matching disks on association of storage.
            vm_disks = self.manager.get_vm_disks_from_pool(auth, group_id)
            storage_disks = DBSession.query(StorageDisks).filter_by(storage_id=def_id)
            if storage_disks:
                for eachdisk in storage_disks:
                    self.manager.matching_disk_on_discover_storage(vm_disks, eachdisk.id)
        except Exception, ex:
            error_desc = to_str(ex)
            print_traceback()
            LOGGER.error(to_str(ex).replace("'",""))
            #if we get any exception while adding/ sync definition then are removing the definition.
            add_mode=True
            try:
                self.sync_manager.remove_defn(new_sd, site, group, node,
                                              auth,
                                              to_unicode(constants.STORAGE),
                                              constants.DETACH,
                                              "REMOVE_STORAGE_DEF",
                                              self.storage_manager,
                                              self.manager, add_mode,
                                              group_list, op_level)
            except Exception, ex1:
                print_traceback()
                LOGGER.error(to_str(ex1).replace("'",""))
                # Cleanup itself failed: surface that error immediately.
                raise Exception(to_str(ex1))
    if error_desc:
        raise Exception(error_desc)
def on_add_node(self, nodeId, groupId, site_id, auth, def_manager):
    """Link every definition attached to server pool `groupId` to the
    newly added node `nodeId`.

    Each new server/definition link is created with the default status
    OUT_OF_SYNC and no details; a later sync operation refreshes them.
    Removed unused locals (`op`, `defn_list`, `errs`) and the always-true
    `if sp_defns:` guard (a Query object is always truthy).
    """
    # If one of them is not present then return from here.
    if not (nodeId or groupId):
        return
    sync_manager = SyncDef()
    defType = def_manager.getType()
    # Link all the definitions in the server pool to this new server node.
    sp_defns = DBSession.query(SPDefLink).filter_by(
        group_id=to_unicode(groupId))
    for eachdefn in sp_defns:
        defn = def_manager.get_defn(eachdefn.def_id)
        if defn:
            # Default values for the link; changed after sync runs.
            status = to_unicode(constants.OUT_OF_SYNC)
            details = None
            sync_manager.add_node_defn(nodeId, defn.id, defType,
                                       status, details)
def post_login(self,userid,came_from=url('/')):
    """
    Redirect the user to the initially requested page on successful
    authentication or redirect her back to the login page if login failed.
    """
    result=''
    if not userid:
        # No authenticated user id: report an expired session to the client.
        result = "{success:false,msg:'session expired'}"
        return result
    u=User.by_user_name(to_unicode(userid))
    g=Group.by_group_name(to_unicode('adminGroup'))
    auth=AuthorizationService()
    auth.user=u
    # Cache user details and UI config flags in the web session.
    session['username']=u.user_name
    session['user_firstname']=u.firstname
    session['has_adv_priv']=tg.config.get(constants.ADVANCED_PRIVILEGES)
    session['PAGEREFRESHINTERVAL']=tg.config.get(constants.PAGEREFRESHINTERVAL)
    session['TASKPANEREFRESH']=tg.config.get(constants.TASKPANEREFRESH)
    session['userid']=userid
    session['auth']=auth
    session['edition_string']=get_edition_string()
    session['version']=get_version()
    # Refresh this controller's registered-session bookkeeping.
    self.update_registerd_session()
    # Admin flag depends on membership in 'adminGroup'.
    is_admin = u.has_group(g)
    session['is_admin']=is_admin
    session.save()
    # Drop any cached UI tree for this user so it is rebuilt fresh.
    TopCache().delete_usercache(auth)
    result = "{success:true}"
    return result
def update_storage_def(self, auth, new_sd, new_name, new_desc, storage_id,
                       site, group, op_level, new=True, sp_ids=None,
                       errs=None, scan_result=None):
    """Add (`new=True`) or update (`new=False`) a storage definition,
    after checking that no other definition in the same group/site scope
    has the same name.

    Raises Exception on a duplicate name; otherwise delegates to the
    sync manager's add_defn / update_defn.
    """
    if new == True:
        #Validation for duplicate name
        # Scope the duplicate check to the group links when a group is
        # given, otherwise to the site links.
        if group:
            group_defns = DBSession.query(SPDefLink).filter_by(
                group_id=group.id)
        elif site:
            group_defns = DBSession.query(DCDefLink).filter_by(
                site_id=site.id)
        for group_defn in group_defns:
            rowSDef = DBSession.query(StorageDef).filter_by(
                id=group_defn.def_id, name=new_name).first()
            if rowSDef:
                raise Exception(
                    "Storage definition with the same name already exists")
        node = None
        self.sync_manager.add_defn(new_sd, site, group, node, auth,
                                   to_unicode(constants.STORAGE),
                                   constants.ATTACH, "ADD_STORAGE_DEF",
                                   self.storage_manager, self.manager,
                                   op_level, sp_ids, scan_result)
    else:
        #Validation for duplicate name
        if group:
            group_defns = DBSession.query(SPDefLink).filter_by(
                group_id=group.id)
        elif site:
            group_defns = DBSession.query(DCDefLink).filter_by(
                site_id=site.id)
        for group_defn in group_defns:
            rowSDef = DBSession.query(StorageDef).filter_by(
                id=group_defn.def_id, name=new_name).first()
            # A match on the definition being updated is not a duplicate.
            if rowSDef and rowSDef.id != storage_id:
                raise Exception(
                    "Storage definition with the same name already exists")
        defn = DBSession.query(StorageDef).filter_by(id=storage_id).first()
        self.sync_manager.update_defn(defn, new_name, new_desc, site,
                                      group, auth,
                                      to_unicode(constants.STORAGE),
                                      constants.ATTACH,
                                      self.storage_manager,
                                      'UPDATE_STORAGE_DEF', op_level,
                                      sp_ids, self.manager)
def __new__(cls, name, bases, dictionary):
    """Metaclass hook: stamp the class name (as unicode) into
    __mapper_args__['polymorphic_identity'] before the class is built."""
    mapper_args = dictionary.setdefault('__mapper_args__', {})
    mapper_args['polymorphic_identity'] = to_unicode(name)
    return DeclarativeMeta.__new__(cls, name, bases, dictionary)
def add_storage_def(self, auth, site_id, group_id, node_id, type, opts,
                    op_level=None, sp_ids=None, scan_result=None):
    """Validate and add a new storage definition to a site/group; on any
    error, roll the partially-added definition back out.

    NOTE(review): after a failed add, the original exception is logged
    but only the cleanup failure (`ex1`) is re-raised -- when cleanup
    succeeds the caller receives None despite the failure. Confirm this
    is intended.
    """
    new_sd = self.get_valid_sd(type, opts, op_level)
    site = self.manager.getSite(site_id)
    group = self.manager.getGroup(auth, group_id)
    node = None
    group_list = self.manager.getGroupList(auth, site_id)
    try:
        # Reject a name that collides with any existing definition in scope.
        sdlist = self.storage_manager.get_sds(site_id, group_id)
        for sd in sdlist:
            if new_sd.name == sd.name:
                raise Exception(
                    "Storage share with same name already exists.")
        errs = []
        errs = self.update_storage_def(auth, new_sd, None, None, None,
                                       site, group, op_level, True,
                                       sp_ids, errs, scan_result)
        if errs:
            if len(errs) > 0:
                # Partial success with errors: undo the add and report.
                add_mode = True
                self.sync_manager.remove_defn(
                    new_sd, site, group, node, auth,
                    to_unicode(constants.STORAGE), constants.DETACH,
                    "REMOVE_STORAGE_DEF", self.storage_manager,
                    self.manager, add_mode, group_list, op_level)
                return {
                    'success': False,
                    'msg': to_str(errs).replace("'", "")
                }
    except Exception, ex:
        print_traceback()
        err_desc = to_str(ex).replace("'", "")
        err_desc = err_desc.strip()
        LOGGER.error(err_desc)
        try:
            # Best-effort cleanup of whatever was persisted before failing.
            add_mode = True
            defn_temp = self.storage_manager.get_sd(
                new_sd.id, None, None, None)
            if defn_temp:
                self.sync_manager.remove_defn(
                    defn_temp, site, group, node, auth,
                    to_unicode(constants.STORAGE), constants.DETACH,
                    "REMOVE_STORAGE_DEF", self.storage_manager,
                    self.manager, add_mode, group_list, op_level)
        except Exception, ex1:
            print_traceback()
            LOGGER.error(to_str(ex1).replace("'", ""))
            raise Exception(to_str(ex1))
def add_node_defn(self, node_id, def_id, def_type, status, details):
    """Create a ServerDefLink row joining `node_id` to definition
    `def_id`, unless one already exists for the pair."""
    existing = DBSession.query(ServerDefLink).filter_by(
        server_id=node_id, def_id=def_id).first()
    if existing:
        return
    link = ServerDefLink()
    link.server_id = to_unicode(node_id)
    link.def_type = to_unicode(def_type)
    link.def_id = def_id
    link.status = to_unicode(status)
    link.details = to_unicode(details)
    link.dt_time = datetime.utcnow()
    DBSession.add(link)
def add_node_defn(self, node_id, def_id, def_type, status, details):
    """Create a ServerDefLink row joining `node_id` to definition
    `def_id`, unless one already exists for the pair."""
    #Check whether the record is already present...
    row = DBSession.query(ServerDefLink).filter_by(server_id=node_id,
                                                   def_id=def_id).first()
    if not row:
        node_defn = ServerDefLink()
        node_defn.server_id = to_unicode(node_id)
        node_defn.def_type = to_unicode(def_type)
        node_defn.def_id = def_id
        node_defn.status = to_unicode(status)
        node_defn.details = to_unicode(details)
        node_defn.dt_time = datetime.utcnow()
        DBSession.add(node_defn)
def associate_defns(self, site_id, group_id, def_type, def_ids, auth,
                    op_level=None):
    """Associate each storage definition in the comma-separated `def_ids`
    with the given site/group, then re-match VM disks in the pool.

    On failure of any definition, the partially-added definition is
    removed again and the first error is re-raised after the loop.
    """
    error_desc = ""
    site = self.manager.getSite(site_id)
    group = self.manager.getGroup(auth, group_id)
    group_list = self.manager.getGroupList(auth, site_id)
    def_id_list = def_ids.split(",")
    for def_id in def_id_list:
        new_sd = DBSession.query(StorageDef).filter_by(id=def_id).first()
        node = None
        try:
            associate = True
            self.sync_manager.add_defn(new_sd, site, group, node, auth,
                                       to_unicode(constants.STORAGE),
                                       constants.ATTACH, "ADD_STORAGE_DEF",
                                       self.storage_manager, self.manager,
                                       op_level, associate)
            #matching disks on association of storage.
            vm_disks = self.manager.get_vm_disks_from_pool(auth, group_id)
            storage_disks = DBSession.query(StorageDisks).filter_by(
                storage_id=def_id)
            if storage_disks:
                for eachdisk in storage_disks:
                    self.manager.matching_disk_on_discover_storage(
                        vm_disks, eachdisk.id)
        except Exception, ex:
            error_desc = to_str(ex)
            print_traceback()
            LOGGER.error(to_str(ex).replace("'", ""))
            #if we get any exception while adding/ sync definition then are removing the definition.
            add_mode = True
            try:
                self.sync_manager.remove_defn(
                    new_sd, site, group, node, auth,
                    to_unicode(constants.STORAGE), constants.DETACH,
                    "REMOVE_STORAGE_DEF", self.storage_manager,
                    self.manager, add_mode, group_list, op_level)
            except Exception, ex1:
                print_traceback()
                LOGGER.error(to_str(ex1).replace("'", ""))
                # Cleanup itself failed: surface that error immediately.
                raise Exception(to_str(ex1))
    if error_desc:
        raise Exception(error_desc)
def __init__(
        self,
        hostname=None,
        username=Node.DEFAULT_USER,
        password=None,
        isRemote=False,
        protocol="tcp",
        tcp_port=8006,
        ssh_port=22,
        migration_port=8002,
        helper=None,
        #store = None,
        use_keys=False,
        address=None):
    """Construct a Xen node: delegates connection details to VNode and
    keeps the Xen-specific transport settings (protocol, tcp/migration
    ports) on this instance."""
    VNode.__init__(
        self,
        to_unicode("xen"),  #platform, I'm xen node
        #store,
        hostname,
        username,
        password,
        isRemote,
        ssh_port,
        helper,
        use_keys,
        address)
    # dom0 handle is resolved lazily elsewhere.
    self._dom0 = None
    self.metrics_helper = MetricsHelper(self)
    self.tcp_port = tcp_port
    self.migration_port = migration_port
    self.protocol = protocol
def add_storage_def(self,auth, site_id, group_id, node_id, type, opts,
                    op_level=None, sp_ids=None, scan_result=None):
    """Validate and add a new storage definition to a site/group; on any
    error, roll the partially-added definition back out.

    NOTE(review): after a failed add, the original exception is logged
    but only the cleanup failure (`ex1`) is re-raised -- when cleanup
    succeeds the caller receives None despite the failure. Confirm this
    is intended.
    """
    new_sd = self.get_valid_sd(type,opts, op_level)
    site = self.manager.getSite(site_id)
    group=self.manager.getGroup(auth,group_id)
    node = None
    group_list = self.manager.getGroupList(auth, site_id)
    try:
        # Reject a name that collides with any existing definition in scope.
        sdlist = self.storage_manager.get_sds(site_id, group_id)
        for sd in sdlist:
            if new_sd.name==sd.name:
                raise Exception("Storage share with same name already exists.")
        errs=[]
        errs = self.update_storage_def(auth, new_sd, None, None, None,
                                       site, group, op_level, True,
                                       sp_ids, errs, scan_result)
        if errs:
            if len(errs) > 0:
                # Partial success with errors: undo the add and report.
                add_mode=True
                self.sync_manager.remove_defn(new_sd, site, group, node,
                                              auth,
                                              to_unicode(constants.STORAGE),
                                              constants.DETACH,
                                              "REMOVE_STORAGE_DEF",
                                              self.storage_manager,
                                              self.manager, add_mode,
                                              group_list, op_level)
                return {'success':False,'msg':to_str(errs).replace("'","")}
    except Exception, ex:
        print_traceback()
        err_desc = to_str(ex).replace("'","")
        err_desc = err_desc.strip()
        LOGGER.error(err_desc)
        try:
            # Best-effort cleanup of whatever was persisted before failing.
            add_mode=True
            defn_temp = self.storage_manager.get_sd(new_sd.id, None, None, None)
            if defn_temp:
                self.sync_manager.remove_defn(defn_temp, site, group, node,
                                              auth,
                                              to_unicode(constants.STORAGE),
                                              constants.DETACH,
                                              "REMOVE_STORAGE_DEF",
                                              self.storage_manager,
                                              self.manager, add_mode,
                                              group_list, op_level)
        except Exception, ex1:
            print_traceback()
            LOGGER.error(to_str(ex1).replace("'",""))
            raise Exception(to_str(ex1))
def update_storage_def(self, auth, new_sd, new_name, new_desc, storage_id,
                       site, group, op_level, new=True, sp_ids=None,
                       errs=None, scan_result=None):
    """Add (`new=True`) or update (`new=False`) a storage definition,
    after checking that no other definition in the same group/site scope
    has the same name.

    Raises Exception on a duplicate name; otherwise delegates to the
    sync manager's add_defn / update_defn.
    """
    if new == True:
        #Validation for duplicate name
        # Scope the duplicate check to the group links when a group is
        # given, otherwise to the site links.
        if group:
            group_defns = DBSession.query(SPDefLink).filter_by(group_id = group.id)
        elif site:
            group_defns = DBSession.query(DCDefLink).filter_by(site_id = site.id)
        for group_defn in group_defns:
            rowSDef = DBSession.query(StorageDef).filter_by(
                id=group_defn.def_id, name=new_name).first()
            if rowSDef:
                raise Exception("Storage definition with the same name already exists")
        node = None
        self.sync_manager.add_defn(new_sd, site, group, node, auth,
                                   to_unicode(constants.STORAGE),
                                   constants.ATTACH, "ADD_STORAGE_DEF",
                                   self.storage_manager, self.manager,
                                   op_level, sp_ids, scan_result)
    else:
        #Validation for duplicate name
        if group:
            group_defns = DBSession.query(SPDefLink).filter_by(group_id = group.id)
        elif site:
            group_defns = DBSession.query(DCDefLink).filter_by(site_id = site.id)
        for group_defn in group_defns:
            rowSDef = DBSession.query(StorageDef).filter_by(
                id=group_defn.def_id, name=new_name).first()
            # A match on the definition being updated is not a duplicate.
            if rowSDef and rowSDef.id != storage_id:
                raise Exception("Storage definition with the same name already exists")
        defn = DBSession.query(StorageDef).filter_by(id=storage_id).first()
        self.sync_manager.update_defn(defn, new_name, new_desc, site,
                                      group, auth,
                                      to_unicode(constants.STORAGE),
                                      constants.ATTACH,
                                      self.storage_manager,
                                      'UPDATE_STORAGE_DEF', op_level,
                                      sp_ids, self.manager)
def __init__(self, hostname = None, username= Node.DEFAULT_USER,
             password=None, isRemote=False, protocol = "tcp",
             tcp_port = 8006, ssh_port = 22, migration_port = 8002,
             helper = None,
             #store = None,
             use_keys = False, address = None):
    """Construct a Xen node: delegates connection details to VNode and
    keeps the Xen-specific transport settings (protocol, tcp/migration
    ports) on this instance."""
    VNode.__init__(self,
                   to_unicode("xen"),  #platform, I'm xen node
                   #store,
                   hostname,
                   username,
                   password,
                   isRemote,
                   ssh_port,
                   helper,
                   use_keys,
                   address)
    # dom0 handle is resolved lazily elsewhere.
    self._dom0 = None
    self.metrics_helper = MetricsHelper(self)
    self.tcp_port = tcp_port
    self.migration_port = migration_port
    self.protocol = protocol
def __init__(self, hostname = None, username= Node.DEFAULT_USER,
             password=None, isRemote=False, ssh_port = 22,
             migration_port = 8002, helper = None,
             #store = None,
             use_keys = False, address = None):
    """Construct a KVM node: delegates connection details to VNode and
    keeps the KVM-specific migration port on this instance."""
    VNode.__init__(self,
                   to_unicode("kvm"),
                   #store,
                   hostname,
                   username,
                   password,
                   isRemote,
                   ssh_port,
                   helper,
                   use_keys,
                   address)
    self.migration_port = migration_port
def find_nw(self, vif, op_level=None):
    """Return the network definition whose bridge name matches this
    vif's bridge, or None when no definition matches."""
    wanted_bridge = vif.get_bridge()
    candidates = self.nw_manager.get_defns(
        to_unicode(constants.NETWORK), None, None,
        self.managed_node.id, op_level)
    for candidate in candidates:
        info = candidate.bridge_info
        if info and info["name"] == wanted_bridge:
            return candidate
def __init__(self, config):
    """Initialise per-VM storage statistics from the VM matching
    `config.id`, defaulting counters to zero when absent."""
    self.config = config
    self.storage_stats = {}
    self.vm_id = None
    if self.config:
        vm = DBSession.query(VM).filter_by(
            id=to_unicode(config.id)).first()
        if vm:
            self.vm_id = vm.id
            self.storage_stats = self.get_storage_stats(vm.id)
    self.disk_stats = {}
    if self.storage_stats is not None:
        ds = self.storage_stats.get(self.DISK_STATS)
        if ds is None:
            # Seed the stats dict with an empty per-disk mapping.
            self.storage_stats[
                self.DISK_STATS] = self.disk_stats  # initial value of {}
        else:
            self.disk_stats = ds
        self.local_allocation = self.storage_stats.get(self.LOCAL_ALLOC)
        if not self.local_allocation:
            self.local_allocation = 0
        self.shared_allocation = self.storage_stats.get(self.SHARED_ALLOC)
        if not self.shared_allocation:
            self.shared_allocation = 0
    self.storage_disk_id = None
def get_server_def_list(self,site_id, group_id, def_id): try: server_def_list=[] node_defns = self.sync_manager.get_node_defns(def_id, to_unicode(constants.NETWORK)) if node_defns: for eachdefn in node_defns: temp_dic={} if eachdefn: node = DBSession.query(ManagedNode).filter_by(id=eachdefn.server_id).first() temp_dic['id']=eachdefn.server_id if node: temp_dic['name']=node.hostname else: temp_dic['name']=None temp_dic['status']=eachdefn.status if eachdefn.details: temp_dic['details']=eachdefn.details else: temp_dic['details']=None server_def_list.append(temp_dic) except Exception, ex: print_traceback() LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
def __init__(self, config):
    """Initialise per-VM storage statistics from the VM matching
    `config.id`, defaulting counters to zero when absent."""
    self.config = config
    self.storage_stats = {}
    self.vm_id = None
    if self.config:
        vm = DBSession.query(VM).filter_by(id=to_unicode(config.id)).first()
        if vm:
            self.vm_id = vm.id
            self.storage_stats = self.get_storage_stats(vm.id)
    self.disk_stats = {}
    if self.storage_stats is not None:
        ds = self.storage_stats.get(self.DISK_STATS)
        if ds is None:
            # Seed the stats dict with an empty per-disk mapping.
            self.storage_stats[self.DISK_STATS] = self.disk_stats  # initial value of {}
        else:
            self.disk_stats = ds
        self.local_allocation = self.storage_stats.get(self.LOCAL_ALLOC)
        if not self.local_allocation:
            self.local_allocation = 0
        self.shared_allocation = self.storage_stats.get(self.SHARED_ALLOC)
        if not self.shared_allocation:
            self.shared_allocation = 0
    self.storage_disk_id = None
def task_fail(self, exception, auth, cancelled=False):
    """Record a FAILED (or CANCELED when `cancelled`) TaskResult for the
    current task instance, queue an e-mail Notification if the user has
    an address, and flag the UI update for non-repeating entity tasks.

    Always clears the running-task object and closes the connection.
    """
    conn = self.task_manager.get_database_conn()
    try:
        fail_status = self.FAILED
        if cancelled == True:
            fail_status = self.CANCELED
        results = to_str(exception)
        res = TaskResult(self.task_id, \
                         self.curr_instance, \
                         fail_status, \
                         results, \
                         cancel_requested=cancelled)
        # u = User.by_user_name(self.user_name)
        email = auth.email_address
        if email:
            # Only notify when the user has an e-mail address configured.
            notification=Notification(to_unicode(self.task_id), \
                                      self.name, \
                                      self.curr_instance, \
                                      results, \
                                      self.user_name, \
                                      email)
            conn.add(notification)
        conn.merge(res)
        conn.commit()
        if not self.repeating and self.entity_id != None:
            UIUpdateManager().set_updated_tasks(self.task_id,\
                                                self.user_name)
    finally:
        self.clear_running_task_obj()
        conn.close()
def task_fail(self, exception, auth, cancelled=False):
    """Record a FAILED (or CANCELED when `cancelled`) TaskResult for the
    current task instance, queue an e-mail Notification if the user has
    an address, and flag the UI update for non-repeating entity tasks.

    Always clears the running-task object and closes the connection.
    """
    conn = self.task_manager.get_database_conn()
    try:
        fail_status=self.FAILED
        if cancelled==True:
            fail_status=self.CANCELED
        results = to_str(exception)
        res = TaskResult(self.task_id, \
                         self.curr_instance, \
                         fail_status, \
                         results, \
                         cancel_requested=cancelled)
        # u = User.by_user_name(self.user_name)
        email=auth.email_address
        if email:
            # Only notify when the user has an e-mail address configured.
            notification=Notification(to_unicode(self.task_id), \
                                      self.name, \
                                      self.curr_instance, \
                                      results, \
                                      self.user_name, \
                                      email)
            conn.add(notification)
        conn.merge(res)
        conn.commit()
        if not self.repeating and self.entity_id!=None:
            UIUpdateManager().set_updated_tasks(self.task_id,\
                                                self.user_name)
    finally:
        self.clear_running_task_obj()
        conn.close()
def get_server_def_list(self,site_id, group_id, def_id): try: server_def_list=[] node_defns = self.sync_manager.get_node_defns(def_id, to_unicode(constants.STORAGE)) if node_defns: for eachdefn in node_defns: temp_dic={} if eachdefn: node = DBSession.query(ManagedNode).filter_by(id=eachdefn.server_id).first() temp_dic['id']=eachdefn.server_id if node: temp_dic['name']=node.hostname else: temp_dic['name']=None temp_dic['status']=eachdefn.status if eachdefn.details: temp_dic['details']=eachdefn.details else: temp_dic['details']=None server_def_list.append(temp_dic) except Exception, ex: LOGGER.error(to_str(ex).replace("'","")) return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
def save_email_setup_details(self, desc, servername, port, useremail,
                             password, secure):
    """Persist SMTP setup details for the (first) site.

    Skips insertion when a record for the same mail server already
    exists for the site. Returns a dict with `success` and a
    human-readable `msg`.
    """
    #SiteRecord = DBSession.query(Site).filter(Site.name == 'Data Center').first()
    SiteRecord = DBSession.query(Site).first()
    if SiteRecord:
        site_id = SiteRecord.id
        # check if record for same server name avoid duplicate records of same name
        EmailRecord = DBSession.query(EmailSetup).filter(
            EmailSetup.site_id == site_id).filter(
            EmailSetup.mail_server == servername).first()
        if EmailRecord:
            # BUG FIX: corrected message typo ("Duplicaate" -> "Duplicate").
            return dict(success=True, msg="Duplicate Record found in list")
        else:
            # Add record in EmailSetup table for site id queried
            email_setup_obj = EmailSetup(servername, desc, port, secure,
                                         site_id, useremail, password)
            DBSession.add(email_setup_obj)
            emailsetupid = email_setup_obj.getEmailSetupId()
            EmailManager().add_entity(to_unicode(servername), emailsetupid,
                                      to_unicode(constants.EMAIL), None)
            return dict(success=True, msg="New Record Added Successfully")
def __init__(self,mail_server,desc, port, use_secure, site_id, useremail,
             password):
    """Build an e-mail setup record with a fresh hex id and an embedded
    Credential holding the SMTP account details."""
    self.id = getHexID()
    self.mail_server = to_unicode(mail_server)
    self.description=desc
    self.port = port
    self.use_secure = use_secure
    self.site_id = site_id
    # Credential is keyed by this record's id; the second argument is an
    # empty unicode name.
    self.credential=Credential(self.id,u"",user_email = useremail,
                               password = password)
def get_node_defns(self, def_id, defType):
    """Return all ServerDefLink rows joining definition `def_id` of type
    `defType` to servers, as a plain list."""
    rows = DBSession.query(ServerDefLink).filter_by(
        def_id=def_id, def_type=to_unicode(defType))
    return [row for row in rows]
def get_pending_node_ids(self, node_ids):
    """Return the subset of `node_ids` whose EntityTasks rows are still
    owned by this task (i.e. still being processed on resume)."""
    #while resuming get node_ids which is still under process by the task
    #i.e. ignore the completed nodes and pending nodes(if the task was hung)
    ets = DBSession.query(EntityTasks.entity_id).\
        filter(EntityTasks.worker_id==to_unicode(self.task_id)).\
        filter(EntityTasks.entity_id.in_(node_ids)).all()
    # Rows come back as 1-tuples; unwrap to bare ids.
    node_ids=[et[0] for et in ets]
    WRK_LOGGER.debug("RESUMING CHILD WORKER . NodeIDS : "+str(node_ids))
    return node_ids
def do_cleanup(self):
    """Release any EntityTasks rows still owned by this task so the next
    iteration can pick them up (worker_id=NULL, finished, end_time=now)."""
    #cleanup entity_tasks if any of the entries are still owned by me
    #so that next iteration will pick those up
    #make worker_id = null, finished = 1, endtime = utcnow()
    WRK_LOGGER.debug("Cleaning Up entity_tasks . task_id: "+str(self.task_id))
    r = DBSession.query(EntityTasks.entity_id).\
        filter(EntityTasks.worker_id==to_unicode(self.task_id)).\
        update(values=dict(worker_id=None,finished=True,end_time=datetime.utcnow()))
    WRK_LOGGER.debug("Cleaned Up entity_tasks . task_id:rows : "+str(self.task_id)+":"+str(r))
def wait_for_workers_to_finish(self, task_ids):
    """Block until at least one child worker task completes (or is deemed
    hung), then release its EntityTasks rows.

    Polls every 5 seconds.  After `max_worker_wait_time` seconds with no
    completion, each running child is checked for a hung status; hung
    tasks have their completed entities marked finished and their pending
    entities reset (start_time pushed into the past) so another worker
    can claim them.
    """
    WRK_LOGGER.debug("wait_for_workers_to_finish for "+self.worker+" max_worker_wait_time: "+str(self.max_worker_wait_time))
    task_completed = False
    self.wait_start_time=datetime.utcnow()
    ###this is an infinite loop until we find a completed task
    ###we need to add some wait time to check on the status of child tasks
    while task_completed == False:
        time.sleep(5)
        completed_tasks = self.check_tasks_completed(task_ids)
        WRK_LOGGER.debug("wait_for_workers_to_finish for "+self.worker+" completed_tasks :"+str(completed_tasks))
        if len(completed_tasks) > 0:
            task_completed = True
            for task in completed_tasks:
                self.worker_ids.remove(task['task_id'])
                WRK_LOGGER.debug("child task completed, update EntityTasks "+self.worker+" completed_tasks :"+str(task['task_id']))
                # Release ownership of every entity row held by the
                # finished child task.
                ets = DBSession.query(EntityTasks).\
                    filter(EntityTasks.worker_id==to_unicode(task['task_id'])).all()
                for et in ets:
                    et.worker_id=None
                    et.finished=True
                    et.end_time=datetime.utcnow()
                    DBSession.merge(et)
                transaction.commit()
                WRK_LOGGER.debug("child tasks completed, updated EntityTasks "+self.worker)
        else :
            # if True:
            # continue
            wait_time_sec=(datetime.utcnow()-self.wait_start_time).seconds
            WRK_LOGGER.debug("No completed child tasks for "+self.worker+". waiting for "+str(wait_time_sec))
            if wait_time_sec > self.max_worker_wait_time:
                task_service = self.svc_central.get_service(self.task_service_id)
                past_time = self.start_time-timedelta(minutes=1)
                for task_id in task_ids:
                    task_obj = task_service.get_running_task_obj(task_id)
                    if task_obj:
                        (hung, completed, pending) = task_obj.get_running_status()
                        WRK_LOGGER.debug("HUNG STATUS for "+self.worker+":"+str(hung)+":"+str(task_id)+\
                            ":"+str(completed)+":"+str(pending))
                        if hung:
                            task_completed = True
                            self.worker_ids.remove(task_id)
                            # NOTE(review): this debug string was split
                            # across lines by extraction; reconstructed
                            # as a single literal -- confirm against VCS.
                            WRK_LOGGER.debug("Hung task. Cleanup EntityTask for "+self.worker+". task id : "+str(task_id))
                            DBSession.query(EntityTasks).filter(EntityTasks.worker==self.worker).\
                                filter(EntityTasks.entity_id.in_(completed)).\
                                update(dict(worker_id=None,finished=True, end_time=datetime.utcnow()))
                            DBSession.query(EntityTasks).filter(EntityTasks.worker==self.worker).\
                                filter(EntityTasks.entity_id.in_(pending)).\
                                update(dict(worker_id=None,finished=True, start_time=past_time))
                            transaction.commit()
                            WRK_LOGGER.debug("Hung task. Cleaned up EntityTask for "+self.worker+". task id : "+str(task_id))
def associate_nw_defns(self, site_id, group_id, node_id, def_type,
                       def_ids, auth, op_level=None):
    """Associate each network definition in the comma-separated `def_ids`
    with the site/group/node.

    NOTE(review): on the first failure the error dict is returned from
    inside the loop, so remaining def_ids are not processed -- confirm
    intent.
    """
    site = self.manager.getSite(site_id)
    group = self.manager.getGroup(auth,group_id)
    def_id_list = def_ids.split(",")
    for def_id in def_id_list:
        defn = self.nw_manager.get_defn(def_id)
        node = DBSession.query(ManagedNode).filter_by(id=node_id).first()
        try:
            #associate=True
            self.sync_manager.add_defn(defn, site, group, node, auth,
                                       to_unicode(constants.NETWORK),
                                       constants.ATTACH, "ADD_NETWORK_DEF",
                                       self.nw_manager, self.manager,
                                       op_level, None)
        except Exception, ex:
            print_traceback()
            LOGGER.error(to_str(ex).replace("'",""))
            #if we get any exception while adding/ sync definition then are removing the definition.
            add_mode=True
            group_list = self.manager.getGroupList(auth, site_id)
            self.sync_manager.remove_defn(defn, site, group, node, auth,
                                          to_unicode(constants.NETWORK),
                                          constants.DETACH,
                                          "REMOVE_NETWORK_DEF",
                                          self.nw_manager, self.manager,
                                          add_mode, group_list, op_level)
            return {'success':False,'msg':to_str(ex).replace("'","")}
def getNodeList(self,auth):
    """Return {node_id: ManagedNode} for every managed-node entity under
    this object's auth entity; empty dict when the entity is missing."""
    entity = auth.get_entity(self.id)
    if entity is None:
        return {}
    children = auth.get_entities(to_unicode(constants.MANAGED_NODE),
                                 parent=entity)
    ids = [child.entity_id for child in children]
    nodes = DBHelper().filterby(ManagedNode, [], [ManagedNode.id.in_(ids)])
    return dict((node.id, node) for node in nodes)
def get_available_nws(self, auth,mode,node_id, op_level=None):
    """Build the list of network choices (value/name dicts) offered in
    the UI for the given mode and node.

    For image-settings edit mode returns the module's `available_nws`
    as-is; otherwise combines the node's defined networks with any
    bridges present on the node but not covered by a definition.
    """
    result=[]
    if mode in ["edit_image_settings"]:
        result = available_nws
    else:
        # "PROVISION_VM", "EDIT_VM_CONFIG"
        nw_map = {}
        managed_node=NodeService().get_managed_node(auth,node_id)
        if mode in ["provision_vm" ,"provision_image"]:
            # Provisioning offers a "Default" pseudo-network.
            nw_map["Default"] = "$DEFAULT_BRIDGE"
            result.append(dict(value="$DEFAULT_BRIDGE",name="Default"))
        bridges = managed_node.get_bridge_info()
        site_id=None
        group_id = None
        #set the op_level none so that we can get all the networks created on the server (networks present in serverdeflinks table for that server)
        op_level=None
        for nw in self.nw_manager.get_defns(to_unicode(constants.NETWORK),
                                            site_id, group_id, node_id,
                                            op_level):
            bridge=None
            network = None
            if nw.ipv4_info and nw.ipv4_info.get("ip_network"):
                network = nw.ipv4_info.get("ip_network")
            if nw.bridge_info and nw.bridge_info.get("name"):
                bridge = nw.bridge_info.get("name")
            # Label combines name with whichever of bridge/network exist.
            if bridge and network:
                desc = "%s (%s, %s)" % (nw.name, bridge, network)
            elif bridge:
                desc = "%s (%s)" % (nw.name, bridge,)
            elif network:
                desc = "%s (%s)" % (nw.name, network,)
            if nw.bridge_info and nw.bridge_info.get("name"):
                nw_map[desc] = nw.bridge_info.get("name")
                result.append(dict(value=nw.bridge_info.get("name"),name=desc))
        if bridges is not None:
            # Add bridges on the node that no definition already covers.
            for n in bridges.itervalues():
                name = n["name"]
                if name not in nw_map.itervalues():
                    desc = name + " network"
                    if n.get("network"):
                        desc = "%s (%s,%s)" % (desc, name, n["network"])
                    nw_map[desc] = name
                    result.append(dict(value=name,name=desc))
        #init_combo(self.widgets.available_nw_combo,nw_map)
    return result
def test_storage_def(self, auth, managed_node, group, site, sd):
    """Probe storage definition `sd` against `managed_node` via the
    storage manager.

    NOTE(review): both `details` and `testmsg` are computed but neither
    is returned or stored here -- confirm whether a result was meant to
    be propagated.
    """
    details = None
    testmsg = None
    try:
        details = self.storage_manager.get_sd_details(
            auth, sd, managed_node, group, site,
            to_unicode(constants.STORAGE), self.storage_manager)
        details = self.edit_test_output(details)
    except Exception, ex:
        # Failure is captured rather than propagated.
        testmsg = ex
def getNodeList(self, auth):
    """Return {node_id: ManagedNode} for every managed-node entity under
    this object's auth entity; empty dict when the entity is missing."""
    ent = auth.get_entity(self.id)
    nodelist = {}
    if ent is not None:
        child_ents = auth.get_entities(to_unicode(constants.MANAGED_NODE),
                                       parent=ent)
        ids = [child_ent.entity_id for child_ent in child_ents]
        nodes = DBHelper().filterby(ManagedNode, [],
                                    [ManagedNode.id.in_(ids)])
        for node in nodes:
            nodelist[node.id] = node
    return nodelist
def save_email_setup_details(self, desc, servername, port, useremail,
                             password, secure):
    """Persist SMTP setup details for the 'Data Center' site.

    Skips insertion when a record for the same mail server already
    exists for the site. Returns a dict with `success` and a
    human-readable `msg`.
    """
    SiteRecord = DBSession.query(Site).filter(Site.name == "Data Center").first()
    if SiteRecord:
        site_id = SiteRecord.id
        # check if record for same server name avoid duplicate records of same name
        EmailRecord = (
            DBSession.query(EmailSetup)
            .filter(EmailSetup.site_id == site_id)
            .filter(EmailSetup.mail_server == servername)
            .first()
        )
        if EmailRecord:
            # BUG FIX: corrected message typo ("Duplicaate" -> "Duplicate").
            return dict(success=True, msg="Duplicate Record found in list")
        else:
            # Add record in EmailSetup table for site id queried
            email_setup_obj = EmailSetup(servername, desc, port, secure,
                                         site_id, useremail, password)
            DBSession.add(email_setup_obj)
            emailsetupid = email_setup_obj.getEmailSetupId()
            EmailManager().add_entity(to_unicode(servername), emailsetupid,
                                      to_unicode(constants.EMAIL), None)
            # BUG FIX: corrected message typo ("Sucessfully").
            return dict(success=True, msg="New Record Added Successfully")
def __init__(self, mail_server, desc, port, use_secure, site_id,
             useremail, password):
    """Build an e-mail setup record with a fresh hex id and an embedded
    Credential holding the SMTP account details."""
    self.id = getHexID()
    self.mail_server = to_unicode(mail_server)
    self.description = desc
    self.port = port
    self.use_secure = use_secure
    self.site_id = site_id
    # Credential is keyed by this record's id; the second argument is an
    # empty unicode name.
    self.credential = Credential(self.id, u"", user_email=useremail,
                                 password=password)
def add_site_defn(self, site_id, def_id, def_type, status, oos_count):
    """Create a DCDefLink row joining `site_id` to definition `def_id`,
    unless one already exists for the pair."""
    existing = DBSession.query(DCDefLink).filter_by(
        site_id=site_id, def_id=def_id).first()
    if existing:
        return
    link = DCDefLink()
    link.site_id = site_id
    link.def_type = def_type
    link.def_id = def_id
    link.status = to_unicode(status)
    link.oos_count = oos_count
    link.dt_time = datetime.utcnow()
    DBSession.add(link)
def isVMRunningInPool(self, auth, group_id):
    """Return the first truthy result of isVMRunningOnServer over the
    pool's nodes (False when the pool is empty or no VM is running)."""
    result = False
    pool_entity = auth.get_entity(group_id)
    node_entities = auth.get_entities(to_unicode(constants.MANAGED_NODE),
                                      parent=pool_entity)
    for node_entity in node_entities:
        # Check each server in turn; stop at the first running VM.
        result = self.isVMRunningOnServer(auth, node_entity.entity_id)
        if result == True:
            return result
    return result
def add_site_defn(self, site_id, def_id, def_type, status, oos_count):
    """Create a DCDefLink row joining `site_id` to definition `def_id`,
    unless one already exists for the pair."""
    #Check whether the record is already present...
    row = DBSession.query(DCDefLink).filter_by(site_id=site_id,
                                               def_id=def_id).first()
    if not row:
        DCDL = DCDefLink()
        DCDL.site_id = site_id
        DCDL.def_type = def_type
        DCDL.def_id = def_id
        DCDL.status = to_unicode(status)
        DCDL.oos_count = oos_count
        DCDL.dt_time = datetime.utcnow()
        DBSession.add(DCDL)
def get_valid_sd(self, type, options, scope):
    """Build a StorageDef from the UI `options` dict for the given
    storage `type` (iSCSI / NFS / AoE) and `scope`.

    Credentials are attached only for iSCSI.  `options["total_cap"]` is
    stripped in place when present and not the literal string 'null'.
    Removed a leftover debug `print` and dead locals that computed (and
    then discarded) a `total_cap` value.
    """
    creds_req = False
    creds = {}
    conn_options = {}
    if type == constants.iSCSI:
        # conn_props = target, options
        creds_req = True
        creds["username"] = options.get("username")
        creds["password"] = options.get("password")
        conn_options["server"] = options.get("portal")
        conn_options["target"] = options.get("target")
        conn_options["options"] = options.get("options")
        conn_options["username"] = options.get("username")
        conn_options["password"] = options.get("password")
    if type == constants.NFS:
        # conn_props = share, mount_point, mount_options
        conn_options["server"] = options.get("server")
        conn_options["share"] = options.get("share")
        conn_options["mount_point"] = options.get("mount_point")
        conn_options["mount_options"] = options.get("mount_options")
    if type == constants.AOE:
        # conn_props = interfaces
        conn_options["interface"] = options.get("interface")
    new_sd = StorageDef(None, to_unicode(options.get("name")), type,
                        to_unicode(options.get("description")),
                        conn_options, scope, creds_req)
    if creds_req == True:
        new_sd.set_creds(creds)
    # Normalise total_cap in place; callers see the stripped value.
    if options["total_cap"] != 'null':
        options["total_cap"] = str(options.get("total_cap")).strip()
    return new_sd
def getNext(self, auth,ctx): sp=auth.get_entity(self._group.id) #nodenames=auth.get_entity_names(constants.MANAGED_NODE,parent=sp) child_ents=auth.get_entities(to_unicode(constants.MANAGED_NODE),parent=sp) ids = [child_ent.entity_id for child_ent in child_ents] nodelist= DBHelper().filterby(ManagedNode,[],[ManagedNode.id.in_(ids)]) load_time=self._group.getGroupVarValue("SERVER_LOAD_TIME") try: load_time=int(load_time) except Exception, e: load_time=0
def check_if_hung(self):
    """If this task was marked hung, release its EntityTasks rows.

    Reads self.mark_hung; when set, the matching entity_tasks rows
    (keyed by this task's id as worker_id) are cleared and marked
    finished with the current UTC time. Tasks without a mark_hung
    attribute raise AttributeError, which is deliberately swallowed:
    such tasks are simply not hung.
    """
    WRK_LOGGER.debug("Check if Task, "+self.name+" is hung? ")
    marked_hung = False
    try:
        marked_hung = self.mark_hung
        if marked_hung :
            WRK_LOGGER.debug("Task, "+self.name+"("+str(self.task_id)+") was marked hung. updating entity_tasks")
            # bulk UPDATE: detach the worker and close out the rows
            DBSession.query(EntityTasks).\
                filter(EntityTasks.worker_id==to_unicode(self.task_id)).\
                update(dict(worker_id=None,finished=True, end_time=datetime.utcnow()))
            # transaction.commit()
    except AttributeError, e:
        # no mark_hung attribute on this task object -- nothing to do
        pass
def isVMRunningInPool(self, auth, group_id):
    """Check the servers of a pool and report if any of them runs a VM.

    Short-circuits as soon as one server reports a running VM.
    """
    ent = auth.get_entity(group_id)
    nodes = auth.get_entities(to_unicode(constants.MANAGED_NODE), parent=ent)
    status = False
    for each in nodes:
        status = self.isVMRunningOnServer(auth, each.entity_id)
        if status == True:
            # a VM is running somewhere in the pool -- stop scanning
            return status
    # False, or whatever the last server reported (mirrors original loop)
    return status
def get_valid_sd(self, type, options, scope):
    """Construct a StorageDef from request *options* for the given type.

    type    -- storage type constant (iSCSI, NFS or AOE); chooses the
               connection properties extracted from *options*.
    options -- request parameter dict; options["total_cap"] is stripped
               in place (side effect preserved for callers).
    scope   -- scope stored on the returned StorageDef.
    Returns the new StorageDef instance.
    """
    creds_req = False
    creds = {}
    conn_options = {}
    if type == constants.iSCSI:
        # conn_props = target, options; only iSCSI carries credentials
        creds_req = True
        creds["username"] = options.get("username")
        creds["password"] = options.get("password")
        conn_options["server"] = options.get("portal")
        conn_options["target"] = options.get("target")
        conn_options["options"] = options.get("options")
        conn_options["username"] = options.get("username")
        conn_options["password"] = options.get("password")
    if type == constants.NFS:
        # conn_props = share, mount_point, mount_options
        conn_options["server"] = options.get("server")
        conn_options["share"] = options.get("share")
        conn_options["mount_point"] = options.get("mount_point")
        conn_options["mount_options"] = options.get("mount_options")
    if type == constants.AOE:
        # conn_props = interfaces
        conn_options["interface"] = options.get("interface")
    new_sd = StorageDef(None, to_unicode(options.get("name")), type,
                        to_unicode(options.get("description")),
                        conn_options, scope, creds_req)
    if creds_req:
        new_sd.set_creds(creds)
    # Normalize the submitted capacity in place.
    # BUGFIX/cleanup: dropped the dead local total_cap (computed, never
    # used, never applied to new_sd) and the leftover debug print.
    # NOTE(review): confirm the capacity was not meant to be stored on
    # the definition.
    if options["total_cap"] != 'null':
        options["total_cap"] = str(options.get("total_cap")).strip()
    return new_sd
def get_appliances_list(self, feed_name):
    """Return the list of appliance-info dicts for *feed_name*.

    Resolution order: in-memory cache (self.appliance_list), then the
    Appliance DB rows for the feed's provider, then the remote feed via
    populate_appliances. The result is cached per feed name.
    Returns [] when feed_name is None.
    """
    if feed_name is None:
        return []
    # serve from the in-memory cache when already populated
    if self.appliance_list.get(feed_name) is not None:
        return self.appliance_list[feed_name]
    a_list = []
    provider_id = self.get_provider_id(feed_name)
    # IDIOM FIX: local was named `list`, shadowing the builtin
    appliances = DBHelper().filterby(
        Appliance, [],
        [Appliance.provider_id == to_unicode(provider_id)])
    if len(appliances) == 0:
        # nothing in the DB yet: fetch the appliance feed
        a_list = self.populate_appliances(feed_name)
        if a_list:
            self.appliance_list[feed_name] = a_list
    else:
        for appliance in appliances:
            appliance_info = {}
            appliance_info['title'] = appliance.title
            appliance_info['id'] = appliance.catalog_id
            appliance_info['provider_id'] = appliance.provider_id
            appliance_info["provider"] = self.get_provider(feed_name)
            appliance_info["provider_url"] = self.get_provider_url(
                feed_name)
            appliance_info["provider_logo_url"] = self.get_logo_url(
                feed_name)
            appliance_info['link'] = appliance.link_href
            appliance_info['description'] = appliance.description
            appliance_info['popularity_score'] = appliance.popularity_score
            appliance_info[
                'short_description'] = appliance.short_description
            appliance_info['PAE'] = appliance.PAE
            appliance_info['arch'] = appliance.arch
            appliance_info['archive'] = appliance.archive
            appliance_info['compressed'] = appliance.compression_type
            appliance_info['href'] = appliance.download_href
            appliance_info['filename'] = appliance.filename
            appliance_info['installed_size'] = appliance.installed_size
            appliance_info['is_hvm'] = appliance.is_hvm
            appliance_info['platform'] = appliance.platform
            appliance_info['size'] = appliance.size
            appliance_info['type'] = appliance.type
            appliance_info['updated'] = appliance.updated_date
            appliance_info['version'] = appliance.version
            a_list.append(appliance_info)
        self.appliance_list[feed_name] = a_list
    return a_list
def update_node_defn(self, node_id, group_id, site_id, def_id, def_type, status, dt_time, details, scope, defType):
    """Update one node's definition link and roll the aggregate sync
    status up to the enclosing group (SP) or site (DC) link row.

    Writes the new status/details to the node's ServerDefLink, counts
    how many servers are OUT_OF_SYNC with this definition, then stores
    that count and the derived group status on the SPDefLink or
    DCDefLink row selected by *scope*. Flushes and commits at the end.
    """
    #update definition status in ServerDefLink table
    node_defn = DBSession.query(ServerDefLink).filter_by(server_id = node_id, def_id = def_id).first()
    if node_defn:
        node_defn.status = status
        node_defn.dt_time = datetime.utcnow()
        node_defn.details = details
    oos_count = 0 # out of sync count
    g_status = to_unicode(constants.IN_SYNC)
    #Here we are finding that how many servers are OUT_OF_SYNC with this definition. So getting out of sync count and decide group level sync status to update SPDefLink table with these values.
    rowNodeDefn = DBSession.query(ServerDefLink).filter_by(def_id = def_id, def_type = to_unicode(defType), status = to_unicode(constants.OUT_OF_SYNC))
    if rowNodeDefn:
        oos_count = rowNodeDefn.count()
    #Get the status for updating SPDefLink table
    if oos_count > 0:
        g_status = to_unicode(constants.OUT_OF_SYNC)
    else:
        g_status = to_unicode(constants.IN_SYNC)
    #update definition status and oos_count in SPDefLink table
    group_sd=None
    if scope == constants.SCOPE_SP:
        # group-level link row
        group_sd = DBSession.query(SPDefLink).filter_by(group_id = group_id, def_id = def_id, def_type = to_unicode(defType)).first()
    elif scope == constants.SCOPE_DC:
        # site-level link row
        group_sd = DBSession.query(DCDefLink).filter_by(site_id = site_id, def_id = def_id, def_type = to_unicode(defType)).first()
    if group_sd:
        group_sd.status = g_status
        group_sd.dt_time = datetime.utcnow()
        group_sd.oos_count = oos_count
    #Keep a note here saying that commit would have to be called here.
    DBSession.flush()
    transaction.commit()
def on_add_node(self, nodeId, groupId, site_id, auth, def_manager):
    """Attach the server pool's existing definitions to a new node.

    For every SPDefLink of *groupId*, fetch the definition through
    *def_manager* and create a node-level link for *nodeId* with an
    initial OUT_OF_SYNC status (updated later by the sync operation).
    """
    op = constants.ATTACH
    # If one of them is not present then return from here.
    # BUGFIX: the original tested `not (nodeId or groupId)`, which only
    # returned when BOTH were missing, contradicting the stated intent
    # (the body needs both ids to create links).
    if not (nodeId and groupId):
        return
    defn_list = []
    errs = []
    sync_manager = SyncDef()
    defType = def_manager.getType()
    #Link all the definitions in the server pool to this new server node.
    sp_defns = DBSession.query(SPDefLink).filter_by(group_id=to_unicode(groupId))
    if sp_defns:
        for eachdefn in sp_defns:
            defn = def_manager.get_defn(eachdefn.def_id)
            if defn:
                defn_list.append(defn)
                # Default values for the new link; these get updated
                # after the sync operation completes.
                status = to_unicode(constants.OUT_OF_SYNC)
                details = None
                sync_manager.add_node_defn(nodeId, defn.id, defType, status, details)
def getNext(self, auth, ctx): sp = auth.get_entity(self._group.id) #nodenames=auth.get_entity_names(constants.MANAGED_NODE,parent=sp) child_ents = auth.get_entities(to_unicode(constants.MANAGED_NODE), parent=sp) ids = [child_ent.entity_id for child_ent in child_ents] nodelist = DBHelper().filterby(ManagedNode, [], [ManagedNode.id.in_(ids)]) load_time = self._group.getGroupVarValue("SERVER_LOAD_TIME") try: load_time = int(load_time) except Exception, e: load_time = 0
def update_disks_size(self, auth):
    """Re-test every storage definition attached to every node.

    Walks site -> server-pool group -> managed node, and for each
    ServerDefLink of the node looks up the StorageDef and runs
    self.test_storage_def on it.
    """
    sites = DBSession.query(Site)
    if sites:
        for eachsite in sites:
            site_entity = auth.get_entity(eachsite.id)
            #get all groups in the site.
            group_entities = auth.get_entities(to_unicode(constants.SERVER_POOL), site_entity)
            #loop through each group in the site
            for eachgroup in group_entities:
                group = DBSession.query(ServerGroup).filter_by(id=eachgroup.entity_id).first()
                if group:
                    group_entity = auth.get_entity(group.id)
                    #get all nodes in the group
                    node_entities = auth.get_entities(to_unicode(constants.MANAGED_NODE), group_entity)
                    #loop through each node in the group
                    for eachnode in node_entities:
                        node = DBSession.query(ManagedNode).filter_by(id=eachnode.entity_id).first()
                        # BUGFIX: guard against a missing node row; the
                        # original dereferenced node.id unconditionally
                        # (inconsistent with the `if group:` check above).
                        if not node:
                            continue
                        server_def_link = DBSession.query(ServerDefLink).filter_by(server_id=node.id)
                        if server_def_link:
                            for each_link in server_def_link:
                                defn = DBSession.query(StorageDef).filter_by(id=each_link.def_id).first()
                                if defn:
                                    self.test_storage_def(auth, node, group, eachsite, defn)
def make_entity_task_entries(self, task_id, entity_ids): """ making entry to entity_tasks table """ # estimated_time=datetime.utcnow()+timedelta(minutes=int(tg.config.get("completion_time"))) ent_tasks = [] WRK_LOGGER.debug("in make_entity_task_entries task_id : "+str(task_id)+ " :entity_ids :"+str(entity_ids)) for ent_id in entity_ids: try: ###update the entity_tasks table ent_task=EntityTasks(self.worker,to_unicode(task_id),ent_id,False,datetime.utcnow()) ent_tasks.append(ent_task) except Exception, e: traceback.print_exc()
def get_appliances_list(self, feed_name):
    """Return the appliance-info dicts for *feed_name*.

    Checks the in-memory cache first, then the Appliance DB rows for
    the feed's provider, and finally falls back to fetching the remote
    feed via populate_appliances. Caches the result per feed name.
    Returns [] when feed_name is None.
    """
    if feed_name is None:
        return []
    # cache hit: return the previously built list
    if self.appliance_list.get(feed_name) is not None:
        return self.appliance_list[feed_name]
    a_list = []
    provider_id = self.get_provider_id(feed_name)
    # IDIOM FIX: local was named `list`, shadowing the builtin
    appliances = DBHelper().filterby(Appliance, [], [Appliance.provider_id == to_unicode(provider_id)])
    if len(appliances) == 0:
        # get the appliance feed
        a_list = self.populate_appliances(feed_name)
        if a_list:
            self.appliance_list[feed_name] = a_list
    else:
        for appliance in appliances:
            appliance_info = {}
            appliance_info["title"] = appliance.title
            appliance_info["id"] = appliance.catalog_id
            appliance_info["provider_id"] = appliance.provider_id
            appliance_info["provider"] = self.get_provider(feed_name)
            appliance_info["provider_url"] = self.get_provider_url(feed_name)
            appliance_info["provider_logo_url"] = self.get_logo_url(feed_name)
            appliance_info["link"] = appliance.link_href
            appliance_info["description"] = appliance.description
            appliance_info["popularity_score"] = appliance.popularity_score
            appliance_info["short_description"] = appliance.short_description
            appliance_info["PAE"] = appliance.PAE
            appliance_info["arch"] = appliance.arch
            appliance_info["archive"] = appliance.archive
            appliance_info["compressed"] = appliance.compression_type
            appliance_info["href"] = appliance.download_href
            appliance_info["filename"] = appliance.filename
            appliance_info["installed_size"] = appliance.installed_size
            appliance_info["is_hvm"] = appliance.is_hvm
            appliance_info["platform"] = appliance.platform
            appliance_info["size"] = appliance.size
            appliance_info["type"] = appliance.type
            appliance_info["updated"] = appliance.updated_date
            appliance_info["version"] = appliance.version
            a_list.append(appliance_info)
        self.appliance_list[feed_name] = a_list
    return a_list
def remove_storage_def(self, auth,storage_id,site_id,groupId, op_level=None):
    """Detach and remove a storage definition from a group/site.

    Resolves the site, group and storage definition, then delegates to
    sync_manager.remove_defn with the DETACH operation. Returns a
    JSON-like status string: success with an optional warning message,
    or failure with the (quote-stripped) exception text.
    """
    try:
        site = self.manager.getSite(site_id)
        group=self.manager.getGroup(auth,groupId)
        group_list = self.manager.getGroupList(auth, site_id)
        sd_to_remove = self.storage_manager.get_sd(storage_id, site_id, groupId, to_unicode(constants.STORAGE))
        node = None
        add_mode=False
        # detach the definition everywhere it is linked; may yield a
        # warning message to surface to the UI
        warning_msg = self.sync_manager.remove_defn(sd_to_remove, site, group, node, auth, to_unicode(constants.STORAGE), constants.DETACH, "REMOVE_STORAGE_DEF", self.storage_manager, self.manager, add_mode, group_list, op_level)
        if warning_msg:
            return "{success: true,msg: '" + warning_msg + "'}"
        return "{success: true,msg: 'Storage Removed'}"
    except Exception, ex:
        print_traceback()
        # strip single quotes so the text can be embedded inside the
        # single-quoted msg field of the response string
        err_desc = to_str(ex).replace("'","")
        err_desc = err_desc.strip()
        LOGGER.error(to_str(err_desc))
        return "{success: false,msg: '" + err_desc + "'}"