def delete(caller_id, node_id):
    """
    Deletes the specified Node (provided it does not host any VMs).
    To bring a deleted Node back available for the CC1 system, one has
    to add it once again.

    @decoratedby{src.cm.utils.decorators.admin_cm_log}
    @parameter{caller_id,int}
    @parameter{node_id,int} id of the node to delete

    @response{None}

    @raises{node_has_vms,CMException} the Node still hosts VMs
    @raises{node_delete,CMException} database delete failed
    """
    node = Node.get(caller_id, node_id)

    # Refuse to delete a node that still hosts VMs.
    if node.vm_set.exists():
        raise CMException('node_has_vms')

    try:
        node.delete()
    # Was a bare `except:`, which also swallows SystemExit/KeyboardInterrupt;
    # narrowed to Exception while keeping the same error reported to callers.
    except Exception:
        raise CMException('node_delete')
def edit(caller_id, node_id, **node_info):
    """
    Edits a Node, according to data provided in the request data.

    All Node attributes present in @prm{node_info} are overwritten on the
    Node (e.g. only driver may be passed to change just the driver).
    Former Node attributes may be accessed via
    clm.views.admin_cm.node.get_by_id() method.

    @decoratedby{src.cm.utils.decorators.admin_cm_log}
    @parameter{caller_id,int}
    @parameter{node_id,int} id of the Node to edit
    @parameter{node_info,dict} keyword arguments mapping Node field names
    to the new values (driver, transport, address, username, suffix,
    cpu_total, memory_total, hdd_total, ...)

    @response{None}

    @raises{node_edit,CMException} database save failed
    """
    node = Node.get(caller_id, node_id)

    # Overwrite each provided attribute on the model instance.
    for k, v in node_info.iteritems():
        setattr(node, k, v)

    try:
        node.save()
    # Was a bare `except:`; narrowed to Exception so interpreter-level
    # exits are no longer masked, same error reported to callers.
    except Exception:
        raise CMException('node_edit')
def get_by_id_details(caller_id, node_id):
    """
    Returns more details of the requested Node.

    @cmview_admin_cm
    @param_post{node_id,int} id of the requested Node

    @response{dict} Node.long_long_dict property of the requested Node
    """
    return Node.get(caller_id, node_id).long_long_dict
def get_by_id_details(caller_id, node_id):
    """
    Fetches the requested Node and returns its extended description.

    @decoratedby{src.cm.utils.decorators.admin_cm_log}
    @parameter{node_id,int} id of the requested Node

    @response{dict} further extended information about the Node
    """
    requested_node = Node.get(caller_id, node_id)
    return requested_node.long_long_dict
def lock(caller_id, node_id_list):
    """
    Sets each specified Node's state to @val{locked}. No VMs can be run
    on a locked Node.

    @cmview_admin_cm
    @param_post{node_id_list,int} list of the specified Nodes ids

    @response{None}

    @raises{node_lock,CMException} database save failed for a Node
    """
    for node_id in node_id_list:
        node = Node.get(caller_id, node_id)
        node.state = node_states['locked']
        try:
            node.save()
        # Was a bare `except:`; narrowed to Exception — interpreter-level
        # exits are no longer masked, error code stays the same.
        except Exception:
            raise CMException('node_lock')
def edit(caller_id, node_id, **node_info):
    """
    Updates Node attributes according to data provided in node_info.

    @cmview_admin_cm
    @param_post{node_id,int} id of the Node to edit
    @param_post{node_info,string} dictionary where cm.models.Node model's
    fields are the keys and values are values to set

    @raises{node_edit,CMException} database save failed
    """
    node = Node.get(caller_id, node_id)

    # Overwrite each provided field on the model instance.
    for k, v in node_info.iteritems():
        setattr(node, k, v)

    try:
        node.save()
    # Was a bare `except:`; narrowed to Exception while preserving the
    # error code callers catch.
    except Exception:
        raise CMException('node_edit')
def lock(caller_id, node_id_list):
    """
    Locks every specified Node. No VMs can be run on a locked node.

    @decoratedby{src.cm.utils.decorators.admin_cm_log}
    @parameter{caller_id,int}
    @parameter{node_id_list,list} ids of the Nodes to lock

    @response{None}

    @raises{node_lock,CMException} database save failed for a Node
    """
    for node_id in node_id_list:
        node = Node.get(caller_id, node_id)
        node.state = node_states['locked']
        try:
            node.save()
        # Was a bare `except:`; narrowed to Exception so SystemExit and
        # KeyboardInterrupt are no longer swallowed.
        except Exception:
            raise CMException('node_lock')
def unlock(caller_id, node_id_list):
    """
    Unlocks each specified Node. After unlock the Node's state is
    @val{ok} and Users are able to run VMs on that Node.

    @cmview_admin_cm
    @param_post{node_id_list,int} list of the specified Nodes ids

    @response{None}

    @raises{node_unlock,CMException} database save failed for a Node
    """
    for node_id in node_id_list:
        node = Node.get(caller_id, node_id)
        node.state = node_states['ok']
        try:
            node.save()
        # Was a bare `except:`; narrowed to Exception — same error code,
        # interpreter-level exits no longer masked.
        except Exception:
            raise CMException('node_unlock')
def unlock(caller_id, node_id_list):
    """
    Unlocks every specified Node. After unlock the Node's state is
    @val{ok} and one is able to run VMs on that Node.

    @decoratedby{src.cm.utils.decorators.admin_cm_log}
    @parameter{caller_id,int}
    @parameter{node_id_list,list} ids of the Nodes to unlock

    @response{None}

    @raises{node_unlock,CMException} database save failed for a Node
    """
    for node_id in node_id_list:
        node = Node.get(caller_id, node_id)
        node.state = node_states['ok']
        try:
            node.save()
        # Was a bare `except:`; narrowed to Exception while preserving
        # the error code callers catch.
        except Exception:
            raise CMException('node_unlock')
def delete(caller_id, node_id):
    """
    Deletes the specified Node from database provided the Node does not
    host any VMs. The Node's operating system setup isn't affected. To
    bring a deleted Node back available for the CC1 Cluster, one has to
    add it once again via Web Interface.

    @cmview_admin_cm
    @param_post{node_id,int} id of the Node to delete

    @raises{node_has_vms,CMException} the Node still hosts VMs
    @raises{node_delete,CMException} database delete failed
    """
    node = Node.get(caller_id, node_id)

    # Refuse to delete a node that still hosts VMs.
    if node.vm_set.exists():
        raise CMException('node_has_vms')

    try:
        node.delete()
    # Was a bare `except:`; narrowed to Exception so interpreter-level
    # exits are not masked, same error reported to callers.
    except Exception:
        raise CMException('node_delete')
def add(address, username, transport, driver, suffix, cpu, memory, disk):
    """
    Adds a new Node with the given connection parameters and resources.
    The Node is created in the @val{offline} state.

    @parameter{address,string} node ip address or domain name (must be unique)
    @parameter{username,string} username for transport
    @parameter{transport,string} unix, ssh, tls or other available transport
    @parameter{driver,string} xen, kvm or other hypervisor name
    @parameter{suffix,string} transport suffix (e.g. /system for KVM)
    @parameter{cpu,int} total CPU count
    @parameter{memory,int} total memory
    @parameter{disk,int} total disk space

    @response{int} 0 on success

    @raises{node_exists,CMException} a Node with this address already exists
    """
    # BUG FIX: the original wrapped `Node.objects.get(address=...)` and the
    # `raise CMException('node_exists')` in one try with a bare
    # `except: pass`, so the node_exists error itself was swallowed and a
    # duplicate insert was attempted anyway. An existence query makes the
    # duplicate check explicit and lets the exception propagate.
    if Node.objects.filter(address=address).exists():
        raise CMException('node_exists')

    node = Node()
    node.address = address
    node.comment = ''
    node.driver = driver
    node.transport = transport
    node.username = username
    node.suffix = suffix
    node.cpu_total = cpu
    node.memory_total = memory
    node.hdd_total = disk
    # New nodes start offline until they are brought up by the cluster.
    node.state = node_states['offline']
    node.save()

    return 0
def create(user, name, description, image_id, template_id, public_ip_id, iso_list, disk_list, vnc, groups, ssh_key=None, ssh_username=None, count=1, farm=None, head_template_id=None, node_id=False, lease_id=None, user_data=None):
    """
    Creates and saves `count` new VM database entities for `user` (plus
    one extra head VM when `farm` is given), picks a free Node, VNC and
    noVNC port for each, attaches disks/ISOs to the first VM, and
    assigns a network lease (and optionally a PublicIP) to every VM.

    @parameter{user,User} owner of the new VMs; quota is checked against it
    @parameter{name,string} base name; suffixed per-VM when count > 1 or farm
    @parameter{description,string} description for non-farm VMs
    @parameter{image_id,int} id of the SystemImage to boot from
    @parameter{template_id,int} id of the Template for (worker) VMs
    @parameter{public_ip_id,int} id of a PublicIP to attach to the first
    VM (only when > 0)
    @parameter{iso_list,list} IsoImage ids to attach to the first VM
    @parameter{disk_list,list} StorageImage ids to attach to the first VM
    @parameter{vnc,bool} whether to enable VNC on each VM
    @parameter{groups,list} group ids used for image access check
    @parameter{ssh_key,string} optional ssh key injected into the VM
    @parameter{ssh_username,string} optional ssh username
    @parameter{count,int} number of VMs to create (workers, for a farm)
    @parameter{farm,Farm} when set, creates one head VM plus `count` workers
    @parameter{head_template_id,int} Template id for the farm head
    @parameter{node_id} optional explicit Node to place VMs on
    @parameter{lease_id,int} optional pre-existing Lease to attach
    @parameter{user_data,string} opaque user data stored on each VM

    @raises{image_unavailable,CMException} image not in @val{ok} state
    @raises{vm_vnc_not_found,CMException} no free VNC port
    @raises{vm_novnc_not_found,CMException} no free noVNC port
    @raises{image_attached,CMException} a requested disk is already attached
    @raises{lease_permission,CMException} lease belongs to another user
    @raises{lease_attached,CMException} lease already has a VM
    @raises{lease_not_found,CMException} PublicIP lookup/assignment failed

    NOTE(review): nothing is returned and VMThread is imported but unused
    here — presumably the caller starts the threads / reads the VMs from
    the DB by reservation_id; confirm against callers.
    NOTE(review): in the explicit lease_id branch `lease.save()` is never
    called, unlike the get_lease branch — verify this is intentional.
    """
    # Local imports, presumably to avoid circular imports at module load.
    from cm.models.storage_image import StorageImage
    from cm.utils.threads.vm import VMThread
    template = Template.get(template_id)
    image = SystemImage.get(user.id, image_id, groups)

    # Only images in the 'ok' state may be booted.
    if image.state != image_states['ok']:
        raise CMException('image_unavailable')

    if farm:
        # A farm is one head VM plus `count` worker VMs; quota must cover both.
        head_template = Template.get(head_template_id)
        wn_template = template
        user.check_quota([(head_template, 1), (wn_template, count)])
        count += 1
    else:
        user.check_quota([(template, count)])

    vms = []
    # All VMs of this call share the id of the first saved VM as reservation_id.
    reservation_id = None

    for i in range(count):
        # create VM instance
        log.debug(user.id, "Looking for node")
        # Farm head (i == 0) is scheduled with its own template.
        node = Node.get_free_node(head_template, image, node_id) if farm and i == 0 else Node.get_free_node(template, image, node_id)
        log.info(user.id, 'Selected node: %d' % node.id)
        vm = VM()
        # -1 marks a VM not yet defined in libvirt.
        vm.libvirt_id = -1
        if farm:
            if i == 0:
                vm.name = '%s-head' % name
                vm.description = 'Farm head'
                vm.template = head_template
            else:
                vm.name = '%s-wn%d' % (name, i)
                vm.description = 'Worker Node'
                vm.template = wn_template
        else:
            vm.template = template
            vm.description = description
            if count > 1:
                # Multi-VM request: number the names from 1.
                vm.name = '%s_%d' % (name, i + 1)
            else:
                vm.name = name
        vm.user = user
        vm.state = vm_states['init']
        vm.start_time = datetime.now()
        vm.system_image = image
        vm.node = node
        vm.save_vm = True
        if farm:
            vm.farm = farm

        # Find first free vnc port among VMs that are not closed/erased;
        # the for-else raises when the whole range is exhausted.
        used_ports = VM.objects.exclude(state__in=[vm_states['closed'], vm_states['erased']]).values_list('vnc_port', flat=True)
        for new_vnc_port in xrange(VNC_PORTS['START'], VNC_PORTS['END'] + 1):
            if new_vnc_port not in used_ports and new_vnc_port not in VNC_PORTS['EXCLUDE']:
                break
        else:
            raise CMException('vm_vnc_not_found')
        log.debug(user.id, "Found vnc port: %d" % new_vnc_port)
        vm.vnc_port = new_vnc_port

        # Find first free novnc port (same scheme as the VNC port above).
        used_ports = VM.objects.exclude(state__in=[vm_states['closed'], vm_states['erased']]).values_list('novnc_port', flat=True)
        for new_novnc_port in xrange(NOVNC_PORTS['START'], NOVNC_PORTS['END'] + 1):
            if new_novnc_port not in used_ports and new_novnc_port not in NOVNC_PORTS['EXCLUDE']:
                break
        else:
            raise CMException('vm_novnc_not_found')
        log.debug(user.id, "Found novnc port: %d" % new_novnc_port)
        vm.novnc_port = new_novnc_port

        if vnc:
            vm.attach_vnc()
        # Random 13-character VNC password.
        vm.vnc_passwd = password_gen(13, chars=['letters', 'digits'], extra_chars='!@#$%^&*()')
        vm.ssh_key = ssh_key
        vm.ssh_username = ssh_username
        vm.user_data = user_data
        # First save obtains vm.id, needed below for the reservation id.
        vm.save()

        if not reservation_id:
            reservation_id = vm.id
        vm.reservation_id = reservation_id
        vm.save()

        if farm and i == 0:
            farm.head = vm
        vms.append(vm)

        log.debug(user.id, "Attaching disks")
        disk_devs = []
        # Storage disks go to the first VM only; disk_dev is bumped until
        # unique within this VM.
        if i == 0 and disk_list:
            for disk_id in disk_list:
                log.debug(user.id, 'Attaching disks to first VM')
                disk = StorageImage.get(user.id, disk_id)
                if disk.vm != None:
                    raise CMException('image_attached')
                while disk.disk_dev in disk_devs:
                    disk.disk_dev += 1
                disk_devs.append(disk.disk_dev)
                disk.vm = vm
                disk.save()

        log.debug(user.id, "Attaching CD")
        # ISO images likewise go to the first VM only.
        if i == 0 and iso_list:
            for iso_id in iso_list:
                log.debug(user.id, 'Attaching iso to first VM')
                # cd image have not be attached to any other vm
                iso = IsoImage.get(user.id, iso_id)
                iso.check_attached()
                vm.iso_image = iso
                vm.save()

    # Second pass: attach a network lease (and optionally a PublicIP) per VM.
    for i, vm in enumerate(vms):
        if lease_id != None:
            # Caller supplied an explicit lease; it must belong to this
            # user and be free.
            lease = Lease.objects.get(id=lease_id)
            if lease.user_network.user != user:
                raise CMException('lease_permission')
            if lease.vm != None:
                raise CMException('lease_attached')
            lease.vm = vm
            log.debug(user.id, "Attached ip: %s" % lease.address)
        else:
            lease = AvailableNetwork.get_lease(user)
            lease.vm = vm
            lease.save()
            log.debug(user.id, "Attached ip: %s" % lease.address)

        # PublicIP (if requested) is bound to the first VM's lease only.
        if i == 0 and public_ip_id > 0:
            log.debug(user.id, "Attaching PublicIP")
            try:
                publicip = PublicIP.objects.filter(user=user).get(id=public_ip_id)
                publicip.assign(lease)
                publicip.save()
            except Exception, e:
                log.exception(user.id, str(e))
                raise CMException("lease_not_found")
def create(user, name, description, image_id, template_id, public_ip_id, iso_list, disk_list, vnc, groups, ssh_key=None, ssh_username=None, count=1, farm=None, head_template_id=None, node_id=False, lease_id=None, user_data=None):
    """
    Creates and saves `count` new VM database entities for `user` (plus
    one extra head VM when `farm` is given), selecting a free Node and
    free VNC/noVNC ports for each, attaching disks and ISOs to the first
    VM, and assigning a network lease (and optionally a PublicIP).

    @parameter{user,User} owner of the new VMs; quota is checked against it
    @parameter{name,string} base name; suffixed per-VM when count > 1 or farm
    @parameter{description,string} description for non-farm VMs
    @parameter{image_id,int} id of the SystemImage to boot from
    @parameter{template_id,int} id of the Template for (worker) VMs
    @parameter{public_ip_id,int} id of a PublicIP for the first VM (if > 0)
    @parameter{iso_list,list} IsoImage ids to attach to the first VM
    @parameter{disk_list,list} StorageImage ids to attach to the first VM
    @parameter{vnc,bool} whether to enable VNC on each VM
    @parameter{groups,list} group ids used for image access check
    @parameter{ssh_key,string} optional ssh key injected into the VM
    @parameter{ssh_username,string} optional ssh username
    @parameter{count,int} number of VMs to create (workers, for a farm)
    @parameter{farm,Farm} when set, creates one head VM plus `count` workers
    @parameter{head_template_id,int} Template id for the farm head
    @parameter{node_id} optional explicit Node to place VMs on
    @parameter{lease_id,int} optional pre-existing Lease to attach
    @parameter{user_data,string} opaque user data stored on each VM

    @raises{image_unavailable,CMException} image not in @val{ok} state
    @raises{vm_vnc_not_found,CMException} no free VNC port
    @raises{vm_novnc_not_found,CMException} no free noVNC port
    @raises{image_attached,CMException} a requested disk is already attached
    @raises{lease_permission,CMException} lease belongs to another user
    @raises{lease_attached,CMException} lease already has a VM
    @raises{lease_not_found,CMException} PublicIP lookup/assignment failed

    NOTE(review): no return value, and VMThread is imported but unused —
    presumably callers start threads / find the VMs via reservation_id;
    confirm against callers.
    NOTE(review): the explicit lease_id branch never calls lease.save(),
    unlike the get_lease branch — verify this is intentional.
    """
    # Local imports, presumably to avoid circular imports at module load.
    from cm.models.storage_image import StorageImage
    from cm.utils.threads.vm import VMThread
    template = Template.get(template_id)
    image = SystemImage.get(user.id, image_id, groups)

    # Only images in the 'ok' state may be booted.
    if image.state != image_states['ok']:
        raise CMException('image_unavailable')

    if farm:
        # A farm is one head VM plus `count` workers; quota must cover both.
        head_template = Template.get(head_template_id)
        wn_template = template
        user.check_quota([(head_template, 1), (wn_template, count)])
        count += 1
    else:
        user.check_quota([(template, count)])

    vms = []
    # All VMs of this call share the id of the first saved VM as reservation_id.
    reservation_id = None

    for i in range(count):
        # create VM instance
        log.debug(user.id, "Looking for node")
        # Farm head (i == 0) is scheduled with its own template.
        node = Node.get_free_node(
            head_template, image, node_id) if farm and i == 0 else Node.get_free_node(
            template, image, node_id)
        log.info(user.id, 'Selected node: %d' % node.id)
        vm = VM()
        # -1 marks a VM not yet defined in libvirt.
        vm.libvirt_id = -1
        if farm:
            if i == 0:
                vm.name = '%s-head' % name
                vm.description = 'Farm head'
                vm.template = head_template
            else:
                vm.name = '%s-wn%d' % (name, i)
                vm.description = 'Worker Node'
                vm.template = wn_template
        else:
            vm.template = template
            vm.description = description
            if count > 1:
                # Multi-VM request: number the names from 1.
                vm.name = '%s_%d' % (name, i + 1)
            else:
                vm.name = name
        vm.user = user
        vm.state = vm_states['init']
        vm.start_time = datetime.now()
        vm.system_image = image
        vm.node = node
        vm.save_vm = True
        if farm:
            vm.farm = farm

        # Find first free vnc port among VMs that are not closed/erased;
        # the for-else raises when the whole range is exhausted.
        used_ports = VM.objects.exclude(
            state__in=[vm_states['closed'], vm_states['erased']
                       ]).values_list('vnc_port', flat=True)
        for new_vnc_port in xrange(VNC_PORTS['START'], VNC_PORTS['END'] + 1):
            if new_vnc_port not in used_ports and new_vnc_port not in VNC_PORTS[
                    'EXCLUDE']:
                break
        else:
            raise CMException('vm_vnc_not_found')
        log.debug(user.id, "Found vnc port: %d" % new_vnc_port)
        vm.vnc_port = new_vnc_port

        # Find first free novnc port (same scheme as the VNC port above).
        used_ports = VM.objects.exclude(
            state__in=[vm_states['closed'], vm_states['erased']
                       ]).values_list('novnc_port', flat=True)
        for new_novnc_port in xrange(NOVNC_PORTS['START'], NOVNC_PORTS['END'] + 1):
            if new_novnc_port not in used_ports and new_novnc_port not in NOVNC_PORTS[
                    'EXCLUDE']:
                break
        else:
            raise CMException('vm_novnc_not_found')
        log.debug(user.id, "Found novnc port: %d" % new_novnc_port)
        vm.novnc_port = new_novnc_port

        if vnc:
            vm.attach_vnc()
        # Random 13-character VNC password.
        vm.vnc_passwd = password_gen(13, chars=['letters', 'digits'], extra_chars='!@#$%^&*()')
        vm.ssh_key = ssh_key
        vm.ssh_username = ssh_username
        vm.user_data = user_data
        # First save obtains vm.id, needed below for the reservation id.
        vm.save()

        if not reservation_id:
            reservation_id = vm.id
        vm.reservation_id = reservation_id
        vm.save()

        if farm and i == 0:
            farm.head = vm
        vms.append(vm)

        log.debug(user.id, "Attaching disks")
        disk_devs = []
        # Storage disks go to the first VM only; disk_dev is bumped until
        # unique within this VM.
        if i == 0 and disk_list:
            for disk_id in disk_list:
                log.debug(user.id, 'Attaching disks to first VM')
                disk = StorageImage.get(user.id, disk_id)
                if disk.vm != None:
                    raise CMException('image_attached')
                while disk.disk_dev in disk_devs:
                    disk.disk_dev += 1
                disk_devs.append(disk.disk_dev)
                disk.vm = vm
                disk.save()

        log.debug(user.id, "Attaching CD")
        # ISO images likewise go to the first VM only.
        if i == 0 and iso_list:
            for iso_id in iso_list:
                log.debug(user.id, 'Attaching iso to first VM')
                # cd image have not be attached to any other vm
                iso = IsoImage.get(user.id, iso_id)
                iso.check_attached()
                vm.iso_image = iso
                vm.save()

    # Second pass: attach a network lease (and optionally a PublicIP) per VM.
    for i, vm in enumerate(vms):
        if lease_id != None:
            # Caller supplied an explicit lease; it must belong to this
            # user and be free.
            lease = Lease.objects.get(id=lease_id)
            if lease.user_network.user != user:
                raise CMException('lease_permission')
            if lease.vm != None:
                raise CMException('lease_attached')
            lease.vm = vm
            log.debug(user.id, "Attached ip: %s" % lease.address)
        else:
            lease = AvailableNetwork.get_lease(user)
            lease.vm = vm
            lease.save()
            log.debug(user.id, "Attached ip: %s" % lease.address)

        # PublicIP (if requested) is bound to the first VM's lease only.
        if i == 0 and public_ip_id > 0:
            log.debug(user.id, "Attaching PublicIP")
            try:
                publicip = PublicIP.objects.filter(user=user).get(
                    id=public_ip_id)
                publicip.assign(lease)
                publicip.save()
            except Exception, e:
                log.exception(user.id, str(e))
                raise CMException("lease_not_found")