Example #1
    def prepare_temporary_pool_xml(self, image):
        """
        Create a temporary libvirt pool description used when copying
        images from/to Storages. The storage and user parameters define
        the storage path.

        @raises{cm_template_create,CMException}
        """
        try:
            django_settings.configure()
        except Exception:
            pass

        try:
            # Open template file
            template = open("%s/storage_dir.xml" %
                            settings.TEMPLATE_DIR).read()
            # Create django template
            st_template = loader.get_template_from_string(template)
            c = Context({
                'parent_pool': image.storage.name,
                'user': image.user.id,
                'cc_userid': 331,
                'cc_groupid': 331
            })
            t = st_template.render(c)
            log.info(self.user.id, "Rendered template: %s" % t)
        except Exception as e:
            log.exception(self.user.id, "Cannot create template: %s" % str(e))
            raise CMException('cm_template_create')
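The rendered pool XML is then handed to libvirt, as the mount() example below does; a minimal sketch (the connection URI is illustrative):

    import libvirt

    conn = libvirt.open('qemu+ssh://node/system')  # illustrative URI
    pool = conn.storagePoolDefineXML(t, 0)         # t: the XML rendered above
    pool.build(0)    # create the mount point
    pool.create(0)   # start the pool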
Example #2
    def register_node(vm):
        """
        Called from CLM when registering worker nodes of the farm

        @parameter{vm,vm} VM database mapper
        """
        log.debug(vm.user_id, "machine %d: registered as worker node" % vm.id)

        try:
            hosts = vm.farm.hosts()
            log.debug(
                vm.user_id, "vm: %d, host list to inject into WNs: %s" %
                (vm.id, str(hosts)))

            Command.execute('add_ssh_key',
                            vm.user_id,
                            vm.id,
                            user=vm.ssh_username,
                            ssh_key=vm.ssh_key)
            Command.execute('update_hosts',
                            vm.user_id,
                            vm.id,
                            hosts_list=hosts,
                            user=vm.ssh_username)
            Command.execute('set_hostname',
                            vm.user_id,
                            vm.id,
                            hostname=vm.name.replace(vm.farm.name, 'farm'))

        except Exception:
            log.exception(vm.user_id,
                          'configuring farm failed for machine %d' % vm.id)
            raise Exception('configuring farm failed')
        log.info(vm.user_id, 'WN %d registered' % vm.id)
Example #3
 def create(self):
     """
     Starts the VM's thread.
     -# Gets the VM's record from the database (based on vm_id), if it exists.
     -# Copies the image chosen for this VM.
     -# Connects to libvirt and generates a template for the VM.
     -# Creates the libvirt domain.
     -# Sets the VM's state to *running*.
     -# If the VM is part of a farm, sets the proper farm state.
     """
     try:
         log.info(
             self.vm.user_id, "Copy image from %s to %s" %
             (self.vm.system_image.path, self.vm.path))
         self.vm.system_image.copy_to_node(self.vm)
     except Exception as e:
         log.exception(self.vm.user_id,
                       'Libvirt error for %d: %s' % (self.vm.id, e))
         self.vm.set_state('failed')
         message.error(self.vm.user_id, 'vm_create', {
             'id': self.vm.id,
             'name': self.vm.name
         })
         self.vm.node.lock()
         self.vm.save(update_fields=['state'])
         return
Example #4
 def path(self):
     """
     @returns{string} path to image
     """
     img_path = '%d' % self.id
     log.info(self.user.id, 'Storage: %s, user_id: %d, image_id: %s' %
              (self.storage.path, self.user.id, img_path))
     return os.path.join(self.storage.path, str(self.user.id), img_path)
Example #5
def mount(caller_id, storage_id=None, node_id=None):
    """
    Mount selected (or all) Storages on selected (or all) Nodes.
    @cmview_admin_cm

    @parameter{caller_id}
    @dictkey{storage_id} id of the Storage to mount (None mounts all defined Storages)
    @dictkey{node_id} id of the Node to mount on (None mounts on all defined Nodes)

    @response{dict} each Node's response, keyed by Storage id
    """
    # If node_id is given, use that Node; otherwise use every Node.
    if node_id:
        nodes = Node.objects.filter(id__exact=node_id)
    else:
        nodes = Node.objects.all()

    # If storage_id is given, use that Storage; otherwise use every Storage.
    if storage_id:
        storages = Storage.objects.filter(id__exact=storage_id)
    else:
        storages = Storage.objects.all()

    node_response = {}
    for node in nodes:
        log.debug(caller_id, "Mounting node: %d" % node.id)
        storage_response = {}
        try:
            conn = libvirt.open(node.conn_string)
        except Exception as e:
            log.debug(caller_id, 'Cannot connect to libvirt: %s' % str(e))
            node.lock()
            node.save()
            raise CMException('storage_libvirt')

        for storage in storages:
            try:
                st_template = loader.get_template("storage_%s.xml" % storage.transport)
                log.info(caller_id, "Loaded template: %s" % st_template)
            except Exception as e:
                log.exception(caller_id, "Cannot load storage template: %s" % str(e))
                raise CMException('cm_storage_mount')

            try:
                # Create pool from XML template; uid/gid are read from config
                context = Context({'storage': storage, 'cc_userid': settings.CC_USERID, 'cc_groupid': settings.CC_GROUPID})
                t = st_template.render(context)
                log.info(caller_id, t)
                # Define pool, then set autostart, create mountpoint and start it
                try:
                    pool = conn.storagePoolDefineXML(t, 0)
                except Exception as e:
                    log.debug(caller_id, "Cannot define storage: %s" % str(e))
                    pool = conn.storagePoolLookupByName(storage.name)
                pool.setAutostart(1)
                pool.build(0)
                pool.create(0)
                storage_response[str(storage.id)] = 'ok'
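            # --- The listing is cut off above. A plausible tail (an
            # --- assumption, not the original cc1 source) records the
            # --- failure and returns the per-node responses:
            except Exception as e:
                log.exception(caller_id, "Cannot mount storage %d: %s" % (storage.id, str(e)))
                storage_response[str(storage.id)] = 'failed'

        node_response[str(node.id)] = storage_response

    return node_response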
Example #6
    def remove(self):
        """
        Archive the RRD file as a gzipped tarball, then delete the original.
        """
        if not self.vm:
            return 0

        tar = tarfile.open(self.backuppath, "w:gz")
        tar.add(self.filepath)
        tar.close()

        os.remove(self.filepath)
        log.info(0, 'removed: %s -> %s' % (self.filepath, self.backuppath))
Example #7
 def run(self):
     try:
         while self.running:
             time.sleep(settings.CLEANING_PERIOD)
             rrds = cm.utils.monia.RrdHandler().get_list()
             for vm in rrds:
                 if time.time() - settings.TIME_TO_REMOVE > rrds[vm][1]:
                     cm.utils.monia.RrdHandler({'name': str(vm), 'data': None}).remove()
         log.info(0, "CleanerThread stopped")
     except Exception as e:
         log.exception(0, 'CleanerThread: %s' % (e))
Example #8
File: vm.py Project: cc1-cloud/cc1
    def set_state(self, state):
        """
        @parameter{state,string} new state for the entity; one of 'turned off',
        'restart', 'running', 'running ctx', 'saving', 'closing', 'init',
        'closed', 'saving failed', 'failed', 'suspend', 'erased'

        @raises{vm_wrong_state,CMException}
        """

        # Key - destination state
        # Values - actual available states
        states = {'init': (),
                  'running': ('init', 'turned off', 'restart',),
                  'running ctx': ('running', 'running ctx',),
                  'closing': ('turned off', 'running', 'running ctx', 'saving',),
                  'closed': ('saving', 'closing', 'erased'),
                  'saving': ('running', 'running ctx',),
                  'saving failed': ('saving',),
                  'failed': ('init', 'running', 'running ctx', 'closing', 'closed', 'saving', 'saving failed', 'failed',
                             'turned off', 'suspend', 'restart', 'erased'),
                  'turned off': ('running', 'init',),
                  'suspend': ('running', 'running ctx',),
                  'restart': ('running', 'running ctx',),
                  'erasing': ('init', 'running', 'running ctx', 'closing', 'closed', 'saving', 'saving failed', 'failed',
                              'turned off', 'suspend', 'restart', 'erased', 'erasing'),
                  'erased': ('erasing', 'erased')
                  }

        # Find my state:
        my_state = None
        for s in vm_states.keys():
            if self.state == vm_states[s]:
                my_state = s

        log.info(self.user.id, "Changing state from %s to %s for %d" % (my_state, state, self.id))

        # Check if VM could go from actual state to given
        if (my_state is None or my_state not in states[state]) and my_state != 'erasing':
            raise CMException('vm_wrong_state', '%s -> %s for %d' % (my_state, state, self.id))

        self.state = vm_states[state]
        self.save(update_fields=['state'])

        # Lock node on fail
        if state in ('failed', 'saving failed') and my_state != 'erasing':
            self.node.lock()
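Callers are expected to handle the rejected transition; a short usage sketch (vm is any VM instance):

    try:
        vm.set_state('running')
    except CMException:
        # only 'init', 'turned off' and 'restart' may transition to 'running'
        log.exception(vm.user.id, 'cannot start VM %d' % vm.id)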
Example #9
def stop_monia():
    """
    Stop the monitoring system
    @returns{list}
    """

    t = threading.activeCount()
    e = threading.enumerate()
    th = []
    for i in e:
        th.append(i.getName())
        if i.getName() in ("initiator", "cleaner"):
            i.kill()
    log.info(0, 'Monitoring threads stopped')
    return [str(t), str(th)]
Example #10
def hello(remote_ip, **kw):
    """
    REST stub for the hello function

    @param_post{remote_ip,string}
    @param_post{kw}
    @returns HTTP response
    """
    vm = VM.get_by_ip(remote_ip)
    log.info(vm.user_id, "vm called hello")
    Command.hello(remote_ip)

    r = response('ok')
    if int(kw.get('version', 0)) < VERSION:
        with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'actions.py'), 'r') as f:
            r['actions_file'] = f.read()
    return r
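The response() helper is not shown in this listing; a minimal assumed shape, sufficient for the snippet above:

    def response(status, data=None):
        # hypothetical helper; the real cc1 implementation may differ
        r = {'status': status}
        if data is not None:
            r['data'] = data
        return r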
Example #11
class DownloadImage(threading.Thread):
    image = None
    url = None
    size = 0

    def __init__(self, image, url, size):
        threading.Thread.__init__(self)
        self.image = image
        self.url = url
        self.size = size

    def run(self):
        try:
            if self.url.startswith('/'):
                src_image = open(self.url, 'r')
            else:
                src_image = urllib2.urlopen(self.url)
        except Exception as e:
            log.exception(self.image.user.id, "Cannot open url %s: %s" % (self.url, str(e)))
            self.image.state = image_states['failed']
            self.image.save(update_fields=['state'])  # persist the state, as the other failure paths do
            return

        if os.path.exists(self.image.path):
            self.image.state = image_states['failed']
            self.image.save(update_fields=['state'])
            log.error(self.image.user.id, "Destination image %d for user %d exists! Aborting download" % (self.image.id, self.image.user.id))
            return

        try:
            dirpath = os.path.dirname(self.image.path)
            if not os.path.exists(dirpath):
                os.mkdir(dirpath)
            dest_image = open(self.image.path, 'w')
            downloaded_size = 0
            md5sum = hashlib.md5()
            while downloaded_size < self.size:
                buff = src_image.read(1024 * 1024)
                if not buff:
                    break  # source ended early; avoid an infinite loop
                md5sum.update(buff)
                downloaded_size += len(buff)
                dest_image.write(buff)

                progress = int(downloaded_size * 100 / self.size)
                if progress != self.image.progress:
                    self.image.progress = progress
                    self.image.save(update_fields=['progress'])

            dest_image.close()

            log.info(self.image.user.id, 'md5 hash of image %d is %s' % (self.image.id, md5sum.hexdigest()))
            self.image.state = image_states['ok']
            self.image.size = downloaded_size / (1024 * 1024)
            self.image.save(update_fields=['progress', 'state', 'size'])
            message.info(self.image.user.id, 'image_downloaded', {'name': self.image.name, 'md5sum': md5sum.hexdigest()})
        except Exception as e:
            log.exception(self.image.user.id, "Failed to download image: %s" % str(e))
            self.image.state = image_states['failed']
            self.image.save(update_fields=['state'])
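A short usage sketch (the model lookup and URL are illustrative, not from the listing):

    img = Image.objects.get(id=42)  # hypothetical lookup
    t = DownloadImage(img, 'http://example.com/debian.img', 700 * 1024 * 1024)
    t.start()   # the download runs in the background
    t.join()    # or poll img.progress / img.state instead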
Example #12
    def format(self):

        if not os.path.exists(os.path.dirname(self.image.path)):
            os.makedirs(os.path.dirname(self.image.path))
        format_cmd = disk_format_commands[disk_filesystems_reversed[self.filesystem]].split()
        if format_cmd:
            tmp_dir = '/var/lib/cc1/images-tmp/'
            tmp_path = os.path.join(tmp_dir, os.path.split(self.image.path)[1])
            if not os.path.exists(tmp_dir):
                os.makedirs(tmp_dir)
        else:
            tmp_path = str(self.image.path)

        log.debug(self.image.user.id, 'stage [1/6] truncate partition file')
        if self.exec_cmd(['truncate', '-s', '%dM' % self.image.size, '%s' % tmp_path]):
            return 'failed'
        self.set_progress(random.randint(0, 15))

        if format_cmd:
            format_cmd.append('%s' % tmp_path)
            log.debug(self.image.user.id, 'stage [2/6] creating partition filesystem')
            if self.exec_cmd(format_cmd):
                return 'failed'
            self.set_progress(random.randint(15, 50))

            log.debug(self.image.user.id, 'stage [3/6] creating disk')
            if self.exec_cmd(['/usr/bin/ddrescue', '-S', '-o', '1048576', '%s' % tmp_path, str(self.image.path)]):
                return 'failed'
            self.set_progress(random.randint(50, 80))

        log.debug(self.image.user.id, 'stage [4/6] creating new partition table')
        if self.exec_cmd(['/sbin/parted', '-s', str(self.image.path), 'mklabel', 'msdos']):
            return 'failed'
        self.set_progress(random.randint(80, 90))

        log.debug(self.image.user.id, 'stage [5/6] adding partition')
        if self.exec_cmd(['/sbin/parted', '-s', str(self.image.path), 'mkpart', 'primary', '1048576b', '100%']):
            return 'failed'
        self.set_progress(random.randint(90, 100))

        log.info(self.image.user.id, 'disk successfully formatted')
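exec_cmd is called throughout but not shown; one plausible shape (an assumption, not the cc1 source) matching the truthy-on-failure usage above:

    import subprocess

    def exec_cmd(self, args):
        # run a command; return None on success, error output on failure
        p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            log.error(self.image.user.id, 'command %s failed: %s' % (args, err))
            return err or 'failed'
        return None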
Example #13
 def kill(self):
     log.info(0, 'killing MonitorThread...')
     try:
         # sys.exit() raises SystemExit, which 'except Exception' below
         # does not catch
         sys.exit()
     except Exception:
         log.info(0, 'MonitorThread error...')
     log.info(0, 'MonitorThread killed')
Example #14
    def register_head(vm):
        """
        Head registration process:
        - Creates ssh keys and sets their values for WN;
        - Inserts VMs into the database;
        - Then starts VMThreads which create actual machines.

        Called when registering farms head.

        @parameter{vm,VM} instance of the VM to be registered as head
        """
        log.debug(vm.user_id, "machine %d: registered as head" % vm.id)

        log.debug(vm.user_id, "creating lock for machine %d in farm %d" % (vm.id, vm.farm_id))
        # skip if farm is already configured - reboot head
        if vm.is_head() and vm.farm.state == farm_states['running']:
            return

        vms = []
        if vm.farm.state == farm_states['init_head']:
            vm.farm.state = farm_states['running']
            vm.farm.save()

            log.info(vm.user_id, 'generating ssh keys on head %d' % vm.id)

            try:
                r = Command.execute('generate_key', vm.user_id, vm.id)
                r = json.loads(r)
                log.info(vm.user_id, 'generated key: %s for machine %d' % (r, vm.id))
                for wn in vm.farm.vms.all():
                    wn.ssh_username = '******'
                    wn.ssh_key = r
                    wn.save()
                    if not wn.is_head():
                        vms.append(wn)
                ssh_username = '******'
                ssh_key = r
                log.debug(vm.user_id, 'appended %d vms to farm [id:%d]' % (vm.farm.vms.count() - 1, vm.id))  # excluding head

                Command.add_command('add_ssh_key', vm.user_id, vm.id, user=ssh_username, ssh_key=ssh_key)
                Command.add_command('update_hosts', vm.user_id, vm.id, hosts_list=vm.farm.hosts(), user=ssh_username)
                Command.execute('set_hostname', vm.user_id, vm.id, hostname=vm.name.replace(vm.farm.name, 'farm'))

            except Exception:
                log.exception(vm.user_id, '')
                vm.farm.state = farm_states['unconfigured']
                message.error(vm.id, 'farm_create', {'id': vm.farm.id, 'name': vm.farm.name})
        log.info(vm.user_id, 'Head %d registered' % vm.id)
        shared = {"counter": len(vms), "lock": threading.Lock()}
        for vm in vms:
            thread = VMThread(vm, 'create', shared)
            thread.start()
            log.debug(vm.user_id, 'vm thread created [vm id:%d]' % vm.id)
Example #15
class MonitorInitiator(threading.Thread):
    def __init__(self):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.name = "initiator"
        self.running = True
        if not cm.utils.monia.os.path.exists(settings.PATH_TO_RRD):
            cm.utils.monia.os.makedirs(settings.PATH_TO_RRD)
        if not cm.utils.monia.os.path.exists(settings.BACKUP_PATH):
            cm.utils.monia.os.makedirs(settings.BACKUP_PATH)

        self.rb = cm.utils.monia.RingBuffer()

        nlist = get_nodes()
        self.frequency = settings.PERIOD * 1.0 / len(nlist)
        for n in nlist:
            self.rb.add(n)

    def update_nodes(self, nlist):
        log.info(0, 'updating nodes list')
        self.rb.clear()
        for n in nlist:
            self.rb.add(n)

    def run(self):
        while self.running:
            one = None
            try:
                one = self.rb.get()
                if one['address'] not in [i.name for i in threading.enumerate()]:
                    t = MonitorThread(one)
                    t.start()
            except Exception as e:
                # 'one' may still be None if rb.get() itself failed
                log.error(0, 'Monitoring error %s: %s' %
                          (one['address'] if one else 'unknown', e))
            time.sleep(self.frequency)
        log.info(0, "MonitorInitiator stopped")
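RingBuffer is used above but not shown; a minimal sketch matching that usage (an assumption, not the cc1 implementation):

    class RingBuffer(object):
        def __init__(self):
            self.items = []
            self.idx = 0

        def add(self, item):
            self.items.append(item)

        def clear(self):
            self.items = []
            self.idx = 0

        def get(self):
            # return the next item, wrapping around at the end
            item = self.items[self.idx % len(self.items)]
            self.idx += 1
            return item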
Example #16
def start_monia():
    """
    Starts the system monitoring.
    @response{list} actions taken, or a string when monitoring is disabled
    or there are no nodes to monitor
    """
    if not settings.MONITOR_ENABLE:
        stop_monia()
        return 'Monitoring disabled'
    nlist = get_nodes()
    if not nlist:
        stop_monia()
        return 'No nodes to monitor'
    r = []
    e = threading.enumerate()

    # update the node list in the MonitorInitiator thread
    for t in e:
        if t.name == "initiator":
            t.update_nodes(nlist)
            log.info(0, 'Monitoring nodes list updated')
            r.append('node list updated')

    # start the MonitorInitiator thread if not already running
    if not [t for t in e if t.name == 'initiator']:
        monitor = MonitorInitiator()
        monitor.start()
        r.append('initiator started')
        log.info(0, 'Monitoring thread MonitorInitiator started')

    # start the CleanerThread if not already running
    if not [t for t in e if t.name == 'cleaner']:
        cl = CleanerThread()
        cl.start()
        r.append('cleaner started')
        log.info(0, 'Monitoring thread CleanerThread started')

    return r
Example #17
File: vm.py Project: cc1-cloud/cc1
 def terminate(self):
     log.info(0, "Terminate vm %d" % (self.vm.id))
     # _Thread__stop is a private CPython 2 detail: it marks the thread
     # as stopped rather than forcibly killing it
     return self._Thread__stop()
Example #18
File: vm.py Project: cc1-cloud/cc1
        #TODO: network part
        log.debug(self.vm.user_id, "Attaching network")
        try:
            for lease in self.vm.lease_set.all():
                lease.attach_node()
        except Exception:
            log.exception(self.vm.user_id, "Cannot create network")
            self.vm.set_state('failed')
            self.vm.save(update_fields=['state'])
            message.error(self.vm.user_id, 'vm_create', {'id': self.vm.id, 'name': self.vm.name})
            self.vm.node.lock()
            self.vm.node.save()
            return

        log.info(self.vm.user_id, "Connecting libvirt and generating template")
        try:
            conn = libvirt.open(self.vm.node.conn_string)
            tmpl = self.vm.libvirt_template()
            log.debug(self.vm.user_id, "Create from template: %s" % tmpl)
        except Exception:
            log.exception(self.vm.user_id, "Cannot connect to libvirt")
            self.vm.set_state('failed')
            message.error(self.vm.user_id, 'vm_create', {'id': self.vm.id, 'name': self.vm.name})
            self.vm.node.lock()
            self.vm.save(update_fields=['state'])
            return

        log.info(self.vm.user_id, "Creating libvirt domain")
        try:
            domain = conn.createXML(tmpl, 0)
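        # The listing is cut off here; the matching except clause (an
        # assumption, not the original cc1 source) would mirror the
        # failure handling above:
        except Exception:
            log.exception(self.vm.user_id, "Cannot create domain")
            self.vm.set_state('failed')
            message.error(self.vm.user_id, 'vm_create', {'id': self.vm.id, 'name': self.vm.name})
            self.vm.node.lock()
            self.vm.save(update_fields=['state'])
            return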
Example #19
    def create(user,
               name,
               description,
               image_id,
               template_id,
               public_ip_id,
               iso_list,
               disk_list,
               vnc,
               groups,
               ssh_key=None,
               ssh_username=None,
               count=1,
               farm=None,
               head_template_id=None,
               node_id=False,
               lease_id=None,
               user_data=None):
        from cm.models.storage_image import StorageImage
        from cm.utils.threads.vm import VMThread

        template = Template.get(template_id)
        image = SystemImage.get(user.id, image_id, groups)

        if image.state != image_states['ok']:
            raise CMException('image_unavailable')

        if farm:
            head_template = Template.get(head_template_id)
            wn_template = template
            user.check_quota([(head_template, 1), (wn_template, count)])
            count += 1
        else:
            user.check_quota([(template, count)])

        vms = []

        reservation_id = None

        for i in range(count):
            # create VM instance
            log.debug(user.id, "Looking for node")
            node = Node.get_free_node(
                head_template, image,
                node_id) if farm and i == 0 else Node.get_free_node(
                    template, image, node_id)
            log.info(user.id, 'Selected node: %d' % node.id)
            vm = VM()
            vm.libvirt_id = -1
            if farm:
                if i == 0:
                    vm.name = '%s-head' % name
                    vm.description = 'Farm head'
                    vm.template = head_template
                else:
                    vm.name = '%s-wn%d' % (name, i)
                    vm.description = 'Worker Node'
                    vm.template = wn_template
            else:
                vm.template = template
                vm.description = description
                if count > 1:
                    vm.name = '%s_%d' % (name, i + 1)
                else:
                    vm.name = name
            vm.user = user
            vm.state = vm_states['init']
            vm.start_time = datetime.now()
            vm.system_image = image
            vm.node = node
            vm.save_vm = True
            if farm:
                vm.farm = farm

            # Find first free vnc port
            used_ports = VM.objects.exclude(
                state__in=[vm_states['closed'], vm_states['erased']
                           ]).values_list('vnc_port', flat=True)

            for new_vnc_port in xrange(VNC_PORTS['START'],
                                       VNC_PORTS['END'] + 1):
                if new_vnc_port not in used_ports and new_vnc_port not in VNC_PORTS[
                        'EXCLUDE']:
                    break
            else:
                raise CMException('vm_vnc_not_found')

            log.debug(user.id, "Found vnc port: %d" % new_vnc_port)
            vm.vnc_port = new_vnc_port

            # Find first free novnc port
            used_ports = VM.objects.exclude(
                state__in=[vm_states['closed'], vm_states['erased']
                           ]).values_list('novnc_port', flat=True)
            for new_novnc_port in xrange(NOVNC_PORTS['START'],
                                         NOVNC_PORTS['END'] + 1):
                if new_novnc_port not in used_ports and new_novnc_port not in NOVNC_PORTS[
                        'EXCLUDE']:
                    break
            else:
                raise CMException('vm_novnc_not_found')

            log.debug(user.id, "Found novnc port: %d" % new_novnc_port)
            vm.novnc_port = new_novnc_port

            if vnc:
                vm.attach_vnc()
            vm.vnc_passwd = password_gen(13,
                                         chars=['letters', 'digits'],
                                         extra_chars='!@#$%^&*()')
            vm.ssh_key = ssh_key
            vm.ssh_username = ssh_username
            vm.user_data = user_data
            vm.save()

            if not reservation_id:
                reservation_id = vm.id

            vm.reservation_id = reservation_id
            vm.save()

            if farm and i == 0:
                farm.head = vm
            vms.append(vm)

            log.debug(user.id, "Attaching disks")
            disk_devs = []
            if i == 0 and disk_list:
                for disk_id in disk_list:
                    log.debug(user.id, 'Attaching disks to first VM')
                    disk = StorageImage.get(user.id, disk_id)
                    if disk.vm is not None:
                        raise CMException('image_attached')
                    while disk.disk_dev in disk_devs:
                        disk.disk_dev += 1
                    disk_devs.append(disk.disk_dev)
                    disk.vm = vm
                    disk.save()

            log.debug(user.id, "Attaching CD")
            if i == 0 and iso_list:
                for iso_id in iso_list:
                    log.debug(user.id, 'Attaching iso to first VM')
                    # the CD image must not be attached to any other VM
                    iso = IsoImage.get(user.id, iso_id)
                    iso.check_attached()
                    vm.iso_image = iso
                    vm.save()

        for i, vm in enumerate(vms):
            if lease_id is not None:
                lease = Lease.objects.get(id=lease_id)

                if lease.user_network.user != user:
                    raise CMException('lease_permission')

                if lease.vm is not None:
                    raise CMException('lease_attached')
                lease.vm = vm
                lease.save()  # persist the assignment, as the else branch does
                log.debug(user.id, "Attached ip: %s" % lease.address)
            else:
                lease = AvailableNetwork.get_lease(user)
                lease.vm = vm
                lease.save()
                log.debug(user.id, "Attached ip: %s" % lease.address)

            if i == 0 and public_ip_id > 0:
                log.debug(user.id, "Attaching PublicIP")
                try:
                    publicip = PublicIP.objects.filter(user=user).get(
                        id=public_ip_id)
                    publicip.assign(lease)
                    publicip.save()
                except Exception as e:
                    log.exception(user.id, str(e))
                    raise CMException("lease_not_found")
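password_gen is used above but not shown in this listing; one plausible implementation (an assumption, not the cc1 source), treating chars as names of character classes:

    import random
    import string

    def password_gen(length, chars=None, extra_chars=''):
        # build the alphabet from the requested character classes
        pool = ''
        if chars and 'letters' in chars:
            pool += string.ascii_letters
        if chars and 'digits' in chars:
            pool += string.digits
        pool += extra_chars
        rng = random.SystemRandom()  # prefer a cryptographic source
        return ''.join(rng.choice(pool) for _ in range(length))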
Example #20
 def kill(self):
     log.info(0, "stopping CleanerThread... ")
     self.running = False
Example #21
 def kill(self):
     log.info(0, "stopping MonitorInitiator... ")
     self.running = False
Example #22
class RrdHandler(object):
    def __init__(self, data=None):
        # always set self.vm so the 'if not self.vm' guards below cannot
        # raise AttributeError when no data was given
        self.vm = data
        if data:
            self.filepath = get_path(data['name'])
            self.backuppath = get_backup_path(data['name'])

    def update(self):
        """
        Update the RRD file if it exists; otherwise create a new one.
        """
        if not self.vm:
            raise Exception('No VM specified')
        try:
            filesize = os.path.getsize(self.filepath)
        except Exception:
            filesize = 0

        if (filesize == 0):
            self.create()
        else:  # update the existing file
            ret = rrdtool.update(
                "%s" % (self.filepath), 'N:%d:%d:%d:%d:%d:%d:%d:%d:%d:%d' % (
                    int(self.vm['cpu_count']),
                    int(self.vm['cpu_time']) / 100000000 / 10.0 /
                    self.vm['cpu_count'],
                    int(self.vm['rd_req']),
                    int(self.vm['rd_bytes']),
                    int(self.vm['wr_req']),
                    int(self.vm['wr_bytes']),
                    int(self.vm['rx_bytes']),
                    int(self.vm['rx_packets']),
                    int(self.vm['tx_bytes']),
                    int(self.vm['tx_packets']),
                ))
            if ret:
                log.error(0, 'update error: %s' % (rrdtool.error()))

    def create(self):
        if not self.vm:
            raise Exception('No VM specified')
        rarg = [
            "%s" % (self.filepath),
            "--step",
            "%d" % settings.PERIOD,
            "DS:cpu_count:GAUGE:%d:0:100000" % (settings.PERIOD * 2),
            "DS:cpu_time:COUNTER:%d:0:100000" % (settings.PERIOD * 2),
            "DS:rd_req:COUNTER:%d:0:100000000" % (settings.PERIOD * 2),
            "DS:rd_bytes:COUNTER:%d:0:100000000" % (settings.PERIOD * 2),
            "DS:wr_req:COUNTER:%d:0:100000000" % (settings.PERIOD * 2),
            "DS:wr_bytes:COUNTER:%d:0:100000000" % (settings.PERIOD * 2),
            "DS:rx_bytes:COUNTER:%d:0:100000000" % (settings.PERIOD * 2),
            "DS:rx_packets:COUNTER:%d:0:100000000" % (settings.PERIOD * 2),
            "DS:tx_bytes:COUNTER:%d:0:100000000" % (settings.PERIOD * 2),
            "DS:tx_packets:COUNTER:%d:0:100000000" % (settings.PERIOD * 2),
        ]
        for s in settings.STATS:
            rarg.append("RRA:AVERAGE:0.5:%d:%d" % (s[0], s[1]))

        try:
            ret = rrdtool.create(rarg)  # all data together is about 3.1 MB
            if ret:
                log.error(0, 'create error: %s' % (rrdtool.error()))
        except Exception as e:
            log.exception(0, e)
        log.info(0, 'created: %s' % (self.filepath))
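Reading the samples back is symmetric; a short usage sketch (the file path is illustrative):

    import rrdtool

    # fetch the last hour of averaged samples from a VM's RRD file
    (start, end, step), names, rows = rrdtool.fetch(
        '/var/lib/cc1/rrd/vm-42.rrd', 'AVERAGE', '--start', '-3600')
    for ts, row in zip(range(start, end, step), rows):
        print('%d %r' % (ts, dict(zip(names, row))))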