Example #1
 def get_import_task(self):
     params = {'InstanceId': config.get_worker_id()}
     task = self.conn.get_object('GetInstanceImportTask', params, InstanceImportTask, verb='POST')
     if not task or not task.task_id:
         return None
     else:
         return task
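A minimal sketch of how this method might be driven in practice, assuming a client object that exposes get_import_task and an arbitrary 10-second poll interval (the poll_for_task helper below is hypothetical and not part of the original code):

 import time

 def poll_for_task(client, interval_sec=10):
     # Keep asking the imaging service for work until a task is handed out.
     while True:
         task = client.get_import_task()
         if task is not None:
             return task
         # No task assigned yet; back off before polling again.
         time.sleep(interval_sec)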
Example #2
    def __init__(self):
        # get the instance id from metadata service
        self.__instance_id = config.get_worker_id()

        self.__status = WorkerLoop.STOPPED
        logger.debug('main loop running with instance_id=%s' % self.__instance_id)
    def __init__(self):
        # get the instance id from metadata service
        self.__instance_id = config.get_worker_id()
        self.__euca_host = config.get_clc_host()

        self.__status = WorkerLoop.STOPPED
        worker.log.debug('main loop running with clc_host=%s, instance_id=%s' % (self.__euca_host, self.__instance_id))
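The constructor above only records state; a minimal sketch of the loop such a WorkerLoop might run is shown below. The RUNNING constant, the start()/stop() methods, the poll interval, and the time import are assumptions for illustration and do not come from the original code:

    def start(self):
        # Hypothetical driver: mark the loop as running and poll until stopped.
        self.__status = WorkerLoop.RUNNING  # assumed constant, not in the original
        while self.__status == WorkerLoop.RUNNING:
            worker.log.debug('worker %s polling for tasks' % self.__instance_id)
            time.sleep(10)  # assumed poll interval; requires `import time`

    def stop(self):
        self.__status = WorkerLoop.STOPPED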
 def attach_volume(self):
     if self.volume_id is None:
         raise RuntimeError('This import does not have a volume')
     devices_before = self.get_block_devices()
     device_name = self.next_device_name(devices_before)
     instance_id = config.get_worker_id()
     worker.log.debug('attaching volume {0} to {1} as {2}'.format(self.volume_id, instance_id, device_name))
     if not self.ec2_conn.attach_volume_and_wait(self.volume_id, instance_id, device_name):
         raise RuntimeError('Can not attach volume {0} to the instance {1}'.format(
                           self.volume_id, instance_id)) #todo: add specific error?
     new_block_devices = self.get_block_devices()
     new_device_name = new_block_devices[0] # can it be different from device_name?
     return new_device_name
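The inline comment above asks whether new_block_devices[0] is really the device that was just attached. One safer approach, sketched here purely as an illustration and assuming get_block_devices() returns a flat list of device paths, is to diff the before/after snapshots and fall back to the requested name when the diff is ambiguous:

 def find_new_device(devices_before, devices_after, requested_name):
     # Devices that appeared between the two snapshots.
     appeared = set(devices_after) - set(devices_before)
     if len(appeared) == 1:
         return appeared.pop()
     # Empty or ambiguous diff: fall back to the device name requested from EC2.
     return requested_name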
Example #6
 def put_import_task_status(self, task_id=None, status=None, volume_id=None, bytes_converted=None, error_code=None):
     if task_id is None or status is None:
         raise RuntimeError("Invalid parameters")
     params = {'InstanceId': config.get_worker_id(), 'ImportTaskId': task_id, 'Status': status}
     if bytes_converted is not None:
         params['BytesConverted'] = bytes_converted
     if volume_id is not None:
         params['VolumeId'] = volume_id
     if error_code is not None:
         params['ErrorCode'] = error_code
     resp = self.conn.make_request('PutInstanceImportTaskStatus', params, path='/', verb='POST')
     if resp.status != 200:
         raise httplib.HTTPException(resp.status, resp.reason, resp.read())
     root = objectify.XML(resp.read())
     if hasattr(root, 'cancelled'):
         # Return False when the service reports the task as cancelled.
         return root.cancelled.text != 'true'
     return True
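Because put_import_task_status returns False once the server marks the task as cancelled, callers normally stop converting on a False result. A rough usage sketch follows; the convert_next_chunk callable, the byte counter, and the 'CONVERTING' status string are placeholders, not values taken from the original code:

 def report_progress(client, task_id, volume_id, convert_next_chunk):
     bytes_done = 0
     while True:
         chunk_size = convert_next_chunk()
         if chunk_size is None:
             break  # conversion finished
         bytes_done += chunk_size
         keep_going = client.put_import_task_status(task_id=task_id,
                                                    status='CONVERTING',  # placeholder status
                                                    volume_id=volume_id,
                                                    bytes_converted=bytes_done)
         if not keep_going:
             # The server reported the task as cancelled; stop converting.
             break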
 def detach_volume(self, timeout_sec=3000, local_dev_timeout=30):
     if self.volume is None:
         raise RuntimeError('This import does not have volume id')
     worker.log.info('Detaching volume %s' % self.volume.id)
     devices_before = self.get_block_devices()
     self.volume.update()
     # Do not attempt to detach a volume which is not attached/attaching, or
     # is not attached to this instance
     this_instance_id = config.get_worker_id()
     attached_state = self.volume.attachment_state()
     if not attached_state \
             or not attached_state.startswith('attach') \
             or (hasattr(self.volume, 'attach_data')
                 and self.volume.attach_data.instance_id != this_instance_id):
         self.volume_attached_dev = None
         return True
     # Begin detaching from this instance
     if not self.ec2_conn.detach_volume_and_wait(self.volume.id,
                                                 timeout_sec=timeout_sec):
         raise RuntimeError('Can not detach volume {0}'
                            .format(self.volume.id))  # todo: add specific error?
     # If the attached dev is known, verify it is no longer present.
     if self.volume_attached_dev:
         elapsed = 0
         start=time.time()
         devices_after = devices_before
         while elapsed < local_dev_timeout:
             new_block_devices = self.get_block_devices()
             devices_after = list(set(devices_before) - set(new_block_devices))
             if self.volume_attached_dev not in devices_after:
                 break
             else:
                 time.sleep(2)
             elapsed = time.time() - start
         if self.volume_attached_dev in devices_after:
             # todo: add specific error?
             self.volume.update()
             worker.log.error('Volume:"{0}" state:"{1}". Local device:"{2}" '
                              'found on guest after {3} seconds'
                              .format(self.volume.id,
                                      self.volume.status,
                                      self.volume.local_blockdev,
                                      local_dev_timeout))
     return True
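The wait loop above derives devices_after from a set difference against the pre-detach snapshot, which makes the final "found on guest" check hard to follow. A simpler expression of the stated intent (wait until the local device disappears, then report failure if it is still present) is sketched below; it assumes a callable returning the current block-device list and is offered only as an illustration, not as the project's actual behavior:

 import time

 def wait_for_device_gone(get_block_devices, device, timeout_sec=30, poll_sec=2):
     # Poll the guest's block devices until `device` is no longer listed.
     start = time.time()
     while time.time() - start < timeout_sec:
         if device not in get_block_devices():
             return True
         time.sleep(poll_sec)
     # Device is still present after the timeout.
     return False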
 def __init__(self, task_id, manifest_url=None, volume_id=None):
     ImagingTask.__init__(self, task_id, "import_volume")
     self.manifest_url = manifest_url
     self.ec2_conn = ws.connect_ec2(
         aws_access_key_id=config.get_access_key_id(),
         aws_secret_access_key=config.get_secret_access_key(),
         security_token=config.get_security_token())
     self.volume = None
     self.volume_id = volume_id
     if self.volume_id:
         self.volume = self.ec2_conn.conn.get_all_volumes([self.volume_id, 'verbose'])
     if not self.volume:
         raise ValueError('Request for volume:"{0}" returned:"{1}"'
                          .format(volume_id, str(self.volume)))
     self.volume = self.volume[0]
     self.volume_attached_dev = None
     self.instance_id = config.get_worker_id()
     self.process = None
 def detach_volume(self, timeout_sec=3000, local_dev_timeout=30):
     if self.volume is None:
         raise FailureWithCode('This import does not have volume id', INPUT_DATA_FAILURE)
     log.debug('Detaching volume %s' % self.volume.id, self.task_id)
     devices_before = get_block_devices()
     self.volume.update()
     # Do not attempt to detach a volume which is not attached/attaching, or
     # is not attached to this instance
     this_instance_id = config.get_worker_id()
     attached_state = self.volume.attachment_state()
     if not attached_state \
             or not attached_state.startswith('attach') \
             or (hasattr(self.volume, 'attach_data')
                 and self.volume.attach_data.instance_id != this_instance_id):
         self.volume_attached_dev = None
         return True
     # Begin detaching from this instance
     if not self.ec2_conn.detach_volume_and_wait(self.volume.id, timeout_sec=timeout_sec, task_id=self.task_id):
         raise FailureWithCode('Can not detach volume %s' % self.volume.id, DETACH_VOLUME_FAILURE)
     # If the attached dev is known, verify it is no longer present.
     if self.volume_attached_dev:
         elapsed = 0
         start = time.time()
         devices_after = devices_before
         while elapsed < local_dev_timeout:
             new_block_devices = get_block_devices()
             devices_after = list(set(devices_before) - set(new_block_devices))
             if self.volume_attached_dev not in devices_after:
                 break
             else:
                 time.sleep(2)
             elapsed = time.time() - start
         if self.volume_attached_dev in devices_after:
             self.volume.update()
             log.error('Volume:"{0}" state:"{1}". Local device:"{2}" '
                       'found on guest after {3} seconds'
                       .format(self.volume.id,
                               self.volume.status,
                               self.volume.local_blockdev,
                               local_dev_timeout), self.task_id)
             return False
     return True
 def __init__(self, task_id, manifest_url=None, volume_id=None):
     ImagingTask.__init__(self, task_id, "import_volume")
     self.manifest_url = manifest_url
     self.instance_id = config.get_worker_id()
     self.process = None
LOG_FILE = '/var/log/eucalyptus-imaging-worker/worker.log'
LOG_BYTES = 1024 * 1024  # 1MB

log = logging.getLogger('worker')
botolog = logging.getLogger('boto')
log.setLevel(logging.INFO)
botolog.setLevel(logging.INFO)
# local handler
local_formatter = logging.Formatter('%(asctime)s %(name)s [%(levelname)s]:%(message)s')
file_log_handler = RotatingFileHandler(LOG_FILE, maxBytes=LOG_BYTES, backupCount=5)
file_log_handler.setFormatter(local_formatter)
log.addHandler(file_log_handler)
botolog.addHandler(file_log_handler)
# remote handler
if config.get_log_server() is not None and config.get_log_server_port() is not None:
    remote_formatter = logging.Formatter('imaging-worker ' + config.get_worker_id() + ' [%(levelname)s]:%(message)s')
    remote_log_handler = SysLogHandler(address=(config.get_log_server(), config.get_log_server_port()),
                                       facility=SysLogHandler.LOG_DAEMON)
    remote_log_handler.setFormatter(remote_formatter)
    log.addHandler(remote_log_handler)

# Log level will default to INFO
# If you want more information (like DEBUG) you will have to set the log level
def set_loglevel(lvl):
    global log
    lvl_num = None
    if isinstance(lvl, str):
        try:
            lvl_num = getattr(logging, lvl.upper())
        except AttributeError:
            log.warning("Failed to set log level to '%s'" % lvl)