def run(self): servo.log.info('starting cloudwatch metrics reporter') elb_host = config.get_clc_host() servo_instance_id = config.get_servo_id() if elb_host is None or servo_instance_id is None: servo.log.error('some required parameters are missing; failed to start cloudwatch report loop') return start_time = time.time() while time.time() - start_time < config.CWATCH_REPORT_PERIOD_SEC and self.running: time.sleep(1) while self.running: aws_access_key_id = config.get_access_key_id() aws_secret_access_key = config.get_secret_access_key() security_token = config.get_security_token() con = servo.ws.connect_elb(host_name=elb_host, aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, security_token=security_token) try: metric = stat_instance.get_and_clear_stat() con.put_cw_metric(servo_instance_id, metric) servo.log.debug('reported the metrics: %s' % metric) except Exception, err: servo.log.error('failed to report the cloudwatch metrics: %s', err) start_time = time.time() while time.time() - start_time < config.CWATCH_REPORT_PERIOD_SEC and self.running: time.sleep(1)
def start(self): # check if workflow enabled """ if subprocess.call(['/usr/libexec/eucalyptus/euca-run-workflow', '-h'], stdout=os.devnull, stderr=os.devnull) != 0: worker.log.error('Failed to find euca-run-workflow. Would not start service') self.__status = WorkerLoop.STOPPED else: self.__status = WorkerLoop.RUNNING """ self.__status = WorkerLoop.RUNNING while self.__status == WorkerLoop.RUNNING: worker.log.info('Querying for new imaging task') try: con = worker.ws.connect_imaging_worker(host_name=self.__euca_host, aws_access_key_id=config.get_access_key_id(), aws_secret_access_key=config.get_secret_access_key(), security_token=config.get_security_token()) import_task = con.get_import_task() task = ImagingTask.from_import_task(import_task) if task: worker.log.info('Processing import task %s' % task) if task.process_task(): worker.log.info('Done processing task %s' % task.task_id) else: worker.log.warn('Processing of the task %s failed' % task.task_id) else: pass except Exception, err: worker.log.error('Failed to query imaging service: %s' % err) start_time = time.time() while time.time() - start_time < config.QUERY_PERIOD_SEC and self.__status == WorkerLoop.RUNNING: worker.log.debug('sleeping') time.sleep(10)
def run(self): servo.log.info('starting cloudwatch metrics reporter') elb_host = config.get_clc_host() servo_instance_id = config.get_servo_id() if elb_host is None or servo_instance_id is None: servo.log.error( 'some required parameters are missing; failed to start cloudwatch report loop' ) return while self.running: aws_access_key_id = config.get_access_key_id() aws_secret_access_key = config.get_secret_access_key() security_token = config.get_security_token() try: con = servo.ws.connect_elb( aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key, security_token=security_token) metric = stat_instance.get_and_clear_stat() con.put_cw_metric(servo_instance_id, metric) servo.log.debug('reported the metrics: %s' % metric) except Exception, err: servo.log.error('failed to report the cloudwatch metrics: %s', err) cw_loop_delay = config.CWATCH_REPORT_PERIOD_SEC while cw_loop_delay > 0 and self.running: time.sleep(1) cw_loop_delay -= 1
def start(self):
    """Main polling loop: fetch one import task per cycle and process it.

    Loops while __status stays WorkerLoop.RUNNING; failures during task
    processing are reported back to the imaging service with an error
    code, then the loop sleeps config.QUERY_PERIOD_SEC before retrying.
    """
    self.__status = WorkerLoop.RUNNING
    while self.__status == WorkerLoop.RUNNING:
        logger.info('Querying for new imaging task')
        try:
            con = eucaimgworker.ws.connect_imaging_worker(aws_access_key_id=config.get_access_key_id(),
                                                          aws_secret_access_key=config.get_secret_access_key(),
                                                          security_token=config.get_security_token())
            import_task = con.get_import_task()
            try:
                task = ImagingTask.from_import_task(import_task)
                if task:
                    # NOTE(review): a second positional argument (task.task_id) is
                    # passed to this custom logger - presumably a per-task tag;
                    # verify against the logger's signature
                    logger.info('Processing import task %s' % task, task.task_id)
                    if task.process_task():
                        logger.info('Done processing task %s' % task.task_id, task.task_id)
                    else:
                        logger.error('Processing of the task %s failed' % task.task_id, task.task_id)
                else:
                    pass
            except Exception, err:
                # report the failure back to the imaging service; known
                # failures carry their own code, anything else is GENERAL_FAILURE
                if type(err) is FailureWithCode:
                    con.put_import_task_status(task_id=import_task.task_id, status='FAILED',
                                               error_code=err.failure_code)
                else:
                    con.put_import_task_status(task_id=import_task.task_id, status='FAILED',
                                               error_code=GENERAL_FAILURE)
                logger.error('Failed to process task for unknown reason: %s' % err)
        except Exception, err:
            tb = traceback.format_exc()
            logger.error(str(tb) + '\nFailed to query imaging service: %s' % err)
        # countdown sleep; aborts early when the loop is stopped
        query_period = config.QUERY_PERIOD_SEC
        while query_period > 0 and self.__status == WorkerLoop.RUNNING:
            time.sleep(1)
            query_period -= 1
def __init__(self, task_id, task_type):
    """Record the task identity and open an imaging-service connection."""
    self.task_id = task_id
    self.task_type = task_type
    clc = config.get_clc_host()
    self.is_conn = worker.ws.connect_imaging_worker(
        host_name=clc,
        aws_access_key_id=config.get_access_key_id(),
        aws_secret_access_key=config.get_secret_access_key(),
        security_token=config.get_security_token())
def download_server_certificate(cert_arn, task_id=None):
    """Fetch the server certificate identified by cert_arn from EUARE.

    Authenticates the request with the instance's floppy credentials
    (instance keypair plus delegated IAM key and token).
    """
    creds = FloppyCredential(task_id=task_id)
    conn = ws.connect_euare(aws_access_key_id=config.get_access_key_id(),
                            aws_secret_access_key=config.get_secret_access_key(),
                            security_token=config.get_security_token())
    return conn.download_server_certificate(creds.get_instance_pub_key(),
                                            creds.get_instance_pk(),
                                            creds.get_iam_pub_key(),
                                            creds.get_iam_token(),
                                            cert_arn)
def __init__(self, task_id, task_type):
    """Initialize task bookkeeping and an imaging-service connection."""
    self.task_id = task_id
    self.task_type = task_type
    access_key = config.get_access_key_id()
    secret_key = config.get_secret_access_key()
    token = config.get_security_token()
    self.is_conn = ws.connect_imaging_worker(aws_access_key_id=access_key,
                                             aws_secret_access_key=secret_key,
                                             security_token=token)
    # runtime state updated while the task executes
    self.should_run = True
    self.bytes_transferred = None
    self.volume_id = None
    self.task_thread = None
def list_images(args): ec2_conn = boto.connect_ec2(config.get_access_key_id(), config.get_secret_access_key()) print "AMI ID".rjust(16), "Name".rjust(16), "Description".rjust(42) print (16*"-").rjust(16), (16*"-").rjust(16), (42*"-").rjust(42) for reservation in ec2_conn.get_all_instances(): for image in ec2_conn.get_all_images(owners=[reservation.owner_id,]): print str(image.id).rjust(16), str(image.name).rjust(16), str(image.description).rjust(42)
def list_ips(args): ec2_conn = boto.connect_ec2(config.get_access_key_id(), config.get_secret_access_key()) print "IP".rjust(16), "Domain".rjust(12), "Instance".rjust(12) print (16*"-").rjust(12), (12*"-").rjust(12), (12*"-").rjust(12) for address in ec2_conn.get_all_addresses(): if address.instance_id == '': print str(address.public_ip).rjust(16), str(address.domain).rjust(12), "N/A".rjust(12) else: print str(address.public_ip).rjust(16), str(address.domain).rjust(12), str(address.instance_id).rjust(12)
def stop_instances(args): ec2_conn = boto.connect_ec2(config.get_access_key_id(), config.get_secret_access_key()) for instance_id in args.instance_ids: try: ec2_conn.get_all_instances(instance_ids=[instance_id,]) except: print "Cannot find instance: '%s'!" % instance_id continue if len(ec2_conn.stop_instances(instance_ids=[instance_id,])) == 1: print "Stopping instance: '%s'..." % instance_id
def disassociate_ip(args): ec2_conn = boto.connect_ec2(config.get_access_key_id(), config.get_secret_access_key()) try: ec2_conn.get_all_addresses(addresses=[args.ip_address,]) except: print "Cannot find allocated IP address: '%s'!" % args.ip_address sys.exit(1) if ec2_conn.disassociate_address(args.ip_address): print "Disassociated IP address '%s'." % args.ip_address else: print "Failed to disassociate IP address '%s'!" % args.ip_address
def list_instances(args): ec2_conn = boto.connect_ec2(config.get_access_key_id(), config.get_secret_access_key()) print "Name".rjust(12), "ID".rjust(12), "Type".rjust(12), print "Status".rjust(12) print (12*"-").rjust(12), (12*"-").rjust(12), (12*"-").rjust(12), print (12*"-").rjust(12) reservations = ec2_conn.get_all_instances() for reservation in reservations: for instance in reservation.instances: print str(instance.tags['Name']).rjust(12), str(instance.id).rjust(12), str(instance.instance_type).rjust(12), print str(instance.state).rjust(12)
def __init__(self, task_id, manifest_url=None, volume_id=None):
    """Import-volume task; eagerly resolves the target volume when an id is given.

    Raises ValueError when volume_id is set but the volume cannot be found.
    """
    ImagingTask.__init__(self, task_id, "import_volume")
    self.manifest_url = manifest_url
    self.volume_id = volume_id
    self.volume = None
    self.ec2_conn = ws.connect_ec2(
        aws_access_key_id=config.get_access_key_id(),
        aws_secret_access_key=config.get_secret_access_key(),
        security_token=config.get_security_token())
    if self.volume_id:
        # NOTE(review): 'verbose' is passed alongside the id - presumably a
        # eucalyptus-specific marker for a verbose describe; confirm
        self.volume = self.ec2_conn.conn.get_all_volumes([self.volume_id, 'verbose'])
        if not self.volume:
            raise ValueError('Request for volume:"{0}" returned:"{1}"'
                             .format(volume_id, str(self.volume)))
        self.volume = self.volume[0]
    self.volume_attached_dev = None
    self.instance_id = config.get_worker_id()
    self.process = None
def associate_ip(args): ec2_conn = boto.connect_ec2(config.get_access_key_id(), config.get_secret_access_key()) try: ec2_conn.get_all_addresses(addresses=[args.ip_address,]) except: print "Cannot find allocated IP address: '%s'!" % args.ip_address sys.exit(1) try: ec2_conn.get_all_instances(instance_ids=[args.instance_id,]) except: print "Cannot find instance: '%s'!" % args.instance_id sys.exit(1) if ec2_conn.associate_address(instance_id=args.instance_id, public_ip=args.ip_address) == 1: print "Associated IP address '%s' with instance '%s'." % (args.ip_address, args.instance_id) else: print "Failed to associate IP address '%s' with instance '%s'!" % (args.ip_address, args.instance_id)
def start(self): self.__status = ServiceLoop.RUNNING while self.__status == ServiceLoop.RUNNING: service.log.info('querying CLC for new imaging task') try: access_key_id = config.get_access_key_id() secret_access_key = config.get_secret_access_key() security_token = config.get_security_token() con = EucaISConnection(host_name=self.__euca_host, aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, security_token=security_token) task = con.get_import_task() if task != None: # task processing service.log.info('processing import task %s' % task.task_id) except Exception, err: service.log.error('failed to query the imaging service: %s' % err) start_time = time.time() while time.time() - start_time < config.QUERY_PERIOD_SEC and self.__status == serviceLoop.RUNNING: service.log.debug('sleeping') time.sleep(10)
def start(self): if config.ENABLE_CLOUD_WATCH: hl = mon.LogListener(stat_instance) hl.start() self.__status = ServoLoop.RUNNING proxy_mgr = ProxyManager() hc_mgr = HealthCheckManager() while self.__status == ServoLoop.RUNNING: # call elb-describe-services lbs = None try: access_key_id = config.get_access_key_id() secret_access_key = config.get_secret_access_key() security_token = config.get_security_token() con = servo.ws.connect_elb(host_name=self.__elb_host, aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, security_token=security_token) lbs = con.get_servo_load_balancers(self.__instance_id) except Exception, err: servo.log.error('failed to query the elb service: %s' % err) if lbs is None: servo.log.warning('failed to find the loadbalancers') else: # prepare Listener lists # call update_listeners received=[] try: for lb in lbs: if lb.health_check is not None: interval = lb.health_check.interval healthy_threshold = lb.health_check.healthy_threshold unhealthy_threshold = lb.health_check.unhealthy_threshold timeout = lb.health_check.timeout target = lb.health_check.target if interval is None or healthy_threshold is None or unhealthy_threshold is None or timeout is None or target is None: pass else: hc = HealthCheckConfig(interval, healthy_threshold, unhealthy_threshold, timeout, target) if health_check.health_check_config is None or health_check.health_check_config != hc: health_check.health_check_config = hc servo.log.info('new health check config: %s' % hc) hc_mgr.reset() instances = [] if lb.instances is not None and isinstance(lb.instances, Iterable): for inst in lb.instances: instances.append(str(inst.id)) hc_mgr.set_instances(instances) in_service_instances = [] for inst_id in instances: if hc_mgr.health_status(inst_id) is 'InService': in_service_instances.append(inst_id) if lb.listeners is not None and isinstance(lb.listeners, Iterable) : for listener in lb.listeners: protocol=listener.protocol port=listener.load_balancer_port 
instance_port=listener.instance_port instance_protocol=None # TODO: boto doesn't have the field ssl_cert=None # TODO: not supported cookie_expiration = ServoLoop.get_cookie_expiration(listener) cookie_name = ServoLoop.get_cookie_name(listener) l = Listener(protocol=protocol, port=port, instance_port=instance_port, instance_protocol=instance_protocol, ssl_cert=ssl_cert, loadbalancer=lb.name, cookie_name=cookie_name, cookie_expiration=cookie_expiration) for inst_id in in_service_instances: hostname = servo.hostname_cache.get_hostname(inst_id) if hostname is not None: l.add_instance(hostname) received.append(l) except Exception, err: servo.log.error('failed to receive listeners: %s' % err) try: proxy_mgr.update_listeners(received) servo.log.debug('listener updated') except Exception, err: servo.log.error('failed to update proxy listeners: %s' % err)
def start(self): log_listener = None access_logger = AccessLogger() access_logger.start() if config.ENABLE_CLOUD_WATCH: log_listener = mon.LogListener(stat_instance) log_listener.access_logger = access_logger log_listener.start() self.__status = ServoLoop.RUNNING proxy_mgr = ProxyManager() hc_mgr = HealthCheckManager() while self.__status == ServoLoop.RUNNING: # call elb-describe-services lbs = None try: access_key_id = config.get_access_key_id() secret_access_key = config.get_secret_access_key() security_token = config.get_security_token() con = servo.ws.connect_elb(aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, security_token=security_token) lbs = con.get_servo_load_balancers(self.__instance_id) except Exception, err: servo.log.error('failed to query the elb service: %s' % err) if lbs is None: servo.log.warning('failed to find the loadbalancers') else: # prepare Listener lists # call update_listeners received=[] try: conn_idle_timeout = config.CONNECTION_IDLE_TIMEOUT for lb in lbs: try: if log_listener: # assume there is only one loadbalancer per servo log_listener.set_loadbalancer(lb.name) attr = lb.attributes conn_idle_timeout = attr.connecting_settings.idle_timeout if int(conn_idle_timeout) < 1: conn_idle_timeout = 1 elif int(conn_idle_timeout) > 3600: conn_idle_timeout = 3600 access_log_setting = attr.access_log access_logger.loadbalancer = lb.name if access_log_setting.s3_bucket_name != None: access_logger.bucket_name = access_log_setting.s3_bucket_name servo.log.debug('access log bucket name: %s' % access_logger.bucket_name) if access_log_setting.s3_bucket_prefix != None: access_logger.bucket_prefix = access_log_setting.s3_bucket_prefix servo.log.debug('access log bucket prefix: %s' % access_logger.bucket_prefix) if access_log_setting.emit_interval != None: access_logger.emit_interval = int(access_log_setting.emit_interval) servo.log.debug('access log emit interval: %d' % access_logger.emit_interval) if access_log_setting.enabled 
!= None: access_logger.enabled = access_log_setting.enabled servo.log.debug('access log enabled?: %s' % access_logger.enabled) except Exception, err: servo.log.warning('failed to get connection idle timeout: %s' % str(err)) if lb.health_check is not None: interval = lb.health_check.interval healthy_threshold = lb.health_check.healthy_threshold unhealthy_threshold = lb.health_check.unhealthy_threshold timeout = lb.health_check.timeout target = lb.health_check.target if interval is None or healthy_threshold is None or unhealthy_threshold is None or timeout is None or target is None: pass else: hc = HealthCheckConfig(interval, healthy_threshold, unhealthy_threshold, timeout, target) if health_check.health_check_config is None or health_check.health_check_config != hc: health_check.health_check_config = hc servo.log.info('new health check config: %s' % hc) hc_mgr.reset() instances = [] if lb.instances is not None and isinstance(lb.instances, Iterable): instances.extend(lb.instances) instance_ids = [inst.instance_id for inst in instances] hc_mgr.set_instances(instances) in_service_instances = [] for inst_id in instance_ids: if hc_mgr.health_status(inst_id) is 'InService': in_service_instances.append(inst_id) if lb.listeners is not None and isinstance(lb.listeners, Iterable) : for listener in lb.listeners: protocol=listener.protocol port=listener.load_balancer_port instance_port=listener.instance_port instance_protocol=listener.instance_protocol ssl_cert=str(listener.ssl_certificate_id) policies = ServoLoop.get_listener_policies(lb, listener.policy_names) policies.extend(ServoLoop.get_backend_policies(lb, instance_port)) l = Listener(protocol=protocol, port=port, instance_port=instance_port, instance_protocol=instance_protocol, ssl_cert=ssl_cert, loadbalancer=lb.name, policies=policies, connection_idle_timeout=conn_idle_timeout) for inst_id in in_service_instances: hostname = servo.hostname_cache.get_hostname(inst_id) if hostname is not None: l.add_instance(hostname) 
received.append(l) except Exception, err: servo.log.error('failed to receive listeners: %s' % err) try: proxy_mgr.update_listeners(received) servo.log.debug('listener updated') except Exception, err: servo.log.error('failed to update proxy listeners: %s' % err)
def start(self): log_listener = None access_logger = AccessLogger() access_logger.start() if config.ENABLE_CLOUD_WATCH: log_listener = mon.LogListener(stat_instance) log_listener.access_logger = access_logger log_listener.start() self.__status = ServoLoop.RUNNING proxy_mgr = ProxyManager() hc_mgr = HealthCheckManager() while self.__status == ServoLoop.RUNNING: # call elb-describe-services lbs = None try: access_key_id = config.get_access_key_id() secret_access_key = config.get_secret_access_key() security_token = config.get_security_token() con = servo.ws.connect_elb( aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, security_token=security_token) lbs = con.get_servo_load_balancers(self.__instance_id) except Exception, err: servo.log.error('failed to query the elb service: %s' % err) if lbs is None: servo.log.warning('failed to find the loadbalancers') else: # prepare Listener lists # call update_listeners received = [] try: conn_idle_timeout = config.CONNECTION_IDLE_TIMEOUT for lb in lbs: try: if log_listener: # assume there is only one loadbalancer per servo log_listener.set_loadbalancer(lb.name) attr = lb.attributes conn_idle_timeout = attr.connecting_settings.idle_timeout if int(conn_idle_timeout) < 1: conn_idle_timeout = 1 elif int(conn_idle_timeout) > 3600: conn_idle_timeout = 3600 access_log_setting = attr.access_log access_logger.loadbalancer = lb.name if access_log_setting.s3_bucket_name != None: access_logger.bucket_name = access_log_setting.s3_bucket_name servo.log.debug( 'access log bucket name: %s' % urllib2.quote(access_logger.bucket_name)) if access_log_setting.s3_bucket_prefix != None: access_logger.bucket_prefix = access_log_setting.s3_bucket_prefix servo.log.debug( 'access log bucket prefix: %s' % urllib2.quote(access_logger.bucket_prefix)) if access_log_setting.emit_interval != None: access_logger.emit_interval = int( access_log_setting.emit_interval) servo.log.debug( 'access log emit interval: %d' % 
access_logger.emit_interval) if access_log_setting.enabled != None: access_logger.enabled = access_log_setting.enabled servo.log.debug('access log enabled?: %s' % access_logger.enabled) except Exception, err: servo.log.warning( 'failed to get connection idle timeout: %s' % str(err)) if lb.health_check is not None: interval = lb.health_check.interval healthy_threshold = lb.health_check.healthy_threshold unhealthy_threshold = lb.health_check.unhealthy_threshold timeout = lb.health_check.timeout target = lb.health_check.target if interval is None or healthy_threshold is None or unhealthy_threshold is None or timeout is None or target is None: pass else: hc = HealthCheckConfig(interval, healthy_threshold, unhealthy_threshold, timeout, target) if health_check.health_check_config is None or health_check.health_check_config != hc: health_check.health_check_config = hc servo.log.info( 'new health check config: %s' % hc) hc_mgr.reset() instances = [] if lb.instances is not None and isinstance( lb.instances, Iterable): instances.extend(lb.instances) instance_ids = [inst.instance_id for inst in instances] hc_mgr.set_instances(instances) in_service_instances = [] for inst_id in instance_ids: if hc_mgr.health_status(inst_id) is 'InService': in_service_instances.append(inst_id) if lb.listeners is not None and isinstance( lb.listeners, Iterable): for listener in lb.listeners: protocol = listener.protocol port = listener.load_balancer_port instance_port = listener.instance_port instance_protocol = listener.instance_protocol ssl_cert = str(listener.ssl_certificate_id) policies = ServoLoop.get_listener_policies( lb, listener.policy_names) policies.extend( ServoLoop.get_backend_policies( lb, instance_port)) l = Listener( protocol=protocol, port=port, instance_port=instance_port, instance_protocol=instance_protocol, ssl_cert=ssl_cert, loadbalancer=lb.name, policies=policies, connection_idle_timeout=conn_idle_timeout) for inst_id in in_service_instances: hostname = 
servo.hostname_cache.get_hostname( inst_id) if hostname is not None: l.add_instance(hostname) received.append(l) except Exception, err: servo.log.error('failed to receive listeners: %s' % err) try: proxy_mgr.update_listeners(received) servo.log.debug('listener updated') except Exception, err: servo.log.error('failed to update proxy listeners: %s' % err)
def __init__(self, task_id, manifest_url=None, volume_id=None):
    """Import-volume task bound to an EC2 connection against the CLC."""
    ImagingTask.__init__(self, task_id, "import_volume")
    self.manifest_url = manifest_url
    self.volume_id = volume_id
    clc_host = config.get_clc_host()
    self.ec2_conn = worker.ws.connect_ec2(
        host_name=clc_host,
        aws_access_key_id=config.get_access_key_id(),
        aws_secret_access_key=config.get_secret_access_key(),
        security_token=config.get_security_token())
def start(self): if config.ENABLE_CLOUD_WATCH: hl = mon.LogListener(stat_instance) hl.start() self.__status = ServoLoop.RUNNING proxy_mgr = ProxyManager() hc_mgr = HealthCheckManager() while self.__status == ServoLoop.RUNNING: # call elb-describe-services lbs = None try: access_key_id = config.get_access_key_id() secret_access_key = config.get_secret_access_key() security_token = config.get_security_token() con = servo.ws.connect_elb( host_name=self.__elb_host, aws_access_key_id=access_key_id, aws_secret_access_key=secret_access_key, security_token=security_token) lbs = con.get_servo_load_balancers(self.__instance_id) except Exception, err: servo.log.error('failed to query the elb service: %s' % err) if lbs is None: servo.log.warning('failed to find the loadbalancers') else: # prepare Listener lists # call update_listeners received = [] try: for lb in lbs: if lb.health_check is not None: interval = lb.health_check.interval healthy_threshold = lb.health_check.healthy_threshold unhealthy_threshold = lb.health_check.unhealthy_threshold timeout = lb.health_check.timeout target = lb.health_check.target if interval is None or healthy_threshold is None or unhealthy_threshold is None or timeout is None or target is None: pass else: hc = HealthCheckConfig(interval, healthy_threshold, unhealthy_threshold, timeout, target) if health_check.health_check_config is None or health_check.health_check_config != hc: health_check.health_check_config = hc servo.log.info( 'new health check config: %s' % hc) hc_mgr.reset() instances = [] if lb.instances is not None and isinstance( lb.instances, Iterable): for inst in lb.instances: instances.append(str(inst.id)) hc_mgr.set_instances(instances) in_service_instances = [] for inst_id in instances: if hc_mgr.health_status(inst_id) is 'InService': in_service_instances.append(inst_id) if lb.listeners is not None and isinstance( lb.listeners, Iterable): for listener in lb.listeners: protocol = listener.protocol port = listener.load_balancer_port 
instance_port = listener.instance_port instance_protocol = None # TODO: boto doesn't have the field ssl_cert = None # TODO: not supported cookie_expiration = ServoLoop.get_cookie_expiration( listener) cookie_name = ServoLoop.get_cookie_name( listener) l = Listener( protocol=protocol, port=port, instance_port=instance_port, instance_protocol=instance_protocol, ssl_cert=ssl_cert, loadbalancer=lb.name, cookie_name=cookie_name, cookie_expiration=cookie_expiration) for inst_id in in_service_instances: hostname = servo.hostname_cache.get_hostname( inst_id) if hostname is not None: l.add_instance(hostname) received.append(l) except Exception, err: servo.log.error('failed to receive listeners: %s' % err) try: proxy_mgr.update_listeners(received) servo.log.debug('listener updated') except Exception, err: servo.log.error('failed to update proxy listeners: %s' % err)