def get_driver(self, core_identity):
    """Build a fresh esh driver for `core_identity` (no caching).

    NOTE(review): the original body carried an unreachable "Cached model"
    branch (driver_map lookup/insert/logging) after the first `return`;
    it is removed here with no change in behavior. See the cached
    get_driver variant elsewhere in this file for the memoizing version.
    """
    from api import get_esh_driver
    # No Cache model
    driver = get_esh_driver(core_identity)
    return driver
def run():
    """Smoke test: launch one instance from every runnable machine.

    For each provider in PROVIDERS, builds a driver for RUNNER_USERNAME's
    identity, filters out kernel ('eki') and ramdisk ('eri') images, then
    launches an instance per remaining machine with a 30s pause between
    launches. Side effects only; failures are logged and the loop continues.
    """
    for provider_name in PROVIDERS:
        driver = get_esh_driver(
            Identity.objects.get(provider__location=provider_name,
                                 created_by__username=RUNNER_USERNAME),
            RUNNER_USERNAME)
        # Kernel/ramdisk images are not directly runnable -- skip them
        runnable_machines = [
            m for m in driver.list_machines()
            if 'eki' not in m.name and 'eri' not in m.name
        ]
        for idx, m in enumerate(runnable_machines):
            try:
                (token, esh_instance) = launch_esh_instance(
                    driver, {
                        'machine_alias': m.alias,
                        'size_alias': '2',
                        'name': 'Testing Deploy and Networking %s' %
                        ((idx + 1), )
                    })
                logger.info("Launched Image %s - %s.. Sleep 30sec" %
                            (m.name, m.alias))
                time.sleep(30)
                #Test the SSH port
                #Test that your key works
                #Test the VNC port
                #etc.
            except Exception as e:
                logger.exception(e)
            finally:
                #driver.destroy_instance(esh_instance.id)
                pass
def over_allocation_test(identity, esh_instances):
    """Suspend a user's active instances when they exceed their allocation.

    Returns False when the user is not over-allocated or when running in
    DEBUG mode; otherwise suspends each active instance and converts the
    refreshed state back into core models.
    """
    from api import get_esh_driver
    from core.models.instance import convert_esh_instance
    from atmosphere import settings
    # NOTE(review): relativedelta(day=1, months=1) pins the day-of-month
    # to 1 (singular 'day' is absolute); it does NOT add one day. Confirm
    # this window is intentional and not a typo for days=1.
    over_allocated, time_diff = check_over_allocation(
        identity.created_by.username, identity.id,
        time_period=relativedelta(day=1, months=1))
    logger.info("Overallocation Test: %s - %s - %s\tInstances:%s" %
                (identity.created_by.username, over_allocated, time_diff,
                 esh_instances))
    if not over_allocated:
        # Nothing changed, bail.
        return False
    if settings.DEBUG:
        logger.info('Do not enforce allocations in DEBUG mode')
        return False
    driver = get_esh_driver(identity)
    running_instances = []
    for instance in esh_instances:
        #Suspend active instances, update the task in the DB
        try:
            if driver._is_active_instance(instance):
                driver.suspend_instance(instance)
        except Exception, e:
            # An instance that is already suspended is not an error
            if 'in vm_state suspended' not in e.message:
                raise
        updated_esh = driver.get_instance(instance.id)
        updated_core = convert_esh_instance(driver, updated_esh,
                                            identity.provider.id,
                                            identity.id,
                                            identity.created_by)
        running_instances.append(updated_core)
def set_provider_quota(identity_id):
    """Push the locally-stored quota for an identity out to the provider.

    Only acts on OpenStack providers with credentials set. Reads the
    IdentityMembership quota and applies it via the admin driver's
    ex_update_quota_for_user. Returns True when processing completes;
    returns None (implicit) when credentials are missing.
    """
    identity = Identity.objects.get(id=identity_id)
    if not identity.credential_set.all():
        #Can't update quota if credentials arent set
        return
    if identity.provider.get_type_name().lower() == 'openstack':
        driver = get_esh_driver(identity)
        username = identity.created_by.username
        user_id = driver._connection._get_user_id()
        tenant_id = driver._connection._get_tenant_id()
        membership = IdentityMembership.objects.get(identity__id=identity_id,
                                                    member__name=username)
        user_quota = membership.quota
        if user_quota:
            # NOTE(review): assumes quota.memory is stored in GB and the
            # provider expects MB -- confirm the *1024 conversion.
            values = {'cores': user_quota.cpu,
                      'ram': user_quota.memory * 1024}
            logger.info("Updating quota for %s to %s" % (username, values))
            ad = AccountDriver(identity.provider)
            admin_driver = ad.admin_driver
            admin_driver._connection.ex_update_quota_for_user(tenant_id,
                                                              user_id,
                                                              values)
    return True
def repair_instance(accounts, admin, instance, provider, new_fixed_ip=None):
    """Re-attach a working network port to a broken OpenStack instance.

    Resolves the owning tenant and core identity, ensures the tenant
    network/subnet exist (creating them when absent), chooses the next
    free fixed IP unless one is supplied, then creates a port and
    attaches it to the instance through the user's own driver.
    """
    tenant_id = instance.extra['tenantId']
    tenant = accounts.user_manager.get_project_by_id(tenant_id)
    tenant_name = tenant.name
    identity = Identity.objects.get(
        created_by__username=tenant_name, provider__id=provider.id)
    network_init(identity)
    network_resources = accounts.network_manager.find_tenant_resources(
        tenant_id)
    network = network_resources['networks']
    if not network:
        network, subnet = accounts.create_network(identity)
    else:
        network = network[0]
        subnet = network_resources['subnets'][0]
    #Ensure the network,subnet exist
    if not new_fixed_ip:
        new_fixed_ip = get_next_ip(network_resources['ports'])
    user_driver = get_esh_driver(identity)
    port = accounts.network_manager.create_port(instance.id, network['id'],
                                                subnet['id'], new_fixed_ip,
                                                tenant_id)
    print "Created new port: %s" % port
    attached_intf = user_driver._connection.ex_attach_interface(
        instance.id, port['id'])
    print "Attached port to driver: %s" % attached_intf
def suspend_all_instances():
    """Suspend every active instance on provider 4, grouped by creator.

    Instances lacking 'creator' metadata cannot be mapped to a user and
    are only reported. Sleeps 2 minutes between suspends to avoid
    overloading the provider.
    """
    admin_driver = get_admin_driver(Provider.objects.get(id=4))
    all_insts = admin_driver.meta(admin_driver=admin_driver).all_instances()
    users = []
    bad_instances = []
    for i in all_insts:
        if "creator" in i.extra["metadata"]:
            users.append(i.extra["metadata"]["creator"])
        else:
            bad_instances.append(i)
    if bad_instances:
        print "WARN: These instances are MISSING because they have incomplete metadata:\n%s" % (bad_instances,)
    # De-duplicate creators while producing a deterministic (sorted) order
    all_users = sorted(list(OrderedDict.fromkeys(users)))
    for count, user in enumerate(all_users):
        ident = Identity.objects.filter(created_by__username=user,
                                        provider__id=4)
        if len(ident) > 1:
            print "WARN: User %s has >1 identity!" % user
        # NOTE(review): raises IndexError when the queryset is empty --
        # assumes every creator has an identity on provider 4.
        ident = ident[0]
        driver = get_esh_driver(ident)
        instances = driver.list_instances()
        print "Found %s instances for %s" % (len(instances), user)
        for inst in instances:
            if inst._node.extra["status"] == "active":
                print "Attempt to suspend Instance %s in state %s" % (inst.id, inst._node.extra["status"])
                try:
                    suspend_instance(driver, inst, ident.provider.id,
                                     ident.id, ident.created_by)
                    print "Suspended Instance %s.. Sleep 2min" % (inst.id,)
                    time.sleep(2 * 60)
                except Exception, err:
                    print "WARN: Could not suspend instance %s. Error: %s" % (inst.id, err)
def _get_admin_driver():
    """Return an esh driver for the Eucalyptus admin identity.

    The admin identity is located by matching a stored credential value
    against secrets.EUCA_ADMIN_SECRET.
    """
    from core.models import Credential
    from api import get_esh_driver
    admin_credential = Credential.objects.get(value=secrets.EUCA_ADMIN_SECRET)
    admin_identity = admin_credential.identity
    return get_esh_driver(admin_identity, admin_identity.created_by)
def enforce_allocation(identity, user, time_used): from core.models.instance import convert_esh_instance #TODO: When user->group is no longer true, #TODO: Is 'group' allowed to use this identity? #group = Group.objects.get(name=user.username) #im = identity.identitymembership_set.get(member=group) allocation = get_allocation(user.username, identity.id) if not allocation: return False max_time_allowed = timedelta(minutes=allocation.threshold) time_diff = max_time_allowed - time_used over_allocated = time_diff.total_seconds() <= 0 if not over_allocated: return False if not settings.ENFORCING: logger.info('Settings dictate allocations are NOT enforced') return False logger.info("%s is OVER their allowed quota by %s" % (user.username, time_diff)) driver = get_esh_driver(identity) esh_instances = driver.list_instances() for instance in esh_instances: try: if driver._is_active_instance(instance): #Suspend active instances, update the task in the DB driver.suspend_instance(instance) #Give it a few seconds to suspend time.sleep(3) updated_esh = driver.get_instance(instance.id) updated_core = convert_esh_instance(driver, updated_esh, identity.provider.id, identity.id, user) except Exception, e: if 'in vm_state suspended' not in e.message: raise
def run():
    """Smoke test: launch one instance per runnable machine per provider.

    Builds a driver for RUNNER_USERNAME on each provider in PROVIDERS,
    skips kernel ('eki') and ramdisk ('eri') images, launches the rest
    one at a time with a 30s pause. Failures are logged and skipped.
    """
    for provider_name in PROVIDERS:
        driver = get_esh_driver(
            Identity.objects.get(
                provider__location=provider_name,
                created_by__username=RUNNER_USERNAME),
            RUNNER_USERNAME)
        # Kernel/ramdisk images are not directly runnable
        runnable_machines = [m for m in driver.list_machines()
                             if 'eki' not in m.name and 'eri' not in m.name]
        for idx, m in enumerate(runnable_machines):
            try:
                (token, esh_instance) = launch_esh_instance(driver, {
                    'machine_alias': m.alias,
                    'size_alias': '2',
                    'name': 'Testing Deploy and Networking %s' %
                    ((idx + 1), )})
                logger.info("Launched Image %s - %s.. Sleep 30sec" %
                            (m.name, m.alias))
                time.sleep(30)
                #Test the SSH port
                #Test that your key works
                #Test the VNC port
                #etc.
            except Exception as e:
                logger.exception(e)
            finally:
                #driver.destroy_instance(esh_instance.id)
                pass
def get_driver(self, core_identity):
    """Build a fresh esh driver for `core_identity` (caching disabled).

    NOTE(review): the original body carried an unreachable "Cached model"
    branch (driver_map lookup/insert/logging) after the first `return`;
    it is removed here with no change in behavior.
    """
    from api import get_esh_driver
    #No Cache model
    driver = get_esh_driver(core_identity)
    return driver
def monitor_instances_for_user(provider, username, instances):
    """Sync one user's esh instances into core and run allocation checks.

    Converts each esh instance to a core model, runs the over-allocation
    test, then refreshes instance status history. All failures are logged
    rather than raised -- this runs inside a provider-wide monitor loop.
    """
    from core.models.instance import convert_esh_instance
    from api import get_esh_driver
    try:
        user = AtmosphereUser.objects.get(username=username)
        #TODO: When user->group is no longer true,
        # we will need to modify this..
        group = Group.objects.get(name=user.username)
        ident = user.identity_set.get(provider=provider)
        im = ident.identitymembership_set.get(member=group)
        #NOTE: Couples with API, probably want this in
        # service/driver
        driver = get_esh_driver(ident)
        core_instances = []
        #NOTE: We are converting them so they will
        # be picked up as core models for the 'over_allocation_test'
        for instance in instances:
            c_inst = convert_esh_instance(driver, instance,
                                          ident.provider.id,
                                          ident.id, ident.created_by)
            core_instances.append(c_inst)
        over_allocation = over_allocation_test(im.identity, instances)
        core_instances = user.instance_set.filter(
            provider_machine__provider=provider, end_date=None)
        core_instances_ident = ident.instance_set.filter(end_date=None)
        update_instances(driver, im.identity, instances, core_instances)
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallowed
        # SystemExit/KeyboardInterrupt; catch Exception instead.
        logger.exception("Unable to monitor User:%s on Provider:%s" %
                         (username, provider))
def suspend_all_instances():
    """Suspend every active instance on provider 4, user by user.

    Instances without 'creator' metadata cannot be mapped to a user and
    are only reported. Sleeps 2 minutes between suspends to throttle
    load on the provider.
    """
    admin_driver = get_admin_driver(Provider.objects.get(id=4))
    all_insts = admin_driver.meta(admin_driver=admin_driver).all_instances()
    users = []
    bad_instances = []
    for i in all_insts:
        if 'creator' in i.extra['metadata']:
            users.append(i.extra['metadata']['creator'])
        else:
            bad_instances.append(i)
    if bad_instances:
        print "WARN: These instances are MISSING because they have incomplete metadata:\n%s" % (
            bad_instances, )
    # De-duplicate creators while keeping a deterministic (sorted) order
    all_users = sorted(list(OrderedDict.fromkeys(users)))
    for count, user in enumerate(all_users):
        ident = Identity.objects.filter(created_by__username=user,
                                        provider__id=4)
        if len(ident) > 1:
            print "WARN: User %s has >1 identity!" % user
        # NOTE(review): raises IndexError when the queryset is empty --
        # assumes every creator has an identity on provider 4.
        ident = ident[0]
        driver = get_esh_driver(ident)
        instances = driver.list_instances()
        print "Found %s instances for %s" % (len(instances), user)
        for inst in instances:
            if inst._node.extra['status'] == 'active':
                print "Attempt to suspend Instance %s in state %s" % (
                    inst.id, inst._node.extra['status'])
                try:
                    suspend_instance(driver, inst, ident.provider.id,
                                     ident.id, ident.created_by)
                    print "Suspended Instance %s.. Sleep 2min" % (inst.id, )
                    time.sleep(2 * 60)
                except Exception, err:
                    print "WARN: Could not suspend instance %s. Error: %s" % (
                        inst.id, err)
def monitor_instances_for_user(provider, username, instances):
    """Sync one user's esh instances into core and run allocation checks.

    Converts each esh instance into a core model, runs the
    over-allocation test, then refreshes instance status history.
    Failures are logged, never raised -- this runs in a monitor loop.
    """
    from core.models.instance import convert_esh_instance
    from api import get_esh_driver
    try:
        user = AtmosphereUser.objects.get(username=username)
        #TODO: When user->group is no longer true,
        # we will need to modify this..
        group = Group.objects.get(name=user.username)
        ident = user.identity_set.get(provider=provider)
        im = ident.identitymembership_set.get(member=group)
        #NOTE: Couples with API, probably want this in
        # service/driver
        driver = get_esh_driver(ident)
        core_instances = []
        #NOTE: We are converting them so they will
        # be picked up as core models for the 'over_allocation_test'
        for instance in instances:
            c_inst = convert_esh_instance(
                driver, instance, ident.provider.id,
                ident.id, ident.created_by)
            core_instances.append(c_inst)
        over_allocation = over_allocation_test(im.identity, instances)
        core_instances = user.instance_set.filter(
            provider_machine__provider=provider, end_date=None)
        core_instances_ident = ident.instance_set.filter(end_date=None)
        update_instances(driver, im.identity, instances, core_instances)
    except Exception:
        # BUG FIX: was a bare 'except:', which also swallowed
        # SystemExit/KeyboardInterrupt; catch Exception instead.
        logger.exception("Unable to monitor User:%s on Provider:%s" %
                         (username, provider))
def main():
    """CLI entry point: suspend (default) or resume one instance.

    Exit codes: 0 success, 1 identity not found, 2 instance not found,
    3 resume failed, 4 suspend failed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", required=True,
                        help="Username that instance belongs to.")
    parser.add_argument("--provider", type=int, required=True,
                        help="Provider instance is running in.")
    parser.add_argument("--instance", required=True,
                        help="OpenStack instance id.")
    parser.add_argument("--resume", action="store_true",
                        help="Resume the instance instead of suspending it.")
    args = parser.parse_args()
    user = args.user
    instance = args.instance
    provider = args.provider
    try:
        ident = Identity.objects.get(provider__id=provider,
                                     created_by__username=user)
    except Exception as e:
        print("Identity could not be found for user: %s on provider: %s"
              % (user, provider))
        print_exc()
        return 1
    driver = get_esh_driver(ident)
    try:
        inst = driver.get_instance(instance)
    except Exception as e:
        print("Instance %s was not found." % (instance))
        print_exc()
        return 2
    if args.resume:
        try:
            resume_instance(driver, inst, ident.provider.id, ident.id,
                            ident.created_by)
        except Exception as e:
            print("Resume failed.")
            print(
                "Calling service.instance.resume_instance failed for instance %s."
                % (instance))
            print_exc()
            return 3
        print("Resumed %s." % (instance))
    else:
        try:
            suspend_instance(driver, inst, ident.provider.id, ident.id,
                             ident.created_by)
        except Exception as e:
            print("Suspend failed.")
            print(
                "Calling service.instance.suspend_instance failed for instance %s."
                % (instance))
            print_exc()
            return 4
        print("Suspended %s." % (instance))
    return 0
def clear_empty_ips():
    """Periodic task: reclaim floating IPs and tear down idle networks.

    For every active OpenStack identity: clean unused floating IPs,
    strip IPs from inactive instances, and -- when no active instances
    remain -- dismantle the tenant network. The security group and full
    network removal are skipped (remove_network=False) while instances
    are merely suspended. Per-identity failures are logged and skipped.
    """
    logger.debug("clear_empty_ips task started at %s." % datetime.now())
    from service import instance as instance_service
    from rtwo.driver import OSDriver
    from api import get_esh_driver
    from service.accounts.openstack import AccountDriver as OSAccountDriver
    identities = Identity.objects.filter(
        provider__type__name__iexact="openstack",
        provider__active=True)
    # NOTE(review): this key function returns an attrgetter *object* built
    # from the identity's attribute values rather than applying one --
    # confirm the intended sort key (looks like a latent bug).
    key_sorter = lambda ident: attrgetter(ident.provider.type.name,
                                          ident.created_by.username)
    identities = sorted(identities, key=key_sorter)
    os_acct_driver = None
    total = len(identities)
    for idx, core_identity in enumerate(identities):
        try:
            # Initialize the drivers
            driver = get_esh_driver(core_identity)
            if not isinstance(driver, OSDriver):
                continue
            # Reuse the account driver across identities on one provider
            if not os_acct_driver or \
                    os_acct_driver.core_provider != core_identity.provider:
                os_acct_driver = OSAccountDriver(core_identity.provider)
                logger.info("Initialized account driver")
            # Get useful info
            creds = core_identity.get_credentials()
            tenant_name = creds["ex_tenant_name"]
            logger.info("Checking Identity %s/%s - %s"
                        % (idx + 1, total, tenant_name))
            # Attempt to clean floating IPs
            num_ips_removed = driver._clean_floating_ip()
            if num_ips_removed:
                logger.debug("Removed %s ips from OpenStack Tenant %s"
                             % (num_ips_removed, tenant_name))
            # Test for active/inactive instances
            instances = driver.list_instances()
            active = any(driver._is_active_instance(inst)
                         for inst in instances)
            inactive = all(driver._is_inactive_instance(inst)
                           for inst in instances)
            for instance in instances:
                if driver._is_inactive_instance(instance) and instance.ip:
                    # If an inactive instance has floating/fixed IPs.. Remove them!
                    instance_service.remove_ips(driver, instance)
            if active and not inactive:
                # User has >1 active instances AND not all instances inactive
                pass
            elif os_acct_driver.network_manager.get_network_id(
                    os_acct_driver.network_manager.neutron,
                    "%s-net" % tenant_name):
                # User has 0 active instances OR all instances are inactive
                # Network exists, attempt to dismantle as much as possible
                remove_network = not inactive
                logger.info("Removing project network %s for %s"
                            % (remove_network, tenant_name))
                if remove_network:
                    # Sec. group can't be deleted if instances are suspended
                    # when instances are suspended we pass remove_network=False
                    os_acct_driver.delete_security_group(core_identity)
                os_acct_driver.delete_network(
                    core_identity, remove_network=remove_network)
            else:
                # logger.info("No Network found. Skipping %s" % tenant_name)
                pass
        except Exception as exc:
            logger.exception(exc)
    logger.debug("clear_empty_ips task finished at %s." % datetime.now())
def get_core_instances(identity_id):
    """Return core-model instances for every live instance on an identity.

    Lists the identity's instances via its esh driver and converts each
    one into a core model before returning them.
    """
    identity = CoreIdentity.objects.get(id=identity_id)
    driver = get_esh_driver(identity)
    converted = []
    for esh_instance in driver.list_instances():
        converted.append(
            convert_esh_instance(driver, esh_instance, identity.provider.id,
                                 identity.id, identity.created_by))
    return converted
def get_driver(self, core_identity):
    """Return a memoized esh driver for the given identity.

    A driver is built at most once per identity and stored in
    self.driver_map; later calls reuse the cached object.
    """
    from api import get_esh_driver
    cached = self.driver_map.get(core_identity)
    if cached:
        logger.info("Driver reused: %s" % cached)
        return cached
    new_driver = get_esh_driver(core_identity)
    logger.info("Driver initialized: %s" % new_driver)
    self.driver_map[core_identity] = new_driver
    return new_driver
def get_admin_driver(provider):
    """
    Create an admin driver for a given provider.

    Returns None (and logs the failure) when the provider has no account
    provider identity or the driver cannot be built.
    """
    try:
        from api import get_esh_driver
        return get_esh_driver(provider.accountprovider_set.all()[0].identity)
    except Exception:
        # BUG FIX: was a bare 'except:' (also caught SystemExit /
        # KeyboardInterrupt) and hid the traceback; log it instead.
        logger.exception("Admin driver for provider %s not found." %
                         (provider.location))
        return None
def main(): for ident in Identity.objects.filter(provider__id=1): driver = get_esh_driver(ident) try: vols = driver.list_volumes() except: print 'No volumes found for %s' % ident.created_by.username if not vols: continue print ('%s\n---\n' % ident.created_by.username) for vol in vols: print ('%s\t%s' %( vol.alias, vol.extra['createTime']))
def destroy_instance(identity_id, instance_alias):
    """Destroy an instance; on OpenStack, disassociate its floating IP first.

    Returns None when the instance no longer exists, otherwise the result
    of destroy_node.
    """
    core_identity = CoreIdentity.objects.get(id=identity_id)
    esh_driver = get_esh_driver(core_identity)
    instance = esh_driver.get_instance(instance_alias)
    #Bail if instance doesnt exist
    if not instance:
        return None
    if isinstance(esh_driver, OSDriver):
        #Openstack: Remove floating IP first
        try:
            esh_driver._connection.ex_disassociate_floating_ip(instance)
        except Exception as exc:
            # CONSISTENCY FIX: match the sibling destroy_instance variant --
            # a floating IP that is already gone must not abort the destroy.
            if 'floating ip not found' not in exc.message:
                raise
    node_destroyed = esh_driver._connection.destroy_node(instance)
    return node_destroyed
def prepare_driver(request, provider_id, identity_id):
    """Build an esh driver for the user making this request.

    TODO: Cache driver based on specific provider.
    """
    from api import get_esh_driver
    from core.models import Provider, Identity
    requesting_user = extractUser(request)
    provider = Provider.objects.get(id=provider_id)
    identity = Identity.objects.get(id=identity_id)
    return get_esh_driver(provider, identity, requesting_user.username)
def get_current_quota(identity_id):
    """Tally the cpu/ram/disk currently consumed by an identity's instances.

    Suspended instances are excluded from the resource totals and are
    reported via 'suspended_count' instead.
    """
    driver = get_esh_driver(Identity.objects.get(id=identity_id))
    totals = {'cpu': 0, 'ram': 0, 'disk': 0, 'suspended_count': 0}
    for instance in driver.list_instances():
        if instance.extra['status'] == 'suspended':
            totals['suspended_count'] += 1
            continue
        size = instance.size
        totals['cpu'] += size.cpu
        totals['ram'] += size.ram
        totals['disk'] += size._size.disk
    return totals
def launch_instance(user, provider_id, identity_id,
                    size_alias, machine_alias, **kwargs):
    """
    Required arguments will launch the instance, extras will do
    provider-specific modifications.

    Test the quota,
    Launch the instance,
    creates a core repr and updates status.

    returns a core_instance object after updating core DB.
    """
    now_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # Determine launch source: machine image or bootable volume
    if machine_alias:
        alias = "machine,%s" % machine_alias
    elif 'volume_alias' in kwargs:
        alias = "boot_volume,%s" % kwargs['volume_alias']
    else:
        raise Exception("Not enough data to launch: "
                        "volume_alias/machine_alias is missing")
    status_logger.debug("%s,%s,%s,%s,%s,%s"
                        % (now_time, user, "No Instance",
                           alias, size_alias, "Request Received"))
    core_identity = CoreIdentity.objects.get(id=identity_id)
    esh_driver = get_esh_driver(core_identity, user)
    size = esh_driver.get_size(size_alias)
    #May raise SizeNotAvailable
    check_size(size, provider_id)
    #May raise OverQuotaError or OverAllocationError
    check_quota(user.username, identity_id, size)
    #May raise InvalidCredsError, SecurityGroupNotCreated
    (esh_instance, token, password) = launch_esh_instance(
        esh_driver, machine_alias, size_alias, core_identity, **kwargs)
    #Convert esh --> core
    core_instance = convert_esh_instance(
        esh_driver, esh_instance, provider_id, identity_id, user,
        token, password)
    esh_size = esh_driver.get_size(esh_instance.size.id)
    core_size = convert_esh_size(esh_size, provider_id)
    core_instance.update_history(
        core_instance.esh.extra['status'],
        core_size,
        #3rd arg is task OR tmp_status
        core_instance.esh.extra.get('task') or
        core_instance.esh.extra.get('metadata', {}).get('tmp_status'),
        first_update=True)
    return core_instance
def destroy_instance(instance_alias, core_identity_id):
    """Celery task: destroy an instance, then schedule network cleanup.

    On OpenStack, chains clean_empty_ips after the destroy; when no
    instances remain, remove_empty_network is chained as well.
    Retries the task on any exception.
    """
    from service import instance as instance_service
    from rtwo.driver import OSDriver
    from api import get_esh_driver
    try:
        logger.debug("destroy_instance task started at %s." % datetime.now())
        node_destroyed = instance_service.destroy_instance(
            core_identity_id, instance_alias)
        core_identity = Identity.objects.get(id=core_identity_id)
        driver = get_esh_driver(core_identity)
        if isinstance(driver, OSDriver):
            #Spawn off the last two tasks
            logger.debug("OSDriver Logic -- Remove floating ips and check"
                         " for empty project")
            driverCls = driver.__class__
            provider = driver.provider
            identity = driver.identity
            instances = driver.list_instances()
            # NOTE(review): this builds a list of booleans (one per
            # instance), so 'not active' below is True only when there are
            # no instances at all -- confirm a filter/any() wasn't intended.
            active = [driver._is_active_instance(inst)
                      for inst in instances]
            if not active:
                logger.debug("Driver shows 0 of %s instances are active"
                             % (len(instances), ))
                #For testing ONLY.. Test cases ignore countdown..
                if app.conf.CELERY_ALWAYS_EAGER:
                    logger.debug("Eager task waiting 1 minute")
                    time.sleep(60)
                destroy_chain = chain(
                    clean_empty_ips.subtask((driverCls, provider, identity),
                                            immutable=True, countdown=5),
                    remove_empty_network.subtask(
                        (driverCls, provider, identity, core_identity_id),
                        immutable=True, countdown=60))
                destroy_chain()
            else:
                logger.debug("Driver shows %s of %s instances are active"
                             % (len(active), len(instances)))
                #For testing ONLY.. Test cases ignore countdown..
                if app.conf.CELERY_ALWAYS_EAGER:
                    logger.debug("Eager task waiting 15 seconds")
                    time.sleep(15)
                destroy_chain = \
                    clean_empty_ips.subtask(
                        (driverCls, provider, identity),
                        immutable=True, countdown=5).apply_async()
        logger.debug("destroy_instance task finished at %s." % datetime.now())
        return node_destroyed
    except Exception as exc:
        logger.exception(exc)
        destroy_instance.retry(exc=exc)
def launch_instance(user, provider_id, identity_id,
                    size_alias, machine_alias, **kwargs):
    """
    Required arguments will launch the instance, extras will do
    provider-specific modifications.

    Test the quota,
    Launch the instance,
    creates a core repr and updates status.

    returns a core_instance object after updating core DB.
    """
    now_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # Determine launch source: machine image or bootable volume
    if machine_alias:
        alias = "machine,%s" % machine_alias
    elif 'volume_alias' in kwargs:
        alias = "boot_volume,%s" % kwargs['volume_alias']
    else:
        raise Exception("Not enough data to launch: "
                        "volume_alias/machine_alias is missing")
    status_logger.debug("%s,%s,%s,%s,%s,%s"
                        % (now_time, user, "No Instance",
                           alias, size_alias, "Request Received"))
    core_identity = CoreIdentity.objects.get(id=identity_id)
    esh_driver = get_esh_driver(core_identity, user)
    size = esh_driver.get_size(size_alias)
    #May raise SizeNotAvailable
    check_size(size, provider_id)
    #May raise OverQuotaError or OverAllocationError
    check_quota(user.username, identity_id, size)
    #May raise InvalidCredsError
    (esh_instance, token, password) = launch_esh_instance(
        esh_driver, machine_alias, size_alias, core_identity, **kwargs)
    #Convert esh --> core
    core_instance = convert_esh_instance(
        esh_driver, esh_instance, provider_id, identity_id, user,
        token, password)
    esh_size = esh_driver.get_size(esh_instance.size.id)
    core_size = convert_esh_size(esh_size, provider_id)
    core_instance.update_history(
        core_instance.esh.extra['status'],
        core_size,
        #3rd arg is task OR tmp_status
        core_instance.esh.extra.get('task') or
        core_instance.esh.extra.get('metadata', {}).get('tmp_status'),
        first_update=True)
    return core_instance
def destroy_instance(instance_alias, core_identity_id):
    """Celery task: destroy an instance, then schedule network cleanup.

    On OpenStack, chains clean_empty_ips after the destroy; when no
    instances remain, remove_empty_network is chained as well.
    Retries the task on any exception.
    """
    from service import instance as instance_service
    from rtwo.driver import OSDriver
    from api import get_esh_driver
    try:
        logger.debug("destroy_instance task started at %s." % datetime.now())
        node_destroyed = instance_service.destroy_instance(
            core_identity_id, instance_alias)
        core_identity = Identity.objects.get(id=core_identity_id)
        driver = get_esh_driver(core_identity)
        if isinstance(driver, OSDriver):
            #Spawn off the last two tasks
            logger.debug("OSDriver Logic -- Remove floating ips and check"
                         " for empty project")
            driverCls = driver.__class__
            provider = driver.provider
            identity = driver.identity
            instances = driver.list_instances()
            # NOTE(review): list of booleans -- 'not active' is True only
            # when there are zero instances; confirm any()/filter wasn't
            # the intent.
            active = [driver._is_active_instance(inst)
                      for inst in instances]
            if not active:
                logger.debug("Driver shows 0 of %s instances are active"
                             % (len(instances),))
                #For testing ONLY.. Test cases ignore countdown..
                if app.conf.CELERY_ALWAYS_EAGER:
                    logger.debug("Eager task waiting 1 minute")
                    time.sleep(60)
                destroy_chain = chain(
                    clean_empty_ips.subtask(
                        (driverCls, provider, identity),
                        immutable=True, countdown=5),
                    remove_empty_network.subtask(
                        (driverCls, provider, identity, core_identity_id),
                        immutable=True, countdown=60))
                destroy_chain()
            else:
                logger.debug("Driver shows %s of %s instances are active"
                             % (len(active), len(instances)))
                #For testing ONLY.. Test cases ignore countdown..
                if app.conf.CELERY_ALWAYS_EAGER:
                    logger.debug("Eager task waiting 15 seconds")
                    time.sleep(15)
                destroy_chain = \
                    clean_empty_ips.subtask(
                        (driverCls, provider, identity),
                        immutable=True, countdown=5).apply_async()
        logger.debug("destroy_instance task finished at %s." % datetime.now())
        return node_destroyed
    except Exception as exc:
        logger.exception(exc)
        destroy_instance.retry(exc=exc)
def _init_by_provider(self, provider, *args, **kwargs):
    """Initialize account-driver state from a core Provider.

    Stores the provider and its credentials on self, builds the admin
    driver, and returns a merged credential dict: location, the admin
    credentials translated to openstack keys, then provider credentials.
    """
    from api import get_esh_driver
    self.core_provider = provider
    self.provider_creds = provider.get_credentials()
    admin_identity = provider.get_admin_identity()
    admin_creds = admin_identity.get_credentials()
    self.admin_driver = get_esh_driver(admin_identity)
    merged_creds = {'location': provider.get_location()}
    merged_creds.update(self._libcloud_to_openstack(admin_creds))
    merged_creds.update(self.provider_creds)
    return merged_creds
def main():
    """Repair application naming for machines on provider 4.

    For applications shared by two or more provider machines, creates a
    new application for each machine whose cloud name disagrees with the
    current application name, and re-points the machine at it.
    """
    driver = get_esh_driver(Identity.objects.get(
        provider__id=4, created_by__username='******'))
    for app in Application.objects.all():
        pms = app.providermachine_set.filter(provider__id=4)
        if len(pms) >= 2:
            for pm in pms:
                print "%s shares application %s" % (pm.identifier, app.name)
                mach = driver.get_machine(pm.identifier)
                if not mach:
                    print "%s doesnt exist" % pm.identifier
                    continue
                if mach.name != app.name:
                    new_app = create_application(pm.identifier, 4, mach.name)
                    pm.application = new_app
                    pm.save()
                    print 'New app created:%s' % new_app.name
def destroy_instance(identity_id, instance_alias):
    """Destroy an instance; on OpenStack, disassociate its floating IP first.

    Returns None when the instance no longer exists, otherwise the result
    of destroy_node.
    """
    core_identity = CoreIdentity.objects.get(id=identity_id)
    esh_driver = get_esh_driver(core_identity)
    instance = esh_driver.get_instance(instance_alias)
    #Bail if instance doesnt exist
    if not instance:
        return None
    if isinstance(esh_driver, OSDriver):
        #Openstack: Remove floating IP first
        try:
            esh_driver._connection.ex_disassociate_floating_ip(instance)
        except Exception as exc:
            # A floating IP that is already gone is not an error
            if 'floating ip not found' not in exc.message:
                raise
    node_destroyed = esh_driver._connection.destroy_node(instance)
    return node_destroyed
def current_instance_time(user, instances, identity_id, delta_time):
    """
    Add all running instances to core, so that the database is up to date
    before calling 'core_instance_time'
    """
    from api import get_esh_driver
    identity = Identity.objects.get(id=identity_id)
    driver = get_esh_driver(identity)
    running = []
    for esh_instance in instances:
        running.append(
            convert_esh_instance(driver, esh_instance, identity.provider.id,
                                 identity.id, user))
    #All instances that don't have an end-date should be
    #included, even if all of their time is not.
    return core_instance_time(user, identity.id, delta_time, running=running)
def main():
    """CLI entry point: suspend (default) or resume one instance.

    Exit codes: 0 success, 1 identity not found, 2 instance not found,
    3 resume failed, 4 suspend failed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--user", required=True,
                        help="Username that instance belongs to.")
    parser.add_argument("--provider", type=int, required=True,
                        help="Provider instance is running in.")
    parser.add_argument("--instance", required=True,
                        help="OpenStack instance id.")
    parser.add_argument("--resume", action="store_true",
                        help="Resume the instance instead of suspending it.")
    args = parser.parse_args()
    user = args.user
    instance = args.instance
    provider = args.provider
    try:
        ident = Identity.objects.get(provider__id=provider,
                                     created_by__username=user)
    except Exception as e:
        print("Identity could not be found for user: %s on provider: %s"
              % (user, provider))
        print_exc()
        return 1
    driver = get_esh_driver(ident)
    try:
        inst = driver.get_instance(instance)
    except Exception as e:
        print("Instance %s was not found." % (instance))
        print_exc()
        return 2
    if args.resume:
        try:
            resume_instance(driver, inst, ident.provider.id, ident.id,
                            ident.created_by)
        except Exception as e:
            print("Resume failed.")
            print("Calling service.instance.resume_instance failed for instance %s."
                  % (instance))
            print_exc()
            return 3
        print("Resumed %s." % (instance))
    else:
        try:
            suspend_instance(driver, inst, ident.provider.id, ident.id,
                             ident.created_by)
        except Exception as e:
            print("Suspend failed.")
            print("Calling service.instance.suspend_instance failed for instance %s."
                  % (instance))
            print_exc()
            return 4
        print("Suspended %s." % (instance))
    return 0
def freeze_instance_task(identity_id, instance_id, **celery_task_args):
    """Sync, then freeze, an instance's filesystem over SSH.

    Deploys the sync_instance script followed by the freeze_instance
    script to the target instance, authenticating with the admin key.
    Both deploys run synchronously in this task.
    """
    from api import get_esh_driver
    identity = Identity.objects.get(id=identity_id)
    driver = get_esh_driver(identity)
    kwargs = {}
    # NOTE(review): hard-coded deploy-host private key path
    private_key = "/opt/dev/atmosphere/extras/ssh/id_rsa"
    kwargs.update({'ssh_key': private_key})
    kwargs.update({'timeout': 120})
    si_script = sync_instance()
    kwargs.update({'deploy': si_script})
    instance = driver.get_instance(instance_id)
    driver.deploy_to(instance, **kwargs)
    fi_script = freeze_instance()
    kwargs.update({'deploy': fi_script})
    driver.deploy_to(instance, **kwargs)
def freeze_instance_task(identity_id, instance_id):
    """Sync an instance's filesystem, then schedule the freeze.

    The sync script is deployed synchronously; the freeze script is then
    dispatched asynchronously via deploy_to.delay.
    """
    from api import get_esh_driver
    identity = Identity.objects.get(id=identity_id)
    driver = get_esh_driver(identity)
    kwargs = {}
    # NOTE(review): hard-coded deploy-host private key path
    private_key = "/opt/dev/atmosphere/extras/ssh/id_rsa"
    kwargs.update({"ssh_key": private_key})
    kwargs.update({"timeout": 120})
    si_script = sync_instance()
    kwargs.update({"deploy": si_script})
    instance = driver.get_instance(instance_id)
    driver.deploy_to(instance, **kwargs)
    fi_script = freeze_instance()
    kwargs.update({"deploy": fi_script})
    deploy_to.delay(driver.__class__, driver.provider, driver.identity,
                    instance.id, **kwargs)
def launch_instance(user, provider_id, identity_id,
                    size_alias, machine_alias, **kwargs):
    """
    Required arguments will launch the instance, extras will do
    provider-specific modifications.

    Test the quota,
    Launch the instance,
    creates a core repr and updates status.

    returns a core_instance object after updating core DB.
    """
    core_identity = CoreIdentity.objects.get(id=identity_id)
    esh_driver = get_esh_driver(core_identity, user)
    size = esh_driver.get_size(size_alias)
    #May raise SizeNotAvailable
    check_size(size, provider_id)
    #May raise OverQuotaError or OverAllocationError
    check_quota(user.username, identity_id, size)
    #May raise InvalidCredsError
    (esh_instance, token, password) = launch_esh_instance(
        esh_driver, machine_alias, size_alias, core_identity, **kwargs)
    #Convert esh --> core
    core_instance = convert_esh_instance(
        esh_driver, esh_instance, provider_id, identity_id, user,
        token, password)
    core_instance.update_history(
        core_instance.esh.extra['status'],
        #2nd arg is task OR tmp_status
        core_instance.esh.extra.get('task') or
        core_instance.esh.extra.get('metadata', {}).get('tmp_status'),
        first_update=True)
    return core_instance
def monitor_instances():
    """
    This task should be run every 5m-15m

    For every identity membership with un-ended core instances: list the
    live esh instances, suspend when over allocation, otherwise refresh
    the instance status history.
    """
    from api import get_esh_driver
    from core.models import IdentityMembership
    for im in IdentityMembership.objects.all():
        #Start by checking for running/missing instances
        core_instances = im.identity.instance_set.filter(end_date=None)
        if not core_instances:
            continue
        #Running/missing instances found. We may have to do something!
        driver = get_esh_driver(im.identity)
        esh_instances = driver.list_instances()
        #Test allocation && Suspend instances if we are over allocated time
        over_allocation = over_allocation_test(im.identity, esh_instances)
        if over_allocation:
            continue
        #We may need to update instance status history
        # NOTE(review): other call sites pass the driver as the first
        # argument to update_instances -- confirm this 3-arg call matches
        # the current signature.
        update_instances(im.identity, esh_instances, core_instances)
def set_provider_quota(identity_id):
    """
    Push the locally-stored quota for an identity out to its provider.

    Only OpenStack providers are supported; other provider types fall
    through untouched.  Returns True on completion, or None when the
    identity has no credentials to act on.
    """
    identity = Identity.objects.get(id=identity_id)
    if not identity.credential_set.all():
        # Can't update quota if credentials aren't set
        return
    if identity.provider.get_type_name().lower() == 'openstack':
        driver = get_esh_driver(identity)
        username = identity.created_by.username
        user_id = driver._connection._get_user_id()
        tenant_id = driver._connection._get_tenant_id()
        membership = IdentityMembership.objects.get(
            identity__id=identity_id,
            member__name=username)
        user_quota = membership.quota
        if user_quota:
            # NOTE(review): memory*1024 implies the quota stores GB while
            # OpenStack expects MB -- confirm the unit of Quota.memory.
            values = {'cores': user_quota.cpu,
                      'ram': user_quota.memory * 1024}
            logger.info("Updating quota for %s to %s" % (username, values))
            account = AccountDriver(identity.provider)
            account.admin_driver._connection.ex_update_quota_for_user(
                tenant_id, user_id, values)
    return True
def over_allocation_test(identity, esh_instances):
    """
    Check whether `identity`'s owner is over their time allocation.

    Returns False when under allocation.  When over, every instance in
    `esh_instances` is suspended, its fresh provider-side state is
    mirrored into the core DB (with a status-history entry), and True is
    returned so callers (see monitor_instances) can skip further
    processing of the now-suspended instances.
    """
    from api import get_esh_driver
    from core.models.instance import convert_esh_instance
    over_allocated, time_diff = check_over_allocation(
        identity.created_by.username, identity.id)
    if not over_allocated:
        # Nothing changed, bail.
        return False
    driver = get_esh_driver(identity)
    for instance in esh_instances:
        # Suspend, get updated status/task, and update the DB
        try:
            driver.suspend_instance(instance)
        except Exception as e:  # py2.6+/py3-compatible form of `except X, e`
            # Already-suspended instances raise with this message;
            # anything else is a real failure.
            if 'in vm_state suspended' not in e.message:
                raise
        updated_esh = driver.get_instance(instance.id)
        updated_core = convert_esh_instance(driver, updated_esh,
                                            identity.provider.id,
                                            identity.id,
                                            identity.created_by)
        updated_core.update_history(updated_esh.extra['status'],
                                    updated_esh.extra.get('task'))
    # BUG FIX: previously fell off the end (returning None), so the
    # caller's `if over_allocation: continue` never fired and suspended
    # instances were still pushed through update_instances().
    return True
def enforce_allocation(identity, user, time_used): from core.models.instance import convert_esh_instance #TODO: When user->group is no longer true, # we will need to modify this.. group = Group.objects.get(name=user.username) im = identity.identitymembership_set.get(member=group) allocation = get_allocation(user.username, identity.id) if not allocation: return False max_time_allowed = timedelta(minutes=allocation.threshold) time_diff = max_time_allowed - time_used over_allocated = time_diff.total_seconds() <= 0 if not over_allocated: return False #if settings.DEBUG: # logger.info('Do not enforce allocations in DEBUG mode') # return False logger.info("%s is OVER their allowed quota by %s" % (user.username, time_diff)) driver = get_esh_driver(identity) esh_instances = driver.list_instances() for instance in esh_instances: try: if driver._is_active_instance(instance): #Suspend active instances, update the task in the DB driver.suspend_instance(instance) #Give it a few seconds to suspend time.sleep(3) updated_esh = driver.get_instance(instance.id) updated_core = convert_esh_instance( driver, updated_esh, identity.provider.id, identity.id, user) except Exception, e: if 'in vm_state suspended' not in e.message: raise
def clear_empty_ips():
    """
    Periodic task: for every active OpenStack identity, release unused
    floating IPs and, when the tenant has no active instances, dismantle
    its project network (and security group, when nothing is suspended).
    """
    logger.debug("clear_empty_ips task started at %s." % datetime.now())
    from service import instance as instance_service
    from rtwo.driver import OSDriver
    from api import get_esh_driver
    from service.accounts.openstack import AccountDriver as\
        OSAccountDriver
    identities = Identity.objects.filter(
        provider__type__name__iexact='openstack',
        provider__active=True)
    # BUG FIX: previously read `ident.provider.type.name` before any
    # `ident` existed (NameError) and built a lambda returning an
    # attrgetter object instead of a key.  Sort directly on the dotted
    # attribute paths instead.
    identities = sorted(
        identities,
        key=attrgetter('provider.type.name', 'created_by.username'))
    os_acct_driver = None
    total = len(identities)
    for idx, core_identity in enumerate(identities):
        try:
            # Initialize the drivers
            driver = get_esh_driver(core_identity)
            if not isinstance(driver, OSDriver):
                continue
            # Account driver is per-provider; rebuild only on change.
            if not os_acct_driver or\
                    os_acct_driver.core_provider != core_identity.provider:
                os_acct_driver = OSAccountDriver(core_identity.provider)
                logger.info("Initialized account driver")
            # Get useful info
            creds = core_identity.get_credentials()
            tenant_name = creds['ex_tenant_name']
            logger.info("Checking Identity %s/%s - %s"
                        % (idx + 1, total, tenant_name))
            # Attempt to clean floating IPs
            num_ips_removed = driver._clean_floating_ip()
            if num_ips_removed:
                logger.debug("Removed %s ips from OpenStack Tenant %s"
                             % (num_ips_removed, tenant_name))
            # Test for active/inactive instances
            instances = driver.list_instances()
            active = any(driver._is_active_instance(inst)
                         for inst in instances)
            inactive = all(driver._is_inactive_instance(inst)
                           for inst in instances)
            if active and not inactive:
                # User has >1 active instances AND not all instances inactive
                pass
            elif os_acct_driver.network_manager.get_network_id(
                    os_acct_driver.network_manager.neutron,
                    '%s-net' % tenant_name):
                # User has 0 active instances OR all instances are inactive
                # Network exists, attempt to dismantle as much as possible
                remove_network = not inactive
                logger.info("Removing project network %s for %s" %
                            (remove_network, tenant_name))
                if remove_network:
                    # Sec. group can't be deleted if instances are suspended
                    # when instances are suspended we pass remove_network=False
                    os_acct_driver.delete_security_group(core_identity)
                os_acct_driver.delete_network(
                    core_identity, remove_network=remove_network)
            else:
                #logger.info("No Network found. Skipping %s" % tenant_name)
                pass
        except Exception as exc:
            # Best-effort per identity: log and move to the next one.
            logger.exception(exc)
    logger.debug("clear_empty_ips task finished at %s." % datetime.now())
def _get_cached_driver(provider=None, identity=None, force=False):
    """
    Return a memoized esh driver.

    When `provider` is given, delegate to the admin-driver cache.
    Otherwise return the cached driver for `identity`, building it on
    demand (or rebuilding when `force` is truthy).
    """
    if provider:
        return _get_cached_admin_driver(provider, force)
    cached = drivers.get(identity)
    if force or not cached:
        cached = get_esh_driver(identity)
        drivers[identity] = cached
    return cached