Example #1
	def trashman(self):
		from webapp.libs.openstack import instance_info
		from webapp.libs.openstack import instance_decommission		

		# build the response
		response = {"response": "success", "result": {"message": "", "server": {}}}

		# get instance (server) info
		cluster_response = instance_info(self)

		if cluster_response['response'] == "success":
			# we should NOT have this, so try to decommission out of desperation
			cluster_response = instance_decommission(self)
			response['result']['message'] = "Terminating instance %s" % self.name
		else:
			# delete this instance into forever
			self.address_model.release()
			self.delete(self)
			response['result']['message'] = "Instance %s has been deleted." % self.name

		# make a call to the callback url to report instance details
		appliance = Appliance().get()
		callback_url = self.callback_url
		pool_response = pool_instances(
			url=callback_url,
			instance=self,
			appliance=appliance)

		return response
Example #2
    def trashman(self):
        from webapp.libs.openstack import instance_info
        from webapp.libs.openstack import instance_decommission

        # build the response
        response = {
            "response": "success",
            "result": {
                "message": "",
                "server": {}
            }
        }

        # get instance (server) info
        cluster_response = instance_info(self)

        if cluster_response['response'] == "success":
            # we should NOT have this, so try to decommission out of desperation
            cluster_response = instance_decommission(self)
            response['result'][
                'message'] = "Terminating instance %s" % self.name
        else:
            # delete this instance into forever
            address = Addresses().get_by_id(self.address_id)
            address.release()
            self.delete(self)
            response['result'][
                'message'] = "Instance %s has been deleted." % self.name

        # make a call to the callback url to report instance details
        appliance = Appliance().get()
        callback_url = self.callback_url
        pool_response = pool_instance(url=callback_url,
                                      instance=self,
                                      appliance=appliance)

        return response
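
Both trashman variants above return the same envelope that is built at the top of the method, so a caller only needs to read result['result']['message'] (and, for the methods further below, result['response']). A minimal sketch of a hypothetical caller, assuming an `instances` iterable and the Flask `app` logger from the surrounding application, neither of which is defined in these excerpts:

# hypothetical caller; `instances` and `app` are assumed to come from the
# surrounding application and are not defined in the excerpts above
for instance in instances:
    result = instance.trashman()
    app.logger.info(result['result']['message'])
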
Example #3
	def housekeeping(self):
		from webapp.libs.openstack import instance_info
		from webapp.libs.openstack import instance_decommission
		from webapp.libs.openstack import instance_console

		# build the response
		response = {"response": "success", "result": {"message": "", "server": {}}}

		# get instance (server) info
		cluster_response = instance_info(self)
		server = cluster_response['result']['server']

		# we all have limited time in this reality
		epoch_time = int(time.time())

		# set start state
		start_state = self.state

		# this is complicated...because we aren't EC with OpenStack...or I'm crazy
		if cluster_response['response'] == "success": 
			# openstack responded it found this instance
			if server.status == "ACTIVE":
				# openstack says the server is running
				if self.expires < epoch_time:
					# suspend the instance for non-payment
					try:
						self.suspend()
						response['result']['message'] = "Instance %s suspended." % self.name
					except Exception as e:
						response['response'] = 'error'
						response['result']['message'] = \
							'Instance {instance} suspending failed: "{error}".'.format(
								instance=self.name, error=str(e))
						return response
					self.state = 5
				elif self.expires > epoch_time:
					# openstack says we're running, and we're paid
					if self.state == 5 or self.state == 6:
						# we move the instance to starting mode
						response['result']['message'] = "Instance %s is starting." % self.name
						self.state = 3
			elif server.status == "SUSPENDED" or server.status == "SHUTOFF":
				# openstack says this instance is suspended
				if self.expires > epoch_time:
					# should be running because not expired
					try:
						self.resume()
						response['result']['message'] = "Instance %s resumed." % self.name
					except Exception as e:
						response['response'] = 'error'
						response['result']['message'] = \
							'Instance {instance} resume failed: "{error}".'.format(
								instance=self.name, error=str(e))
						return response
					self.state = 3 # mark as starting
				if self.expires + app.config['POOL_DECOMMISSION_TIME'] < epoch_time:
					# should be destroyed (suspended for +2 hours without pay)
					response['result']['message'] = "Instance %s decommissioned." % self.name
					self.state = 7
			else:
				# openstack indicates another state besides SUSPENDED or ACTIVE
				if self.expires > epoch_time:
					# we should be running, but in a weird state - destroy then restart
					response = instance_decommission(self)
					response['result']['message'] = "Instance %s restarted." % self.name
					self.state = 2 # set as paid and ready to start
					app.logger.error("OpenStack says instance=(%s) isn't in the correct state.  Setting to restart." % self.name)
				else:
					# expired but in a weird state - destroy
					response = instance_decommission(self)
					response['result']['message'] = "Instance %s decommissioned." % self.name
					self.state = 7
		else:
			# openstack can't find this instance
			if self.expires > epoch_time:
				if self.state == 2:
					# check error rate
					if self.message_count > 10:
						# we're failing to start the instance, so decommission
						response['result']['message'] = "Instance %s decommissioned." % self.name
						self.state = 7
						app.logger.error("Exceeded error rate on callbacks for instance=(%s). Decomissioning." % self.name)
				else:
					# set instance to restart - not expired, should be running
					response['response'] = "error" # technically, someone is probably f*****g with things
					response['result']['message'] = "Setting instance %s to restart." % self.name
					self.state = 2 # will be started shortly after this by start
					app.logger.error("OpenStack doesn't know about instance=(%s). Setting to restart." % self.name)
			else:
				# no reason to be running
				response['response'] = "error"
				response['result']['message'] = "Instance %s decommissioned." % self.name
				self.state = 7 # will be deleted shortly after this by trashman

		# get instance console output
		cluster_response = instance_console(self)
		if 'console' in cluster_response['result']:
			self.console = cluster_response['result']['console']

		# save updated properties
		self.save()

		# make a call to the callback url to report instance details if either the
		# state has changed or the last state change is less than 900 secs ago
		if self.state != start_state or self.updated >= int(time.time()) - 900:
			appliance = Appliance().get()
			callback_url = self.callback_url
			pool_response = pool_instances(
				url=callback_url,
				instance=self,
				appliance=appliance)

		return response
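
The housekeeping() method above (like nudge() and trashman() in the other excerpts) drives a small integer state machine through self.state. The named constants are never shown in these excerpts; the mapping below is only inferred from the inline comments and should be treated as an assumption, not the codebase's actual definitions:

# assumed mapping, reconstructed from the inline comments in these excerpts;
# the real codebase may use different numbers or names
INSTANCE_STATES = {
    2: "paid, ready to (re)start",             # "set as paid and ready to start"
    3: "starting",                             # "mark as starting"
    4: "running",                              # "we set to be running state==4"
    5: "suspended for non-payment",            # set right after self.suspend()
    6: "suspended (variant, grouped with 5)",  # checked together with 5 when resuming
    7: "decommissioned, awaiting trashman",    # "will be deleted shortly after this by trashman"
}
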
Example #4
	def nudge(self):
		from webapp.libs.openstack import try_associate_floating_ip
		from webapp.libs.openstack import instance_info
		from webapp.libs.openstack import instance_console
		from webapp.libs.openstack import instance_decommission

		# get instance console output
		response = instance_console(self)
		if 'console' in response['result']:
			self.console = response['result']['console']
		self.update()

		# get instance (server) info
		response = instance_info(self)

		# set start state
		start_state = self.state

		# set instance meta data
		if response['response'] == "success":
			server = response['result']['server']

			# if the state is ACTIVE, we set to be running state==4
			if server.status == "ACTIVE":
				# set network info
				self.state = 4

				# try to get a floating ip for the new server
				float_response = try_associate_floating_ip(server)

				# check if call got a floating IP
				if float_response['response'] == "success":
					# get instance info again to pick up new IP
					response = instance_info(self)

					# load the response into the server object
					if response['response'] == "success":
						server = response['result']['server']

				else:
					# log 'errors' in floating assignment
					app.logger.info(float_response['result']['message'])

				# extract IP addresses using IPy
				# in some circumstances this will squash multiple same/same address types
				# we only extract and store one each of private ipv4, public ipv4, and public ipv6
				for key in server.networks.keys(): # any network names
					for address in server.networks[key]: # loop through each address for each network
						# private IPv4
						if IP(address).iptype() == "PRIVATE" and IP(address).version() == 4:
							self.privateipv4 = address
						# public IPv4
						elif IP(address).iptype() == "PUBLIC" and IP(address).version() == 4:
							self.publicipv4 = address
						# public IPv6
						elif IP(address).iptype() == "ALLOCATED ARIN" and IP(address).version() == 6:
							self.publicipv6 = address

				# update the instance
				self.update()

			# ERROR status from openstack
			elif server.status == "ERROR":
				# instance failed to start, so delete and reset to paid
				response = instance_decommission(self)

				self.state = 2 # will be started again shortly
				self.update()

				response['response'] = "error"
				response['result']['message'] = "OpenStack errored on instance start."
				
				app.logger.error("OpenStack error on starting instance=(%s).  Setting to restart." % self.name)

			# SPAWNING status from openstack
			else:
				# we all have limited time in this reality
				epoch_time = int(time.time())			

				# wait_timer is 5 minutes after the last update
				wait_timer = self.updated + 300

				# test to see if we are 'hung' on SPAWNING for more than wait_timer
				if epoch_time > wait_timer:
					# we're now  past when the instance needed to move to RUNNING
					response = instance_decommission(self)

					response['response'] = "error"
					response['result']['message'] = "Setting instance %s to restart." % self.name
					
					self.state = 2 # will be started shortly after this by start
					self.updated = epoch_time # given we 'timed' out, give the instance more time
					self.update()

					"""
					of anyplace, this is where you *might* want to add some time to the instance
					because a time based payment has been made on it.  however, this could lead to 
					a situation where an instance gets stuck in a circular state of erroring, getting
					more time, erroring again, rinse and repeat.  instead of embracing this eventuality, 
					we choose to short the customer her measly few cents instead, and let it serve as a 
					as an excuse to add 'karma hits' on bad starts from providers as a feature later
					"""

					app.logger.error("OpenStack hung starting instance=(%s).  Setting to restart." % self.name)
					
				else:
					# this is a 'soft' fail
					response['response'] = "error"
					response['result']['message'] = "Still starting instance=(%s)." % self.name


		# OpenStack reports instance NOT FOUND	
		else:
			# we all have limited time in this reality
			epoch_time = int(time.time())			

			# we first check if we're outright expired (shouldn't happen)
			if self.expires < epoch_time:
				# no reason to be running as we're expired
				self.state = 7 # will be deleted shortly after this by trashman
				self.update()

				response['response'] = "error"
				response['result']['message'] = "Instance %s decommissioned." % self.name

				app.logger.error("OpenStack couldn't find expired instance=(%s). Decomissioning." % self.name)

			else:
				# we didn't find the instance in openstack, yet we should be running
				self.state = 2 # set to be started again
				self.update()

				response['response'] = "error"
				response['result']['message'] = "OpenStack couldn't find instance.  Restarting."
				
				app.logger.error("OpenStack couldn't find instance=(%s). Setting to restart." % self.name)

		# make a call to the callback url to report instance details on state change
		if self.state != start_state:
			appliance = Appliance().get()
			callback_url = self.callback_url
			pool_response = pool_instances(
				url=callback_url,
				instance=self,
				appliance=appliance)

		return response
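
nudge() above classifies each address reported by OpenStack with IPy's iptype() and version(). A quick standalone check of that behavior (illustrative only; the exact iptype() strings for IPv6, such as "ALLOCATED ARIN", depend on the IPy version and on the registry allocation of the address):

from IPy import IP

# RFC 1918 space is reported as PRIVATE, ordinary unicast space as PUBLIC
print(IP("10.0.2.15").iptype(), IP("10.0.2.15").version())  # PRIVATE 4
print(IP("8.8.8.8").iptype(), IP("8.8.8.8").version())      # PUBLIC 4

# for IPv6, iptype() returns the registry allocation of the range
# (e.g. "ALLOCATED ARIN" for ARIN-managed global unicast space)
print(IP("2001:db8::1").version())                          # 6
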
Example #5
    def housekeeping(self):
        from webapp.libs.openstack import instance_info
        from webapp.libs.openstack import instance_decommission
        from webapp.libs.openstack import instance_console

        # build the response
        response = {
            "response": "success",
            "result": {
                "message": "",
                "server": {}
            }
        }

        # get instance (server) info
        cluster_response = instance_info(self)
        server = cluster_response['result']['server']

        # we all have limited time in this reality
        epoch_time = int(time.time())

        # set start state
        start_state = self.state

        # this is complicated...because we aren't EC with OpenStack...or I'm crazy
        if cluster_response['response'] == "success":
            # openstack responded it found this instance
            if server.status == "ACTIVE":
                # openstack says the server is running
                if self.expires < epoch_time:
                    # suspend the instance for non-payment
                    try:
                        self.suspend()
                        response['result'][
                            'message'] = "Instance %s suspended." % self.name
                    except Exception as e:
                        response['response'] = 'error'
                        response['result']['message'] = \
                         'Instance {instance} suspending failed: "{error}".'.format(
                          instance=self.name, error=str(e))
                        return response
                    self.state = 5
                elif self.expires > epoch_time:
                    # openstack says we're running, and we're paid
                    if self.state == 5 or self.state == 6:
                        # we move the instance to starting mode
                        response['result'][
                            'message'] = "Instance %s is starting." % self.name
                        self.state = 3
            elif server.status == "SUSPENDED" or server.status == "SHUTOFF":
                # openstack says this instance is suspended
                if self.expires > epoch_time:
                    # should be running because not expired
                    try:
                        self.resume()
                        response['result'][
                            'message'] = "Instance %s resumed." % self.name
                    except Exception as e:
                        response['response'] = 'error'
                        response['result']['message'] = \
                         'Instance {instance} resume failed: "{error}".'.format(
                          instance=self.name, error=str(e))
                        return response
                    self.state = 3  # mark as starting
                if self.expires + app.config[
                        'POOL_DECOMMISSION_TIME'] < epoch_time:
                    # should be destroyed (suspended for +2 hours without pay)
                    response['result'][
                        'message'] = "Instance %s decommissioned." % self.name
                    self.state = 7
            else:
                # openstack indicates another state besides SUSPENDED or ACTIVE
                if self.expires > epoch_time:
                    # we should be running, but in a weird state - destroy then restart
                    response = instance_decommission(self)
                    response['result'][
                        'message'] = "Instance %s restarted." % self.name
                    self.state = 2  # set as paid and ready to start
                    app.logger.error(
                        "OpenStack says instance=(%s) isn't in the correct state.  Setting to restart."
                        % self.name)
                else:
                    # expired but in a weird state - destroy
                    response = instance_decommission(self)
                    response['result'][
                        'message'] = "Instance %s decommissioned." % self.name
                    self.state = 7
        else:
            # openstack can't find this instance
            if self.expires > epoch_time:
                if self.state == 2:
                    # check error rate
                    if self.message_count > 10:
                        # we're failing to start the instance, so decommission
                        response['result'][
                            'message'] = "Instance %s decommissioned." % self.name
                        self.state = 7
                        app.logger.error(
                            "Exceeded error rate on callbacks for instance=(%s). Decomissioning."
                            % self.name)
                else:
                    # set instance to restart - not expired, should be running
                    response[
                        'response'] = "error"  # technically, someone is probably f*****g with things
                    response['result'][
                        'message'] = "Setting instance %s to restart." % self.name
                    self.state = 2  # will be started shortly after this by start
                    app.logger.error(
                        "OpenStack doesn't know about instance=(%s). Setting to restart."
                        % self.name)
            else:
                # no reason to be running
                response['response'] = "error"
                response['result'][
                    'message'] = "Instance %s decommissioned." % self.name
                self.state = 7  # will be deleted shortly after this by trashman

        # get instance console output
        cluster_response = instance_console(self)
        if 'console' in cluster_response['result']:
            self.console = cluster_response['result']['console']

        # save updated properties
        self.save()

        # make a call to the callback url to report instance details if either the
        # state has changed or the last state change is less than 900 secs ago
        if self.state != start_state or self.updated >= int(time.time()) - 900:
            appliance = Appliance().get()
            callback_url = self.callback_url
            pool_response = pool_instances(url=callback_url,
                                           instance=self,
                                           appliance=appliance)

        return response
Example #6
    def nudge(self):
        from webapp.libs.openstack import try_associate_floating_ip
        from webapp.libs.openstack import instance_info
        from webapp.libs.openstack import instance_console
        from webapp.libs.openstack import instance_decommission

        # get instance console output
        response = instance_console(self)
        if 'console' in response['result']:
            self.console = response['result']['console']
        self.update()

        # get instance (server) info
        response = instance_info(self)

        # set start state
        start_state = self.state

        # set instance meta data
        if response['response'] == "success":
            server = response['result']['server']

            # if the state is ACTIVE, we set to be running state==4
            if server.status == "ACTIVE":
                # set network info
                self.state = 4

                # try to get a floating ip for the new server
                float_response = try_associate_floating_ip(server)

                # check if call got a floating IP
                if float_response['response'] == "success":
                    # get instance info again to pick up new IP
                    response = instance_info(self)

                    # load the response into the server object
                    if response['response'] == "success":
                        server = response['result']['server']

                else:
                    # log 'errors' in floating assignment
                    app.logger.info(float_response['result']['message'])

                # extract IP addresses using IPy
                # in some circumstances this will squash multiple same/same address types
                # we only extract and store one each of private ipv4, public ipv4, and public ipv6
                for key in server.networks.keys():  # any network names
                    for address in server.networks[
                            key]:  # loop through each address for each network
                        # private IPv4
                        if IP(address).iptype() == "PRIVATE" and IP(
                                address).version() == 4:
                            self.privateipv4 = address
                        # public IPv4
                        elif IP(address).iptype() == "PUBLIC" and IP(
                                address).version() == 4:
                            self.publicipv4 = address
                        # public IPv6
                        elif IP(address).iptype() == "ALLOCATED ARIN" and IP(
                                address).version() == 6:
                            self.publicipv6 = address

                # update the instance
                self.update()

            # ERROR status from openstack
            elif server.status == "ERROR":
                # instance failed to start, so delete and reset to paid
                response = instance_decommission(self)

                self.state = 2  # will be started again shortly
                self.update()

                response['response'] = "error"
                response['result'][
                    'message'] = "OpenStack errored on instance start."

                app.logger.error(
                    "OpenStack error on starting instance=(%s).  Setting to restart."
                    % self.name)

            # SPAWNING status from openstack
            else:
                # we all have limited time in this reality
                epoch_time = int(time.time())

                # wait_timer is 5 minutes after the last update
                wait_timer = self.updated + 300

                # test to see if we are 'hung' on SPAWNING for more than wait_timer
                if epoch_time > wait_timer:
                    # we're now  past when the instance needed to move to RUNNING
                    response = instance_decommission(self)

                    response['response'] = "error"
                    response['result'][
                        'message'] = "Setting instance %s to restart." % self.name

                    self.state = 2  # will be started shortly after this by start
                    self.updated = epoch_time  # given we 'timed' out, give the instance more time
                    self.update()
                    """
					of anyplace, this is where you *might* want to add some time to the instance
					because a time based payment has been made on it.  however, this could lead to 
					a situation where an instance gets stuck in a circular state of erroring, getting
					more time, erroring again, rinse and repeat.  instead of embracing this eventuality, 
					we choose to short the customer her measly few cents instead, and let it serve as a 
					as an excuse to add 'karma hits' on bad starts from providers as a feature later
					"""

                    app.logger.error(
                        "OpenStack hung starting instance=(%s).  Setting to restart."
                        % self.name)

                else:
                    # this is a 'soft' fail
                    response['response'] = "error"
                    response['result'][
                        'message'] = "Still starting instance=(%s)." % self.name

        # OpenStack reports instance NOT FOUND
        else:
            # we all have limited time in this reality
            epoch_time = int(time.time())

            # we first check if we're outright expired (shouldn't happen)
            if self.expires < epoch_time:
                # no reason to be running as we're expired
                self.state = 7  # will be deleted shortly after this by trashman
                self.update()

                response['response'] = "error"
                response['result'][
                    'message'] = "Instance %s decommissioned." % self.name

                app.logger.error(
                    "OpenStack couldn't find expired instance=(%s). Decomissioning."
                    % self.name)

            else:
                # we didn't find the instance in openstack, yet we should be running
                self.state = 2  # set to be started again
                self.update()

                response['response'] = "error"
                response['result'][
                    'message'] = "OpenStack couldn't find instance.  Restarting."

                app.logger.error(
                    "OpenStack couldn't find instance=(%s). Setting to restart."
                    % self.name)

        # make a call to the callback url to report instance details on state change
        if self.state != start_state:
            appliance = Appliance().get()
            callback_url = self.callback_url
            pool_response = pool_instances(url=callback_url,
                                           instance=self,
                                           appliance=appliance)

        return response