def complete(self, **kwargs):
    """
    Signal to the API server that any action we need to perform is
    complete and the API server can finish its connection with the
    client that initiated the API request.
    """
    # Record the moment execution finished.
    self.endTime = time.time()

    # In debug mode, fold the accumulated responses into the result so
    # they are reported back to the caller.
    if settings.DEBUG_MODE:
        kwargs['responses'] = self.responses

    # Keep the final outcome around for later inspection.
    self.result = kwargs

    try:
        outcome = "success" if kwargs['success'] else "failure"
        message = "Completed {} operation on chute {}: {}".format(
            self.updateType, self.new.name, outcome)
        out.usage(message, chute=self.new.name,
                  updateType=self.updateType,
                  createdTime=self.createdTime,
                  startTime=self.startTime,
                  endTime=self.endTime, **kwargs)
    except Exception as e:
        # Usage logging must never prevent completion of the update.
        out.exception(e, True)

    # Hand control back to whoever is waiting on this update.
    self.func(self)
def executePlans(update):
    """
    Primary function that actually executes all the functions that were
    added to plans by all the exc modules. This function can heavily
    modify the OS/files/etc.. so the return value is very important.
    Returns:
        True in error : abortPlans function should be called
        False otherwise : everything is OK
    """
    out.header('Executing plans %r\n' % (update))

    # The plangraph module acts as an iterator, handing us one work item
    # at a time until it returns a falsy value.
    while True:
        todo = update.plans.getNextTodo()
        if not todo:
            # No work remaining.
            break

        func, args = todo

        # Guard the call: any exception from a plan function aborts the
        # whole update and is reported back to the caller.
        try:
            out.verbose('Calling %s\n' % (func))
            # Splat args so that a plan with no extra arguments is
            # invoked as func(update) rather than func(update, ()).
            skipme = func(update, *args)
        except Exception as e:
            out.exception(e, True)
            update.responses.append({
                'exception': str(e),
                'traceback': traceback.format_exc(),
            })
            update.failure = str(e)
            return True

        # A plan function may return one or more functions to skip later
        # on (for instance a set* step that discovered it changed nothing
        # means the corresponding reload step is unnecessary).
        if skipme:
            if not isinstance(skipme, list):
                skipme = [skipme]
            for skip in skipme:
                out.warn('Identified a skipped function: %r\n' % (skip))
                update.plans.registerSkip(skip)

    # Everything ran without error.
    return False
def performUpdates(self):
    """This is the main working function of the PDConfigurer class.
    It should be executed as a separate thread, it does the following:
        checks for any updates to perform
        does them
        responds to the server
        removes the update
        checks for more updates
            if more exist it calls itself again more quickly
            else it puts itself to sleep for a little while
    """
    # Add any chutes that should already be running to the front of the
    # update queue before processing any new updates.
    startQueue = reloadChutes()

    # FIX: use the lock as a context manager so it is always released,
    # even if the insert raises. The original bare acquire()/release()
    # pair would leave the lock held forever on an exception, deadlocking
    # every other thread that touches the update queue.
    with self.updateLock:
        # Insert at the front of the queue so that all old chutes restart
        # before new ones are processed.
        for updateObj in startQueue:
            self.updateQueue.insert(0, updateObj)

    # Process updates for as long as the reactor is running.
    while self.reactor.running:
        # Check for new updates.
        updateObj = self.getNextUpdate()
        if updateObj is None:
            # Nothing to do; back off briefly before polling again.
            time.sleep(1)
            continue

        try:
            # Take the object and identify the update type.
            update = updateObject.parse(updateObj)
            out.info('Performing update %s\n' % (update))

            # TESTING start
            if settings.FC_BOUNCE_UPDATE:  # pragma: no cover
                out.testing('Bouncing update %s, result: %s\n' % (
                    update, settings.FC_BOUNCE_UPDATE))
                update.complete(success=True,
                                message=settings.FC_BOUNCE_UPDATE)
                continue
            # TESTING end

            # Based on each update type, execute() could be different.
            update.execute()
        except Exception as e:
            # One bad update must not kill the worker loop.
            out.exception(e, True)
def setup_net_interfaces(update):
    """
    Link interfaces in the host to the internal interface in the docker
    container using pipework.

    :param update: The update object containing information about the chute.
    :type update: obj
    :returns: None
    :raises OSError: if the pipework command could not be executed.
    """
    interfaces = update.new.getCache('networkInterfaces')

    for iface in interfaces:
        # Only wifi interfaces are wired into the container here.
        if iface.get('netType') == 'wifi':
            IP = iface.get('ipaddrWithPrefix')
            internalIntf = iface.get('internalIntf')
            externalIntf = iface.get('externalIntf')
        else:  # pragma: no cover
            continue

        # Construct environment for pipework call. It only seems to require
        # the PATH variable to include the directory containing the docker
        # client. On Snappy this was not happening by default, which is why
        # this code is here.
        env = {"PATH": os.environ.get("PATH", "")}
        if settings.DOCKER_BIN_DIR not in env['PATH']:
            env['PATH'] += ":" + settings.DOCKER_BIN_DIR

        cmd = ['/apps/paradrop/current/bin/pipework', externalIntf,
               '-i', internalIntf, update.name, IP]
        out.info("Calling: {}\n".format(" ".join(cmd)))
        try:
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE, env=env)
            # FIX: use communicate() instead of draining stdout fully and
            # then stderr. The sequential reads could deadlock if the
            # child filled the stderr pipe while we were blocked reading
            # stdout; communicate() drains both concurrently. It also
            # waits for the child, so the process is reaped instead of
            # being left as a zombie (the original never called wait()).
            stdout, stderr = proc.communicate()
            for line in stdout.splitlines():
                out.info("pipework: {}\n".format(line.strip()))
            for line in stderr.splitlines():
                out.warn("pipework: {}\n".format(line.strip()))
        except OSError as e:
            out.warn('Command "{}" failed\n'.format(" ".join(cmd)))
            out.exception(e, True)
            raise e