Example #1
0
def MakeModeling(baseDir,
                 dirName,
                 outputFileName="",
                 key="",
                 pdfDir=None,
                 mcscale=1.0,
                 histName="hist.root",
                 interactive=False):
    """Build a ProcessManager that stacks the histograms found in
    ``baseDir``/``histName`` (directory ``dirName``).

    When ``interactive`` is false, the stacks are additionally written
    into a freshly created ROOT file named ``outputFileName``.  The
    configured manager is returned either way.  (``pdfDir`` is accepted
    for interface compatibility; it is not used in this function.)
    """
    # The manager performs the stacking against the data.
    job = ProcessManager()

    # Register the single input process, scaled by mcscale.
    job.AddProcess("file1",
                   file=baseDir + "/" + histName,
                   dir=dirName,
                   color=ROOT.kYellow,
                   scale=mcscale)

    # Batch mode: persist the stacks to a new ROOT file.
    if not interactive:
        job.makeStacks(TFile(outputFileName, "RECREATE"), key)

    return job
Example #2
0
    def evalInput(self, input):
        """Feed *input* to a freshly traced target and return its
        integer reply; return -1 if the process manager cannot start."""
        launch_args = LaunchArguments([self.path], True)
        try:
            manager = ProcessManager(launch_args, None)
        except PtraceError as err:
            warning(err)
            return -1

        def _send(target: ProcessWrapper, payload: str):
            if DO_SYSCALL:
                # convert this so that no newline is added
                target.writeToBuf('b"""%s"""' % payload)
            else:
                target.in_pipe.write(payload)

        proc = manager.getCurrentProcess()
        _send(proc, input + "\n")

        try:
            manager.cont()
        except KeyboardInterrupt:
            pass

        reply = proc.read(0x1000)
        # Replies are framed as b",<number>."
        assert reply.startswith(b",") and reply.endswith(b".")
        return int(reply[1:-1])
Example #3
0
    def __init__(self, launch_args: LaunchArguments, startupfile=None, inputsock=False):
        """Wire up the poll loop, process manager and input sources.

        :param launch_args: arguments for launching the traced process
        :param startupfile: optional file handed to the input reader,
            presumably replayed as startup commands -- confirm in InputReader
        :param inputsock: when true, additionally read input from a socket
        """
        # Poll object shared with the process manager.
        self.inputPoll = PaulaPoll()
        self.manager = ProcessManager(launch_args, self.inputPoll)

        # Queue that makes user input pollable like any other fd.
        self.stdinQ = PollableQueue()
        self.inputPoll.register(self.stdinQ.fileno(), "userinput")
        self.reader_thread = InputReader(self.stdinQ, startupfile)
        self.sock_reader = InputSockReader(self.stdinQ) if inputsock else None

        # Created later, once a hyx session attaches.
        self.hyxTalker = None
        self._errmsg_suffix = ""
Example #4
0
 def __init__(self,
              log,
              config,
              max_log_time=MAX_LOG_TIME,
              max_temp_time=MAX_TEMP_TIME):
     """Store the testnode configuration and shared helpers.

     :param log: logger callable, kept both as testnode_log and log
     :param config: configuration mapping
         (NOTE(review): working_directory is read from `config` directly,
         so a falsy config raises TypeError despite the `config or {}`
         fallback -- confirm intent)
     :param max_log_time: retention threshold for old logs
     :param max_temp_time: retention threshold for old temporary files
     """
     self.testnode_log = log
     self.log = log
     self.config = config or {}
     self.process_manager = ProcessManager(log)
     self.working_directory = config['working_directory']
     self.node_test_suite_dict = {}
     self.file_handler = None
     self.max_log_time = max_log_time
     self.max_temp_time = max_temp_time
     self.url_access = "https://[0::0]:0123"  # Ipv6 + port of the node
Example #5
0
 def __init__(self, path_to_fuzzme: str):
     """Start tracing the target binary, break at main() and run to it.

     :param path_to_fuzzme: filesystem path of the binary to trace
     """
     super().__init__(path_to_fuzzme)
     # BUGFIX: was `LaunchArguments([path], False)` -- `path` is not
     # defined in this scope; the parameter is `path_to_fuzzme`.
     args = LaunchArguments([path_to_fuzzme], False)
     self.manager = manager = ProcessManager(args, None)
     # No SIGCHLDs pending yet.
     self.pending_SIGCHLDs = 0
     manager.addBreakpoint("b main")
     manager.cont()
     print(manager.getCurrentProcess().where())
Example #6
0
 def __init__(self, log, config):
   """Keep the logger and configuration, and create the process manager.

   :param log: logger callable
   :param config: configuration mapping, defaulting to {} when falsy
   """
   self.log = log
   self.config = config or {}
   self.process_manager = ProcessManager(log)
   self.node_test_suite_dict = {}
   # hack until slapos.cookbook is updated
   # (rewrites a working_directory ending in "slapos/" so it ends in
   # "testnode" instead)
   if self.config.get('working_directory', '').endswith("slapos/"):
     self.config['working_directory'] = self.config[
       'working_directory'][:-(len("slapos/"))] + "testnode"
Example #7
0
 def __init__(self, log, config, max_log_time=MAX_LOG_TIME,
              max_temp_time=MAX_TEMP_TIME):
   """Keep logger/config state and create the shared ProcessManager.

   :param log: logger callable, stored as both testnode_log and log
   :param config: configuration mapping, defaulting to {} when falsy
   :param max_log_time: retention threshold for old logs
   :param max_temp_time: retention threshold for old temporary files
   """
   self.testnode_log = log
   self.log = log
   self.config = config or {}
   self.process_manager = ProcessManager(log)
   self.node_test_suite_dict = {}
   self.max_log_time = max_log_time
   self.max_temp_time = max_temp_time
   self.file_handler = None
Example #8
0
 def __init__(self, log, config, max_log_time=MAX_LOG_TIME,
              max_temp_time=MAX_TEMP_TIME):
   """Store the testnode configuration and create shared helpers.

   NOTE(review): working_directory is read from `config` directly, so a
   falsy config raises TypeError despite the `config or {}` fallback --
   confirm intent.
   """
   self.testnode_log = log
   self.log = log
   self.config = config or {}
   self.process_manager = ProcessManager(log)
   self.working_directory = config['working_directory']
   self.node_test_suite_dict = {}
   self.file_handler = None
   self.max_log_time = max_log_time
   self.max_temp_time = max_temp_time
   self.url_access = "https://[0::0]:0123" # Ipv6 + port of the node
Example #9
0
	def addSlide(self, input):
		"""Append one bullet slide titled *input*, whose body points are
		derived by processing the input text."""
		# Slide scaffolding: layout 1 is the bullet layout.
		layout = self.prs.slide_layouts[1]
		slide = self.prs.slides.add_slide(layout)
		shapes = slide.shapes
		shapes.title.text = input
		frame = shapes.placeholders[1].text_frame
		# Process input into slide elements;
		# elements are { (l1,c1), (l2,c2), ...   }
		processor = ProcessManager(input)
		processor.setSlideElements()
		# Each element is point = (level, content).
		for point in processor.getSlideElements():
			paragraph = frame.add_paragraph()
			paragraph.font.size = Pt(20)
			paragraph.level = point[0]
			paragraph.text = point[1]
Example #10
0
    def __init__(self, path_to_fuzzme: str):
        """Launch the target under ptrace, break at `read` and run to it.

        :param path_to_fuzzme: filesystem path of the binary to trace
        """
        super().__init__(path_to_fuzzme)
        args = LaunchArguments([path_to_fuzzme], False)
        self.manager = manager = ProcessManager(args, None)

        # manager.addBreakpoint("b main")
        self.root_proc = manager.getCurrentProcess()

        # BUGFIX: was `ProgramInfo(path, ...)` -- `path` is undefined in
        # this scope; the binary path is the `path_to_fuzzme` parameter.
        self.root_proginfo = ProgramInfo(path_to_fuzzme, self.root_proc.getPid(),
                                         self.root_proc)

        # Break at the resolved address of `read`.
        break_at = self.root_proginfo.getAddrOf("read")
        manager.addBreakpoint("b %d" % break_at)
        manager.cont()
        print(self.root_proc.where())

        # "" maps to the root process; presumably other keys identify
        # forked children -- confirm against the fork handling code.
        self.pref_dict = dict([("", self.manager.getCurrentProcess())])
        self.spawned_procs = 0
Example #11
0
def main():
    """Read JSON telemetry lines from a spawned reader process and
    forward de-duplicated packets to the IoT Hub client.

    Runs until interrupted; on Ctrl-C the spawned process is stopped.
    """
    # BUGFIX: bind `manager` up front so the KeyboardInterrupt handler
    # cannot hit a NameError when startup itself is interrupted.
    manager = None
    try:
        client = iothub_client_init()
        manager = ProcessManager()
        manager.start_process(DEFAULT_COMMAND)

        while True:
            lines = manager.get_stdout()

            for line in lines:
                prevMsg = None
                for txt in line:
                    jsonobj = json.loads(txt)
                    # Fields common to every message subtype.
                    pkt = dict()
                    pkt['dateTime'] = jsonobj['time']
                    pkt['model'] = jsonobj['model']
                    pkt['id'] = jsonobj['id']
                    pkt['channel'] = jsonobj['channel']
                    pkt['battery'] = jsonobj['battery_ok']
                    msg_type = jsonobj['subtype']
                    pkt['wind_speed'] = jsonobj['wind_avg_km_h']

                    # Subtype-specific fields.
                    if msg_type == 49:
                        pkt['wind_dir'] = jsonobj['wind_dir_deg']
                        pkt['rain_mm'] = jsonobj['rain_mm']
                    elif msg_type == 56:
                        pkt['temperature'] = jsonobj['temperature_C']
                        pkt['humidity'] = jsonobj['humidity']

                    # Only send packets that differ from the previous one.
                    if pkt != prevMsg:
                        prevMsg = pkt
                        message = Message(json.dumps(pkt))
                        print(message)
                        print("Sending message...")
                        try:
                            client.send_message(message)
                        except Exception:
                            # BUGFIX: was a bare `except:`, which also
                            # swallowed KeyboardInterrupt/SystemExit.
                            print("Sending failed")
                        time.sleep(.1)
    except KeyboardInterrupt:
        if manager is not None:
            manager.stop_process()
Example #12
0
def MakeModeling(baseDir,
                 dirName,
                 outputFileName="",
                 key="",
                 pdfDir=None,
                 mcscale=1.0,
                 histName1="",
                 histName2="",
                 interactive=False,
                 debug=False):
    """Build a ProcessManager with the two background-model inputs and,
    unless running interactively, write the resulting stacks to a newly
    created ROOT file named ``outputFileName``.

    Returns the configured manager.  (``pdfDir`` and ``debug`` are
    accepted for interface compatibility; they are not used here.)
    """
    # The manager performs the stacking against the data.
    job = ProcessManager()

    # Register the two background-modeling inputs: dir1 unscaled,
    # dir2 scaled by mcscale.
    for label, hist, color, scale in (
            ("dir1", histName1, ROOT.kBlack, 1.0),
            ("dir2", histName2, ROOT.kYellow, mcscale)):
        job.AddProcess(label,
                       file=baseDir + "/" + hist,
                       dir=dirName,
                       color=color,
                       scale=scale)

    # Batch mode: persist the stacks.
    if not interactive:
        job.makeStacks(TFile(outputFileName, "RECREATE"), key)

    return job
Example #13
0
class TestNode(object):

  def __init__(self, log, config, max_log_time=MAX_LOG_TIME,
               max_temp_time=MAX_TEMP_TIME):
    """Store configuration and create the shared ProcessManager.

    :param log: logger callable, also kept as testnode_log so self.log
        can be redirected to a per-suite log later (see registerSuiteLog)
    :param config: configuration mapping
        (NOTE(review): working_directory is read from `config` directly,
        so a falsy config raises TypeError despite the `config or {}`
        fallback -- confirm intent)
    :param max_log_time: retention threshold (days) for old log folders
    :param max_temp_time: retention threshold (days) for old temp folders
    """
    self.testnode_log = log
    self.log = log
    self.config = config or {}
    self.process_manager = ProcessManager(log)
    self.working_directory = config['working_directory']
    self.node_test_suite_dict = {}
    self.file_handler = None
    self.max_log_time = max_log_time
    self.max_temp_time = max_temp_time
    self.url_access = "https://[0::0]:0123" # Ipv6 + port of the node


  def checkOldTestSuite(self, test_suite_data):
    """Delete working copies of test suites the master no longer wants."""
    wished = set(entry['test_suite_reference'] for entry in test_suite_data)
    installed = set(os.listdir(self.working_directory))
    for reference in installed - wished:
      fpath = os.path.join(self.working_directory, reference)
      self.delNodeTestSuite(reference)
      self.log("testnode.checkOldTestSuite, DELETING : %r" % (fpath,))
      if os.path.isdir(fpath):
        shutil.rmtree(fpath)
      else:
        os.remove(fpath)
  
  def getNodeTestSuite(self, reference):
    """Return the NodeTestSuite cached under *reference*, creating it on
    first use; its log/config/process_manager are always refreshed."""
    try:
      node_test_suite = self.node_test_suite_dict[reference]
    except KeyError:
      node_test_suite = NodeTestSuite(reference)
      self.node_test_suite_dict[reference] = node_test_suite

    node_test_suite.edit(log=self.log,
                         config=self.config,
                         process_manager=self.process_manager)
    return node_test_suite

  def delNodeTestSuite(self, reference):
    """Forget the NodeTestSuite cached under *reference*, if any.

    BUGFIX/portability: the original used dict.has_key(), which is
    deprecated and Python-2-only (removed in Python 3); pop() with a
    default performs the same check-and-remove on both versions.
    """
    self.node_test_suite_dict.pop(reference, None)

  def constructProfile(self, node_test_suite, test_type, use_relative_path=False):
    config = self.config
    profile_content = ''
    assert len(node_test_suite.vcs_repository_list), "we must have at least one repository"
    profile_path_count = 0
    profile_content_list = []
    for vcs_repository in node_test_suite.vcs_repository_list:
      url = vcs_repository['url']
      buildout_section_id = vcs_repository.get('buildout_section_id', None)
      repository_path = vcs_repository['repository_path']
      try:
        profile_path = vcs_repository[PROFILE_PATH_KEY]
      except KeyError:
        pass
      else:
        profile_path_count += 1
        if profile_path_count > 1:
          raise ValueError(PROFILE_PATH_KEY + ' defined more than once')

        # Absolute path to relative path
        software_config_path = os.path.join(repository_path, profile_path)
        if use_relative_path :
          from_path = os.path.join(self.working_directory,
                                    node_test_suite.reference)
          software_config_path = os.path.relpath(software_config_path, from_path)


        profile_content_list.append("""
[buildout]
extends = %(software_config_path)s
""" %  {'software_config_path': software_config_path})

      # Construct sections
      if not(buildout_section_id is None):
        # Absolute path to relative
        if use_relative_path:
          from_path = os.path.join(self.working_directory,
                                    node_test_suite.reference)
          repository_path = os.path.relpath(repository_path, from_path)

        if test_type=="ScalabilityTest":
#          updater = Updater(repository_path, git_binary=self.config['git_binary'],
#          branch = vcs_repository.get('branch','master'), log=self.log, process_manager=self.process_manager)
#          updater.checkout()
#          revision = updater.getRevision()[1]
          all_revision = node_test_suite.revision
          # from 'sec1=xx-azer,sec2=yy-qwer,..' to [[sec1,azer],[sec2,qwer],..]
          revision_list = [ [x.split('=')[0],x.split('=')[1].split('-')[1]] for x in all_revision.split(',') ]
          # from [[sec1,azer],[sec2,qwer],..] to {sec1:azer,sec2:qwer,..}
          revision_dict = {branch:revision for branch,revision in revision_list}
          # <obfuscated_url> word is modified by in runner.prepareSlapOSForTestSuite()
          profile_content_list.append("""
[%(buildout_section_id)s]
repository = <obfuscated_url>/%(buildout_section_id)s/%(buildout_section_id)s.git
revision = %(revision)s
ignore-ssl-certificate = true
develop = false
""" %     {'buildout_section_id': buildout_section_id,
          'revision': revision_dict[buildout_section_id]})
        else:
          profile_content_list.append("""
[%(buildout_section_id)s]
repository = %(repository_path)s
branch = %(branch)s
develop = false
""" %     {'buildout_section_id': buildout_section_id,
          'repository_path' : repository_path,
          'branch' : vcs_repository.get('branch','master')})
    if not profile_path_count:
      raise ValueError(PROFILE_PATH_KEY + ' not defined')
    # Write file
    custom_profile = open(node_test_suite.custom_profile_path, 'w')
    # sort to have buildout section first
    profile_content_list.sort(key=lambda x: [x, ''][x.startswith('\n[buildout]')])
    custom_profile.write(''.join(profile_content_list))
    custom_profile.close()
    sys.path.append(repository_path)

  def getAndUpdateFullRevisionList(self, node_test_suite):
    """Check out every VCS repository of *node_test_suite* and return
    the list of '<repository_id>=<revision>' strings; the comma-joined
    list is also stored on node_test_suite.revision."""
    log = self.log
    git_binary = self.config['git_binary']
    full_revision_list = []
    for vcs_repository in node_test_suite.vcs_repository_list:
      repository_path = vcs_repository['repository_path']
      repository_id = vcs_repository['repository_id']
      branch = vcs_repository.get('branch')
      # Make sure we have local repository
      updater = Updater(repository_path, git_binary=git_binary,
                        branch=branch, log=log,
                        process_manager=self.process_manager,
                        working_directory=node_test_suite.working_directory,
                        url=vcs_repository["url"])
      updater.checkout()
      revision = "-".join(updater.getRevision())
      full_revision_list.append('%s=%s' % (repository_id, revision))
    node_test_suite.revision = ','.join(full_revision_list)
    return full_revision_list

  def registerSuiteLog(self, test_result, node_test_suite):
    """
      Create a log dedicated for the test suite,
      and register the url to master node.

      Side effect: self.log and process_manager.log are redirected to
      the new per-suite logger.  Returns the suite log path.
    """
    suite_log_path, folder_id = node_test_suite.createSuiteLog()
    self._initializeSuiteLog(suite_log_path)
    # TODO make the path into url
    test_result.reportStatus('LOG url', "%s/%s" % (self.config.get('httpd_url'),
                             folder_id), '')
    self.log("going to switch to log %r" % suite_log_path)
    self.process_manager.log = self.log = self.getSuiteLog()
    return suite_log_path

  def getSuiteLog(self):
    """Return the per-suite logger installed by _initializeSuiteLog().

    NOTE(review): raises AttributeError if called before
    _initializeSuiteLog has run at least once.
    """
    return self.suite_log

  def _initializeSuiteLog(self, suite_log_path):
    """Point the 'testsuite' logger at *suite_log_path*, replacing the
    handler of any previous suite, and expose logger.info as
    self.suite_log."""
    # remove previous handlers
    logger = logging.getLogger('testsuite')
    if self.file_handler is not None:
      logger.removeHandler(self.file_handler)
    # and replace it with new handler
    logger_format = '%(asctime)s %(name)-13s: %(levelname)-8s %(message)s'
    formatter = logging.Formatter(logger_format)
    # NOTE(review): basicConfig configures the root logger and only has
    # an effect the first time it runs -- confirm this is intended here.
    logging.basicConfig(level=logging.INFO, format=logger_format)
    self.file_handler = logging.FileHandler(filename=suite_log_path)
    self.file_handler.setFormatter(formatter)
    logger.addHandler(self.file_handler)
    logger.info('Activated logfile %r output' % suite_log_path)
    self.suite_log = logger.info

  def checkRevision(self, test_result, node_test_suite):
    """Re-checkout the local repositories when the master asks for a
    different revision than the one prepared locally.

    Updates node_test_suite.revision to match test_result.revision.
    (The original also contained a dead `if log is None: log = self.log`
    directly after `log = self.log`; removed as a no-op.)
    """
    config = self.config
    log = self.log
    if node_test_suite.revision != test_result.revision:
      log('Disagreement on tested revision, checking out: %r' % (
          (node_test_suite.revision, test_result.revision),))
      for i, repository_revision in enumerate(test_result.revision.split(',')):
        vcs_repository = node_test_suite.vcs_repository_list[i]
        repository_path = vcs_repository['repository_path']
        revision = repository_revision.rsplit('-', 1)[1]
        # other testnodes on other boxes are already ready to test another
        # revision
        log('  %s at %s' % (repository_path, node_test_suite.revision))
        updater = Updater(repository_path, git_binary=config['git_binary'],
                          revision=revision, log=log,
                          process_manager=self.process_manager)
        updater.checkout()
        updater.git_update_server_info()
        updater.git_create_repository_link()
        node_test_suite.revision = test_result.revision

  def _cleanupLog(self):
    """Remove per-suite log folders older than max_log_time days."""
    now = time.time()
    log_directory = self.config['log_directory']
    for entry in os.listdir(log_directory):
      folder_path = os.path.join(log_directory, entry)
      if not os.path.isdir(folder_path):
        continue
      age_in_days = (now - os.stat(folder_path).st_mtime) / 86400
      if age_in_days > self.max_log_time:
        self.log("deleting log directory %r" % (folder_path,))
        shutil.rmtree(folder_path)

  def _cleanupTemporaryFiles(self):
    """
    buildout seems letting files under /tmp. To avoid regular error of
    missing disk space, remove old logs
    """
    temp_directory = self.config["system_temp_folder"]
    now = time.time()
    user_id = os.geteuid()
    for temp_folder in os.listdir(temp_directory):
      folder_path = os.path.join(temp_directory, temp_folder)
      # str.startswith accepts a tuple: one call instead of an `or` chain.
      if temp_folder.startswith(("tmp", "buildout")):
        try:
          stat = os.stat(folder_path)
          # Only remove entries we own that exceeded the retention time.
          if stat.st_uid == user_id and \
              (now - stat.st_mtime)/86400 > self.max_temp_time:
            self.log("deleting temp directory %r" % (folder_path,))
            if os.path.isdir(folder_path):
              shutil.rmtree(folder_path)
            else:
              os.remove(folder_path)
        except OSError:
          # Entries can vanish between listdir() and stat(); log and go on.
          self.log("_cleanupTemporaryFiles exception", exc_info=sys.exc_info())

  def cleanUp(self,test_result):
    """Kill processes left from previous runs and prune old logs and
    temporary files.

    :param test_result: accepted for the callers' convenience; not used.
    """
    log = self.log
    log('Testnode.cleanUp')
    self.process_manager.killPreviousRun()
    self._cleanupLog()
    self._cleanupTemporaryFiles()

  def run(self):
    log = self.log
    config = self.config
    slapgrid = None
    previous_revision_dict = {}
    revision_dict = {}
    test_result = None
    test_node_slapos = SlapOSInstance()
    test_node_slapos.edit(working_directory=self.config['slapos_directory'])
    try:
      while True:
        try:
          node_test_suite = None
          self.log = self.process_manager.log = self.testnode_log
          self.cleanUp(None)
          remote_test_result_needs_cleanup = False
          begin = time.time()
          portal_url = config['test_suite_master_url']
          portal = taskdistribution.TaskDistributionTool(portal_url,
                                                         logger=DummyLogger(log))
          self.portal = portal
          self.test_suite_portal = taskdistribution.TaskDistributor(
                                                        portal_url,
                                                        logger=DummyLogger(log))
          self.test_suite_portal.subscribeNode(node_title=config['test_node_title'],
                                               computer_guid=config['computer_id'])
          test_suite_data = self.test_suite_portal.startTestSuite(
                                               node_title=config['test_node_title'],
                                               computer_guid=config['computer_id'])
          if type(test_suite_data) == str:
            # Backward compatiblity
            test_suite_data = json.loads(test_suite_data)
          test_suite_data = Utils.deunicodeData(test_suite_data)
          log("Got following test suite data from master : %r" % \
              (test_suite_data,))
          try:
            my_test_type = self.test_suite_portal.getTestType()
          except:
            log("testnode, error during requesting getTestType() method \
from the distributor.")
            raise
          # Select runner according to the test type
          if my_test_type == 'UnitTest':
            runner = UnitTestRunner(self)
          elif my_test_type == 'ScalabilityTest':
            runner = ScalabilityTestRunner(self)
          else:
            log("testnode, Runner type %s not implemented.", my_test_type)
            raise NotImplementedError
          log("Type of current test is %s" % (my_test_type,))
          # master testnode gets test_suites, slaves get nothing
          runner.prepareSlapOSForTestNode(test_node_slapos)
          # Clean-up test suites
          self.checkOldTestSuite(test_suite_data)
          for test_suite in test_suite_data:
            remote_test_result_needs_cleanup = False
            node_test_suite = self.getNodeTestSuite(
               test_suite["test_suite_reference"])

            node_test_suite.edit(
               working_directory=self.config['working_directory'],
               log_directory=self.config['log_directory'])

            node_test_suite.edit(**test_suite)
            if my_test_type == 'UnitTest':
              runner = UnitTestRunner(node_test_suite)
            elif my_test_type == 'ScalabilityTest':
              runner = ScalabilityTestRunner(node_test_suite)
            else:
              log("testnode, Runner type %s not implemented.", my_test_type)
              raise NotImplementedError

            # XXX: temporary hack to prevent empty test_suite
            if not hasattr(node_test_suite, 'test_suite'):
              node_test_suite.edit(test_suite='')
            run_software = True
            # kill processes from previous loop if any
            self.process_manager.killPreviousRun()
            self.getAndUpdateFullRevisionList(node_test_suite)
            # Write our own software.cfg to use the local repository
            self.constructProfile(node_test_suite, my_test_type, 
                                  runner.getRelativePathUsage())
            # Make sure we have local repository
            test_result = portal.createTestResult(node_test_suite.revision, [],
                     config['test_node_title'], False,
                     node_test_suite.test_suite_title,
                     node_test_suite.project_title)
            remote_test_result_needs_cleanup = True
            log("testnode, test_result : %r" % (test_result, ))
            if test_result is not None:
              self.registerSuiteLog(test_result, node_test_suite)
              self.checkRevision(test_result,node_test_suite)
              node_test_suite.edit(test_result=test_result)
              # Now prepare the installation of SlapOS and create instance
              status_dict = runner.prepareSlapOSForTestSuite(node_test_suite)
              # Give some time so computer partitions may start
              # as partitions can be of any kind we have and likely will never have
              # a reliable way to check if they are up or not ...
              time.sleep(20)
              if my_test_type == 'UnitTest':
                runner.runTestSuite(node_test_suite, portal_url)
              elif my_test_type == 'ScalabilityTest':
                error_message = None
                # A problem is appeared during runTestSuite
                if status_dict['status_code'] == 1:
                  error_message = "Software installation too long or error(s) are present during SR install."
                else:
                  status_dict = runner.runTestSuite(node_test_suite, portal_url)
                  # A problem is appeared during runTestSuite
                  if status_dict['status_code'] == 1:
                    error_message = status_dict['error_message']

                # If an error is appeared
                if error_message:
                  test_result.reportFailure(
                      stdout=error_message
                  )
                  self.log(error_message)
                  raise ValueError(error_message)
              else:
                raise NotImplementedError
                  
              # break the loop to get latest priorities from master
              break
            self.cleanUp(test_result)
        except (SubprocessError, CalledProcessError) as e:
          log("SubprocessError", exc_info=sys.exc_info())
          if remote_test_result_needs_cleanup:
            status_dict = e.status_dict or {}
            test_result.reportFailure(
              command=status_dict.get('command'),
              stdout=status_dict.get('stdout'),
              stderr=status_dict.get('stderr'),
            )
          continue
        except ValueError as e:
          # This could at least happens if runTestSuite is not found
          log("ValueError", exc_info=sys.exc_info())
          if node_test_suite is not None:
            node_test_suite.retry_software_count += 1
        except CancellationError, e:
          log("CancellationError", exc_info=sys.exc_info())
          self.process_manager.under_cancellation = False
          node_test_suite.retry = True
          continue
        except:
          ex_type, ex, tb = sys.exc_info()
          traceback.print_tb(tb)
          log("erp5testnode exception", exc_info=sys.exc_info())
          raise
        now = time.time()
        self.cleanUp(test_result)
        if (now-begin) < 120:
          sleep_time = 120 - (now-begin)
          log("End of processing, going to sleep %s" % sleep_time)
          time.sleep(sleep_time)
Example #14
0
                    rr = build_round_robin(args.quantum, sc)
                    schedulers.append(rr)
                    scheduler_found = True
                    break
    args.scheduler = schedulers or [fifo]

    return args


if __name__ == "__main__":
    # Parse CLI options and build the process table.
    args = read_args()
    processes = [Process(*spec) for spec in read_process(args.filename)]
    for proc in processes:
        proc.time_io = args.time_io

    # Run the full simulation once for each requested scheduler.
    manager = ProcessManager(processes)
    manager.enable_log(args.verbose)
    for scheduler in args.scheduler:
        manager.scheduler = scheduler
        manager.run()

    # Optional step-by-step mode: one execute/clock tick per prompt.
    if args.iterative:
        manager.enable_log(True)
        manager.begin()
        while True:
            cmd = input(">")
            manager.execute()
            manager.next_clock()
Example #15
0
class InputHandler:

    def __init__(self, launch_args: LaunchArguments, startupfile=None, inputsock=False):
        """Wire up the poll loop, process manager and input sources.

        :param launch_args: arguments for launching the traced process
        :param startupfile: optional file handed to the input reader,
            presumably replayed as startup commands -- confirm in InputReader
        :param inputsock: when true, additionally read input from a socket
        """
        # Poll object shared with the process manager.
        self.inputPoll = PaulaPoll()
        self.manager = ProcessManager(launch_args, self.inputPoll)

        # Queue that makes user input pollable like any other fd.
        self.stdinQ = PollableQueue()
        self.inputPoll.register(self.stdinQ.fileno(), "userinput")
        self.reader_thread = InputReader(self.stdinQ, startupfile)
        self.sock_reader = InputSockReader(self.stdinQ) if inputsock else None

        # Created later, once a hyx session attaches (see init_hyx).
        self.hyxTalker = None
        self._errmsg_suffix = ""

    def execute(self, cmd):
        """Run *cmd*, converting any ValueError into its message string."""
        try:
            result = self._execute(cmd)
        except ValueError as err:
            result = str(err)
        return result

    def _execute(self, cmd):
        """Dispatch a debugger command string to the matching manager or
        process operation and return its result ("" when there is none).

        NOTE(review): dispatch is by str.startswith, so the order of the
        elif chain is significant (e.g. "call" must precede "c", and
        "sw" must precede the single-step "s").
        """
        manager = self.manager
        procWrap = manager.getCurrentProcess()
        # NOTE(review): `proc` is currently unused in this method.
        proc = procWrap.ptraceProcess

        result = ""
        if cmd.startswith("hyx") and not self.hyxTalker:
            _, _, cmd = cmd.partition(" ")
            result = self.init_hyx(cmd)

        elif cmd.startswith("call"):
            result = manager.callFunction(cmd)

        elif cmd.startswith("c"):  # continue
            result = manager.cont()

        elif cmd.startswith("w "):  # write
            _, _, cmd = cmd.partition(" ")
            result = manager.write(cmd)
            if CONT_AFTER_WRITE:
                if result:
                    print(result)
                result = manager.cont()

        elif cmd.startswith("fork"):
            result = self.fork(cmd)

        elif cmd.startswith("sw"):  # switch
            result = self.switch(cmd)

        elif cmd.startswith("tree"):
            result = self.switch("switch ?")

        elif cmd.startswith("b"):
            result = manager.addBreakpoint(cmd)

        elif cmd.startswith("rb"):
            _,_, cmd = cmd.partition(" ")
            result = manager.getCurrentProcess().removeBreakpoint(cmd)

        elif cmd.startswith("malloc"):
            result = manager.callFunction("call " + cmd)

        elif cmd.startswith("free"):
            result = manager.callFunction("call " + cmd)

        elif cmd.startswith("fin"):
            result = manager.finish()

        elif cmd.startswith("list b"):
            # Early return: breakpoints are returned as-is, bypassing the
            # falsy-to-"" normalization at the bottom.
            return manager.getCurrentProcess().ptraceProcess.breakpoints

        elif cmd.startswith("s"):
            result = manager.cont(singlestep=True)

        elif cmd.startswith("fam"):
            result = manager.family()

        elif cmd.startswith("maps"):
            result = manager.dumpMaps()

        elif cmd.startswith("p"):
            result = manager.print(cmd)

        elif cmd.startswith("x"):
            result = manager.examine(cmd)

        elif cmd.startswith("trace"):
            result = manager.trace_syscall(cmd)

        # NOTE(review): the `and False` permanently disables this branch.
        elif cmd.startswith("getsegment") and False:
            _, _, cmd = cmd.partition(" ")
            result = manager.getCurrentProcess().get_own_segment()

        elif cmd.startswith("where"):
            result = manager.getCurrentProcess().where()

        elif cmd.startswith("name"):
            _, _, cmd = cmd.partition(" ")
            pid, _, name = cmd.partition(" ")

            # first give pid, then name. if you only give name,
            # first partition (currently pid) is actually name
            if name:
                pid = int(pid)
            else:
                name, pid = pid, 0
            result = manager.name_process(name, pid)

        elif cmd.startswith("?"):
            my_help(cmd)

        else:
            result = "use ? for a list of available commands"

        return result if result else ""

    def inputLoop(self):
        """Block on the poll loop forever, dispatching events from hyx,
        user input and the traced process's stdout/stderr pipes."""
        print("type ? for help")
        while True:
            # NOTE(review): skip_hyx_update is assigned but never
            # consulted below -- confirm whether the hyx update was
            # meant to be guarded by it.
            skip_hyx_update = False
            poll_result = self.inputPoll.poll()
            assert len(poll_result) > 0

            if len(poll_result) == 1:
                name, fd, event = poll_result[0]
                if name == "hyx":
                    skip_hyx_update = self.handle_hyx(event)
                elif name == "userinput":
                    self.handle_stdin()
                elif "-out" in name:
                    self.handle_procout(name, fd, event)

                elif "-err" in name:
                    self.handle_stderr(event)

            else:  # this happens when two sockets are written to at the "same" time
                # Only the first -out/-err event is serviced this round;
                # the others surface again on the next poll().
                for name, fd, event in poll_result:
                    if "-out" in name:
                        self.handle_procout(name, fd, event)
                        break
                    elif "-err" in name:
                        # NOTE(review): handle_stderr takes an `event`
                        # but is handed `name` here -- harmless since the
                        # argument is ignored, but looks unintended.
                        self.handle_stderr(name)
                        break

                info(poll_result)

            if self.hyxTalker:
                try:
                    self.hyxTalker.updateHyx()
                except ValueError as e:
                    warning("encountered %s when updating hyx" % e)
                    self._switch_hyxtalker()

    def handle_stderr(self, event):
        """Print the target's stderr and mirror it to hyx when attached."""
        msg = "[ERR] %s" % self.manager.getCurrentProcess().read(0x1000, "err")
        print(msg)
        if self.hyxTalker:
            self.hyxTalker.sendMessage(msg + self._errmsg_suffix)
            self._errmsg_suffix = ""

    def handle_stdin(self):
        """Called when a fresh line sits in stdinQ: run it as a command and
        print any result."""
        line = self.stdinQ.get()
        cmd = line[:-1]  # drop the trailing newline
        assert isinstance(cmd, str)

        output = self.execute(cmd)
        if output:
            print(output)

    def handle_hyx(self, event):
        """Handles incoming updates / command requests from hyx.

        Apparently intended to return True when the hyx view should not be
        refreshed afterwards (the original docstring was cut off mid-sentence);
        as written, every path returns None."""
        hyxtalker = self.hyxTalker

        if event & POLLHUP:  # sock closed
            remaining_data = hyxtalker.hyxsock.recv(1000)
            if remaining_data:
                print(remaining_data)
            self.delete_hyx()
            return
        if event != POLLIN:
            raise NotImplementedError("unknown event: %s" % event)

        # the first byte of a message identifies its kind
        check = hyxtalker.hyxsock.recv(1)
        if check == CMD_REQUEST:
            cmd = hyxtalker.recvCommand()
            print("%s   (hyx)" % cmd)

            if cmd.strip().startswith("fork"):
                # if this would not be done, hyx would interpret the new heap (sent when forking) as the result from the command
                hyxtalker.sendCommandResponse("forking")
                result = self.execute(cmd)
                print(result)
            else:
                result = self.execute(cmd)
                print(result)
                hyxtalker.sendCommandResponse(result)

        elif check == UPD_FROMBLOB or check == UPD_FROMBLOBNEXT:
            # hyx pushed a memory edit; apply it to the traced process
            hyxtalker.getUpdate(isNextByte=(check == UPD_FROMBLOBNEXT))

        else:
            # NOTE(review): warning() is called with two positional args here,
            # unlike the single-string uses elsewhere — confirm its signature.
            warning(check, event)
            raise NotImplementedError

    def handle_procout(self, name, fd, event):
        """Print freshly available stdout of the current process and mirror
        it to the input socket and hyx, when attached. The name/fd/event
        parameters are kept for callback-signature parity and are unused."""
        proc = self.manager.getCurrentProcess()
        assert isinstance(proc, ProcessWrapper)
        data = proc.out_pipe.read(4096)

        # mirror the raw bytes to an attached input socket, if any
        if self.sock_reader:
            self.sock_reader.acc_sock.send(data)

        line = "[OUT] %s" % data
        print(line)
        if self.hyxTalker:
            self.hyxTalker.sendMessage(line)


    def delete_hyx(self):
        """Tear down the hyx connection and forget the talker.

        rootsock=True presumably also closes the listening root socket —
        confirm against HyxTalker.destroy()."""
        self.hyxTalker.destroy(rootsock=True)
        self.hyxTalker = None

    def init_hyx(self, cmd: str):
        """open a segment with Hyx. You can specify the permissions of the segment, default is rwp.
       You can use slicing syntax, [1:-3] will open the segment starting with an offset of 0x1000, ending 0x3000 bytes before actual send of segment
       You can also trim the segment to start at the first page that has some non-zero bytes in it.

       Example use:
       hyx heap [f:]     omits the first fifteen pages
       hyx stack [i:i]   removes "boring" (zero-filled) pages from the start and end
       hyx libc rp"""
        currentProcess = self.manager.getCurrentProcess()
        args = INIT_HYX_ARGS.match(cmd)

        if not args:
            # no recognisable arguments: default to the whole heap with rwp
            init_args = MemorySegmentInitArgs("heap", "rwp", 0, 0, False, False)
        else:
            segment = args.group(1)  # segment name, e.g. "heap", "stack", "libc"
            permissions = args.group(2)
            if permissions is None:
                permissions = "rwp"

            # if sliceoffsets are specified, convert the strings to int
            # (hex page counts become byte offsets; group(4)=start, group(6)=stop
            #  per the slicing syntax described in the docstring)
            convert_func = lambda slice_str: int(slice_str, 16) * 0x1000 if slice_str else 0
            start, stop = map(convert_func, [args.group(4), args.group(6)])

            # groups 5/7 presumably carry the "i" trim markers from the
            # docstring's [i:i] syntax — confirm against INIT_HYX_ARGS
            init_args = MemorySegmentInitArgs(segment, permissions, start, stop,
                                              start_nonzero=bool(args.group(5)),
                                              stop_nonzero=bool(args.group(7))
                                              )

        try:
            heap = Heap(currentProcess, init_args)
        except ValueError as e:
            # the segment could not be opened as requested; report the reason
            return str(e)

        self.hyxTalker = HyxTalker(heap, self.inputPoll)

    def fork(self, cmd):
        """Fork the current traced process; when a new child appears, point
        the hyx view at it."""
        proc = self.manager.getCurrentProcess()
        count_before = len(proc.children)

        result = self.manager.fork(cmd)

        # a longer children list means the fork produced a new child (and the
        # manager switched to it), so re-target the hyx talker as well
        if len(proc.children) > count_before:
            self._switch_hyxtalker()

        return result

    def switch(self, cmd):
        """Switch the active traced process; everything after the first space
        of *cmd* selects the target."""
        target = cmd.partition(" ")[2]
        result = self.manager.switchProcess(target)
        self._switch_hyxtalker()

        return result

    def _switch_hyxtalker(self):
        """Point a running hyx instance at the heap of the (new) current
        process: reuse the process' existing Heap when present, otherwise
        build one with the same init arguments as the previous heap."""
        if not self.hyxTalker:
            return

        newProc = self.manager.getCurrentProcess()
        if newProc.heap:
            newHeap = newProc.heap
        else:
            args = self.hyxTalker.heap.args
            newHeap = Heap(newProc, args)

        self.hyxTalker.heap = newHeap
        self.hyxTalker.sendNewHeap(newHeap.start, newHeap.stop)

        msg = "switched to %d" % newProc.getPid()
        self.hyxTalker.sendMessage(msg)

        self._errmsg_suffix = "   " + msg   # next time stderr is printed, add this
Example #16
0
class TestNode(object):
    def __init__(self,
                 log,
                 config,
                 max_log_time=MAX_LOG_TIME,
                 max_temp_time=MAX_TEMP_TIME):
        # keep the node-level logger separately: self.log is later redirected
        # to a per-suite logger (registerSuiteLog) and restored from
        # self.testnode_log at the top of each run() iteration
        self.testnode_log = log
        self.log = log
        self.config = config or {}
        self.process_manager = ProcessManager(log)
        self.working_directory = config['working_directory']
        # maps test_suite_reference -> NodeTestSuite, kept across iterations
        self.node_test_suite_dict = {}
        self.file_handler = None
        # retention periods (in days) used by _cleanupLog/_cleanupTemporaryFiles
        self.max_log_time = max_log_time
        self.max_temp_time = max_temp_time
        self.url_access = "https://[0::0]:0123"  # Ipv6 + port of the node

    def checkOldTestSuite(self, test_suite_data):
        """Delete working copies of test suites the master no longer assigns.

        test_suite_data: list of dicts, each carrying a 'test_suite_reference'
        key naming a checkout directory under self.working_directory.
        Removed the unused local ``config`` and the throwaway list built
        inside ``set(...)``.
        """
        installed_reference_set = set(os.listdir(self.working_directory))
        wished_reference_set = set(
            x['test_suite_reference'] for x in test_suite_data)
        to_remove_reference_set = installed_reference_set.difference(
            wished_reference_set)
        for reference in to_remove_reference_set:
            fpath = os.path.join(self.working_directory, reference)
            self.delNodeTestSuite(reference)
            self.log("testnode.checkOldTestSuite, DELETING : %r" % (fpath, ))
            # a stale reference is usually a directory but may be a plain file
            if os.path.isdir(fpath):
                shutil.rmtree(fpath)
            else:
                os.remove(fpath)

    def getNodeTestSuite(self, reference):
        """Return (creating on first use) the cached NodeTestSuite for *reference*."""
        try:
            node_test_suite = self.node_test_suite_dict[reference]
        except KeyError:
            node_test_suite = NodeTestSuite(reference)
            self.node_test_suite_dict[reference] = node_test_suite

        # refresh on every call: self.log may have been switched to a
        # different suite log since the suite object was created
        node_test_suite.edit(log=self.log,
                             config=self.config,
                             process_manager=self.process_manager)
        return node_test_suite

    def delNodeTestSuite(self, reference):
        """Forget the cached NodeTestSuite for *reference*; no-op when absent.

        dict.has_key() exists only on Python 2; pop() with a default is the
        equivalent, Python-3-compatible form and avoids the double lookup."""
        self.node_test_suite_dict.pop(reference, None)

    def constructProfile(self,
                         node_test_suite,
                         test_type,
                         use_relative_path=False):
        """Write the suite's custom buildout profile to
        node_test_suite.custom_profile_path.

        Exactly one repository must define PROFILE_PATH_KEY: it becomes the
        extended base profile. Every repository with a buildout_section_id
        additionally gets its own section, pointing at the local checkout —
        or, for ScalabilityTest, at an <obfuscated_url> remote with a pinned
        revision taken from node_test_suite.revision.
        """
        config = self.config
        profile_content = ''
        assert len(node_test_suite.vcs_repository_list
                   ), "we must have at least one repository"
        profile_path_count = 0
        profile_content_list = []
        for vcs_repository in node_test_suite.vcs_repository_list:
            url = vcs_repository['url']
            buildout_section_id = vcs_repository.get('buildout_section_id',
                                                     None)
            repository_path = vcs_repository['repository_path']
            try:
                profile_path = vcs_repository[PROFILE_PATH_KEY]
            except KeyError:
                pass
            else:
                profile_path_count += 1
                if profile_path_count > 1:
                    raise ValueError(PROFILE_PATH_KEY +
                                     ' defined more than once')

                # Absolute path to relative path
                software_config_path = os.path.join(repository_path,
                                                    profile_path)
                if use_relative_path:
                    from_path = os.path.join(self.working_directory,
                                             node_test_suite.reference)
                    software_config_path = os.path.relpath(
                        software_config_path, from_path)

                profile_content_list.append(
                    """
[buildout]
extends = %(software_config_path)s
""" % {'software_config_path': software_config_path})

            # Construct sections
            if not (buildout_section_id is None):
                # Absolute path to relative
                if use_relative_path:
                    from_path = os.path.join(self.working_directory,
                                             node_test_suite.reference)
                    repository_path = os.path.relpath(repository_path,
                                                      from_path)

                if test_type == "ScalabilityTest":
                    #          updater = Updater(repository_path, git_binary=self.config['git_binary'],
                    #          branch = vcs_repository.get('branch','master'), log=self.log, process_manager=self.process_manager)
                    #          updater.checkout()
                    #          revision = updater.getRevision()[1]
                    all_revision = node_test_suite.revision
                    # from 'sec1=xx-azer,sec2=yy-qwer,..' to [[sec1,azer],[sec2,qwer],..]
                    revision_list = [[
                        x.split('=')[0],
                        x.split('=')[1].split('-')[1]
                    ] for x in all_revision.split(',')]
                    # from [[sec1,azer],[sec2,qwer],..] to {sec1:azer,sec2:qwer,..}
                    revision_dict = {
                        branch: revision
                        for branch, revision in revision_list
                    }
                    # <obfuscated_url> word is modified by in runner.prepareSlapOSForTestSuite()
                    profile_content_list.append(
                        """
[%(buildout_section_id)s]
repository = <obfuscated_url>/%(buildout_section_id)s/%(buildout_section_id)s.git
revision = %(revision)s
ignore-ssl-certificate = true
develop = false
""" % {
                            'buildout_section_id': buildout_section_id,
                            'revision': revision_dict[buildout_section_id]
                        })
                else:
                    profile_content_list.append(
                        """
[%(buildout_section_id)s]
repository = %(repository_path)s
branch = %(branch)s
develop = false
""" % {
                            'buildout_section_id': buildout_section_id,
                            'repository_path': repository_path,
                            'branch': vcs_repository.get('branch', 'master')
                        })
        if not profile_path_count:
            raise ValueError(PROFILE_PATH_KEY + ' not defined')
        # Write file
        custom_profile = open(node_test_suite.custom_profile_path, 'w')
        # sort to have buildout section first
        profile_content_list.sort(
            key=lambda x: [x, ''][x.startswith('\n[buildout]')])
        custom_profile.write(''.join(profile_content_list))
        custom_profile.close()
        # NOTE(review): appends the loop's last repository_path to sys.path —
        # looks intentional only for a single-repository layout; confirm.
        sys.path.append(repository_path)

    def getAndUpdateFullRevisionList(self, node_test_suite):
        """Check out/update every repository of the suite and record revisions.

        Side effect: node_test_suite.revision is set to the string
        'repo_id1=rev1,repo_id2=rev2,...'. Returns the list of
        'repo_id=revision' entries.
        """
        full_revision_list = []
        config = self.config
        log = self.log
        for vcs_repository in node_test_suite.vcs_repository_list:
            repository_path = vcs_repository['repository_path']
            repository_id = vcs_repository['repository_id']
            branch = vcs_repository.get('branch')
            # Make sure we have local repository
            updater = Updater(
                repository_path,
                git_binary=config['git_binary'],
                branch=branch,
                log=log,
                process_manager=self.process_manager,
                working_directory=node_test_suite.working_directory,
                url=vcs_repository["url"])
            updater.checkout()
            # getRevision() returns multiple parts that are joined with '-'
            revision = "-".join(updater.getRevision())
            full_revision_list.append('%s=%s' % (repository_id, revision))
        node_test_suite.revision = ','.join(full_revision_list)
        return full_revision_list

    def registerSuiteLog(self, test_result, node_test_suite):
        """
      Create a log dedicated for the test suite,
      and register the url to master node.
    """
        suite_log_path, folder_id = node_test_suite.createSuiteLog()
        self._initializeSuiteLog(suite_log_path)
        # TODO make the path into url
        test_result.reportStatus(
            'LOG url', "%s/%s" % (self.config.get('httpd_url'), folder_id), '')
        self.log("going to switch to log %r" % suite_log_path)
        # from here on, both the node and its process manager log to the
        # per-suite log file (restored in run() via self.testnode_log)
        self.process_manager.log = self.log = self.getSuiteLog()
        return suite_log_path

    def getSuiteLog(self):
        """Return the active suite-log callable (set by _initializeSuiteLog)."""
        return self.suite_log

    def _initializeSuiteLog(self, suite_log_path):
        """Redirect the 'testsuite' logger to a fresh file at *suite_log_path*
        and expose its info() method as self.suite_log."""
        # remove previous handlers
        logger = logging.getLogger('testsuite')
        if self.file_handler is not None:
            logger.removeHandler(self.file_handler)
        # and replace it with new handler
        logger_format = '%(asctime)s %(name)-13s: %(levelname)-8s %(message)s'
        formatter = logging.Formatter(logger_format)
        logging.basicConfig(level=logging.INFO, format=logger_format)
        self.file_handler = logging.FileHandler(filename=suite_log_path)
        self.file_handler.setFormatter(formatter)
        logger.addHandler(self.file_handler)
        logger.info('Activated logfile %r output' % suite_log_path)
        self.suite_log = logger.info

    def checkRevision(self, test_result, node_test_suite):
        """Re-checkout local repositories when the master decided on a
        different revision set than the one this node just measured.

        test_result.revision has the form 'repo1=count-hash,repo2=...';
        the part after the last '-' of each entry is the commit checked out.
        (The original dead fallback ``if log is None: log = self.log`` — a
        no-op, since log was just read from self.log — was removed.)
        """
        config = self.config
        log = self.log
        if node_test_suite.revision != test_result.revision:
            log('Disagreement on tested revision, checking out: %r' %
                ((node_test_suite.revision, test_result.revision), ))
            for i, repository_revision in enumerate(
                    test_result.revision.split(',')):
                vcs_repository = node_test_suite.vcs_repository_list[i]
                repository_path = vcs_repository['repository_path']
                revision = repository_revision.rsplit('-', 1)[1]
                # other testnodes on other boxes are already ready to test another
                # revision
                log('  %s at %s' % (repository_path, node_test_suite.revision))
                updater = Updater(repository_path,
                                  git_binary=config['git_binary'],
                                  revision=revision,
                                  log=log,
                                  process_manager=self.process_manager)
                updater.checkout()
                updater.git_update_server_info()
                updater.git_create_repository_link()
                node_test_suite.revision = test_result.revision

    def _cleanupLog(self):
        """Delete per-suite log folders older than self.max_log_time days.

        Removed the unused local ``config`` and unfolded the awkwardly
        wrapped age condition.
        """
        log_directory = self.config['log_directory']
        now = time.time()
        for log_folder in os.listdir(log_directory):
            folder_path = os.path.join(log_directory, log_folder)
            if os.path.isdir(folder_path):
                # st_mtime is in seconds; 86400 converts the age to days
                age_in_days = (now - os.stat(folder_path).st_mtime) / 86400
                if age_in_days > self.max_log_time:
                    self.log("deleting log directory %r" % (folder_path, ))
                    shutil.rmtree(folder_path)

    def _cleanupTemporaryFiles(self):
        """
    buildout seems letting files under /tmp. To avoid regular error of
    missing disk space, remove old logs
    """
        temp_directory = self.config["system_temp_folder"]
        now = time.time()
        user_id = os.geteuid()
        for temp_folder in os.listdir(temp_directory):
            folder_path = os.path.join(temp_directory, temp_folder)
            # only buildout-style entries are considered ("tmp*"/"buildout*")
            if (temp_folder.startswith("tmp")
                    or temp_folder.startswith("buildout")):
                try:
                    # delete only entries we own that exceeded the
                    # max_temp_time retention (st_mtime age in days)
                    stat = os.stat(folder_path)
                    if stat.st_uid == user_id and \
                        (now - stat.st_mtime)/86400 > self.max_temp_time:
                        self.log("deleting temp directory %r" %
                                 (folder_path, ))
                        if os.path.isdir(folder_path):
                            shutil.rmtree(folder_path)
                        else:
                            os.remove(folder_path)
                except OSError:
                    # an entry may vanish or be inaccessible mid-scan;
                    # log and keep going — cleanup is best-effort
                    self.log("_cleanupTemporaryFiles exception",
                             exc_info=sys.exc_info())

    def cleanUp(self, test_result):
        """Kill processes left over from a previous run and prune old log and
        temp directories. *test_result* is accepted for call-site symmetry
        but not used here."""
        self.log('Testnode.cleanUp')
        self.process_manager.killPreviousRun()
        self._cleanupLog()
        self._cleanupTemporaryFiles()

    def run(self):
        """Endless main loop: fetch assigned suites from the master, update
        checkouts, build profiles, delegate to the proper runner, report
        results, then clean up and throttle to one iteration per two minutes.

        NOTE(review): the outer ``try:`` below has no visible ``except``/
        ``finally`` in this excerpt — the full source presumably closes it
        further down; confirm before editing this method.
        """
        log = self.log
        config = self.config
        slapgrid = None
        previous_revision_dict = {}
        revision_dict = {}
        test_result = None
        test_node_slapos = SlapOSInstance()
        test_node_slapos.edit(
            working_directory=self.config['slapos_directory'])
        try:
            while True:
                try:
                    node_test_suite = None
                    # restore the node-level logger before anything else
                    self.log = self.process_manager.log = self.testnode_log
                    self.cleanUp(None)
                    remote_test_result_needs_cleanup = False
                    begin = time.time()
                    portal_url = config['test_suite_master_url']
                    portal = taskdistribution.TaskDistributionTool(
                        portal_url, logger=DummyLogger(log))
                    self.portal = portal
                    self.test_suite_portal = taskdistribution.TaskDistributor(
                        portal_url, logger=DummyLogger(log))
                    self.test_suite_portal.subscribeNode(
                        node_title=config['test_node_title'],
                        computer_guid=config['computer_id'])
                    test_suite_data = self.test_suite_portal.startTestSuite(
                        node_title=config['test_node_title'],
                        computer_guid=config['computer_id'])
                    if type(test_suite_data) == str:
                        # Backward compatiblity
                        test_suite_data = json.loads(test_suite_data)
                    test_suite_data = Utils.deunicodeData(test_suite_data)
                    log("Got following test suite data from master : %r" % \
                        (test_suite_data,))
                    try:
                        my_test_type = self.test_suite_portal.getTestType()
                    except:
                        log("testnode, error during requesting getTestType() method \
from the distributor.")
                        raise
                    # Select runner according to the test type
                    if my_test_type == 'UnitTest':
                        runner = UnitTestRunner(self)
                    elif my_test_type == 'ScalabilityTest':
                        runner = ScalabilityTestRunner(self)
                    else:
                        log("testnode, Runner type %s not implemented.",
                            my_test_type)
                        raise NotImplementedError
                    log("Type of current test is %s" % (my_test_type, ))
                    # master testnode gets test_suites, slaves get nothing
                    runner.prepareSlapOSForTestNode(test_node_slapos)
                    # Clean-up test suites
                    self.checkOldTestSuite(test_suite_data)
                    for test_suite in test_suite_data:
                        remote_test_result_needs_cleanup = False
                        node_test_suite = self.getNodeTestSuite(
                            test_suite["test_suite_reference"])

                        node_test_suite.edit(
                            working_directory=self.config['working_directory'],
                            log_directory=self.config['log_directory'])

                        node_test_suite.edit(**test_suite)
                        # per-suite runner (shadows the node-level one above)
                        if my_test_type == 'UnitTest':
                            runner = UnitTestRunner(node_test_suite)
                        elif my_test_type == 'ScalabilityTest':
                            runner = ScalabilityTestRunner(node_test_suite)
                        else:
                            log("testnode, Runner type %s not implemented.",
                                my_test_type)
                            raise NotImplementedError

                        # XXX: temporary hack to prevent empty test_suite
                        if not hasattr(node_test_suite, 'test_suite'):
                            node_test_suite.edit(test_suite='')
                        run_software = True
                        # kill processes from previous loop if any
                        self.process_manager.killPreviousRun()
                        self.getAndUpdateFullRevisionList(node_test_suite)
                        # Write our own software.cfg to use the local repository
                        self.constructProfile(node_test_suite, my_test_type,
                                              runner.getRelativePathUsage())
                        # Make sure we have local repository
                        test_result = portal.createTestResult(
                            node_test_suite.revision, [],
                            config['test_node_title'], False,
                            node_test_suite.test_suite_title,
                            node_test_suite.project_title)
                        remote_test_result_needs_cleanup = True
                        log("testnode, test_result : %r" % (test_result, ))
                        if test_result is not None:
                            self.registerSuiteLog(test_result, node_test_suite)
                            self.checkRevision(test_result, node_test_suite)
                            node_test_suite.edit(test_result=test_result)
                            # Now prepare the installation of SlapOS and create instance
                            status_dict = runner.prepareSlapOSForTestSuite(
                                node_test_suite)
                            # Give some time so computer partitions may start
                            # as partitions can be of any kind we have and likely will never have
                            # a reliable way to check if they are up or not ...
                            time.sleep(20)
                            if my_test_type == 'UnitTest':
                                runner.runTestSuite(node_test_suite,
                                                    portal_url)
                            elif my_test_type == 'ScalabilityTest':
                                error_message = None
                                # A problem is appeared during runTestSuite
                                if status_dict['status_code'] == 1:
                                    error_message = "Software installation too long or error(s) are present during SR install."
                                else:
                                    status_dict = runner.runTestSuite(
                                        node_test_suite, portal_url)
                                    # A problem is appeared during runTestSuite
                                    if status_dict['status_code'] == 1:
                                        error_message = status_dict[
                                            'error_message']

                                # If an error is appeared
                                if error_message:
                                    test_result.reportFailure(
                                        stdout=error_message)
                                    self.log(error_message)
                                    raise ValueError(error_message)
                            else:
                                raise NotImplementedError

                            # break the loop to get latest priorities from master
                            break
                        self.cleanUp(test_result)
                except (SubprocessError, CalledProcessError) as e:
                    log("SubprocessError", exc_info=sys.exc_info())
                    if remote_test_result_needs_cleanup:
                        status_dict = e.status_dict or {}
                        test_result.reportFailure(
                            command=status_dict.get('command'),
                            stdout=status_dict.get('stdout'),
                            stderr=status_dict.get('stderr'),
                        )
                    continue
                except ValueError as e:
                    # This could at least happens if runTestSuite is not found
                    log("ValueError", exc_info=sys.exc_info())
                    if node_test_suite is not None:
                        node_test_suite.retry_software_count += 1
                except CancellationError, e:
                    log("CancellationError", exc_info=sys.exc_info())
                    self.process_manager.under_cancellation = False
                    node_test_suite.retry = True
                    continue
                except:
                    ex_type, ex, tb = sys.exc_info()
                    traceback.print_tb(tb)
                    log("erp5testnode exception", exc_info=sys.exc_info())
                    raise
                now = time.time()
                self.cleanUp(test_result)
                # throttle: at most one iteration every two minutes
                if (now - begin) < 120:
                    sleep_time = 120 - (now - begin)
                    log("End of processing, going to sleep %s" % sleep_time)
                    time.sleep(sleep_time)
Example #17
0
    spamQ.put(myList)


def eggs(eggsQ):
    """Pretend to cook eggs for five seconds, then announce completion on eggsQ."""
    time.sleep(5)
    eggsQ.put("I'm done cooking eggs!")


def bacon(myString, baconQ):
    """Measure *myString* and push a human-readable length report onto baconQ."""
    length = len(myString)
    report = ''.join([myString, ' is ', str(length), ' chars long!'])
    baconQ.put(report)


if __name__ == '__main__':
    #Create ProcessManager object
    pManager = ProcessManager()
    #Create Process string IDs
    spamID = 'SPAM'
    eggsID = 'EGGS'
    baconID = 'tasty'
    # one result queue per worker so outputs do not interleave
    spamOut = mp.Queue()
    eggsOut = mp.Queue()
    stringyOut = mp.Queue()
    #Start processes
    groceryList = ['milk', 'cheese', 'and for dessert:']
    #Create spam process
    pSpam = mp.Process(target=spam, args=(3.14, groceryList, spamOut))
    pSpam.start()
    #pManager.startProcess(spamID, spam, (3.14, groceryList), (spamOut,))
    #Create eggs process
    pEggs = mp.Process(target=eggs, args=(eggsOut, ))
    # NOTE(review): pEggs is created but never started in this excerpt;
    # presumably pEggs.start() follows in the full script — confirm.
Example #18
0
class TestNode(object):
  """Driver for one erp5 test node (Python 2 code).

  Polls a test-suite master for work, keeps local git clones up to
  date, builds the requested software with SlapOS, then launches the
  ``runTestSuite`` executable found in the built partitions.

  NOTE(review): the trailing ``run`` method's outer ``try:`` has no
  visible ``except``/``finally`` here — this snippet looks truncated;
  confirm against the full original module.
  """

  def __init__(self, log, config):
    """Store logger and config, create the shared ProcessManager."""
    self.log = log
    self.config = config or {}
    self.process_manager = ProcessManager(log)
    # Maps test_suite_reference -> NodeTestSuite (lazily populated).
    self.node_test_suite_dict = {}
    # hack until slapos.cookbook is updated
    if self.config.get('working_directory', '').endswith("slapos/"):
      self.config['working_directory'] = self.config[
        'working_directory'][:-(len("slapos/"))] + "testnode"

  def checkOldTestSuite(self,test_suite_data):
    """Remove on-disk working copies of suites the master no longer wants.

    Anything present under ``working_directory`` whose name is not a
    wished ``test_suite_reference`` is deleted (directory tree or file)
    and dropped from the in-memory cache.
    """
    config = self.config
    installed_reference_set = set(os.listdir(config['working_directory']))
    wished_reference_set = set([x['test_suite_reference'] for x in test_suite_data])
    to_remove_reference_set = installed_reference_set.difference(
                                 wished_reference_set)
    for y in to_remove_reference_set:
      fpath = os.path.join(config['working_directory'],y)
      self.delNodeTestSuite(y)
      if os.path.isdir(fpath):
       shutil.rmtree(fpath)
      else:
       os.remove(fpath)

  def getNodeTestSuite(self, reference):
    """Return the cached NodeTestSuite for *reference*, creating it on first use."""
    node_test_suite = self.node_test_suite_dict.get(reference)
    if node_test_suite is None:
      node_test_suite = NodeTestSuite(reference)
      self.node_test_suite_dict[reference] = node_test_suite
    return node_test_suite

  def delNodeTestSuite(self, reference):
    """Forget the cached NodeTestSuite for *reference*, if any (no error when absent)."""
    if self.node_test_suite_dict.has_key(reference):
      self.node_test_suite_dict.pop(reference)

  def constructProfile(self, node_test_suite):
    """Write a custom buildout profile for *node_test_suite*.

    Exactly one repository must define PROFILE_PATH_KEY (the software
    profile to extend); every repository with a ``buildout_section_id``
    gets a section pointing buildout at the local clone and branch.
    Raises ValueError when the profile path is missing or duplicated.
    """
    config = self.config
    profile_content = ''
    assert len(node_test_suite.vcs_repository_list), "we must have at least one repository"
    profile_path_count = 0
    profile_content_list = []
    for vcs_repository in node_test_suite.vcs_repository_list:
      url = vcs_repository['url']
      buildout_section_id = vcs_repository.get('buildout_section_id', None)
      repository_path = vcs_repository['repository_path']
      try:
        profile_path = vcs_repository[PROFILE_PATH_KEY]
      except KeyError:
        pass
      else:
        profile_path_count += 1
        if profile_path_count > 1:
          raise ValueError(PROFILE_PATH_KEY + ' defined more than once')
        profile_content_list.append("""
[buildout]
extends = %(software_config_path)s
""" %  {'software_config_path': os.path.join(repository_path, profile_path)})

      if not(buildout_section_id is None):
        profile_content_list.append("""
[%(buildout_section_id)s]
repository = %(repository_path)s
branch = %(branch)s
""" %  {'buildout_section_id': buildout_section_id,
   'repository_path' : repository_path,
   'branch' : vcs_repository.get('branch','master')})
    if not profile_path_count:
      raise ValueError(PROFILE_PATH_KEY + ' not defined')
    custom_profile = open(node_test_suite.custom_profile_path, 'w')
    # sort to have buildout section first
    profile_content_list.sort(key=lambda x: [x, ''][x.startswith('\n[buildout]')])
    custom_profile.write(''.join(profile_content_list))
    custom_profile.close()
    # NOTE(review): appends the *last* loop iteration's repository_path
    # only — confirm this is intentional in the original project.
    sys.path.append(repository_path)

  def getAndUpdateFullRevisionList(self, node_test_suite):
    """Clone/refresh every repository and record its current revision.

    Clones missing repositories with git (optionally on a branch),
    checks each out via Updater, and stores a comma-joined
    ``repository_id=revision`` string on the suite.  Returns the list
    of those ``repository_id=revision`` entries.
    """
    full_revision_list = []
    config = self.config
    log = self.log
    for vcs_repository in node_test_suite.vcs_repository_list:
      repository_path = vcs_repository['repository_path']
      repository_id = vcs_repository['repository_id']
      if not os.path.exists(repository_path):
        parameter_list = [config['git_binary'], 'clone',
                          vcs_repository['url']]
        if vcs_repository.get('branch') is not None:
          parameter_list.extend(['-b',vcs_repository.get('branch')])
        parameter_list.append(repository_path)
        log(subprocess.check_output(parameter_list, stderr=subprocess.STDOUT))
      # Make sure we have local repository
      updater = Updater(repository_path, git_binary=config['git_binary'],
         log=log, process_manager=self.process_manager)
      updater.checkout()
      revision = "-".join(updater.getRevision())
      full_revision_list.append('%s=%s' % (repository_id, revision))
    node_test_suite.revision = ','.join(full_revision_list)
    return full_revision_list

  def addWatcher(self,test_result):
    """Attach the configured log file to *test_result* as a watch.

    Seeks to at most the last 5000 bytes so only the tail is streamed.
    Returns the log file name, or None when no 'log_file' is configured.
    """
    config = self.config
    if config.get('log_file'):
     log_file_name = config['log_file']
     log_file = open(log_file_name)
     log_file.seek(0, 2)
     log_file.seek(-min(5000, log_file.tell()), 2)
     test_result.addWatch(log_file_name,log_file,max_history_bytes=10000)
     return log_file_name

  def checkRevision(self, test_result, node_test_suite):
    """Align local checkouts with the revision the master decided to test.

    When the suite's revision disagrees with the test result's, each
    repository is checked out at the master-provided revision and the
    suite's revision attribute is updated to match.
    """
    config = self.config
    log = self.log
    if node_test_suite.revision != test_result.revision:
     log('Disagreement on tested revision, checking out: %r' % (
          (node_test_suite.revision,test_result.revision),))
     for i, repository_revision in enumerate(test_result.revision.split(',')):
      vcs_repository = node_test_suite.vcs_repository_list[i]
      repository_path = vcs_repository['repository_path']
      # repository_revision has the form '<id>=<count>-<hash>'; keep the hash part.
      revision = repository_revision.rsplit('-', 1)[1]
      # other testnodes on other boxes are already ready to test another
      # revision
      log('  %s at %s' % (repository_path, node_test_suite.revision))
      updater = Updater(repository_path, git_binary=config['git_binary'],
                        revision=revision, log=log,
                        process_manager=self.process_manager)
      updater.checkout()
      node_test_suite.revision = test_result.revision

  def _prepareSlapOS(self, working_directory, slapos_instance,
          create_partition=1, software_path_list=None, **kw):
    """
    Launch slapos to build software and partitions

    Builds the software release (and, unless create_partition is
    false, the computer partition) through SlapOSControler.  On a
    non-zero status the instance is flagged for retry and a
    SubprocessError carrying the status dict is raised; on success the
    retry counter is reset and the last status dict is returned.
    """
    slapproxy_log = os.path.join(self.config['log_directory'],
                                  'slapproxy.log')
    self.log('Configured slapproxy log to %r' % slapproxy_log)
    # After too many consecutive failures, wipe and rebuild the software.
    reset_software = slapos_instance.retry_software_count > 10
    self.log('testnode, retry_software_count : %r' % \
             slapos_instance.retry_software_count)
    self.slapos_controler = SlapOSControler.SlapOSControler(
      working_directory, self.config, self.log)
    self.slapos_controler.initializeSlapOSControler(slapproxy_log=slapproxy_log,
       process_manager=self.process_manager, reset_software=reset_software,
       software_path_list=software_path_list)
    self.process_manager.supervisord_pid_file = os.path.join(\
         self.slapos_controler.instance_root, 'var', 'run', 'supervisord.pid')
    method_list= ["runSoftwareRelease"]
    if create_partition:
      method_list.append("runComputerPartition")
    for method_name in method_list:
      slapos_method = getattr(self.slapos_controler, method_name)
      status_dict = slapos_method(self.config,
                                  environment=self.config['environment'],
                                 )
      if status_dict['status_code'] != 0:
         slapos_instance.retry = True
         slapos_instance.retry_software_count += 1
         raise SubprocessError(status_dict)
      else:
         slapos_instance.retry_software_count = 0
    return status_dict

  def prepareSlapOSForTestNode(self, test_node_slapos):
    """
    We will build slapos software needed by the testnode itself,
    like the building of selenium-runner by default
    """
    return self._prepareSlapOS(self.config['slapos_directory'],
              test_node_slapos, create_partition=0,
              software_path_list=self.config.get("software_list"))

  def prepareSlapOSForTestSuite(self, node_test_suite):
    """Build the test suite's software from its generated custom profile."""
    return self._prepareSlapOS(node_test_suite.working_directory,
              node_test_suite,
              software_path_list=[node_test_suite.custom_profile_path])

  def _dealShebang(self,run_test_suite_path):
    """Return the shebang interpreter tokens of a script, or [] if none."""
    line = open(run_test_suite_path, 'r').readline()
    invocation_list = []
    if line[:2] == '#!':
      invocation_list = line[2:].split()
    return invocation_list

  def runTestSuite(self, node_test_suite, portal_url):
    """Spawn the installed ``runTestSuite`` for *node_test_suite*.

    Locates the first runTestSuite in the instance partitions, builds
    its command line (explicit interpreter to dodge shebang length
    limits, plus firefox/xvfb/bt5 options when supported) and spawns
    it through the process manager.  Raises ValueError when no
    runTestSuite binary is installed.
    """
    config = self.config
    parameter_list = []
    run_test_suite_path_list = glob.glob("%s/*/bin/runTestSuite" % \
        self.slapos_controler.instance_root)
    if not len(run_test_suite_path_list):
      raise ValueError('No runTestSuite provided in installed partitions.')
    run_test_suite_path = run_test_suite_path_list[0]
    run_test_suite_revision = node_test_suite.revision
    # Deal with Shebang size limitation
    invocation_list = self._dealShebang(run_test_suite_path)
    invocation_list.extend([run_test_suite_path,
                           '--test_suite', node_test_suite.test_suite,
                           '--revision', node_test_suite.revision,
                           '--test_suite_title', node_test_suite.test_suite_title,
                           '--node_quantity', config['node_quantity'],
                           '--master_url', portal_url])
    firefox_bin_list = glob.glob("%s/soft/*/parts/firefox/firefox-slapos" % \
        config["slapos_directory"])
    if len(firefox_bin_list):
      parameter_list.append('--firefox_bin')
    xvfb_bin_list = glob.glob("%s/soft/*/parts/xserver/bin/Xvfb" % \
        config["slapos_directory"])
    if len(xvfb_bin_list):
      parameter_list.append('--xvfb_bin')
    # Only pass options this particular runTestSuite actually accepts.
    supported_paramater_set = self.process_manager.getSupportedParameterSet(
                           run_test_suite_path, parameter_list)
    if '--firefox_bin' in supported_paramater_set:
      invocation_list.extend(["--firefox_bin", firefox_bin_list[0]])
    if '--xvfb_bin' in supported_paramater_set:
      invocation_list.extend(["--xvfb_bin", xvfb_bin_list[0]])
    bt5_path_list = config.get("bt5_path")
    if bt5_path_list not in ('', None,):
      invocation_list.extend(["--bt5_path", bt5_path_list])
    # From this point, test runner becomes responsible for updating test
    # result. We only do cleanup if the test runner itself is not able
    # to run.
    SlapOSControler.createFolder(node_test_suite.test_suite_directory,
                                 clean=True)
    self.process_manager.spawn(*invocation_list,
                          cwd=node_test_suite.test_suite_directory,
                          log_prefix='runTestSuite', get_output=False)

  def cleanUp(self,test_result):
    """Kill leftover child processes and detach the log watch, if any."""
    log = self.log
    log('Testnode.cleanUp')
    self.process_manager.killPreviousRun()
    if test_result is not None:
      try:
        test_result.removeWatch(self.config['log_file'])
      except KeyError:
        log("KeyError, Watcher already deleted or not added correctly")

  def run(self):
    """Main loop: poll the master, build, run one suite, sleep, repeat.

    Each iteration asks the master for the suite list, prepares and
    runs the first suite that yields a test result, then breaks to
    re-fetch priorities.  Per-iteration exceptions select retry
    behaviour; iterations are padded to at least 120 seconds.
    """
    log = self.log
    config = self.config
    slapgrid = None
    previous_revision_dict = {}
    revision_dict = {}
    test_result = None
    test_node_slapos = SlapOSInstance()
    test_node_slapos.edit(working_directory=self.config['slapos_directory'])
    try:
      while True:
        try:
          self.cleanUp(None)
          remote_test_result_needs_cleanup = False
          begin = time.time()
          self.prepareSlapOSForTestNode(test_node_slapos)
          portal_url = config['test_suite_master_url']
          portal = taskdistribution.TaskDistributionTool(portal_url, logger=DummyLogger(log))
          test_suite_portal = taskdistribution.TaskDistributor(portal_url, logger=DummyLogger(log))
          test_suite_json =  test_suite_portal.startTestSuite(config['test_node_title'])
          test_suite_data = deunicodeData(json.loads(test_suite_json))
          log("Got following test suite data from master : %r" % \
              (test_suite_data,))
          #Clean-up test suites
          self.checkOldTestSuite(test_suite_data)
          for test_suite in test_suite_data:
            remote_test_result_needs_cleanup = False
            node_test_suite = self.getNodeTestSuite(
               test_suite["test_suite_reference"])
            node_test_suite.edit(
               working_directory=self.config['working_directory'])
            node_test_suite.edit(**test_suite)
            run_software = True
            # Write our own software.cfg to use the local repository
            self.constructProfile(node_test_suite)
            # kill processes from previous loop if any
            self.process_manager.killPreviousRun()
            self.getAndUpdateFullRevisionList(node_test_suite)
            # Make sure we have local repository
            test_result = portal.createTestResult(node_test_suite.revision, [],
                     config['test_node_title'], False,
                     node_test_suite.test_suite_title,
                     node_test_suite.project_title)
            remote_test_result_needs_cleanup = True
            log("testnode, test_result : %r" % (test_result, ))
            if test_result is not None:
              log_file_name = self.addWatcher(test_result)
              self.checkRevision(test_result,node_test_suite)
              # Now prepare the installation of SlapOS and create instance
              status_dict = self.prepareSlapOSForTestSuite(node_test_suite)
              # Give some time so computer partitions may start
              # as partitions can be of any kind we have and likely will never have
              # a reliable way to check if they are up or not ...
              time.sleep(20)
              self.runTestSuite(node_test_suite,portal_url)
              test_result.removeWatch(log_file_name)
              # break the loop to get latest priorities from master
              break
            self.cleanUp(test_result)
        except (SubprocessError, CalledProcessError) as e:
          # Build/run failure: report the failure upstream when a remote
          # test result was already created, then poll again.
          log("SubprocessError", exc_info=sys.exc_info())
          if test_result is not None:
            test_result.removeWatch(log_file_name)
          if remote_test_result_needs_cleanup:
            status_dict = e.status_dict or {}
            test_result.reportFailure(
              command=status_dict.get('command'),
              stdout=status_dict.get('stdout'),
              stderr=status_dict.get('stderr'),
            )
          continue
        except ValueError as e:
          # This could at least happens if runTestSuite is not found
          log("ValueError", exc_info=sys.exc_info())
          node_test_suite.retry_software_count += 1
        except CancellationError, e:
          # Master cancelled this run: clear the flag and retry the suite.
          log("CancellationError", exc_info=sys.exc_info())
          self.process_manager.under_cancellation = False
          node_test_suite.retry = True
          continue
        except:
            log("erp5testnode exception", exc_info=sys.exc_info())
            raise
        now = time.time()
        self.cleanUp(test_result)
        # Pad each iteration to at least 120 seconds to avoid hammering
        # the master.
        if (now-begin) < 120:
          sleep_time = 120 - (now-begin)
          log("End of processing, going to sleep %s" % sleep_time)
          time.sleep(sleep_time)
from Fetcher import Fetcher
from ProcessManager import ProcessManager


fetcher = Fetcher()

# Job table: job name -> the fetch callable plus its polling interval
# in seconds (online players every 10 s, highscores every 2 h).
proc_attrs = {}
proc_attrs['online'] = {'func': fetcher.fetch_online_players, 'arg': 10}
proc_attrs['highscores'] = {'func': fetcher.fetch_highscores, 'arg': 2 * 60 * 60}

if __name__ == '__main__':
    # Hand the job table to the process manager; 10 is its run argument.
    manager = ProcessManager()
    manager.run(proc_attrs, 10)


Example #20
0
# CLI: number of child processes to create and how often to sleep.
parser = ArgumentParser()
parser.add_argument("num_children")
parser.add_argument("sleepint")

args = parser.parse_args()
num_children = int(args.num_children)
sleep_every = int(args.sleepint)

# Show the current process-count resource limit (soft, hard).
print(resource.getrlimit(resource.RLIMIT_NPROC))

# NOTE(review): rebinding 'args' here shadows the parsed CLI namespace
# above — confirm this is intentional in the original script.
args = ["demo/vuln"]
launch_args = LaunchArguments(args, random=False)
dummy_poll = PaulaPoll()

manager = ProcessManager(launch_args, dummy_poll)

# Break at main, run to the breakpoint, and start timing.
root_proc = manager.getCurrentProcess()
root_proc.insertBreakpoint("main")
root_proc.cont()
new_procs = []

time_dict = dict()
time_dict["start"] = default_timer()

#time_dict["launch"] = default_timer()
# root_proc.getrlimit(1)

#new_procs = [root_proc.forkProcess() for _ in range(num_children)]

RECOVER_EVERY = sleep_every
Example #21
0
    print(f'Using config file {args.config}')
    config = toml.load(args.config)
    return config


# Load configuration (TOML file chosen via CLI arguments).
config = process_args()

# Flask app with CORS enabled; both the Flask handler and the root
# logger share the configured format and write to the configured file.
app = flask.Flask(__name__)
CORS(app)
flog.default_handler.setFormatter(logging.Formatter(config['log_format']))
logging.basicConfig(filename=config['log_file'],
                    format=config['log_format'],
                    level=logging.DEBUG)
print(f'Flask logger is redirected to {config["log_file"]}')

# Background jobs driving the hardware: motor spin and servo movement.
pm = ProcessManager()
pm.add_job('motor', spin_motors)
pm.add_job('servo', move_servo)


@app.route('/api/v1/healthcheck', methods=['GET'])
def healthcheck():
    """Liveness probe: always answers with body 'ok' and HTTP 200."""
    body, status = 'ok', 200
    return body, status


@app.route('/api/v1/wifistrength', methods=['GET'])
def wifi_strength():
    """Report the current WiFi signal reading (via check_wifi) with HTTP 200."""
    reading = check_wifi()
    return reading, 200


@app.route('/api/v1/motor', methods=['POST'])
Example #22
0
def t265_update():
    """Consume one T265 pose sample and publish it to the odometry table.

    Pops a ready flag from xyz_rpy_queue, reads the shared pose values,
    updates the pose tracker with (x, y, yaw), and writes the field and
    robot x/y/theta numbers to odom_table.

    Returns:
        True when a sample was consumed and published, False when the
        queue was empty.
    """
    try:
        xyz_rpy_queue.get_nowait()
        pose = xyzrpy_value[0:6]
        pose_tracker.update_t265_pose(pose[0], pose[1], pose[5])
        for value, axis in zip(pose_tracker.field_xyt, 'xyt'):
            odom_table.putNumber(f'field_{axis}', value)
        for value, axis in zip(pose_tracker.robot_xyt, 'xyt'):
            odom_table.putNumber(f'robot_{axis}', value)
        return True
    except Empty:
        return False


# Supervise the T265 tracking process; t265_update is polled to drain
# its output queue.
t265_process_manager = ProcessManager(
    lambda: T265Process(xyz_rpy_queue, xyzrpy_value, encoder_v_queue),
    t265_update,
)

time.sleep(4)  # must have t265 launch first to work


def cv_update():
    global last_target_found_time
    try:
        target_found, target_dis, target_relative_dir_left, \
                target_t265_azm, camera_xyt = target_queue.get_nowait()
        if not target_found:
            pose_tracker.clear_calibration()
            if time.time(
            ) > last_target_found_time + Constants.HOLD_TARGET_TIME:
                odom_table.putBoolean('target_found', False)