Esempio n. 1
0
  def run(self):
    """Thread body: throttle the target process's CPU use indefinitely.

    Each iteration checks CPU use via win_check_cpu_use and sleeps off
    whatever part of the polling interval the check did not consume.
    Never returns; exits the whole program on fatal errors.
    """
    # Elevate our priority, set us to the highest so that we can more effectively throttle
    success = windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_HIGHEST)
    
    # If we failed to get HIGHEST priority, try above normal, else we're still at default
    if not success:
      windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_ABOVE_NORMAL)
    
    # Run while the process is running
    while True:
      try:
        # Get the frequency
        frequency = repy_constants.CPU_POLLING_FREQ_WIN
        
        # Base amount of sleeping on return value of
        # win_check_cpu_use to prevent under/over sleeping
        slept = win_check_cpu_use(nanny_resource_limits.resource_limit("cpu"), self.pid)
        
        if slept == -1:
          # Something went wrong, try again
          pass
        elif (slept < frequency):
          # Sleep off the remainder of the polling interval
          time.sleep(frequency-slept)

      except windows_api.DeadProcess:
        #  Process may be dead; nothing left to throttle, so exit (code 97)
        harshexit.harshexit(97)
        
      except:
        # Unexpected failure: report it and tear everything down (code 25)
        tracebackrepy.handle_exception()
        print >> sys.stderr, "CPU Nanny died!   Trying to kill everything else"
        harshexit.harshexit(25)
Esempio n. 2
0
def run_file(resourcefn, program, args, usercontext, logfile=None, simpleexec=False, usercode=None):
  """Set up logging and resource monitoring, then build the namespace for a repy program.

  Arguments:
    resourcefn: filename of the resource file handed to the nanny.
    program: filename of the repy program; used as the namespace name and
      read for source when usercode is not supplied.
    args: program arguments (not used in this excerpt).
    usercontext: execution context for the user program (not used in this excerpt).
    logfile: if given, stdout and stderr are redirected into a circular
      logger backed by this file; otherwise stdout is wrapped so prints flush.
    simpleexec: not used in this excerpt.
    usercode: program source; read from `program` when not provided.

  Exits via harshexit(5) if static analysis deems the program unsafe.
  """
  # Armon: Initialize the circular logger before starting the nanny
  if logfile:
    # time to set up the circular logger
    loggerfo = loggingrepy.circular_logger(logfile)
    # and redirect err and out there...
    sys.stdout = loggerfo
    sys.stderr = loggerfo
  else:
    # let's make it so that the output (via print) is always flushed
    sys.stdout = loggingrepy.flush_logger(sys.stdout)
    
  # start the nanny up and read the resource file.  
  nanny.start_resource_nanny(resourcefn)

  # now, let's fire up the cpu / disk / memory monitor...
  nonportable.monitor_cpu_disk_and_mem()

  # grab the user code from the file
  if not usercode:
    usercode = read_file(program)

  # Armon: Create the main namespace
  try:
    main_namespace = virtual_namespace.VirtualNamespace(usercode, program)
  except CodeUnsafeError, e:
    print "Specified repy program is unsafe!"
    print "Static-code analysis failed with error: "+str(e)
    harshexit.harshexit(5)
Esempio n. 3
0
def execute_namespace_until_completion(thisnamespace, thiscontext):
    """Evaluate user code in thisnamespace, then block until every event
    thread it spawned has finished before returning.

    Arguments:
      thisnamespace: the namespace object whose evaluate() runs the user code.
      thiscontext: the context passed to evaluate().

    Exits the process (harshexit code 6) if the user code raises anything
    other than SystemExit, and aborts via handle_internalerror (code 140)
    if the initialize event cannot be acquired.
    """
    # Threads alive right now are baseline overhead; any count above this
    # later means user events are still pending.
    idlethreadcount = threading.activeCount()

    # add my thread to the set of threads that are used...
    event_id = idhelper.getuniqueid()
    try:
        nanny.tattle_add_item('events', event_id)
    except Exception as e:
        # BUG FIX: exception objects have no .message attribute in Python 3;
        # format the exception itself instead of e.message.
        tracebackrepy.handle_internalerror("Failed to acquire event for '" + \
                  "initialize' event.\n(Exception was: %s)" % e, 140)

    try:
        thisnamespace.evaluate(thiscontext)
    except SystemExit:
        raise
    except:
        # I think it makes sense to exit if their code throws an exception...
        tracebackrepy.handle_exception()
        harshexit.harshexit(6)
    finally:
        # Always release the event, even when evaluate() raised.
        nanny.tattle_remove_item('events', event_id)

    # I've changed to the threading library, so this should increase if there are
    # pending events
    while threading.activeCount() > idlethreadcount:
        # do accounting here?
        time.sleep(0.25)

    # Once there are no more events, return...
    return
Esempio n. 4
0
  def run(self):
    """Thread body: dispatch IPC messages from the parent process until the
    pipe breaks, then record termination status and exit the process."""
    # Run forever
    while True:
      # Read a message
      try:
        mesg = read_message_from_pipe(self.readhandle)
      except Exception as e:
        # Any read failure is treated as the pipe (and parent) being gone.
        # NOTE(review): 'e' is captured but never used.
        break

      # Check for a handler function
      if mesg[0] in IPC_HANDLER_FUNCTIONS:
        # Invoke the handler function with the data
        handler = IPC_HANDLER_FUNCTIONS[mesg[0]]
        handler(mesg[1])

      # Print a message if there is a message on an unknown channel
      else:
        print("[WARN] Message on unknown channel from parent process:" + mesg[0])


    ### We only leave the loop on a fatal error, so we need to exit now

    # Write out status information, our parent would do this, but its dead.
    statusstorage.write_status("Terminated")  
    print("Monitor process died! Terminating!", file=sys.stderr)
    harshexit.harshexit(70)
Esempio n. 5
0
def _stopfile_exit(exitcode, pid):
    """Handle a stop request: record repy's final status and shut it down."""
    if harshexit.ostype in ["Windows", "WindowsCE"]:
        # On Windows this code runs inside the Repy process itself, so
        # harshexit records the appropriate status and terminates us.
        harshexit.harshexit(exitcode)

    else:
        # On *NIX this code runs in the external monitor process.
        # Exit code 44 means repy was merely Stopped; anything else is a
        # termination.  Recording the status is best effort only.
        final_status = "Stopped" if exitcode == 44 else "Terminated"
        try:
            statusstorage.write_status(final_status)
        except:
            pass

        # Silence further status updates in case the resource thread
        # notices that we have killed repy.
        statusstorage.init(None)

        # Terminate the repy process itself.
        harshexit.portablekill(pid)

        # Derek's fix: pause so this monitor does not exit before the
        # repy process does.
        time.sleep(1)

        # And finally take ourselves down.
        harshexit.harshexit(78)
Esempio n. 6
0
 def wrapped_func():
   """Run the caller-supplied function(); exit the program on any uncaught exception."""
   try:
     function()
   except:
     # Exit if they throw an uncaught exception
     tracebackrepy.handle_exception()
     # 30 is the exit code used for an uncaught exception in user code
     harshexit.harshexit(30)
def _stopfile_exit(exitcode, pid):
  """Handle a stop request: record repy's final status and shut it down."""
  # Windows: this code runs inside the Repy process; harshexit stores the
  # appropriate status and never lets us fall through.
  if harshexit.ostype in ["Windows", "WindowsCE"]:
    harshexit.harshexit(exitcode)
    return

  # *NIX: this code runs in the external monitor process.
  # Exit code 44 means repy was merely Stopped; anything else is a
  # termination.  Writing the status is best effort only.
  final_status = "Stopped" if exitcode == 44 else "Terminated"
  try:
    statusstorage.write_status(final_status)
  except:
    pass

  # Silence the other status thread in case the resource thread detects
  # that we have killed repy.
  statusstorage.init(None)

  # Terminate the repy process itself.
  harshexit.portablekill(pid)

  # Derek's fix: sleep so this monitor does not exit before the repy
  # process does.
  time.sleep(1)

  harshexit.harshexit(78)
Esempio n. 8
0
def do_forked_resource_monitor():
  """Fork into a repy child and a monitoring parent.

  The child closes the write end of a shared pipe, starts a thread that
  watches for the parent's death, and returns to continue as repy.  The
  parent records the child's pid, starts the nmstatusinterface, and runs
  resource_monitor; it exits (code 98) if the child exceeds a resource limit.
  """
  global repy_process_id

  # Get a pipe
  (readhandle, writehandle) = os.pipe()

  # I'll fork a copy of myself
  childpid = os.fork()

  if childpid == 0:
    # We are the child, close the write end of the pipe
    os.close(writehandle)

    # Start a thread to check on the survival of the parent
    parent_process_checker(readhandle).start()

    # Only the child returns from this function.
    return
  else:
    # We are the parent, close the read end
    os.close(readhandle)

  # Store the childpid
  repy_process_id = childpid

  # Start the nmstatusinterface
  nmstatusinterface.launch(repy_process_id)
  
  # Small internal error handler function: report, stop status updates,
  # kill repy, and record a Terminated status (all best effort).
  def _internal_error(message):
    try:
      print >> sys.stderr, message
      sys.stderr.flush()
    except:
      pass
      
    # Stop the nmstatusinterface, we don't want any more status updates
    nmstatusinterface.stop()  

    # Kill repy
    harshexit.portablekill(childpid)

    try:
      # Write out status information, repy was Stopped
      statusstorage.write_status("Terminated")  
    except:
      pass
  
  try:
    # Some OS's require that you wait on the pid at least once
    # before they do any accounting
    (pid, status) = os.waitpid(childpid,os.WNOHANG)
    
    # Launch the resource monitor, if it fails determine why and restart if necessary
    resource_monitor(childpid, writehandle)
    
  except ResourceException, exp:
    # Repy exceeded its resource limit, kill it
    _internal_error(str(exp)+" Impolitely killing child!")
    harshexit.harshexit(98)
Esempio n. 9
0
 def check_failed_lock_acquire():
   """Poll once per second, up to wait_time seconds, for the lock to be
   acquired; abort the program if it never is."""
   for i in xrange(wait_time):
     if context['lock_acquired']:
       break
     time.sleep(1)
   else:
     # for/else: this branch runs only when the loop completed WITHOUT
     # break, i.e. the lock was never acquired within wait_time seconds.
     print>>sys.stderr, "Uninstallation cannot be completed; Failed to acquire lock."
     harshexit.harshexit(1)
Esempio n. 10
0
 def wrapped_func():
   """Run function(); always release the event, and exit the program on an
   uncaught exception."""
   try:
     function()
   except:
     # Exit if they throw an uncaught exception
     tracebackrepy.handle_exception()
     harshexit.harshexit(30)
   finally: 
     # Remove the event before I exit
     nanny.tattle_remove_item('events',eventhandle)
Esempio n. 11
0
 def wrapped_func():
     """Run function(); always release the event, and exit the program on an
     uncaught exception."""
     try:
         function()
     except:
         # Exit if they throw an uncaught exception
         tracebackrepy.handle_exception()
         harshexit.harshexit(30)
     finally:
         # Remove the event before I exit
         nanny.tattle_remove_item('events', eventhandle)
Esempio n. 12
0
    def run(self):
        """Thread body: enforce memory and disk limits forever.

        Checks memory use every cycle and disk use every
        disk_to_memory_ratio cycles; raising on an exceeded limit lets the
        generic handler below terminate everything (code 20), while a dead
        process exits with code 99.
        """
        # How often the memory will be checked (seconds)
        memory_check_interval = repy_constants.CPU_POLLING_FREQ_WIN
        # The ratio of the disk polling time to memory polling time.
        disk_to_memory_ratio = int(repy_constants.DISK_POLLING_HDD /
                                   memory_check_interval)

        # Which cycle number we're on
        counter = 0

        # Elevate our priority, above normal is higher than the usercode, and is enough for disk/mem
        windows_api.set_current_thread_priority(
            windows_api.THREAD_PRIORITY_ABOVE_NORMAL)

        # need my pid to get a process handle...
        mypid = os.getpid()

        # run forever (only exit if an error occurs)
        while True:
            try:
                # Increment the interval counter
                counter += 1

                # Check memory use, get the WorkingSetSize or RSS
                memused = windows_api.process_memory_info(
                    mypid)['WorkingSetSize']

                if memused > nanny.get_resource_limit("memory"):
                    # We will be killed by the other thread...
                    raise Exception, "Memory use '" + str(
                        memused) + "' over limit '" + str(
                            nanny.get_resource_limit("memory")) + "'"

                # Check if we should check the disk
                if (counter % disk_to_memory_ratio) == 0:
                    # Check diskused
                    diskused = compute_disk_use(
                        repy_constants.REPY_CURRENT_DIR)
                    if diskused > nanny.get_resource_limit("diskused"):
                        raise Exception, "Disk use '" + str(
                            diskused) + "' over limit '" + str(
                                nanny.get_resource_limit("diskused")) + "'"
                # Sleep until the next iteration of checking the memory
                time.sleep(memory_check_interval)

            except windows_api.DeadProcess:
                #  Process may be dead, or die while checking memory use
                #  In any case, there is no reason to continue running, just exit
                harshexit.harshexit(99)

            except:
                tracebackrepy.handle_exception()
                print >> sys.stderr, "Nanny died!   Trying to kill everything else"
                harshexit.harshexit(20)
Esempio n. 13
0
def finalize():
    """Release the initialize event, wait for user events to drain, then
    exit with status 0."""
    global idlethreadcount, event_id

    # Hand the initialize event back to the nanny.
    nanny.tattle_remove_item('events', event_id)

    # Every thread above the idle baseline is a user event still running;
    # poll until the count drops back down.
    while True:
        if threading.activeCount() <= idlethreadcount:
            break
        time.sleep(0.25)

    # Nothing left to run -- leave with a clean exit status.
    harshexit.harshexit(0)
Esempio n. 14
0
def createthread(function):
  """
  <Purpose>
    Creates a new thread of execution.

  <Arguments>
    function:
      The function to invoke on entering the new thread.

  <Exceptions>
    RepyArgumentError is raised if the function is not callable.
    ResourceExhaustedError is raised if there are no available events.

  <Side Effects>
    Launches a new thread.

  <Resource Consumption>
    Consumes an event.

  <Returns>
    None
  """
  # Check if the function is callable
  if not safe_callable(function):
    raise RepyArgumentError("Provided function is not callable!")

  # Generate a unique handle and see if there are resources available
  eventhandle = EVENT_PREFIX + idhelper.getuniqueid()
  # NOTE(review): event accounting is disabled in this variant, so the
  # docstring's claims about ResourceExhaustedError and consuming an
  # event do not hold here -- confirm this is intentional.
  #nanny.tattle_add_item('events', eventhandle)

  # Wrap the provided function: run it, and exit the program on an
  # uncaught exception.
  def wrapped_func():
    try:
      function()
    except:
      # Exit if they throw an uncaught exception
      tracebackrepy.handle_exception()
      harshexit.harshexit(30)
    #finally: 
      # Remove the event before I exit
      #nanny.tattle_remove_item('events',eventhandle)


  # Create a thread object
  tobj = threading.Thread(target=wrapped_func, name=idhelper.get_new_thread_name(EVENT_PREFIX))

  # Check if we get an exception trying to create a new thread
  try:
    tobj.start()
  except thread.error:
    # Set exit code 56, which stands for a Threading Error
    # The Node manager will detect this and handle it
    harshexit.harshexit(56)
Esempio n. 15
0
def main():
  """Entry point: parse command-line options, initialize the nanny, read
  the user program, and build its virtual namespace.

  Exits with status 1 on bad arguments or an unreadable program file, and
  via harshexit(5) if static analysis deems the program unsafe.  (This
  excerpt ends after namespace creation.)
  """
  # JAC: This function should be kept as stable if possible.   Others who
  # extend Repy may be doing essentially the same thing in their main and
  # your changes may not be reflected there!


  # Armon: The CMD line path to repy is the first argument
  repy_location = sys.argv[0]

  # Get the directory repy is in
  repy_directory = os.path.dirname(repy_location)
  
  init_repy_location(repy_directory)
  

  ### PARSE OPTIONS.   These are command line in our case, but could be from
  ### anywhere if this is repurposed...
  usage = "USAGE: repy.py [options] resource_file program_to_run.r2py [program args]"
  parser = optparse.OptionParser(usage=usage)
  add_repy_options(parser)
  options, args = parser.parse_args()
  
  if len(args) < 2:
    print "Repy requires a resource file and the program to run!"
    parser.print_help()
    sys.exit(1)
  
  resourcefn = args[0]
  progname = args[1]
  progargs = args[2:]
  
  # Do a huge amount of initialization.
  parse_options(options)
  
  ### start resource restrictions, etc. for the nanny
  initialize_nanny(resourcefn)

  # Read the user code from the file
  try:
    filehandle = open(progname)
    usercode = filehandle.read()
    filehandle.close()
  except:
    print "FATAL ERROR: Unable to read the specified program file: '%s'" % (progname)
    sys.exit(1)

  # create the namespace...
  try:
    newnamespace = virtual_namespace.VirtualNamespace(usercode, progname)
  except CodeUnsafeError, e:
    print "Specified repy program is unsafe!"
    print "Static-code analysis failed with error: "+str(e)
    harshexit.harshexit(5)
Esempio n. 16
0
def main():
  """Entry point: parse command-line options, initialize the nanny, read
  the user program, and build its virtual namespace.

  Exits with status 1 on bad arguments or an unreadable program file, and
  via harshexit(5) if static analysis deems the program unsafe.  (This
  excerpt ends after namespace creation.)
  """
  # JAC: This function should be kept as stable if possible.   Others who
  # extend Repy may be doing essentially the same thing in their main and
  # your changes may not be reflected there!


  # Armon: The CMD line path to repy is the first argument
  repy_location = sys.argv[0]

  # Get the directory repy is in
  repy_directory = os.path.dirname(repy_location)
  
  init_repy_location(repy_directory)
  

  ### PARSE OPTIONS.   These are command line in our case, but could be from
  ### anywhere if this is repurposed...
  usage = "USAGE: repy.py [options] resource_file program_to_run.repy [program args]"
  parser = optparse.OptionParser(usage=usage)
  add_repy_options(parser)
  options, args = parser.parse_args()
  
  if len(args) < 2:
    print "Repy requires a resource file and the program to run!"
    parser.print_help()
    sys.exit(1)
  
  resourcefn = args[0]
  progname = args[1]
  progargs = args[2:]
  
  # Do a huge amount of initialization.
  parse_options(options)
  
  ### start resource restrictions, etc. for the nanny
  initialize_nanny(resourcefn)

  # Read the user code from the file
  try:
    filehandle = open(progname)
    usercode = filehandle.read()
    filehandle.close()
  except:
    print "FATAL ERROR: Unable to read the specified program file: '%s'" % (progname)
    sys.exit(1)

  # create the namespace...
  try:
    newnamespace = virtual_namespace.VirtualNamespace(usercode, progname)
  except CodeUnsafeError, e:
    print "Specified repy program is unsafe!"
    print "Static-code analysis failed with error: "+str(e)
    harshexit.harshexit(5)
Esempio n. 17
0
def createthread(function):
  """
  <Purpose>
    Creates a new thread of execution.

  <Arguments>
    function:
      The function to invoke on entering the new thread.

  <Exceptions>
    RepyArgumentError is raised if the function is not callable.
    ResourceExhaustedError is raised if there are no available events.

  <Side Effects>
    Launches a new thread.

  <Resource Consumption>
    Consumes an event.

  <Returns>
    None
  """
  # Refuse anything that cannot actually be invoked.
  if not safe_callable(function):
    raise RepyArgumentError("Provided function is not callable!")

  # Reserve an event for the new thread with the nanny.
  eventhandle = EVENT_PREFIX + idhelper.getuniqueid()
  nanny.tattle_add_item('events', eventhandle)

  def wrapped_func():
    # Runs in the new thread: invoke the user's function, always release
    # the event, and tear the program down on an uncaught exception.
    try:
      function()
    except:
      tracebackrepy.handle_exception()
      harshexit.harshexit(30)
    finally:
      nanny.tattle_remove_item('events', eventhandle)

  worker = threading.Thread(target=wrapped_func,
                            name=idhelper.get_new_thread_name(EVENT_PREFIX))

  try:
    worker.start()
  except thread.error:
    # Exit code 56 stands for a Threading Error; the Node Manager
    # detects this and handles it.
    harshexit.harshexit(56)
Esempio n. 18
0
def finalize():
  """Release the initialize event, drain remaining user events, then exit
  with status 0."""
  global idlethreadcount, event_id

  # Hand the initialize event back to the nanny.
  nanny.tattle_remove_item('events', event_id)

  # Every thread above the idle baseline is a user event still running;
  # poll until the count drops back down.
  while True:
    if threading.activeCount() <= idlethreadcount:
      break
    time.sleep(0.25)

  # All user events are done -- exit cleanly.
  harshexit.harshexit(0)
Esempio n. 19
0
    def run(self):
        """Thread body: enforce memory and disk limits forever.

        Checks memory use every cycle and disk use every
        disk_to_memory_ratio cycles; raising on an exceeded limit lets the
        generic handler below terminate everything (code 20), while a dead
        process exits with code 99.
        """
        # How often the memory will be checked (seconds)
        memory_check_interval = repy_constants.CPU_POLLING_FREQ_WIN
        # The ratio of the disk polling time to memory polling time.
        disk_to_memory_ratio = int(repy_constants.DISK_POLLING_HDD / memory_check_interval)

        # Which cycle number we're on
        counter = 0

        # Elevate our priority, above normal is higher than the usercode, and is enough for disk/mem
        windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_ABOVE_NORMAL)

        # need my pid to get a process handle...
        mypid = os.getpid()

        # run forever (only exit if an error occurs)
        while True:
            try:
                # Increment the interval counter
                counter += 1

                # Check memory use, get the WorkingSetSize or RSS
                memused = windows_api.process_memory_info(mypid)["WorkingSetSize"]

                if memused > nanny.get_resource_limit("memory"):
                    # We will be killed by the other thread...
                    raise Exception, "Memory use '" + str(memused) + "' over limit '" + str(
                        nanny.get_resource_limit("memory")
                    ) + "'"

                # Check if we should check the disk
                if (counter % disk_to_memory_ratio) == 0:
                    # Check diskused
                    diskused = compute_disk_use(repy_constants.REPY_CURRENT_DIR)
                    if diskused > nanny.get_resource_limit("diskused"):
                        raise Exception, "Disk use '" + str(diskused) + "' over limit '" + str(
                            nanny.get_resource_limit("diskused")
                        ) + "'"
                # Sleep until the next iteration of checking the memory
                time.sleep(memory_check_interval)

            except windows_api.DeadProcess:
                #  Process may be dead, or die while checking memory use
                #  In any case, there is no reason to continue running, just exit
                harshexit.harshexit(99)

            except:
                tracebackrepy.handle_exception()
                print >> sys.stderr, "Nanny died!   Trying to kill everything else"
                harshexit.harshexit(20)
Esempio n. 20
0
  def run(self):
    """Thread body: enforce memory and disk limits forever (WinCE-aware).

    Checks memory use every polling cycle and disk use every disk_interval
    cycles; raising on an exceeded limit lets the generic handler below
    terminate everything (code 20), while a dead process exits with 99.
    """
    # Calculate how often disk should be checked (in polling cycles)
    if ostype == "WindowsCE":
      disk_interval = int(repy_constants.RESOURCE_POLLING_FREQ_WINCE / repy_constants.CPU_POLLING_FREQ_WINCE)
    else:
      disk_interval = int(repy_constants.RESOURCE_POLLING_FREQ_WIN / repy_constants.CPU_POLLING_FREQ_WIN)
    current_interval = 0 # What cycle are we on  
    
    # Elevate our priority, above normal is higher than the usercode, and is enough for disk/mem
    windows_api.set_current_thread_priority(windows_api.THREAD_PRIORITY_ABOVE_NORMAL)
    
    # need my pid to get a process handle...
    mypid = os.getpid()

    # run forever (only exit if an error occurs)
    while True:
      try:
        # Check memory use, get the WorkingSetSize or RSS
        memused = windows_api.process_memory_info(mypid)['WorkingSetSize']
        
        if memused > nanny_resource_limits.resource_limit("memory"):
          # We will be killed by the other thread...
          raise Exception, "Memory use '"+str(memused)+"' over limit '"+str(nanny_resource_limits.resource_limit("memory"))+"'"
        
        # Increment the interval we are on
        current_interval += 1

        # Check if we should check the disk
        if (current_interval % disk_interval) == 0:
          # Check diskused
          diskused = compute_disk_use(repy_constants.REPY_CURRENT_DIR)
          if diskused > nanny_resource_limits.resource_limit("diskused"):
            raise Exception, "Disk use '"+str(diskused)+"' over limit '"+str(nanny_resource_limits.resource_limit("diskused"))+"'"
        
        # Sleep out the rest of the polling interval
        if ostype == 'WindowsCE':
          time.sleep(repy_constants.CPU_POLLING_FREQ_WINCE)
        else:
          time.sleep(repy_constants.CPU_POLLING_FREQ_WIN)
        
      except windows_api.DeadProcess:
        #  Process may be dead, or die while checking memory use
        #  In any case, there is no reason to continue running, just exit
        harshexit.harshexit(99)

      except:
        tracebackrepy.handle_exception()
        print >> sys.stderr, "Nanny died!   Trying to kill everything else"
        harshexit.harshexit(20)
Esempio n. 21
0
def settimer(waittime, function, args):
    """
   <Purpose>
      Allow the current event to set an event to be performed in the future.
      This does not guarantee the event will be triggered at that time, only
      that it will be triggered after that time.

   <Arguments>
      waittime:
         The minimum amount of time to wait before delivering the event
      function:
         The function to call
      args:
         The arguments to pass to the function.   This should be a tuple or 
         list

   <Exceptions>
      None.

   <Side Effects>
      None.

   <Returns>
      A timer handle, for use with canceltimer
  """
    restrictions.assertisallowed('settimer', waittime)

    # The handle doubles as the event resource id and the timerinfo key.
    eventhandle = generate_eventhandle()

    # Account for the new event with the nanny.
    nanny.tattle_add_item('events', eventhandle)

    tobj = threading.Timer(waittime, functionwrapper,
                           [function] + [eventhandle] + [args])

    # Set the name of the thread
    tobj.setName(idhelper.get_new_thread_name(EVENT_PREFIX))

    # Register the timer so canceltimer() can find it.
    timerinfo[eventhandle] = {'timer': tobj}

    # Check if we get an exception trying to create a new thread
    try:
        # start the timer
        tobj.start()
    except thread.error, exp:
        # Set exit code 56, which stands for a Threading Error
        # The Node manager will detect this and handle it
        harshexit.harshexit(56)
    # NOTE(review): the docstring promises a timer handle is returned, but
    # there is no return statement in this excerpt -- confirm whether a
    # trailing 'return eventhandle' was truncated.
Esempio n. 22
0
def settimer(waittime, function, args):
  """
   <Purpose>
      Allow the current event to set an event to be performed in the future.
      This does not guarantee the event will be triggered at that time, only
      that it will be triggered after that time.

   <Arguments>
      waittime:
         The minimum amount of time to wait before delivering the event
      function:
         The function to call
      args:
         The arguments to pass to the function.   This should be a tuple or 
         list

   <Exceptions>
      None.

   <Side Effects>
      None.

   <Returns>
      A timer handle, for use with canceltimer
  """
  restrictions.assertisallowed('settimer',waittime)
  
  # The handle doubles as the event resource id and the timerinfo key.
  eventhandle = generate_eventhandle()

  # Account for the new event with the nanny.
  nanny.tattle_add_item('events',eventhandle)

  tobj = threading.Timer(waittime,functionwrapper,[function] + [eventhandle] + [args])

  # Set the name of the thread
  tobj.setName(idhelper.get_new_thread_name(EVENT_PREFIX))

  # Register the timer so canceltimer() can find it.
  timerinfo[eventhandle] = {'timer':tobj}
  
  # Check if we get an exception trying to create a new thread
  try:
    # start the timer
    tobj.start()
  except thread.error, exp:
    # Set exit code 56, which stands for a Threading Error
    # The Node manager will detect this and handle it
    harshexit.harshexit(56)

  # BUG FIX: the docstring promises "A timer handle, for use with
  # canceltimer", but the function previously fell off the end and
  # returned None.  Return the handle as documented.
  return eventhandle
Esempio n. 23
0
def exitall():
  """
   <Purpose>
      Immediately halt execution of the user program without delivering an
      exit event to the main program.

   <Arguments>
      None.

   <Exceptions>
      None.

   <Side Effects>
      Timers and connection / message receiving functions that are still
      in flight are left in an undefined state and may behave
      unpredictably if used after this call.

   <Returns>
      None.   The current thread does not resume after exit
  """
  # Terminate everything; 200 is the exit code used for exitall().
  harshexit.harshexit(200)
Esempio n. 24
0
def exitall():
    """
   <Purpose>
      Allows the user program to stop execution of the program without
      passing an exit event to the main program. 

   <Arguments>
      None.

   <Exceptions>
      None.

   <Side Effects>
      Interactions with timers and connection / message receiving functions 
      are undefined.   These functions may be called after exit and may 
      have undefined state.

   <Returns>
      None.   The current thread does not resume after exit
  """
    # Terminate everything; 200 is the exit code used for exitall().
    harshexit.harshexit(200)
Esempio n. 25
0
def functionwrapper(func, timerhandle, args):
  """Timer-thread entry point: run func(*args) for the timer identified by
  timerhandle, unless the timer was already cancelled.

  Claims the timer by removing its entry from timerinfo; if the entry is
  already gone, canceltimer() got there first and we simply return.  Exits
  the program (code 30) on an uncaught exception from func, and releases
  the event resource on completion.
  """
  #restrictions ?
  # Atomically claim the timer entry.  The original membership test
  # followed by a separate del could race with canceltimer() (hence its
  # KeyError handler); dict.pop does the check-and-remove in one step.
  # The stored value is always a non-empty dict, so None is a safe sentinel.
  if timerinfo.pop(timerhandle, None) is None:
    # I've been "stopped" by canceltimer
    return

  try:
    func(*args)
  except:
    # Exit if they throw an uncaught exception
    tracebackrepy.handle_exception()
    harshexit.harshexit(30)
    
  # remove the event before I exit
  nanny.tattle_remove_item('events',timerhandle)
Esempio n. 26
0
def functionwrapper(func, timerhandle, args):
    """Timer-thread entry point: run func(*args) for the timer identified
    by timerhandle, unless the timer was already cancelled.  Exits the
    program (code 30) on an uncaught exception from func."""
    #restrictions ?
    # call the function with the arguments
    try:
        if timerhandle in timerinfo:
            del timerinfo[timerhandle]
        else:
            # I've been "stopped" by canceltimer
            return
    except KeyError:
        # canceltimer removed the entry between the membership test and
        # the del -- I've been "stopped" by canceltimer
        return

    try:
        func(*args)
    except:
        # Exit if they throw an uncaught exception
        tracebackrepy.handle_exception()
        harshexit.harshexit(30)

    # remove the event before I exit
    nanny.tattle_remove_item('events', timerhandle)
Esempio n. 27
0
class monitor_process_checker(threading.Thread):
    # Thread that reads IPC messages from the monitor process and exits
    # the program if the monitor dies.
    def __init__(self, readhandle):
        """
    <Purpose>
      Terminates harshly if the monitor process dies before we do.

    <Arguments>
      readhandle: A file descriptor to the handle of a pipe to the monitor
          process.
    """
        # Name our self
        threading.Thread.__init__(self, name="ProcessChecker")

        # Store the handle
        self.readhandle = readhandle

    def run(self):
        """Dispatch messages from the monitor until the pipe breaks, then
        record termination status and exit the process (code 70)."""
        # Run forever
        while True:
            # Read a message
            try:
                mesg = read_message_from_pipe(self.readhandle)
            except Exception, e:
                # A read failure means the pipe (and the monitor) is gone.
                break

            # Check for a handler function
            if mesg[0] in IPC_HANDLER_FUNCTIONS:
                # Invoke the handler function with the data
                handler = IPC_HANDLER_FUNCTIONS[mesg[0]]
                handler(mesg[1])

            # Print a message if there is a message on an unknown channel
            else:
                print "[WARN] Message on unknown channel from monitor process:", mesg[
                    0]

        ### We only leave the loop on a fatal error, so we need to exit now
        # ('e' is always bound here: the loop only exits via the break above.)

        # Write out status information, the monitor process would do this, but its dead.
        statusstorage.write_status("Terminated")
        print >> sys.stderr, "Monitor process died! Terminating!", repr(e)
        harshexit.harshexit(70)
Esempio n. 28
0
def handle_internalerror(error_string, exitcode):
  """
  <Author>
    Brent Couvrette
  <Purpose>
    When an internal error happens in repy it should be handled differently 
    than normal exceptions, because internal errors could possibly lead to
    security vulnerabilities if we aren't careful.  Therefore when an internal
    error occurs, we will not return control to the user's program.  Instead
    we will log the error to the service log if available, then terminate.
  <Arguments>
    error_string - The error string to be logged if logging is enabled.
    exitcode - The exit code to be used in the harshexit call.
  <Exceptions>
    None
  <Side Effects>
    The program will exit.
  <Return>
    Shouldn't return because harshexit will always be called.
  """
  # servicelog is presumably a module-level flag enabling the service log
  # (set elsewhere in this file) -- only import the logger when enabled.
  if servicelog:
    import servicelogger

  # NOTE(review): the except/finally for this outer try appear to have
  # been truncated from this excerpt -- as shown, the block is incomplete.
  try:
    print >> sys.stderr, "Internal Error"
    handle_exception()
    if not servicelog:
      # If the service log is disabled, lets just exit.
      harshexit.harshexit(exitcode)
    else:
      # Internal errors should not be given to the user's code to be caught,
      # so we print the exception to the service log and exit. -Brent
      exceptionstring = "[INTERNAL ERROR] " + error_string + '\n'
      for line in traceback.format_stack():
        exceptionstring = exceptionstring + line
  
      # This magic is determining what directory we are in, so that can be
      # used as an identifier in the log.  In a standard deployment this
      # should be of the form vXX where XX is the vessel number.  We don't
      # want any exceptions here preventing us from exitting, so we will
      # wrap this in a try-except block, and use a default value if we fail.
      try:
        identifier = os.path.basename(os.getcwd())
      except:
        # We use a blank except because if we don't, the user might be able to
        # handle the exception, which is unacceptable on internal errors.  Using
        # the current pid should avoid any attempts to write to the same file at
        # the same time.
        identifier = str(os.getpid())
      else:
        if identifier == '':
          # If the identifier is blank, use the PID.
          identifier = str(os.getpid())
    
      # Again we want to ensure that even if we fail to log, we still exit.
      try:
        servicelogger.multi_process_log(exceptionstring, identifier, logdirectory)
      except Exception, e:
        # if an exception occurs, log it (unfortunately, to the user's log)
        print 'Inner abort of servicelogger'
        print e,type(e)
        traceback.print_exc()
      finally:
        harshexit.harshexit(exitcode)
Esempio n. 29
0
        identifier = os.path.basename(os.getcwd())
      except:
        # We use a blank except because if we don't, the user might be able to
        # handle the exception, which is unacceptable on internal errors.  Using
        # the current pid should avoid any attempts to write to the same file at
        # the same time.
        identifier = str(os.getpid())
      else:
        if identifier == '':
          # If the identifier is blank, use the PID.
          identifier = str(os.getpid())
    
      # Again we want to ensure that even if we fail to log, we still exit.
      try:
        servicelogger.multi_process_log(exceptionstring, identifier, logdirectory)
      except Exception, e:
        # if an exception occurs, log it (unfortunately, to the user's log)
        print 'Inner abort of servicelogger'
        print e,type(e)
        traceback.print_exc()
      finally:
        harshexit.harshexit(exitcode)

  except Exception, e:
    # if an exception occurs, log it (unfortunately, to the user's log)
    print 'Outer abort of servicelogger'
    print e,type(e)
    traceback.print_exc()
  finally:
    harshexit.harshexit(842)
Esempio n. 30
0
  initialize_id = idhelper.getuniqueid()
  try:
    nanny.tattle_add_item('events', initialize_id)
  except Exception, e:
    tracebackrepy.handle_internalerror("Failed to aquire event for '" + \
        "initialize' event.\n(Exception was: %s)" % e.message, 140)

  try:
    main_namespace.evaluate(usercontext)
  except SystemExit:
    raise
  except:
    # I think it makes sense to exit if their code throws an exception...
    tracebackrepy.handle_exception()
    harshexit.harshexit(6)
  finally:
    nanny.tattle_remove_item('events', initialize_id)


  # I've changed to the threading library, so this should increase if there are
  # pending events
  while threading.activeCount() > idlethreadcount:
    # do accounting here?
    time.sleep(0.25)


  # Once there are no more pending events for the user thread, we give them
  # an "exit" event.   This allows them to clean up, etc. if needed.

  # call the user program to notify them that we are exiting...
Esempio n. 31
0
def main():
  global configuration

  if not FOREGROUND:
    # Background ourselves.
    daemon.daemonize()


  # Check if we are running in testmode.
  if TEST_NM:
    nodemanager_pid = os.getpid()
    servicelogger.log("[INFO]: Running nodemanager in test mode on port 1224, "+
                      "pid %s." % str(nodemanager_pid))
    nodeman_pid_file = open(os.path.join(os.getcwd(), 'nodemanager.pid'), 'w')
    
    # Write out the pid of the nodemanager process that we started to a file.
    # This is only done if the nodemanager was started in test mode.
    try:
      nodeman_pid_file.write(str(nodemanager_pid))
    finally:
      nodeman_pid_file.close()

  else:
    # ensure that only one instance is running at a time...
    gotlock = runonce.getprocesslock("seattlenodemanager")

    if gotlock == True:
      # I got the lock.   All is well...
      pass
    else:
      if gotlock:
        servicelogger.log("[ERROR]:Another node manager process (pid: " + str(gotlock) + 
                        ") is running")
      else:
        servicelogger.log("[ERROR]:Another node manager process is running")
      return


  servicelogger.log('[INFO]: This is Seattle release "' + version + "'") 

  # Feature add for #1031: Log information about the system in the nm log...
  servicelogger.log('[INFO]:platform.python_version(): "' + 
    str(platform.python_version())+'"')
  servicelogger.log('[INFO]:platform.platform(): "' + 
    str(platform.platform())+'"')

  # uname on Android only yields 'Linux', let's be more specific.
  try:
    import android
    servicelogger.log('[INFO]:platform.uname(): Android / "' + 
      str(platform.uname())+'"')
  except ImportError:
    servicelogger.log('[INFO]:platform.uname(): "'+str(platform.uname())+'"')

  # I'll grab the necessary information first...
  servicelogger.log("[INFO]:Loading config")
  # BUG: Do this better?   Is this the right way to engineer this?
  configuration = persist.restore_object("nodeman.cfg")

  # If Seattle is not installed, the nodemanager will have no vesseldict
  # and an incomplete config. Log this problem and exit.
  try:
    if configuration["seattle_installed"] is not True:
      servicelogger.log("[ERROR]:Seattle is not installed. Run the Seattle installer to create the required configuration files before starting the nodemanager. Exiting.")
      harshexit.harshexit(10)
  except KeyError:
    # There isn't even a "seattle_installed" entry in this dict!?
    servicelogger.log("[ERROR]:The nodemanager configuration, nodeman.cfg, is corrupt. Exiting.")
    harshexit.harshexit(11)
  
  
  # Armon: initialize the network restrictions
  initialize_ip_interface_restrictions(configuration)
  
  
  # Enable Affix and overload various Repy network API calls 
  # with Affix-enabled calls.
  # Use the node's publickey to generate a name for our node.
  mypubkey = rsa_publickey_to_string(configuration['publickey']).replace(" ", "")
  affix_stack_name = sha_hexhash(mypubkey)

  enable_affix('(CoordinationAffix)(MakeMeHearAffix)(NamingAndResolverAffix,' + 
      affix_stack_name + ')')

  # get the external IP address...
  myip = None
  while True:
    try:
      # Try to find our external IP.
      myip = emulcomm.getmyip()
    except Exception, e: # Replace with InternetConnectivityError ?
      # If we aren't connected to the internet, emulcomm.getmyip() raises this:
      if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.":
        # So we try again.
        pass
      else:
        # It wasn't emulcomm.getmyip()'s exception. re-raise.
        raise
    else:
      # We succeeded in getting our external IP. Leave the loop.
      break
    time.sleep(0.1)
Esempio n. 32
0
            p.runctx(
                'main_namespace.evaluate(usercontext)',
                globals(),
                locals(),
            )
            p = pstats.Stats(p)
            # p.sort_stats('cumulative')
            p.print_stats()
        else:
            main_namespace.evaluate(usercontext)
    except SystemExit:
        raise
    except:
        # I think it makes sense to exit if their code throws an exception...
        tracebackrepy.handle_exception()
        harshexit.harshexit(6)
    finally:
        nanny.tattle_remove_item('events', event_id)

    # I've changed to the threading library, so this should increase if there are
    # pending events
    while threading.activeCount() > idlethreadcount:
        # do accounting here?
        time.sleep(0.25)

    # Once there are no more pending events for the user thread, we exit
    harshexit.harshexit(0)


def usage(str_err=""):
    # Ivan 12/24/2008
Esempio n. 33
0
      except:
        # We use a blank except because if we don't, the user might be able to
        # handle the exception, which is unacceptable on internal errors.  Using
        # the current pid should avoid any attempts to write to the same file at
        # the same time.
        identifier = str(os.getpid())
      else:
        if identifier == '':
          # If the identifier is blank, use the PID.
          identifier = str(os.getpid())
    
      # Again we want to ensure that even if we fail to log, we still exit.
      try:
        if servicelog:
          servicelogger.multi_process_log(exceptionstring, identifier, logdirectory)
      except Exception, e:
        # if an exception occurs, log it (unfortunately, to the user's log)
        print 'Inner abort of servicelogger'
        print e,type(e)
        traceback.print_exc()
      finally:
        harshexit.harshexit(exitcode)

  except Exception, e:
    # if an exception occurs, log it (unfortunately, to the user's log)
    print 'Outer abort of servicelogger'
    print e,type(e)
    traceback.print_exc()
  finally:
    harshexit.harshexit(842)
Esempio n. 34
0
def repy_main(argv=sys.argv):
  """Run repy with these command line arguments.

  Parses the command line, locks down sys.path against later directory
  changes, initializes the NM status interface and status storage, and
  finally hands control to main().  Every exit path goes through sys.exit
  or harshexit, so this function never returns normally.
  """
  global simpleexec
  global logfile

  # Armon: The CMD line path to repy is the first argument
  repy_location = argv[0]

  # Get the directory repy is in
  repy_directory = os.path.dirname(repy_location)
  
  # Translate into an absolute path
  if os.path.isabs(repy_directory):
    absolute_repy_directory = repy_directory
  
  else:
    # This will join the currect directory with the relative path
    # and then get the absolute path to that location
    absolute_repy_directory = os.path.abspath(os.path.join(os.getcwd(), repy_directory))
  
  # Store the absolute path as the repy startup directory
  repy_constants.REPY_START_DIR = absolute_repy_directory
 
  # For security, we need to make sure that the Python path doesn't change even
  # if the directory does...
  newsyspath = []
  for item in sys.path[:]:
    if item == '' or item == '.':
      # Replace the implicit "current directory" entries with the absolute
      # cwd so a later os.chdir (e.g. via --cwd) can't alter module lookup.
      newsyspath.append(os.getcwd())
    else:
      newsyspath.append(item)

  # It should be safe now.   I'm assuming the user isn't trying to undercut us
  # by setting a crazy python path
  sys.path = newsyspath

  
  args = argv[1:]

  try:
    optlist, fnlist = getopt.getopt(args, '', [
      'simple', 'ip=', 'iface=', 'nootherips', 'logfile=',
      'stop=', 'status=', 'cwd=', 'servicelog', 'safebinary'
      ])

  except getopt.GetoptError:
    usage()
    sys.exit(1)

  # Set up the simple variable if needed
  simpleexec = False

  # By default we don't want to use the service logger
  servicelog = False

  # Default logfile (if the option --logfile isn't passed)
  logfile = None

  # Default stopfile (if the option --stop isn't passed)
  stopfile = None

  # Default statusfile (if the option --status isn't passed)
  statusfile = None

  if len(fnlist) < 2:
    usage("Must supply a resource file and a program file to execute")
    sys.exit(1)

  for option, value in optlist:
    if option == '--simple':
      simpleexec = True

    elif option == '--ip':
      emulcomm.user_ip_interface_preferences = True

      # Append this ip to the list of available ones if it is new, since
      # multiple IP's may be specified
      if (True, value) not in emulcomm.user_specified_ip_interface_list:
        emulcomm.user_specified_ip_interface_list.append((True, value))

    elif option == '--iface':
      emulcomm.user_ip_interface_preferences = True
      
      # Append this interface to the list of available ones if it is new
      if (False, value) not in emulcomm.user_specified_ip_interface_list:
        emulcomm.user_specified_ip_interface_list.append((False, value))

    # Check if they have told us explicitly not to allow other IP's
    elif option == '--nootherips':
      # Set user preference to True
      emulcomm.user_ip_interface_preferences = True
      # Disable nonspecified IP's
      emulcomm.allow_nonspecified_ips = False
    
    elif option == '--logfile':
      # set up the circular log buffer...
      logfile = value

    elif option == '--stop':
      # Watch for the creation of this file and abort when it happens...
      stopfile = value

    elif option == '--status':
      # Write status information into this file...
      statusfile = value

    # Set Current Working Directory
    elif option == '--cwd':
      os.chdir(value)

    # Enable logging of internal errors to the service logger.
    elif option == '--servicelog':
      servicelog = True
    
    # Enable safe binary mode
    elif option == '--safebinary':
      safebinary.SAFEBINARY = True
      
  # Update repy current directory (it may have been changed by --cwd above)
  repy_constants.REPY_CURRENT_DIR = os.path.abspath(os.getcwd())

  # Initialize the NM status interface
  nmstatusinterface.init(stopfile, statusfile)
  
  # Write out our initial status
  statusstorage.write_status("Started")

  resourcefn = fnlist[0]
  progname = fnlist[1]
  progargs = fnlist[2:]

  # We also need to pass in whether or not we are going to be using the service
  # log for repy.  We provide the repy directory so that the vessel information
  # can be found regardless of where we are called from...
  tracebackrepy.initialize(servicelog, absolute_repy_directory)

  
  try:
    main(resourcefn, progname, progargs)
  except SystemExit:
    # Translate SystemExit into an immediate harshexit so user code cannot
    # intercept the shutdown.
    harshexit.harshexit(4)
  except:
    tracebackrepy.handle_exception()
    harshexit.harshexit(3)
Esempio n. 35
0
  initialize_id = idhelper.getuniqueid()
  try:
    nanny.tattle_add_item('events', initialize_id)
  except Exception, e:
    tracebackrepy.handle_internalerror("Failed to aquire event for '" + \
        "initialize' event.\n(Exception was: %s)" % e.message, 140)

  try:
    main_namespace.evaluate(usercontext)
  except SystemExit:
    raise
  except:
    # I think it makes sense to exit if their code throws an exception...
    tracebackrepy.handle_exception()
    harshexit.harshexit(6)
  finally:
    nanny.tattle_remove_item('events', initialize_id)


  # I've changed to the threading library, so this should increase if there are
  # pending events
  while threading.activeCount() > idlethreadcount:
    # do accounting here?
    time.sleep(0.25)


  # Once there are no more pending events for the user thread, we give them
  # an "exit" event.   This allows them to clean up, etc. if needed.

  # call the user program to notify them that we are exiting...
Esempio n. 36
0
        # Repy/Montior proccesses both _exit, so the thread should be stopped anyway
        # nmstatusinterface.stop()

        if (kill_repy):
            harshexit.portablekill(repypid)

        # XXX LP: Is this actually doeing something???
        try:
            statusstorage.write_status("Terminated")
        except:
            pass

        # The monitor process (child) should always exit this way on android
        # because we don't want the child to return back to Java if repy (parent)
        # is not alive anymore, which it should not be at this point
        harshexit.harshexit(monitor_exit_code)


def resource_monitor(repypid, pipe_handle):
    """
  <Purpose>
    Function runs in a loop forever, checking resource usage and throttling CPU.
    Checks CPU, memory, and disk.
    
  <Arguments>
    repypid:
      The pid of repy

    pipe_handle:
      A handle to the pipe to the repy process. Allows sending resource use information.
  """
Esempio n. 37
0
    del usercode

    if options.execinfo:
        print("=" * 40)
        print("Running program: " + progname)
        print("Arguments: " + progargs)
        print("=" * 40)

    newcontext = get_safe_context(progargs)

    try:
        newnamespace.evaluate(newcontext)
    except SystemExit:
        raise
    except:

        tracebackrepy.handle_exception()
        harshexit.harshexit(6)
    harshexit.harshexit(0)


if __name__ == '__main__':
    try:
        main()
    except SystemExit:
        # A requested exit still goes through harshexit so the process
        # terminates immediately with a known status code.
        harshexit.harshexit(4)
    except:
        # Any other uncaught exception is logged before forcing termination.
        tracebackrepy.handle_exception()
        harshexit.harshexit(3)
Esempio n. 38
0
def main():
    """Parse command line options, load the user program, and run it.

    Reads the repy program named on the command line, performs static
    safety analysis on it, and evaluates it inside a restricted virtual
    namespace.  Never returns normally: exits via sys.exit on usage or
    read errors, or via harshexit once the program finishes or fails.
    """
    repy_location = sys.argv[0]
    repy_directory = os.path.dirname(repy_location)

    init_repy_location(repy_directory)

    usage = "USAGE: repy.py [options] program_to_run.r2py [program args]"
    parser = optparse.OptionParser(usage=usage)

    # Stop option parsing at the first positional argument so flags after
    # the program name are passed through to the user program untouched.
    parser.disable_interspersed_args()

    add_repy_options(parser)
    options, args = parser.parse_args()

    if len(args) < 1:
        print("Repy requires a program to run!")
        parser.print_help()
        sys.exit(1)

    #resourcefn = args[0]
    progname = args[0]
    progargs = args[1:]

    parse_options(options)

    nmstatusinterface.launch(None)

    try:
        filehandle = open(progname)
        usercode = filehandle.read()
        filehandle.close()
    except:
        print(
            f"FATAL ERROR: Unable to read the specified program file: {progname}"
        )
        sys.exit(1)

    try:
        newnamespace = virtual_namespace.VirtualNamespace(usercode, progname)
    except CodeUnsafeError as e:
        print("Specified repy program is unsafe!")
        print("Static-code analysis failed with error: " + str(e))
        harshexit.harshexit(5)

    # The namespace keeps its own copy of the code; free ours.
    del usercode

    if options.execinfo:
        print("=" * 40)
        print("Running program: " + progname)
        # Bug fix: progargs is a list, and concatenating it to a str raised
        # TypeError here.  Render it explicitly instead.
        print("Arguments: " + str(progargs))
        print("=" * 40)

    newcontext = get_safe_context(progargs)

    try:
        newnamespace.evaluate(newcontext)
    except SystemExit:
        raise
    except:
        # Exit if the user's code throws an uncaught exception.
        tracebackrepy.handle_exception()
        harshexit.harshexit(6)
    harshexit.harshexit(0)
Esempio n. 39
0
def handle_internalerror(error_string, exitcode):
  """
  <Author>
    Brent Couvrette
  <Purpose>
    When an internal error happens in repy it should be handled differently 
    than normal exceptions, because internal errors could possibly lead to
    security vulnerabilities if we aren't careful.  Therefore when an internal
    error occurs, we will not return control to the user's program.  Instead
    we will log the error to the service log if available, then terminate.
  <Arguments>
    error_string - The error string to be logged if logging is enabled.
    exitcode - The exit code to be used in the harshexit call.
  <Exceptions>
    None
  <Side Effects>
    The program will exit.
  <Return>
    Shouldn't return because harshexit will always be called.
  """
  # NOTE(review): servicelog and logdirectory look like module-level globals
  # configured elsewhere (not visible in this block) -- confirm before
  # relying on them.
  try:
    print >> sys.stderr, "Internal Error"
    # Dump the current exception (to stderr / the user's log) first.
    handle_exception()
    if not servicelog:
      # If the service log is disabled, lets just exit.
      harshexit.harshexit(exitcode)
    else:
      # Internal errors should not be given to the user's code to be caught,
      # so we print the exception to the service log and exit. -Brent
      exceptionstring = "[INTERNAL ERROR] " + error_string + '\n'
      for line in traceback.format_stack():
        exceptionstring = exceptionstring + line
  
      # This magic is determining what directory we are in, so that can be
      # used as an identifier in the log.  In a standard deployment this
      # should be of the form vXX where XX is the vessel number.  We don't
      # want any exceptions here preventing us from exitting, so we will
      # wrap this in a try-except block, and use a default value if we fail.
      try:
        identifier = os.path.basename(os.getcwd())
      except:
        # We use a blank except because if we don't, the user might be able to
        # handle the exception, which is unacceptable on internal errors.  Using
        # the current pid should avoid any attempts to write to the same file at
        # the same time.
        identifier = str(os.getpid())
      else:
        if identifier == '':
          # If the identifier is blank, use the PID.
          identifier = str(os.getpid())
    
      # Again we want to ensure that even if we fail to log, we still exit.
      try:
        if servicelog:
          servicelogger.multi_process_log(exceptionstring, identifier, logdirectory)
      except Exception, e:
        # if an exception occurs, log it (unfortunately, to the user's log)
        print 'Inner abort of servicelogger'
        print e,type(e)
        traceback.print_exc()
      finally:
        harshexit.harshexit(exitcode)
Esempio n. 40
0
 
  try:
    if profile:
      p = cProfile.Profile()
      p.runctx('main_namespace.evaluate(usercontext)', globals(), locals(),)
      p = pstats.Stats(p)
      # p.sort_stats('cumulative')
      p.print_stats()
    else:
      main_namespace.evaluate(usercontext)
  except SystemExit:
    raise
  except:
    # I think it makes sense to exit if their code throws an exception...
    tracebackrepy.handle_exception()
    harshexit.harshexit(6)
  finally:
    nanny.tattle_remove_item('events', event_id)

  # I've changed to the threading library, so this should increase if there are
  # pending events
  while threading.activeCount() > idlethreadcount:
    # do accounting here?
    time.sleep(0.25)


  # Once there are no more pending events for the user thread, we exit
  harshexit.harshexit(0)


def usage(str_err=""):
Esempio n. 41
0
def repy_main(argv=sys.argv):
    """Run repy with these command line arguments.

    Resolves repy's startup directory, freezes sys.path, parses the
    command line, initializes logging/status facilities, and finally
    delegates to main().  Every exit path goes through sys.exit or
    harshexit, so this function never returns normally.
    """
    global simpleexec
    global logfile

    # Resolve the directory repy was launched from into an absolute path.
    repy_location = argv[0]
    repy_directory = os.path.dirname(repy_location)
    if not os.path.isabs(repy_directory):
        # Anchor the relative path at the current working directory.
        repy_directory = os.path.abspath(
            os.path.join(os.getcwd(), repy_directory))
    absolute_repy_directory = repy_directory

    # Remember where repy started from.
    repy_constants.REPY_START_DIR = absolute_repy_directory

    # For security: pin the implicit "current directory" sys.path entries
    # to the absolute cwd so later directory changes can't affect imports.
    sys.path = [os.getcwd() if entry in ('', '.') else entry
                for entry in sys.path]

    try:
        optlist, fnlist = getopt.getopt(argv[1:], '', [
            'simple', 'ip=', 'iface=', 'nootherips', 'logfile=', 'stop=',
            'status=', 'cwd=', 'servicelog', 'safebinary'
        ])
    except getopt.GetoptError:
        usage()
        sys.exit(1)

    # Option defaults.
    simpleexec = False
    servicelog = False
    logfile = None
    stopfile = None
    statusfile = None

    if len(fnlist) < 2:
        usage("Must supply a resource file and a program file to execute")
        sys.exit(1)

    for flag, argument in optlist:
        if flag == '--simple':
            simpleexec = True

        elif flag == '--ip':
            emulcomm.user_ip_interface_preferences = True
            # Record this IP if it hasn't been specified already; several
            # --ip options may appear on one command line.
            candidate = (True, argument)
            if candidate not in emulcomm.user_specified_ip_interface_list:
                emulcomm.user_specified_ip_interface_list.append(candidate)

        elif flag == '--iface':
            emulcomm.user_ip_interface_preferences = True
            # Record this interface if it hasn't been specified already.
            candidate = (False, argument)
            if candidate not in emulcomm.user_specified_ip_interface_list:
                emulcomm.user_specified_ip_interface_list.append(candidate)

        elif flag == '--nootherips':
            # Restrict repy to only the explicitly listed IPs/interfaces.
            emulcomm.user_ip_interface_preferences = True
            emulcomm.allow_nonspecified_ips = False

        elif flag == '--logfile':
            # Destination for the circular log buffer.
            logfile = argument

        elif flag == '--stop':
            # Abort when this file appears.
            stopfile = argument

        elif flag == '--status':
            # Status information is written here.
            statusfile = argument

        elif flag == '--cwd':
            os.chdir(argument)

        elif flag == '--servicelog':
            # Internal errors also go to the service logger.
            servicelog = True

        elif flag == '--safebinary':
            safebinary.SAFEBINARY = True

    # cwd may have changed via --cwd above; record the final location.
    repy_constants.REPY_CURRENT_DIR = os.path.abspath(os.getcwd())

    # Bring up the NM status interface and announce that we've started.
    nmstatusinterface.init(stopfile, statusfile)
    statusstorage.write_status("Started")

    resourcefn, progname = fnlist[0], fnlist[1]
    progargs = fnlist[2:]

    # Tell tracebackrepy whether the service log is in use and where repy
    # lives, so vessel information can be located from any cwd.
    tracebackrepy.initialize(servicelog, absolute_repy_directory)

    try:
        main(resourcefn, progname, progargs)
    except SystemExit:
        harshexit.harshexit(4)
    except:
        tracebackrepy.handle_exception()
        harshexit.harshexit(3)
Esempio n. 42
0
def main(resourcefn, program, args):
  """Set up the repy sandbox and prepare the user's program for execution.

  resourcefn - path to the resource (restrictions) file for the nanny.
  program    - path to the user program file to load.
  args       - arguments destined for the user program.

  NOTE(review): 'logfile' is read here but not a parameter -- it appears to
  be a module-level global set by the command line parser; confirm.
  """

  # Armon: Initialize the circular logger before starting the nanny
  if logfile:
    # time to set up the circular logger
    loggerfo = loggingrepy.circular_logger(logfile)
    # and redirect err and out there...
    sys.stdout = loggerfo
    sys.stderr = loggerfo
  else:
    # let's make it so that the output (via print) is always flushed
    sys.stdout = loggingrepy.flush_logger(sys.stdout)
    
  # start the nanny up and read the resource file.  
  nanny.start_resource_nanny(resourcefn)

  # now, let's fire up the cpu / disk / memory monitor...
  nonportable.monitor_cpu_disk_and_mem()

  # Armon: Update our IP cache
  emulcomm.update_ip_cache()


  # These will be the functions and variables in the user's namespace (along
  # with the builtins allowed by the safe module).
  usercontext = {'mycontext':{}}
  
  # Add to the user's namespace wrapped versions of the API functions we make
  # available to the untrusted user code.
  namespace.wrap_and_insert_api_functions(usercontext)

  # Convert the usercontext from a dict to a SafeDict
  usercontext = safe.SafeDict(usercontext)

  # Allow some introspection by providing a reference to the context
  usercontext["_context"] = usercontext

  # BAD:REMOVE all API imports
  usercontext["getresources"] = nonportable.get_resources
  usercontext["mycontext"]["wallclocktime"] = time.time
  #usercontext["openfile"] = emulfile.emulated_open
  #usercontext["listfiles"] = emulfile.listfiles
  #usercontext["removefile"] = emulfile.removefile
  #usercontext["exitall"] = emulmisc.exitall
  #usercontext["createlock"] = emulmisc.createlock
  #usercontext["getruntime"] = emulmisc.getruntime
  #usercontext["randombytes"] = emulmisc.randombytes
  #usercontext["createthread"] = emultimer.createthread
  #usercontext["sleep"] = emultimer.sleep
  #usercontext["getthreadname"] = emulmisc.getthreadname
  usercontext["createvirtualnamespace"] = virtual_namespace.createvirtualnamespace
  usercontext["getlasterror"] = emulmisc.getlasterror
      
  # grab the user code from the file
  try:
    usercode = file(program).read()
  except:
    print "Failed to read the specified file: '"+program+"'"
    raise

  # Armon: Create the main namespace
  try:
    main_namespace = virtual_namespace.VirtualNamespace(usercode, program)
  except CodeUnsafeError, e:
    # Static analysis rejected the program; terminate rather than run it.
    print "Specified repy program is unsafe!"
    print "Static-code analysis failed with error: "+str(e)
    harshexit.harshexit(5)
Esempio n. 43
0
            start_worker_thread(configuration['pollfrequency'])

        if should_start_waitable_thread('advert', 'Advertisement Thread'):
            servicelogger.log("[WARN]:AdvertThread requires restart.")
            start_advert_thread(vesseldict, myname, configuration['publickey'])

        if should_start_waitable_thread('status', 'Status Monitoring Thread'):
            servicelogger.log(
                "[WARN]:StatusMonitoringThread requires restart.")
            start_status_thread(vesseldict, configuration['pollfrequency'])

        if not TEST_NM and not runonce.stillhaveprocesslock(
                "seattlenodemanager"):
            servicelogger.log(
                "[ERROR]:The node manager lost the process lock...")
            harshexit.harshexit(55)

        # Check for IP address changes.
        # When using Affix, the NamingAndResolverAffix takes over this.
        if not affix_enabled:
            current_ip = None
            while True:
                try:
                    current_ip = emulcomm.getmyip()
                except Exception, e:
                    # If we aren't connected to the internet, emulcomm.getmyip() raises this:
                    if len(e.args) >= 1 and e.args[
                            0] == "Cannot detect a connection to the Internet.":
                        # So we try again.
                        pass
                    else:
Esempio n. 44
0
        
    if not is_worker_thread_started():
      servicelogger.log("[WARN]:At " + str(time.time()) + " restarting worker...")
      start_worker_thread(configuration['pollfrequency'])

    if should_start_waitable_thread('advert','Advertisement Thread'):
      servicelogger.log("[WARN]:At " + str(time.time()) + " restarting advert...")
      start_advert_thread(vesseldict, myname, configuration['publickey'])

    if should_start_waitable_thread('status','Status Monitoring Thread'):
      servicelogger.log("[WARN]:At " + str(time.time()) + " restarting status...")
      start_status_thread(vesseldict,configuration['pollfrequency'])

    if not TEST_NM and not runonce.stillhaveprocesslock("seattlenodemanager"):
      servicelogger.log("[ERROR]:The node manager lost the process lock...")
      harshexit.harshexit(55)



    # Check for ip change.
    current_ip = None
    while True:
      try:
        current_ip = emulcomm.getmyip()
      except Exception, e:
        # If we aren't connected to the internet, emulcomm.getmyip() raises this:
        if len(e.args) >= 1 and e.args[0] == "Cannot detect a connection to the Internet.":
          # So we try again.
          pass
        else:
          # It wasn't emulcomm.getmyip()'s exception. re-raise.
Esempio n. 45
0
def init_namespace(resourcefn, program, args):

    global idlethreadcount, event_id

    # Armon: Initialize the circular logger before starting the nanny
    if logfile:
        # time to set up the circular logger
        loggerfo = loggingrepy.circular_logger(logfile)
        # and redirect err and out there...
        sys.stdout = loggerfo
        sys.stderr = loggerfo
    else:
        # let's make it so that the output (via print) is always flushed
        sys.stdout = loggingrepy.flush_logger(sys.stdout)

    # start the nanny up and read the resource file.
    nanny.start_resource_nanny(resourcefn)

    # now, let's fire up the cpu / disk / memory monitor...
    # nonportable.monitor_cpu_disk_and_mem()

    # Armon: Update our IP cache
    emulcomm.update_ip_cache()

    # These will be the functions and variables in the user's namespace (along
    # with the builtins allowed by the safe module).
    usercontext = {'mycontext': {}}

    # Add to the user's namespace wrapped versions of the API functions we make
    # available to the untrusted user code.
    namespace.wrap_and_insert_api_functions(usercontext)

    # Convert the usercontext from a dict to a SafeDict
    usercontext = safe.SafeDict(usercontext)

    # Allow some introspection by providing a reference to the context
    usercontext["_context"] = usercontext

    # BAD:REMOVE all API imports
    usercontext["getresources"] = nonportable.get_resources
    usercontext["mycontext"]["wallclocktime"] = time.time
    #usercontext["openfile"] = emulfile.emulated_open
    #usercontext["listfiles"] = emulfile.listfiles
    #usercontext["removefile"] = emulfile.removefile
    #usercontext["exitall"] = emulmisc.exitall
    #usercontext["createlock"] = emulmisc.createlock
    #usercontext["getruntime"] = emulmisc.getruntime
    #usercontext["randombytes"] = emulmisc.randombytes
    #usercontext["createthread"] = emultimer.createthread
    #usercontext["sleep"] = emultimer.sleep
    #usercontext["getthreadname"] = emulmisc.getthreadname
    usercontext[
        "createvirtualnamespace"] = virtual_namespace.createvirtualnamespace
    usercontext["getlasterror"] = emulmisc.getlasterror

    # grab the user code from the file
    try:
        usercode = file(program).read()
    except:
        print "Failed to read the specified file: '" + program + "'"
        raise

    # Armon: Create the main namespace
    try:
        main_namespace = virtual_namespace.VirtualNamespace(usercode, program)
    except CodeUnsafeError, e:
        print "Specified repy program is unsafe!"
        print "Static-code analysis failed with error: " + str(e)
        harshexit.harshexit(5)