Example No. 1
def test_reap_process_group(self):
        """
        Spin up a process that can't be killed by SIGTERM and make sure
        it gets killed anyway.
        """
        parent_setup_done = multiprocessing.Semaphore(0)
        parent_pid = multiprocessing.Value('i', 0)
        child_pid = multiprocessing.Value('i', 0)
        args = [parent_pid, child_pid, parent_setup_done]
        parent = multiprocessing.Process(target=TestHelpers._parent_of_ignores_sigterm,
                                         args=args)
        try:
            parent.start()
            self.assertTrue(parent_setup_done.acquire(timeout=5.0))
            self.assertTrue(psutil.pid_exists(parent_pid.value))
            self.assertTrue(psutil.pid_exists(child_pid.value))

            helpers.reap_process_group(parent_pid.value, logging.getLogger(),
                                       timeout=1)

            self.assertFalse(psutil.pid_exists(parent_pid.value))
            self.assertFalse(psutil.pid_exists(child_pid.value))
        finally:
            try:
                os.kill(parent_pid.value, signal.SIGKILL)  # terminate doesn't work here
                os.kill(child_pid.value, signal.SIGKILL)  # terminate doesn't work here
            except OSError:
                pass
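For context, the helper this test exercises escalates from SIGTERM to SIGKILL across a whole process group. A minimal sketch of that pattern, assuming POSIX (this is not the actual airflow helpers.reap_process_group, which is more thorough):

import os
import signal
import time

import psutil


def reap_process_group_sketch(leader_pid, log, timeout=5):
    # Signal the whole group so children that ignore SIGTERM are still covered.
    pgid = os.getpgid(leader_pid)
    try:
        os.killpg(pgid, signal.SIGTERM)
    except OSError:
        return
    deadline = time.time() + timeout
    while time.time() < deadline:
        if not psutil.pid_exists(leader_pid):
            return
        time.sleep(0.1)
    log.warning("group %s survived SIGTERM for %ss, sending SIGKILL", pgid, timeout)
    try:
        os.killpg(pgid, signal.SIGKILL)
    except OSError:
        pass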
Example No. 2
def test_ensure_orca_ping_and_proc():
    pio.orca.config.timeout = None

    assert pio.orca.status.port is None
    assert pio.orca.status.pid is None

    pio.orca.ensure_server()

    assert pio.orca.status.port is not None
    assert pio.orca.status.pid is not None
    server_port = pio.orca.status.port
    server_pid = pio.orca.status.pid

    # Make sure server has time to start up
    time.sleep(10)

    # Check that server process number is valid
    assert psutil.pid_exists(server_pid)

    # Build server URL
    server_url = 'http://localhost:%s' % server_port

    # ping server
    assert ping_pongs(server_url)

    # shut down server
    pio.orca.shutdown_server()

    # Check that server process number no longer exists
    assert not psutil.pid_exists(server_pid)

    # Check that ping is no longer answered
    assert not ping_pongs(server_url)
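The ping_pongs helper used in these orca tests is not shown in this excerpt. A plausible sketch, assuming the server exposes a /ping endpoint (the endpoint and expected reply are assumptions, not plotly's actual test helper):

import requests


def ping_pongs(server_url):
    # True when the server is up and answers its ping endpoint.
    try:
        response = requests.get(server_url + '/ping', timeout=2)
    except requests.exceptions.RequestException:
        return False
    return response.status_code == 200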
Example No. 3
def test_server_timeout_shutdown():

    # Configure server to shutdown after 10 seconds without
    # calls to ensure_orca_server
    pio.orca.config.timeout = 10
    pio.orca.ensure_server()
    server_port = pio.orca.status.port
    server_pid = pio.orca.status.pid

    # Build server URL
    server_url = 'http://localhost:%s' % server_port

    # Check that server process number is valid
    assert psutil.pid_exists(server_pid)

    for i in range(3):
        # Sleep for just under 10 seconds
        time.sleep(8)
        assert ping_pongs(server_url)
        assert psutil.pid_exists(server_pid)
        pio.orca.ensure_server()

    # Sleep just over 10 seconds, server should then auto shutdown
    time.sleep(11)

    # Check that server process number no longer exists
    assert not psutil.pid_exists(server_pid)

    # Check that ping is no longer answered
    assert not ping_pongs(server_url)
Example No. 4
    def test_uninstall_workflow(self):
        inputs = {
            'diamond_config': {
                'prefix': tempfile.mkdtemp(prefix='cloudify-'),
                'interval': 1,
                'handlers': {
                    'diamond.handler.archive.ArchiveHandler': {
                        'config': {
                            'log_file': tempfile.mktemp(),
                        }
                    }
                }
            },
            'collectors_config': {},

        }
        self.is_uninstallable = False
        self.env = self._create_env(inputs)
        self.env.execute('install', task_retries=0)
        pid_file = os.path.join(inputs['diamond_config']['prefix'],
                                'var', 'run', 'diamond.pid')
        with open(pid_file, 'r') as pf:
            pid = int(pf.read())

        if psutil.pid_exists(pid):
            self.env.execute('uninstall', task_retries=0)
            time.sleep(5)
        else:
            self.fail('diamond process not running')
        self.assertFalse(psutil.pid_exists(pid))
Example No. 5
    def test_kill_process(self):
        """
        Test that cancelling a running job also kills the job's external process.
        """
        with self.dataset_populator.test_history() as history_id:
            hda1 = self.dataset_populator.new_dataset(history_id, content="1 2 3")
            running_inputs = {
                "input1": {"src": "hda", "id": hda1["id"]},
                "sleep_time": 240,
            }
            running_response = self.dataset_populator.run_tool(
                "cat_data_and_sleep",
                running_inputs,
                history_id,
                assert_ok=False,
            ).json()
            job_dict = running_response["jobs"][0]

            app = self._app
            sa_session = app.model.context.current
            external_id = None
            state = False

            job = sa_session.query(app.model.Job).filter_by(tool_id="cat_data_and_sleep").one()
            # Not checking the state here allows the change from queued to running to overwrite
            # the change from queued to deleted_new in the API thread - this is a problem because
            # the job will still run. See issue https://github.com/galaxyproject/galaxy/issues/4960.
            while external_id is None or state != app.model.Job.states.RUNNING:
                sa_session.refresh(job)
                assert not job.finished
                external_id = job.job_runner_external_id
                state = job.state

            assert external_id
            external_id = int(external_id)

            pid_exists = psutil.pid_exists(external_id)
            assert pid_exists

            delete_response = self.dataset_populator.cancel_job(job_dict["id"])
            assert delete_response.json() is True

            state = None
            # Now make sure the job becomes complete.
            for i in range(100):
                sa_session.refresh(job)
                state = job.state
                if state == app.model.Job.states.DELETED:
                    break
                time.sleep(.1)

            # Now make sure the pid is actually killed.
            for i in range(100):
                if not pid_exists:
                    break
                pid_exists = psutil.pid_exists(external_id)
                time.sleep(.1)

            final_state = "pid exists? %s, final db job state %s" % (pid_exists, state)
            assert state == app.model.Job.states.DELETED, final_state
            assert not pid_exists, final_state
Example No. 6
def test_can_cleanup_installed_listener():
    try:
        import psutil
    except ImportError:
        warnings.warn('No psutil module present for this test')
        return
    wrapper = PlatformWrapper()

    address="tcp://127.0.0.1:{}".format(get_rand_port())
    wrapper.startup_platform(address)

    assert wrapper is not None
    assert wrapper.is_running()

    auuid = wrapper.install_agent(agent_dir="examples/ListenerAgent",
                                  start=False)
    assert auuid is not None
    started = wrapper.start_agent(auuid)
    assert isinstance(started, int)
    assert psutil.pid_exists(started)

    wrapper.shutdown_platform()
    # give operating system enough time to update pids.
    gevent.sleep(0.1)
    assert not psutil.pid_exists(started)
Example No. 7
    def set_from_processbot(self, processbot_pid, spawned_process_pids):
        """create the resource except for the information realted to the processbot spawned processes"""
        #if not processbot_pid in psutil.pids():
        if not psutil.pid_exists(processbot_pid):
            raise Exception("cannot find process bot pid + " + str(processbot_pid))
        proc = psutil.Process( processbot_pid )
        try:
            self._processbot_cmd_line = ' '.join(proc.cmdline())
        except:
            self._processbot_cmd_line = ' '.join(proc.cmdline)
        self._processbot_pid = processbot_pid

        """add the processbot spawned processes pids and cmdline to the resource"""
        self._spawned_process_pids=spawned_process_pids
        self._spawned_process_cmd = []
        removed=[]
        for pid in self._spawned_process_pids:
            #if pid is dead remove it from the list
            #if not pid in psutil.pids():
            if not psutil.pid_exists(pid):
                removed.append( pid )
            else:
                try:
                    #here pid *should* exists, however get cmd line in try, except block
                    proc = psutil.Process( pid )
                    try:
                        self._spawned_process_cmd.append( ' '.join(proc.cmdline()) )
                    except:
                        self._spawned_process_cmd.append( ' '.join(proc.cmdline) )
                except:
                    removed.append( pid )
        self._spawned_process_pids = list(set(self._spawned_process_pids) - set(removed))
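The nested try/except pairs above work around a psutil API change: old versions exposed Process.cmdline as a property, newer ones as a method. A small compatibility shim, as a sketch, could replace both fallbacks:

def _cmdline(proc):
    # psutil < 2.0 returned a list from the cmdline attribute;
    # newer versions require calling it as a method.
    cmdline = proc.cmdline
    return cmdline() if callable(cmdline) else cmdline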
Example No. 8
    def stop(self):
        """
            Stopping Nginx and uWSGI services
        """
        try:
            #Down Nginx
            nginxChild = psutil.Process(self.nginxPid)
            nginxChild.terminate()
        except psutil.NoSuchProcess:
            pass
        except psutil.AccessDenied:
            print "couldn't kill child process with pid %s" % self.nginxPid
        else:
            nginxChild.wait(timeout=3)
        try:
            #Down uWSGI
            uWSGIChild = psutil.Process(self.uWSGIPid)
            uWSGIChild.kill()
        except psutil.NoSuchProcess:
            pass
        except psutil.AccessDenied:
            print "couldn't kill child process with pid %s" % self.uWSGIPid
        else:
            uWSGIChild.wait(timeout=3)

        # Check that both the nginx pid and the uWSGI pid are gone
        if not psutil.pid_exists(self.nginxPid) and not psutil.pid_exists(self.uWSGIPid):
            # Nginx and uWSGI services are down
            return True
        else:
            # At least one of the services is still alive
            return False
Example No. 9
    def on_close_Click(self):
        # Trouble spot: unable to determine whether processes such as a 7zip extraction,
        # qemu, or an editor opened for syslinux.cfg are still running.
        # Help required to identify running/opened processes.
        global quit_ready

        if var.qemu_usb != "":
            if psutil.pid_exists(var.qemu_usb):
                print("QEMU process exists...")
                QtGui.QMessageBox.information(self, 'Process exists...',
                                              'QEMU is running.\nPlease close QEMU before terminating multibootusb.')
            else:
                var.qemu_usb = ""

        elif var.qemu_iso != "":
            if psutil.pid_exists(var.qemu_iso):
                print("QEMU process exists...")
                QtGui.QMessageBox.information(self, 'Process exists...',
                                              'QEMU is running.\nPlease close QEMU before terminating multibootusb.')
            else:
                var.qemu_iso = ""

        elif var.editor != "":
            if psutil.pid_exists(var.editor):
                print("syslinux.cfg is opened for edit...")
                QtGui.QMessageBox.information(self, 'Process exists...',
                                              'syslinux.cfg is open for edit.\nPlease save and close file before terminating multibootusb.')
            else:
                var.editor = ""

        if var.qemu_usb == "" or var.qemu_iso == "" or var.editor == "":
            quit_ready = "yes"
            print "Closing multibootusb..."
            QtGui.qApp.closeAllWindows()
Example No. 10
    def test_uninstall_workflow(self):
        inputs = {
            "diamond_config": {
                "prefix": tempfile.mkdtemp(prefix="cloudify-"),
                "interval": 1,
                "handlers": {"diamond.handler.archive.ArchiveHandler": {"config": {"log_file": tempfile.mktemp()}}},
            },
            "collectors_config": {},
        }
        prefix = inputs["diamond_config"]["prefix"]
        self.is_uninstallable = False
        self.env = self._create_env(inputs)
        self.env.execute("install", task_retries=0)
        pid_file = os.path.join(prefix, "var", "run", "diamond.pid")
        with open(pid_file, "r") as pf:
            pid = int(pf.read())

        # Check if all directories and paths have been created during install
        paths_to_uninstall = self._mock_get_paths(prefix)
        for path in paths_to_uninstall:
            self.assertTrue(os.path.exists(path), msg="Path doesn't exist: {0}".format(path))

        if psutil.pid_exists(pid):
            self.env.execute("uninstall", task_retries=0)
            time.sleep(5)
        else:
            self.fail("diamond process not running")
        self.assertFalse(psutil.pid_exists(pid))

        # Check if uninstall cleans up after diamond
        for path in paths_to_uninstall:
            self.assertFalse(os.path.exists(path), msg="Path exists: {0}".format(path))
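The _mock_get_paths helper is not shown in this excerpt. A hypothetical version (the path list is illustrative, not the diamond plugin's actual layout) would simply enumerate the locations the install step is expected to create under the prefix:

def _mock_get_paths(self, prefix):
    # Illustrative only: paths a diamond install might create under `prefix`.
    return [
        os.path.join(prefix, 'etc', 'diamond.conf'),
        os.path.join(prefix, 'var', 'run'),
        os.path.join(prefix, 'var', 'log'),
    ]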
Example No. 11
def test_pid_exists(self):
    sproc = get_test_subprocess()
    wait_for_pid(sproc.pid)
    self.assertTrue(psutil.pid_exists(sproc.pid))
    psutil.Process(sproc.pid).kill()
    sproc.wait()
    self.assertFalse(psutil.pid_exists(sproc.pid))
    self.assertFalse(psutil.pid_exists(-1))
Example No. 12
def terminate(self, pid):
    os.kill(pid, signal.SIGTERM)
    time.sleep(1)
    if not psutil.pid_exists(pid):
        return
    time.sleep(2)
    if psutil.pid_exists(pid):
        self.log.warning('pid %s failed to terminate in 3s, killing' % pid)
        os.kill(pid, signal.SIGKILL)
Example No. 13
def main(name, client_args={}):
    parser = argparse.ArgumentParser(description=name)
    parser.add_argument('-d', '--daemon', action="store_true",
                        help='run as daemon')
    parser.add_argument('-k', '--kill', action="store_true",
                        help='kill running instance if any before start')
    parser.add_argument('-f', '--force', action="store_true",
                        help='force start on bogus lockfile')
    parser.add_argument('-v', '--verbose', action="store_true",
                        help='')
    args = parser.parse_args()
    
    if args.verbose:
        loglevel = logging.DEBUG
    else:
        loglevel = logging.INFO

    if os.geteuid() == 0:
        lock = '/var/run/%s' % name
    else:
        lock = os.path.join(os.environ['HOME'],'.%s' % name)

    pid = -1
    if os.path.exists(lock+'.lock'):
        with open(lock+'.lock','r') as pidfile:
            pid = int(pidfile.readline().strip())
    if pid != -1 and not psutil.pid_exists(pid):
        os.remove(lock+'.lock')
        pid = -1

    if pid != -1:
        if args.kill:
            try:
                print("Sending SIGTERM to", pid)
                os.kill(pid, signal.SIGTERM)
                retries = 5
                while psutil.pid_exists(pid) and retries:
                    retries -= 1
                    time.sleep(1)
                if psutil.pid_exists(pid):
                    print("Sending SIGKILL to", pid)
                    os.kill(pid, signal.SIGKILL)
                    time.sleep(1)
                if psutil.pid_exists(pid):
                    print("Could not kill", pid)
                    print("Kill manually")
                    sys.exit(1)
            except OSError as e:
                if args.force:
                    print("Error stopping running instance, forcing start")
                    os.remove(lock + ".lock")
                else:
                    print(e)
                    sys.exit(1)
        else:
            if os.path.exists(lock + '.lock'):
                print("Process already running (lockfile exists), exiting")
                sys.exit(1)
Example No. 14
def test_pid_exists(self):
    sproc = get_test_subprocess()
    self.assertTrue(psutil.pid_exists(sproc.pid))
    p = psutil.Process(sproc.pid)
    p.kill()
    p.wait()
    self.assertFalse(psutil.pid_exists(sproc.pid))
    self.assertFalse(psutil.pid_exists(-1))
    self.assertEqual(psutil.pid_exists(0), 0 in psutil.pids())
Example No. 15
    def stopRun(self):
            
        filename = '%s/system/controlDict' % self.currentFolder
        parsedData = ParsedParameterFile(filename, createZipped=False)
        parsedData['stopAt'] = 'writeNow'
        parsedData.writeFile()
        time.sleep(0.1)
        
        self.findChild(QtGui.QPushButton,'pushButton_3').setEnabled(False)

#        while 1:
#            command = 'ps | cut -d " " -f 7 | grep Foam > %s/runningNow'%self.currentFolder
#            os.system(command)
#            f = open('%s/runningNow'%self.currentFolder, 'r')
#            if not f.read():
#                break
#            f.close()
#            time.sleep(0.1)
        
        import psutil
        import utils
        self.progress = QtGui.QProgressBar()
        self.progress.setWindowTitle("Saving the current data... Hold tight")        
        resolution = utils.get_screen_resolutions()
        self.progress.setGeometry(int(resolution[0])/2 - 175,int(resolution[1])/2,350,30)
        self.progress.show()

        i = 0
        while psutil.pid_exists(self.window().runningpid):
            # delay the controlDict edit by one minute
            tt = list(localtime())
            tt[4] = (tt[4] + 1) % 60  # modulo needed because when the minute is 0, 0 - 1 = -1
            command = 'touch -d "%s" %s' % (strftime("%Y-%m-%d %H:%M:%S", struct_time(tuple(tt))), filename)
            os.system(command)

            self.progress.setValue(i)
            QtGui.QApplication.processEvents()
            i = i + 0.1
            time.sleep(0.1)

        self.progress.setValue(100)
        self.progress.close()
        if psutil.pid_exists(self.window().runningpid):
            command = 'kill %s' % self.window().runningpid
            os.system(command)

        self.window().runningpid = -1
        self.window().save_config()
        self.window().runW.pushButton_run.setEnabled(True)
        self.window().runW.pushButton_reset.setEnabled(True)
        self.window().tab_mesh.setEnabled(True)
        self.window().refresh_pushButton.setEnabled(True)
        leave = [1,5]
        for i in range(self.window().treeWidget.topLevelItemCount()):
            if i not in leave:
                self.window().treeWidget.topLevelItem(i).setDisabled(False)
Example No. 16
    def terminate(self, widget, data=None):
        port = self.textbox.get_text()
        pid = int(port)
        if psutil.pid_exists(pid):
            p = psutil.Process(pid)
            p.kill()
            print('Pid: %d terminated' % pid)
        else:
            em = gtk.MessageDialog(None, gtk.DIALOG_DESTROY_WITH_PARENT, gtk.MESSAGE_ERROR, gtk.BUTTONS_CLOSE, "This PID does not exist")
            em.run()
            em.destroy()
Example No. 17
    def check_one_vm_for_dead_builder(self, vmd):
        # TODO: builder should renew lease periodically
        # and we should use that time instead of in_use_since and pid checks
        in_use_since = vmd.get_field(self.vmm.rc, "in_use_since")
        pid = vmd.get_field(self.vmm.rc, "used_by_pid")

        if not in_use_since or not pid:
            return
        in_use_time_elapsed = time.time() - float(in_use_since)

        # give a minute for worker to set correct title
        if in_use_time_elapsed < 60 and str(pid) == "None":
            return

        pid = int(pid)
        # try:
        #     # here we can catch race condition: worker acquired VM but haven't set process title yet
        #     if psutil.pid_exists(pid) and vmd.vm_name in psutil.Process(pid).cmdline[0]:
        #         return
        #
        #     self.log.info("Process `{}` not exists anymore, doing second try. VM data: {}"
        #                   .format(pid, vmd))
        #     # dirty hack: sleep and check again
        #     time.sleep(5)
        #     if psutil.pid_exists(pid) and vmd.vm_name in psutil.Process(pid).cmdline[0]:
        #         return
        # except Exception:
        #     self.log.exception("Failed do determine if process `{}` still alive for VM: {}, assuming alive"
        #                        .format(pid, vmd))
        #     return

        # psutil changed Process().cmdline from property to function between f20 and f22
        # disabling more precise check for now
        try:
            # here we can catch race condition: worker acquired VM but haven't set process title yet
            if psutil.pid_exists(pid):
                return

            self.log.info("Process `{}` not exists anymore, doing second try. VM data: {}"
                          .format(pid, vmd))
            # dirty hack: sleep and check again
            time.sleep(5)
            if psutil.pid_exists(pid):
                return

        except Exception:
            self.log.exception("Failed do determine if process `{}` still alive for VM: {}, assuming alive"
                               .format(pid, vmd))
            return

        self.log.info("Process `{}` not exists anymore, terminating VM: {} ".format(pid, vmd.vm_name))
        self.vmm.start_vm_termination(vmd.vm_name, allowed_pre_state=VmStates.IN_USE)
Example No. 18
    def _kill_pid_in_file_if_exists(self, pid_file_path):
        """
        Kill the process referred to by the pid in the pid_file_path if it exists and the process with pid is running.

        :param pid_file_path: the path to the pid file (that should only contain the pid if it exists at all)
        :type pid_file_path: str
        """
        if not os.path.exists(pid_file_path):
            self._logger.info("Pid file {0} does not exist.".format(pid_file_path))
            return

        with open(pid_file_path, 'r') as f:
            pid = f.readline()

        if not psutil.pid_exists(int(pid)):
            self._logger.info("Pid file {0} exists, but pid {1} doesn't exist.".format(pid_file_path, pid))
            os.remove(pid_file_path)
            return

        # Because PIDs are re-used, we want to verify that the PID corresponds to the correct command.
        proc = psutil.Process(int(pid))
        proc_command = ' '.join(proc.cmdline())
        matched_proc_command = False

        for command_keyword in self._command_whitelist_keywords:
            if command_keyword in proc_command:
                matched_proc_command = True
                break

        if not matched_proc_command:
            self._logger.info(
                "PID {0} is running, but command '{1}' is not a clusterrunner command".format(pid, proc_command))
            return

        # Try killing gracefully with SIGTERM first. Then give process some time to gracefully shutdown. If it
        # doesn't, perform a SIGKILL.
        # @TODO: use util.timeout functionality once it gets merged
        os.kill(int(pid), signal.SIGTERM)
        sigterm_start = time.time()

        while (time.time() - sigterm_start) <= self._sigterm_sigkill_grace_period_sec:
            if not psutil.pid_exists(int(pid)):
                break
            time.sleep(0.1)

        if psutil.pid_exists(int(pid)):
            self._logger.info("SIGTERM signal to PID {0} failed. Killing with SIGKILL".format(pid))
            os.kill(int(pid), signal.SIGKILL)
            return

        self._logger.info("Killed process with PID {0} with SIGTERM".format(pid))
Example No. 19
    def __init__(self, status):
        """
        Initialization of server nginx and uWSGI
        """
        if self.getNginxPid() != "" and self.getuWSGIPid() != "":
            self.nginxPid = int(self.getNginxPid())
            self.uWSGIPid = int(self.getuWSGIPid())

        if status == 'start':
            if psutil.pid_exists(self.nginxPid) and psutil.pid_exists(self.uWSGIPid):
                return self.statusServer(True)
            else:
                if self.run():
                    return self.statusServer(True)
        elif status == 'stop':
            if psutil.pid_exists(self.nginxPid) or psutil.pid_exists(self.uWSGIPid):
                if self.stop():
                    return self.statusServer(False)
        elif status == 'restart':
            # restart applies whether or not the services are currently running
            if self.restart():
                self.statusServer(None)
        else:
            self.help()
            sys.exit(-1)
        sys.exit(1)
Example No. 20
def check_for_process_end(self, process, module_path):
    self.socket.send(str(process.pid))
    p = psutil.pid_exists(process.pid)
    seconds_elapsed = 0
    run_time = dt.datetime.now()
    while p:
        p = psutil.pid_exists(process.pid)
        seconds_elapsed = dt.datetime.now() - run_time
        if seconds_elapsed.total_seconds() > 30:
            self.logger.info('Total close time for process: ' + str(process.pid) + ' elapsed. Terminating...')
            process.terminate()
            process.join()
            #self.remove_module_from_pid_dict(module_path)
            break
Example No. 21
def kill_pid(cls, pid):
    pid = int(pid)
    if not psutil.pid_exists(pid):
        return False
    process = psutil.Process(pid)
    process.terminate()
    try:
        process.wait(timeout=5)
    except psutil.TimeoutExpired:
        process.kill()
    return not psutil.pid_exists(pid)
Example No. 22
    def test_stopping_job(self):
        self.job_wrapper.command_line = '''python -c "import time; time.sleep(15)"'''
        runner = local.LocalJobRunner(self.app, 1)

        def queue():
            runner.queue_job(self.job_wrapper)

        t = threading.Thread(target=queue)
        t.start()
        external_id = self.job_wrapper.wait_for_external_id()
        assert psutil.pid_exists(external_id)
        runner.stop_job(self.job_wrapper)
        t.join(1)
        assert not psutil.pid_exists(external_id)
Example No. 23
def _wait_and_kill(pid_to_wait, pids_to_kill):
  """ Wait for a process to finish if it exists, and then kill a list of processes.

  Args:
    pid_to_wait: the process to wait for.
    pids_to_kill: a list of processes to kill after the process of pid_to_wait finishes.
  """
  if psutil.pid_exists(pid_to_wait):
    psutil.Process(pid=pid_to_wait).wait()

  for pid_to_kill in pids_to_kill:
    if psutil.pid_exists(pid_to_kill):
      p = psutil.Process(pid=pid_to_kill)
      p.kill()
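A hypothetical invocation of the function above, assuming a POSIX system with the sleep command; both pids belong to processes started here:

import subprocess

main = subprocess.Popen(['sleep', '2'])        # the process to wait for
helper = subprocess.Popen(['sleep', '600'])    # a helper to kill afterwards

# Blocks until `main` exits, then kills `helper`.
_wait_and_kill(main.pid, [helper.pid])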
Example No. 24
def test_pid_exists_2(self):
    reap_children()
    pids = psutil.pids()
    for pid in pids:
        try:
            assert psutil.pid_exists(pid)
        except AssertionError:
            # in case the process disappeared in meantime fail only
            # if it is no longer in psutil.pids()
            time.sleep(.1)
            if pid in psutil.pids():
                self.fail(pid)
    pids = range(max(pids) + 5000, max(pids) + 6000)
    for pid in pids:
        self.assertFalse(psutil.pid_exists(pid), msg=pid)
Example No. 25
def set_cpu_affinity(n, process_ids, actual=not Conf.TESTING):
    """
    Sets the cpu affinity for the supplied processes.
    Requires the optional psutil module.
    :param int n:
    :param list process_ids: a list of pids
    :param bool actual: Test workaround for Travis not supporting cpu affinity
    """
    # check if we have the psutil module
    if not psutil:
        return
    # get the available processors
    cpu_list = list(range(psutil.cpu_count()))
    # an affinity of 0, or one >= cpu_count, means no affinity
    if not n or n >= len(cpu_list):
        return
    # spread the workers over the available processors.
    index = 0
    for pid in process_ids:
        affinity = []
        for k in range(n):
            if index == len(cpu_list):
                index = 0
            affinity.append(cpu_list[index])
            index += 1
        if psutil.pid_exists(pid):
            p = psutil.Process(pid)
            if actual:
                p.cpu_affinity(affinity)
            logger.info('{} will use cpu {}'.format(pid, affinity))
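A hypothetical usage of set_cpu_affinity, assuming a POSIX system with the sleep command and (for the pinning shown in the comment) a 4-core machine:

import subprocess

workers = [subprocess.Popen(['sleep', '5']) for _ in range(4)]
set_cpu_affinity(2, [w.pid for w in workers])
# With 4 cores, the workers get pinned to cores (0, 1), (2, 3), (0, 1), (2, 3).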
Example No. 26
    def _wait(node_uuid, popen_obj):
        locals['returncode'] = popen_obj.poll()

        # check if the console pid is created and the process is running.
        # if it is, then the shellinaboxd is invoked successfully as a daemon.
        # otherwise check the error.
        if locals['returncode'] is not None:
            if (locals['returncode'] == 0 and os.path.exists(pid_file) and
                psutil.pid_exists(_get_console_pid(node_uuid))):
                raise loopingcall.LoopingCallDone()
            else:
                (stdout, stderr) = popen_obj.communicate()
                locals['errstr'] = _(
                    "Command: %(command)s.\n"
                    "Exit code: %(return_code)s.\n"
                    "Stdout: %(stdout)r\n"
                    "Stderr: %(stderr)r") % {
                        'command': ' '.join(args),
                        'return_code': locals['returncode'],
                        'stdout': stdout,
                        'stderr': stderr}
                LOG.warning(locals['errstr'])
                raise loopingcall.LoopingCallDone()

        if (time.time() > expiration):
            locals['errstr'] = _("Timeout while waiting for console subprocess"
                                 "to start for node %s.") % node_uuid
            LOG.warning(locals['errstr'])
            raise loopingcall.LoopingCallDone()
Example No. 27
    async def kill_loop(
        self,
    ):
        while True:
            if not psutil.pid_exists(
                pid=self.pid_to_kill,
            ):
                sys.exit(1)

            if self.running:
                if self.soft_timeout != 0 and self.time_elapsed >= self.soft_timeout:
                    self.kill_process(
                        pid=self.pid_to_kill,
                        signal=self.soft_timeout_signal,
                    )

                if self.hard_timeout != 0 and self.time_elapsed >= self.hard_timeout:
                    self.kill_process(
                        pid=self.pid_to_kill,
                        signal=self.hard_timeout_signal,
                    )

                if self.critical_timeout != 0 and self.time_elapsed >= self.critical_timeout:
                    self.kill_process(
                        pid=self.pid_to_kill,
                        signal=self.critical_timeout_signal,
                    )

                self.time_elapsed += self.sleep_interval

            await asyncio.sleep(
                delay=self.sleep_interval,
                loop=self.async_loop,
            )
Example No. 28
    def patch(self, request):
        try:
            data = json.loads(request.body)
        except Exception as e:
            return JsonResponse(BAD_JSON_RESPONSE, status=400)

        if 'pid' not in data:
            return JsonResponse(NO_PID_RESPONSE, status=400)

        if 'priority' not in data:
            return JsonResponse(NO_PRIORITY_RESPONSE, status=400)

        try:
            pid = int(data['pid'])
            priority = int(data['priority'])
        except Exception as e:
            return JsonResponse(BAD_PS_PATCH_RESPONSE, status=400)

        if priority > 20 or priority < -20:
            return JsonResponse(BAD_PATCH_PRIORITY_RESPONSE, status=400)

        if not psutil.pid_exists(pid):
            msg = 'No process with pid: ' + str(pid)
            return JsonResponse({'result': 'error', 'message': msg}, status=400)
        process = psutil.Process(pid)

        try:
            process.nice(priority)
        except psutil.AccessDenied as e:
            return JsonResponse(NOT_ENOUGH_PERMISSION_RESPONSE, status=400)

        return JsonResponse({'result': 'ok'})
Example No. 29
    def _read_pidfile(self):
        """Read the PID file and check to make sure it's not stale."""
        if self.pidfile is None:
            return None

        if not os.path.isfile(self.pidfile):
            return None

        # Read the PID file
        with open(self.pidfile, 'r') as fp:
            try:
                pid = int(fp.read())
            except ValueError:
                self._emit_warning(
                    'Empty or broken pidfile {pidfile}; removing'.format(pidfile=self.pidfile))
                pid = None

        if pid is not None and psutil.pid_exists(pid):
            return pid
        else:
            # Remove the stale PID file
            try:
                os.remove(self.pidfile)
            except OSError:
                pass
            return None
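For completeness, a minimal sketch of the write side that would pair with _read_pidfile (the real daemon presumably writes this file on startup; this version is an assumption):

import os


def _write_pidfile(pidfile):
    # Record the current pid so a later _read_pidfile can find it.
    with open(pidfile, 'w') as fp:
        fp.write(str(os.getpid()))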
Example No. 30
    def determine_status(self, proc, isalive=False, expectedfail=()):
        """
        Use to determine if the situation has failed.
        Parameters:
            proc -- the processhandler instance
            isalive -- Use True to indicate we pass if the process exists; however, by default
                       the test will pass if the process does not exist (isalive == False)
            expectedfail -- Defaults to [], used to indicate a list of fields that are expected to fail
        """
        returncode = proc.proc.returncode
        didtimeout = proc.didTimeout
        detected = psutil.pid_exists(proc.pid)
        output = ''
        # ProcessHandler has output when store_output is set to True in the constructor
        # (this is the default)
        if getattr(proc, 'output', None):
            output = proc.output

        if 'returncode' in expectedfail:
            self.assertTrue(returncode, "Detected an unexpected return code of: %s" % returncode)
        elif isalive:
            self.assertEqual(returncode, None, "Detected not None return code of: %s" % returncode)
        else:
            self.assertNotEqual(returncode, None, "Detected an unexpected return code of None")

        if 'didtimeout' in expectedfail:
            self.assertTrue(didtimeout, "Detected that process didn't time out")
        else:
            self.assertTrue(not didtimeout, "Detected that process timed out")

        if isalive:
            self.assertTrue(detected, "Detected process is not running, process output: %s" % output)
        else:
            self.assertTrue(not detected, "Detected process is still running, process output: %s" % output)
Example No. 31
    def _handleTask(self, task):

        ##
        # @fn _handleTask
        # @brief handles message incoming from MQTT
        # @param task: <List> task sent

        topic, message = task

        for proc in self.CBmap:
            name = proc["ID"]
            pid = proc["processPID"]
            if psutil.pid_exists(pid):
                self.debugLog.debug("{} still running".format(name))
            else:
                self.debugLog.debug("REMOVING {}".format(name))
                self.CBmap.remove(proc)

        # When counter triggers check Scope
        if topic == "counter/TimeTrigger":
            tmstp = message.decode("utf-8")
            tmstp = int(tmstp)

            self.debugLog.debug("Current timestamp {}".format(tmstp))

            flagNewScope, newCoins, delCoins, changedCoins = self._actScope()
            if flagNewScope:
                self._actStreams()
                self._actCoinBots(newCoins, delCoins, changedCoins)

            if self.tradesMap:
                activeTrades = [
                    trade for trade in self.tradesMap.values()
                    if trade["status"] in ["open", "posted", "new"]
                ]
                self.debugLog.debug("Trades open {} information {}".format(
                    len(activeTrades), [[trade["symbol"], trade["tradeId"]]
                                        for trade in activeTrades]))
                self._cleanPossibleTradesList(tmstp)
                self._checkPostedTrades()
                self._analyzeTrades()

        elif "tradeData" in topic:
            #TODO NEW TRADE POSSIBLE
            tradeInfo = json.loads(message)

            self.tradesLog.info(
                "Trade message with trade Id {} from {}".format(
                    tradeInfo["tradeId"], tradeInfo["symbol"]))
            self.debugLog.info(tradeInfo)

            if "exit" in topic:
                if tradeInfo["status"] in ["open", "posted"]:
                    #Trade is open in binance
                    return self._openTradeUpdate(tradeInfo, operation=1)

                elif tradeInfo["status"] in ["canceled", "new"]:
                    #trade was canceled due to not being taken or is still non taken
                    #but CB notify exit, means we have a Faked trade
                    return self._getTradeStats(tradeInfo, "faked")

            elif "cancel" in topic:
                #We should only enter here if trade was posted but hasnt been taken or trade is new
                if tradeInfo["status"] == "new":
                    tradeInfo["status"] = "cancel"
                    #TODO CHANGE INFO IN TRADES MAP TO CANCELED
                    return self._publish2Topic(
                        "botManager/{}/tradeData/cancel".format(
                            tradeInfo["symbol"]), tradeInfo)
                elif tradeInfo["status"] == "posted":
                    #!Cant cancel an open trade!!!
                    return self._openTradeUpdate(tradeInfo, operation=2)

                else:
                    #TODO ISSUE HERE
                    return

            else:
                symbol = topic.split('/')[1]
                tradeInfo['symbol'] = symbol
                return self.tradesMap.update({tradeInfo["tradeId"]: tradeInfo})

        elif topic == "binApi/tradeInfo/executed":
            tradeInfo = json.loads(message)
            self.debugLog.debug("Trade {} has been executed".format(
                tradeInfo["tradeId"]))
            self.tradesMap[tradeInfo["tradeId"]].update(tradeInfo)
            self._publish2Topic(
                "botManager/{}/tradeData/executed".format(tradeInfo["symbol"]),
                tradeInfo)

        elif "killACK" in topic:
            cb = topic.split("/")[1]
            self.debugLog.debug("Kill acknowledge received in {}".format(cb))
Example No. 32
File: cli.py Project: pesos/heiko
def cli():
    """Entrypoint to the command-line interface (CLI) of heiko.

    It parses arguments from sys.argv and performs the appropriate actions.
    """
    args = parser.parse_args()

    # Get heiko directory - to save .out and .pid files
    heiko_home = Path.home() / ".heiko"
    heiko_processes = heiko_home / "processes"
    os.makedirs(heiko_processes, exist_ok=True)

    if args.version:
        print("heiko version:", heiko.__version__)

    # list running daemons
    elif args.command == "list":
        pid_files = glob.glob(str(heiko_home / "*.pid"))
        # store PIDs
        pids = []
        for pid_file in pid_files:
            with open(pid_file, "r") as f:
                pids.append(f.read())
        # get names
        names = [
            pid_file.split("/")[-1].split("_", 1)[-1].split(".")[0]
            for pid_file in pid_files
        ]
        print("Currently running daemons:")
        print("name\tPID")
        for name, pid in zip(names, pids):
            if psutil.pid_exists(int(pid)):
                print(f"{name}\t{pid}")

    elif args.command == "init":
        if "name" not in args:
            parser.print_usage()
            sys.exit(1)

        c = Config(args.name)

        for node in c.nodes:
            # Initialization and Benchmarking
            try:
                utils = NodeDetails(node=node)
                asyncio.get_event_loop().run_until_complete(utils.getDetails())
                print("Printing node details")
                print("CPU:\n", utils.cpu)
                print("\nRAM:\n", utils.mem)
                print("\nCPU Usage:\n", utils.load)
                print("Syncing files .........")
                sync_folder(args.name, node)

                if c.first_job.init:
                    asyncio.get_event_loop().run_until_complete(
                        run_client(node, c.first_job.init))
            except Exception as e:
                logging.error("%s", e)

    elif args.command == "logs":
        path_to_log = heiko_home / f"heiko_{args.name}.out"
        if not file_exists(path_to_log):
            raise Exception(
                "name for the heiko daemon provided does not exist")

        # read logs
        mode = "rt"
        if args.clear:
            # clear file before reading (opening in w mode clears the file)
            mode = "wt+"
        if args.follow:
            follow(path_to_log)
        else:
            # read whole log at once
            with open(path_to_log, mode) as f:
                print(f.read())
    else:
        if "name" not in args:
            parser.print_usage()
            sys.exit(1)

        # Manage daemon
        daemon = HeikoDaemon(
            args.name,
            heiko_home / f"heiko_{args.name}.pid",
            stdout=heiko_home / f"heiko_{args.name}.out",
            stderr=heiko_home / f"heiko_{args.name}.out",
        )

        if args.command == "start":
            daemon.start()
        elif args.command == "stop":
            daemon.stop()
        elif args.command == "restart":
            daemon.restart()
Example No. 33
    def op(self, request):
        cdata = self.cleaned_data
        id = cdata.get('id')
        action = cdata.get('action')

        obj = SqlOrdersExecTasks.objects.get(id=id)
        context = {}
        if obj.exec_status in ('0', '1', '4'):
            context = {'status': 2, 'msg': 'Please do not repeat this task operation'}
        else:
            # Check whether the task is executed with gh-ost
            if SysConfig.objects.get(key='is_ghost').is_enabled == '0':
                # Get gh-ost's sock file
                # Split the statement into its comment and its SQL
                sql_split = {}
                for stmt in sqlparse.split(obj.sql):
                    sql = sqlparse.parse(stmt)[0]
                    sql_comment = sql.token_first()
                    if isinstance(sql_comment, sqlparse.sql.Comment):
                        sql_split = {
                            'comment': sql_comment.value,
                            'sql': sql.value.replace(sql_comment.value, '')
                        }
                    else:
                        sql_split = {'comment': '', 'sql': sql.value}

                # Get the SQL statement without comments
                sql = sql_split['sql']
                formatsql = re.compile(
                    '^ALTER(\s+)TABLE(\s+)([\S]*)(\s+)(ADD|CHANGE|RENAME|MODIFY|DROP)([\s\S]*)',
                    re.I)
                match = formatsql.match(sql)
                # gh-ost does not support backticks (they would be parsed as commands), so strip them here
                table = match.group(3).replace('`', '')
                # Handle schema.table: gh-ost does not recognize this form, so keep only the table name
                if len(table.split('.')) > 1:
                    table = table.split('.')[1]
                sock = os.path.join('/tmp',
                                    f"gh-ost.{obj.database}.{table}.sock")
                # Check whether the gh-ost process is running
                if psutil.pid_exists(obj.ghost_pid):
                    if os.path.exists(sock):
                        if action == 'pause_ghost':
                            pause_cmd = f"echo throttle | nc -U {sock}"
                            p = subprocess.Popen(pause_cmd, shell=True)
                            p.wait()
                            context = {'status': 1, 'msg': 'Pause action executed, please check the output'}

                        if action == 'recovery_ghost':
                            recovery_cmd = f"echo no-throttle | nc -U {sock}"
                            p = subprocess.Popen(recovery_cmd, shell=True)
                            p.wait()
                            context = {'status': 1, 'msg': 'Resume action executed, please check the output'}

                        if action == 'stop_ghost':
                            stop_cmd = f"echo panic | nc -U {sock}"
                            p = subprocess.Popen(stop_cmd, shell=True)
                            p.wait()
                            context = {'status': 1, 'msg': 'Terminate action executed, please check the output'}
                    else:
                        context = {'status': 2, 'msg': f'Cannot find file {sock}, operation failed'}
                else:
                    os.remove(sock) if os.path.exists(sock) else None
                    context = {'status': 2, 'msg': 'Process does not exist, operation failed'}
        return context
Example No. 34
def _stop(self, p):
    self.__run = False
    if psutil.pid_exists(p):
        os.kill(p, 9)  # 9 == SIGKILL
Example No. 35
    with open(filename, 'w') as nmap_xml_output:
        nmap_xml_output.write(results)
    beautiful_soup_parsing.main(filename)


if __name__ == "__main__":

    from getopt import getopt

    # check if pid is set to something.
    # if so, check if process is running.

    try:
        pid = os.readlink(LOCKFILE)
        if psutil.pid_exists(int(pid)):
            print('portscanner already running.')
            os.sys.exit()
        else:
            os.unlink(LOCKFILE)
    except OSError:
        pass
    # make the lock file, pointing at our own pid
    os.symlink(str(os.getpid()), LOCKFILE)

    netmask = NETMASK

    ip = IP
    print(ip)

    filename = FILENAME
Example No. 36
    def _register_instance(cls, unique_worker_id=None, worker_name=None):
        if cls.worker_id:
            return cls.worker_id, cls.instance_slot
        # make sure we have a unique name
        instance_num = 0
        temp_folder = gettempdir()
        files = glob(
            os.path.join(temp_folder, cls.prefix + cls.sep + '*' + cls.ext))
        slots = {}
        for file in files:
            parts = file.split(cls.sep)
            try:
                pid = int(parts[1])
            except Exception:
                # something is wrong; use a non-existent pid so the file gets deleted
                pid = -1
            # count active instances and delete dead files
            if not psutil.pid_exists(pid):
                # delete the file
                try:
                    os.remove(os.path.join(file))
                except Exception:
                    pass
                continue

            instance_num += 1
            try:
                with open(file, 'r') as f:
                    uid, slot = str(f.read()).split('\n')
                    slot = int(slot)
            except Exception:
                continue

            if uid == unique_worker_id:
                return None, None

            slots[slot] = uid

        # get a new slot
        if not slots:
            cls.instance_slot = 0
        else:
            # guarantee we have the minimal slot possible
            for i in range(max(slots.keys()) + 2):
                if i not in slots:
                    cls.instance_slot = i
                    break

        # build worker id based on slot
        if not unique_worker_id:
            unique_worker_id = worker_name + cls.worker_name_sep + str(
                cls.instance_slot)

        # create lock
        cls._pid_file = NamedTemporaryFile(dir=gettempdir(),
                                           prefix=cls.prefix + cls.sep +
                                           str(os.getpid()) + cls.sep,
                                           suffix=cls.ext)
        cls._pid_file.write(('{}\n{}'.format(unique_worker_id,
                                             cls.instance_slot)).encode())
        cls._pid_file.flush()
        cls.worker_id = unique_worker_id

        return cls.worker_id, cls.instance_slot
Example No. 37
    def __monitor(cls, q, prc_application_pid):
        """Monitors CPU and Memory.

        Parameters
        ----------
        q: Object
            Object of Queue Class

        prc_application_pid: Object
            Object of Process class
        """
        try:
            status = 0
            prc_profiler_pid = psutil.Process()

            int_cpu_count = cls.server_profile('cpu')
            str_cpu_max = str(int_cpu_count) + "00"

            lst_cpu = []
            lst_mem = []
            q_limit = False

            while psutil.pid_exists(prc_application_pid.pid):
                flt_total_cpu_usage = 0.0
                flt_total_mem_usage = 0.0
                prc_children = prc_application_pid.children(recursive=True)

                flt_total_child_cpu_usage = 0

                prc_application_pid.cpu_percent()
                for prc_child in prc_children:
                    prc_child.cpu_percent()

                time.sleep(5)

                for prc_child in prc_children:
                    if psutil.pid_exists(prc_child.pid):
                        flt_total_child_cpu_usage += prc_child.cpu_percent()
                        flt_total_mem_usage += prc_child.memory_percent()
                flt_total_cpu_usage = prc_application_pid.cpu_percent() + flt_total_child_cpu_usage
                flt_total_mem_usage = prc_application_pid.memory_percent() + flt_total_mem_usage
                lst_cpu.append((flt_total_cpu_usage / float(str_cpu_max)) * 100)
                lst_mem.append(flt_total_mem_usage)
                if q_limit:
                    q.get()
                    q.get()

                q.put(status)
                q.put(lst_cpu)
                q.put(lst_mem)

                q_limit = True
        except Exception as error:
            status = 1
            q.put(status)
            q.put(error)
Example No. 38
def assertServerProcessIsAlive():
    with open(os.path.join(FALLOUT_HOME, "run", "fallout.pid"),
              "r") as pidfile:
        pid = int(pidfile.read())
        assert psutil.pid_exists(pid)
        return pid
Example No. 39
def test_reap_children(self):
    subp = get_test_subprocess()
    assert psutil.pid_exists(subp.pid)
    reap_children()
    assert not psutil.pid_exists(subp.pid)
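reap_children comes from psutil's own test utilities; a minimal sketch of the idea (the real helper is considerably more thorough):

import psutil


def reap_children(timeout=3):
    # Terminate whatever children this process has spawned, escalating to kill().
    procs = psutil.Process().children(recursive=True)
    for p in procs:
        p.terminate()
    gone, alive = psutil.wait_procs(procs, timeout=timeout)
    for p in alive:
        p.kill()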
Example No. 40
def terminate(self):
    if self.process and psutil.pid_exists(self.process.pid):
        reap_process_group(self.process.pid, self.log)
Example No. 41
def new_load(self, cr, *args, **kwargs):
    res = method(self, cr, *args, **kwargs)
    try:
        build_obj = self.get('scm.repository.branch.build')
        if build_obj:
            cr.execute("select relname from pg_class where relname='%s'" %
                       build_obj._table)
            if cr.rowcount:
                _logger.info(
                    "Cleaning testing/running builds before restarting")
                # Empty builds directory: sources being copied when killing the server are not deleted
                for dirname in os.listdir(build_obj._builds_path):
                    Thread(target=shutil.rmtree,
                           args=(os.path.join(build_obj._builds_path,
                                              dirname), )).start()
                # Search testing builds
                build_infos = build_obj.search_read(
                    cr, SUPERUSER_ID, [('state', '=', 'testing')],
                    ['ppid'])
                build_ids = [
                    b['id'] for b in build_infos
                    if not psutil.pid_exists(b['ppid'])
                ]
                branch_ids = [
                    b['branch_id']
                    for b in build_obj.read(cr,
                                            SUPERUSER_ID,
                                            build_ids, ['branch_id'],
                                            load='_classic_write')
                ]
                if build_ids:
                    # Kill invalid builds
                    build_obj._remove_container(cr, SUPERUSER_ID,
                                                build_ids)
                    build_obj.write(cr, SUPERUSER_ID, build_ids, {
                        'state': 'done',
                        'result': 'killed'
                    })
                # Search running builds not running anymore
                running_build_ids = build_obj.search(
                    cr, SUPERUSER_ID, [('state', '=', 'running')])
                actual_running_build_ids = []
                docker_host_obj = self['docker.host']
                for docker_host_id in docker_host_obj.search(
                        cr, SUPERUSER_ID, []):
                    docker_host = docker_host_obj.browse(
                        cr, SUPERUSER_ID, docker_host_id)
                    actual_running_build_ids += [
                        int(container['Names'][0].replace('/build_', ''))
                        for container in
                        docker_host.get_client().containers()
                        if container['Names']
                        and container['Names'][0].startswith('/build_')
                    ]
                build_ids = list(
                    set(running_build_ids) -
                    set(actual_running_build_ids))
                if build_ids:
                    # Kill invalid builds
                    build_obj._remove_container(cr, SUPERUSER_ID,
                                                build_ids)
                    build_obj.write(cr, SUPERUSER_ID, build_ids,
                                    {'state': 'done'})
                # Force build creation for branch in test before server stop
                if branch_ids:
                    branch_obj = self.get('scm.repository.branch')
                    thread = Thread(target=branch_obj.
                                    _force_create_build_with_new_cursor,
                                    args=(
                                        cr,
                                        SUPERUSER_ID,
                                        branch_ids,
                                    ))
                    thread.start()
    except Exception as e:
        _logger.error(get_exception_message(e))
Example No. 42
def check_subprocess_cleaned(pid):
    return psutil.pid_exists(pid) is False
Example No. 43
def is_running(self):
    return psutil.pid_exists(self.p.pid) and self.p.poll() is None
Example No. 44
def test_whether_worker_leaked_when_task_finished_with_errors(
        ray_start_regular):

    driver_template = """
import ray
import os
import numpy as np
import time

ray.init(address="{address}", namespace="test")

# The util actor to store the pid cross jobs.
@ray.remote
class PidStoreActor:
    def __init__(self):
        self._pid = None

    def put(self, pid):
        self._pid = pid
        return True

    def get(self):
        return self._pid

def _store_pid_helper():
    try:
        pid_store_actor = ray.get_actor("pid-store", "test")
    except Exception:
        pid_store_actor = PidStoreActor.options(
            name="pid-store", lifetime="detached").remote()
    assert ray.get(pid_store_actor.put.remote(os.getpid()))

@ray.remote
def normal_task(large1, large2):
    # Record the pid of this normal task.
    _store_pid_helper()
    time.sleep(60 * 60)
    return "normaltask"

large = ray.put(np.zeros(100 * 2**10, dtype=np.int8))
obj = normal_task.remote(large, large)
print(ray.get(obj))
"""
    driver_script = driver_template.format(
        address=ray_start_regular["address"])
    driver_proc = run_string_as_driver_nonblocking(driver_script)
    try:
        driver_proc.wait(10)
    except Exception:
        pass

    def get_normal_task_pid():
        try:
            pid_store_actor = ray.get_actor("pid-store", "test")
            return ray.get(pid_store_actor.get.remote())
        except Exception:
            return None

    wait_for_condition(lambda: get_normal_task_pid() is not None, 10)
    pid_store_actor = ray.get_actor("pid-store", "test")
    normal_task_pid = ray.get(pid_store_actor.get.remote())
    assert normal_task_pid is not None
    normal_task_proc = psutil.Process(normal_task_pid)
    print("killing normal task process, pid =", normal_task_pid)
    normal_task_proc.send_signal(signal.SIGTERM)

    def normal_task_was_reconstructed():
        curr_pid = get_normal_task_pid()
        return curr_pid is not None and curr_pid != normal_task_pid

    wait_for_condition(lambda: normal_task_was_reconstructed(), 10)
    driver_proc.send_signal(signal.SIGTERM)
    # Sleep here to make sure raylet has triggered cleaning up
    # the idle workers.
    wait_for_condition(lambda: not psutil.pid_exists(normal_task_pid), 10)