Example #1
# Imports assumed by this snippet (subprocess32 provides the same names on Python 2)
from os import environ, setsid, killpg, getpgid
from signal import SIGTERM
from subprocess import Popen, PIPE, TimeoutExpired


def pipe(cmd, stdin=None, timeout=None, env=None):
    '''Runs 'stdin' through 'cmd' and returns stdout, stderr and whether we
    timed out.'''

    # Extend the current environment, if requested
    extra_env = None
    if env:
        extra_env = environ.copy()
        for var in env:
            extra_env[var] = env[var]

    # setsid puts the subprocess in its own process group, rather than the group
    # containing this Python process
    proc = Popen(cmd,
                 stdin=PIPE if stdin else None,
                 stdout=PIPE,
                 stderr=PIPE,
                 preexec_fn=setsid,
                 env=extra_env)

    try:
        (stdout, stderr) = proc.communicate(input=stdin, timeout=timeout)
        result = {'stdout': stdout, 'stderr': stderr, 'killed': False}
    except TimeoutExpired:
        # Kill the process group, which will include all children
        killpg(getpgid(proc.pid), SIGTERM)
        result = {'stdout': None, 'stderr': None, 'killed': True}

    proc.wait()  # Reaps zombies

    return result
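
A minimal usage sketch for pipe() (the command, timeout, and env values below are illustrative; the imports shown above are assumed):

    result = pipe(['sleep', '5'], timeout=1, env={'LC_ALL': 'C'})
    if result['killed']:
        print('command exceeded the 1-second timeout')
    else:
        print(result['stdout'].decode())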
Example #2
class CppSim(ProcessWorkerThread):

    def handle_eval(self, record):
        val = np.nan
        # Continuously check for new outputs from the subprocess
        self.process = Popen(['./sumfun_ext', array2str(record.params[0])],
                             stdout=PIPE,
                             bufsize=1,
                             universal_newlines=True)

        for line in self.process.stdout:
            try:
                val = float(line.strip())  # Try to parse output
                if val > 350:  # Terminate if too large
                    self.process.terminate()
                    self.finish_success(record, 350)
                    return
            except ValueError:  # If the output is nonsense we terminate
                logging.warning("Incorrect output")
                self.process.terminate()
                self.finish_cancelled(record)
                return
        self.process.wait()

        rc = self.process.poll()  # Check the return code
        if rc < 0 or np.isnan(val):
            logging.warning("Incorrect output or crashed evaluation")
            self.finish_cancelled(record)
        else:
            self.finish_success(record, val)
Example #3
# Imports assumed by this snippet
from timeit import default_timer
from os import setsid, killpg, getpgid
from signal import SIGTERM
from subprocess import Popen, PIPE, TimeoutExpired


def run_timed(cmd, stdin=None, timeout=None):
    '''Run the given command 'cmd', with optional stdin, and return its stdout,
    stderr and the time it took to run (in seconds). Also report whether or not
    the command was killed by going over the (optional) timeout.'''
    start = default_timer()
    proc = Popen(cmd,
                 stdin=PIPE if stdin else None,
                 stdout=PIPE,
                 stderr=PIPE,
                 preexec_fn=setsid)

    try:
        (stdout, stderr) = proc.communicate(input=stdin, timeout=timeout)
        result = {'stdout': stdout, 'stderr': stderr, 'killed': False}
    except TimeoutExpired:
        # Kill the process group, which will include all children
        killpg(getpgid(proc.pid), SIGTERM)
        result = {'stdout': None, 'stderr': None, 'killed': True}

    proc.wait()  # Reaps zombies

    end = default_timer()
    result['time'] = end - start
    if timeout is not None:
        result['timeout'] = timeout
    return result
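
A minimal usage sketch for run_timed() (the command and timeout are illustrative; the imports shown above are assumed):

    result = run_timed(['ls', '-l'], timeout=10)
    print('finished in %.3f seconds, killed=%s' % (result['time'], result['killed']))
    if not result['killed']:
        print(result['stdout'].decode())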
Example #4
def actuate(interval=3600):  # amount of time between shots

    image_path = '/home/pi/Desktop/output.jpg'

    still = Popen('sudo raspistill -o ~/Desktop/output.jpg',
                  stdout=None,
                  shell=True)  #snap: call the camera
    still.wait()

    storage.child("images/lastpic.jpg").put(image_path)

    time.sleep(float(interval))
Example #5
    def do_interview(self,
                     hosts=None,
                     num_of_segments=1,
                     directory_pairs=None,
                     has_mirrors=False):
        """
        hosts: list of hosts to expand to
        num_of_segments: number of segments to expand
        directory_pairs: list of (primary, mirror) directory tuples; the first element is the primary directory and the second is the mirror.

        Note: this code is written with the assumption of a primary-only cluster.
        It is assumed that the user knows the kind of cluster being expanded.

        Returns: string output, int returncode
        """
        if directory_pairs is None:
            directory_pairs = [('/tmp/foo', '')]

        if num_of_segments != len(directory_pairs):
            raise Exception(
                "Amount of directory_pairs needs to be the same amount as the segments."
            )

        # If working_directory is None, then Popen will use the directory where
        # the python code is being run.
        p1 = Popen(["gpexpand"],
                   stdout=PIPE,
                   stdin=PIPE,
                   cwd=self.working_directory)

        # Very raw form of doing the interview part of gpexpand.
        # Maybe pexpect is the way to do this if we really need it to be more complex.
        # Cannot guarantee that this is not flaky either.

        # Would you like to initiate a new System Expansion Yy|Nn (default=N):
        p1.stdin.write("y\n")

        # **Enter a blank line to only add segments to existing hosts**[]:
        p1.stdin.write("%s\n" % (",".join(hosts) if hosts else ""))

        if has_mirrors:
            #What type of mirroring strategy would you like? spread|grouped (default=grouped):
            p1.stdin.write("\n")

        #How many new primary segments per host do you want to add? (default=0):
        p1.stdin.write("%s\n" % num_of_segments)

        # Enter new primary data directory #<number primary segment>
        for directory in directory_pairs:
            primary, mirror = directory
            p1.stdin.write("%s\n" % primary)
            if mirror:
                p1.stdin.write("%s\n" % mirror)

        output, err = p1.communicate()

        return output, p1.wait()
Example #6
    def testGetPIDStatus(self):
        """Test that ToilStatus.getPIDStatus() behaves as expected."""
        jobstoreName = 'pidStatusTest'
        jobstoreLoc = os.path.join(os.getcwd(), jobstoreName)

        cmd = [
            'python', '-m', 'toil.test.sort.sort', 'file:' + jobstoreName,
            '--clean', 'never'
        ]
        wf = Popen(cmd)
        time.sleep(2)  # Need to let jobstore be created before checking its contents.
        self.assertEqual(ToilStatus.getPIDStatus(jobstoreLoc), 'RUNNING')
        wf.wait()
        self.assertEqual(ToilStatus.getPIDStatus(jobstoreLoc), 'COMPLETED')
        os.remove(os.path.join(jobstoreLoc, 'pid.log'))
        self.assertEqual(ToilStatus.getPIDStatus(jobstoreLoc), 'QUEUED')
        shutil.rmtree(jobstoreLoc)
Example #7
def crawl_and_yield(crawlCommand):
    ps = Popen(crawlCommand,
               stdout=PIPE,
               stderr=PIPE,
               cwd=os.path.dirname(os.path.abspath(__file__)))
    for stdout_line in iter(ps.stdout.readline, ""):
        print(stdout_line)
    ps.stdout.close()
    return_code = ps.wait()
    if return_code:
        raise CalledProcessError(return_code, crawlCommand)
Example #8
    def testGetStatusSuccessfulToilWF(self):
        """
        Test that ToilStatus.getStatus() behaves as expected with a successful Toil workflow.

        While this workflow could be called by importing and invoking its main function, doing so would remove the
        opportunity to test the 'RUNNING' functionality of getStatus().
        """
        jobstoreName = 'successful-toil-js'
        jobstoreLoc = os.path.join(os.getcwd(), jobstoreName)
        cmd = [
            'python', '-m', 'toil.test.sort.sort', 'file:' + jobstoreName,
            '--clean', 'never'
        ]
        wf = Popen(cmd)
        time.sleep(2)  # Need to let jobstore be created before checking its contents.
        self.assertEqual(ToilStatus.getStatus(jobstoreLoc), 'RUNNING')
        wf.wait()
        self.assertEqual(ToilStatus.getStatus(jobstoreLoc), 'COMPLETED')
        shutil.rmtree(jobstoreLoc)
Example #9
File: pypez.py Project: bw2/pypez
    def execute_command_locally(self, command_obj, directory, job_log):
        # create a temporary log file
        temp_log_file = None
        if not command_obj.dont_log_output:
            temp_log_filename = get_absolute_path("tmp.pipelinelog." + command_obj.command_id + "." + str(random.randint(10*10, 10**11 - 1)), directory, allow_compressed_version=False)
            temp_log_file = open(temp_log_filename, "w+")
            #temp_log_file = tempfile.NamedTemporaryFile(bufsize=0) #

        spawned_process = Popen(command_obj.command, bufsize = 0, shell=True, cwd=directory, stdout=temp_log_file, stderr=temp_log_file, executable=self.shell)

        if temp_log_file:
            # while the job is running, continually read from the temp_log_file and copy this to the job_log
            self.copy_command_output_to_log(command_obj, spawned_process, temp_log_file, job_log)

            temp_log_file.close()
            os.remove(temp_log_filename)
            #os.remove(temp_file_path)
        else:
            spawned_process.wait()

        if spawned_process.returncode is not None and spawned_process.returncode != 0:
            raise Exception("Non-zero return code: " + str(spawned_process.returncode))
Example #10
    def wait(self, timeout=None):
        """Wait for the started process to complete.

        "timeout" is a floating point number of seconds after
            which to timeout.  Default is None, which is to never timeout.

        If the wait times out, it will raise a ProcessError. Otherwise it
        will return the child's exit value. Note that in the case of a timeout,
        the process is still running. Use kill() to forcibly stop the process.
        """
        if timeout is None or timeout < 0:
            # Use the parent call.
            try:
                return Popen.wait(self)
            except OSError as ex:
                # If the process has already ended, that is fine. This is
                # possible when wait is called from a different thread.
                if ex.errno != 10:  # No child process
                    raise
                return self.returncode

        # We poll for the retval, as we cannot rely on self.__hasTerminated
        # to be called, as there are some code paths that do not trigger it.
        # The accuracy of this wait call is between 0.1 and 1 second.
        time_now = time.time()
        time_end = time_now + timeout
        # These values will be used to incrementally increase the wait period
        # of the polling check, starting from the end of the list and working
        # towards the front. This is to avoid waiting for a long period on
        # processes that finish quickly, see bug 80794.
        time_wait_values = [1.0, 0.5, 0.2, 0.1]
        while time_now < time_end:
            result = self.poll()
            if result is not None:
                return result
            # We use hasTerminated here to get a faster notification.
            self.__hasTerminated.acquire()
            if time_wait_values:
                wait_period = time_wait_values.pop()
            self.__hasTerminated.wait(wait_period)
            self.__hasTerminated.release()
            time_now = time.time()
        # last chance
        result = self.poll()
        if result is not None:
            return result

        raise ProcessError(
            "Process timeout: waited %d seconds, "
            "process not yet finished." % (timeout, ), WAIT_TIMEOUT)
Example #11
    def testGetStatusFailedCWLWF(self):
        """Test that ToilStatus.getStatus() behaves as expected with a failing CWL workflow."""
        files = [
            'src/toil/test/cwl/sorttool.cwl', 'src/toil/test/cwl/whale.txt'
        ]
        jobstoreName = 'failing-cwl-js'
        jobstoreLoc = os.path.join(os.getcwd(), jobstoreName)
        cmd = [
            'toil-cwl-runner', '--jobStore', jobstoreLoc, '--clean', 'never',
            '--badWorker', '1', files[0], '--reverse', '--input', files[1]
        ]
        wf = Popen(cmd)
        wfRun = psutil.Process(pid=wf.pid)
        time.sleep(2)  # Needed to let the jobstore be created before checking its contents.
        # This workflow runs so quickly that we need to pause so we can get a 'RUNNING' response.
        wfRun.suspend()
        self.assertEqual(ToilStatus.getStatus(jobstoreLoc), 'RUNNING')
        wfRun.resume()
        wf.wait()
        self.assertEqual(ToilStatus.getStatus(jobstoreLoc), 'ERROR')
        shutil.rmtree(jobstoreLoc)
Example #12
    def wait(self, timeout=None):
        """Wait for the started process to complete.

        "timeout" is a floating point number of seconds after
            which to timeout.  Default is None, which is to never timeout.

        If the wait times out, it will raise a ProcessError. Otherwise it
        will return the child's exit value. Note that in the case of a timeout,
        the process is still running. Use kill() to forcibly stop the process.
        """
        if timeout is None or timeout < 0:
            # Use the parent call.
            try:
                return Popen.wait(self)
            except OSError as ex:
                # If the process has already ended, that is fine. This is
                # possible when wait is called from a different thread.
                if ex.errno != 10:  # No child process
                    raise
                return self.returncode

        # We poll for the retval, as we cannot rely on self.__hasTerminated
        # to be called, as there are some code paths that do not trigger it.
        # The accuracy of this wait call is between 0.1 and 1 second.
        time_now = time.time()
        time_end = time_now + timeout
        # These values will be used to incrementally increase the wait period
        # of the polling check, starting from the end of the list and working
        # towards the front. This is to avoid waiting for a long period on
        # processes that finish quickly, see bug 80794.
        time_wait_values = [1.0, 0.5, 0.2, 0.1]
        while time_now < time_end:
            result = self.poll()
            if result is not None:
                return result
            # We use hasTerminated here to get a faster notification.
            self.__hasTerminated.acquire()
            if time_wait_values:
                wait_period = time_wait_values.pop()
            self.__hasTerminated.wait(wait_period)
            self.__hasTerminated.release()
            time_now = time.time()
        # last chance
        result = self.poll()
        if result is not None:
            return result

        raise ProcessError("Process timeout: waited %d seconds, "
                           "process not yet finished." % (timeout,),
                           WAIT_TIMEOUT)
Example #13
def run2(command, check=True, timeout=None, *args, **kwargs):
    ''' Run a command.

        If check=True (the default),
        then if the return code is not zero or there is stderr output,
        raise CalledProcessError. Return any output in the exception.

        If timeout (in seconds) is set and the command times out, raise TimeoutExpired.

        Parts from subprocess32.check_output(). '''

    raise Exception('Deprecated. Use the sh module.')

    # use subprocess32 for timeout
    from subprocess32 import Popen, CalledProcessError, TimeoutExpired

    process = Popen(command, stdout=stdout, stderr=stderr, *args, **kwargs)
    try:
        process.wait(timeout=timeout)
    except TimeoutExpired:
        print('TimeoutExpired') #DEBUG
        #print('stdout: %s, (%d)' % (str(stdout), len(str(stdout)))) #DEBUG
        #print('stderr: %s, (%d)' % (str(stderr), len(str(stderr)))) #DEBUG
        try:
            process.kill()
            process.wait()
        finally:
            print('after kill/wait') #DEBUG
            #print('stdout: %s, (%d)' % (str(stdout), len(str(stdout)))) #DEBUG
            #print('stderr: %s, (%d)' % (str(stderr), len(str(stderr)))) #DEBUG
            raise TimeoutExpired(process.args, timeout)

    if check:
        retcode = process.poll()
        if retcode:
            raise CalledProcessError(retcode, process.args)
Example #14
    def wait(self, timeout=None):
        """Wait for the started process to complete.

        "timeout" is a floating point number of seconds after
            which to timeout.  Default is None, which is to never timeout.

        If the wait times out, it will raise a ProcessError. Otherwise it
        will return the child's exit value. Note that in the case of a timeout,
        the process is still running. Use kill() to forcibly stop the process.
        """
        if timeout is None or timeout < 0:
            # Use the parent call.
            try:
                return Popen.wait(self)
            except OSError, ex:
                # If the process has already ended, that is fine. This is
                # possible when wait is called from a different thread.
                if ex.errno != 10:  # No child process
                    raise
                return self.returncode
Example #15
    def wait(self, timeout=None):
        """Wait for the started process to complete.
        
        "timeout" is a floating point number of seconds after
            which to timeout.  Default is None, which is to never timeout.

        If the wait times out, it will raise a ProcessError. Otherwise it
        will return the child's exit value. Note that in the case of a timeout,
        the process is still running. Use kill() to forcibly stop the process.
        """
        if timeout is None or timeout < 0:
            # Use the parent call.
            try:
                return Popen.wait(self)
            except OSError, ex:
                # If the process has already ended, that is fine. This is
                # possible when wait is called from a different thread.
                if ex.errno != 10:  # No child process
                    raise
                return self.returncode
Example #16
                last_targetT = targetT
                last_targetH = targetH
                targetT = params['tempDes']
                targetH = params['humDes']
                targetL = params['lightMode']
                LtimeOn = params['timeOn']
                LtimeOff = params['timeOff']
                lightCameraInterval = params['cameraInterval']
                #change PID module setpoints to target
                pid_temp.SetPoint = targetT
                pid_hum.SetPoint = targetH
            except (KeyboardInterrupt):
                print(" ")
                print("Terminating Program...")
                heat_process.kill()
                heat_process.wait()
                hum_process.kill()
                hum_process.wait()
                fan_process.kill()
                fan_process.wait()
                light_camera_process.kill()
                light_camera_process.wait()
                sys.exit()
            except:
                pass
        #poll subprocesses if applicable and relaunch/update actuators
        poll_heat = heat_process.poll() #heat
        if poll_heat is not None:
            heat_process = Popen(['python', 'heatingElement.py', str(tempPID_out)], stdout=PIPE, stdin=PIPE, stderr=PIPE)

        poll_hum = hum_process.poll() #hum
Example #17
import time
import subprocess32
from subprocess32 import Popen, PIPE, STDOUT

import RPi.GPIO as GPIO  # RPi.GPIO assumed for the GPIO calls below

GPIO.setmode(GPIO.BCM)

GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)  #Button to GPIO23

running = False
try:
    while True:
        button_state = GPIO.input(23)
        if button_state == False:
            if running == False:
                print('starting...')
                running = True
                sensingfeedback = Popen(['python', 'sensingfeedback_v1.4.py'],
                                        stdout=PIPE,
                                        stdin=PIPE,
                                        stderr=PIPE)  #start python subprocess
                time.sleep(3.5)
            else:
                print('stopping...')
                running = False
                sensingfeedback.kill()
                sensingfeedback.wait()
                time.sleep(3.5)
except:
    GPIO.cleanup()
    print('Exited.')
Example #18
def build_ray_docker(dir):
    proc = Popen(["/bin/bash", "build-docker.sh", "--skip-examples"], cwd=dir)
    proc.wait()
    return proc.returncode == 0
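
A minimal usage sketch for build_ray_docker() (the checkout path is illustrative):

    if not build_ray_docker('/path/to/ray'):
        raise RuntimeError('build-docker.sh failed')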
Example #19
File: pypez.py Project: bw2/pypez
    def execute_command_using_LSF(self, command_obj, directory, job_log):
        # TODO put this in config file
        tempdir = "/broad/hptmp/weisburd/tmp/" #tempfile.gettempdir()

        #if not os.path.isfile(qsub_executable):
        #	raise Exception("Cannot submit job to SGE. %(qsub_executable)s command not found." % locals())


        num_cores = min(command_obj.num_threads_used, self.SGE_max_cores_to_reserve)

        # compute LSF job name
        lsf_job_name = ""
        if job_log.name:
            lsf_job_name += job_log.name
        if command_obj.name:
            if lsf_job_name:
                lsf_job_name += " " # add spacer
            lsf_job_name += command_obj.name

        if lsf_job_name:
            lsf_job_name = lsf_job_name.replace(" ", "_")
        else:
            lsf_job_name = os.path.basename(command_obj.command.split(" ")[0])

        # create a temporary log file
        temp_log_filename = None
        if not command_obj.dont_log_output:
            temp_log_filename = get_absolute_path("tmp.pipelinelog." + command_obj.command_id + "." + str(random.randint(10*10, 10**11 - 1)), directory, allow_compressed_version=False)
            temp_log_file = open(temp_log_filename, "a+") # create log file
            temp_log_file.close()
            temp_log_file = open(temp_log_filename, "r") # open this file for reading



        qsub_script_name = os.path.join(tempdir, lsf_job_name + ".%d.sh" % random.randint(10**9, 10**10 - 1))

        f = open( qsub_script_name, "w+")
        #f.write("source ~/.bashrc;" + "\n")
        #f.write("/db/ngs-bioinfo/prog/bash-4.2/bin/bash \n") # make sure the shell is a bash shell
        f.write("echo Running on $HOSTNAME. `uptime | cut -c 40-`, cpus: `cat /proc/cpuinfo | grep processor | wc -l`, `cat /proc/meminfo | grep MemTotal`, `cat /proc/meminfo | grep MemFree`;" + "\n")
        f.write("cd " + directory + "\n")
        f.write(command_obj.command + "\n")
        f.close()

        os.system("chmod 777 " + qsub_script_name)
        qsub_command = "bsub "
        qsub_command += " -K " # make hte command run interactively
        qsub_command += " -q week "  # hour or week
        qsub_command += " -J %s " % lsf_job_name
        qsub_command += " -P project "  # email when done
        if num_cores:
            qsub_command += " -n %s " % num_cores
        qsub_command  += " -R rusage[mem=%s] " % command_obj.memory_in_gb # memory usage
        qsub_command += " -o " + temp_log_filename + " "  # this also capture stderr (since stdout is not specified)
        qsub_command += qsub_script_name
        qsub_command += ' > /dev/null'

        spawned_qsub_process = Popen(qsub_command, bufsize = 0, shell=True, cwd=directory)

        # while the job is running, continually read from the temp_log_file and copy this to the job_log
        if temp_log_filename:
            self.copy_command_output_to_log(command_obj, spawned_qsub_process, temp_log_file, job_log)

            temp_log_file.close()
            os.remove(temp_log_filename)
        else:
            spawned_qsub_process.wait()

        if spawned_qsub_process.returncode is not None and spawned_qsub_process.returncode != 0:
            raise Exception("Non-zero return code: " + str(spawned_qsub_process.returncode))


        os.remove(qsub_script_name)
Example #20
File: pypez.py Project: bw2/pypez
    def execute_command_using_SGE(self, command_obj, directory, job_log):
        # TODO put this in config file
        qsub_executable = "/opt/gridengine/bin/lx-amd64/qsub"
        #qsub_executable = "qsub"
        tempdir = tempfile.gettempdir() # "/db/ngs-bioinfo/tmp"

        #if not os.path.isfile(qsub_executable):
        #	raise Exception("Cannot submit job to SGE. %(qsub_executable)s command not found." % locals())


        num_cores = min(command_obj.num_threads_used, self.SGE_max_cores_to_reserve)

        # compute SGE job name
        sge_job_name = ""
        if job_log.name:
            sge_job_name += job_log.name
        if command_obj.name:
            if sge_job_name:
                sge_job_name += " " # add spacer
            sge_job_name += command_obj.name

        if sge_job_name:
            sge_job_name = sge_job_name.replace(" ", "_")
        else:
            sge_job_name = os.path.basename(command_obj.command.split(" ")[0])

        # create a temporary log file
        temp_log_filename = None
        if not command_obj.dont_log_output:
            temp_log_filename = get_absolute_path("tmp.pipelinelog." + command_obj.command_id + "." + str(random.randint(10*10, 10**11 - 1)), directory, allow_compressed_version=False)
            temp_log_file = open(temp_log_filename, "a+") # create log file
            temp_log_file.close()
            temp_log_file = open(temp_log_filename, "r") # open this file for reading



        qsub_script_name = os.path.join(tempdir, sge_job_name + ".%d.sh" % random.randint(10**9, 10**10 - 1))
        f = open( qsub_script_name, "w+")
        #f.write("source ~/.bashrc;" + "\n")
        f.write("/db/ngs-bioinfo/prog/bash-4.2/bin/bash \n") # make sure the shell is a bash shell
        f.write("echo Running on $HOSTNAME. `uptime | cut -c 40-`, cpus: `cat /proc/cpuinfo | grep processor | wc -l`, `cat /proc/meminfo | grep MemTotal`, `cat /proc/meminfo | grep MemFree`;" + "\n")
        f.write("cd " + directory + "\n")
        f.write(command_obj.command + "\n")
        f.close()

        os.system("chmod 777 " + qsub_script_name)
        qsub_command = qsub_executable + " "
        qsub_command += " -V "  # import all environment
        if num_cores:
            qsub_command += " -pe orte " + str(num_cores) + "  "

        qsub_command += " -sync y "  # write err to out
        qsub_command += " -j y "  # write err to out
        qsub_command += " -o " + temp_log_filename + " "
        #qsub_command += " -e " + stderr + " "
        qsub_command += qsub_script_name # + " >& /dev/null"


        spawned_qsub_process = Popen(qsub_command, bufsize = 0, shell=True, cwd=directory)

        # while the job is running, continually read from the temp_log_file and copy this to the job_log
        if temp_log_filename:
            self.copy_command_output_to_log(command_obj, spawned_qsub_process, temp_log_file, job_log)

            temp_log_file.close()
            os.remove(temp_log_filename)
        else:
            spawned_qsub_process.wait()

        if spawned_qsub_process.returncode is not None and spawned_qsub_process.returncode != 0:
            raise Exception("Non-zero return code: " + str(spawned_qsub_process.returncode))


        os.remove(qsub_script_name)
Example #21
                #last_targetT = targetT
                #last_targetH = targetH
                #targetT = params['tempDes']
                #targetH = params['humDes']
                #targetL = params['lightMode']
                #LtimeOn = params['timeOn']
                #LtimeOff = params['timeOff']
                #cameraInterval = params['cameraInterval']
                #change PID module setpoints to target
                pid_temp.SetPoint = targetT
                pid_hum.SetPoint = targetH
            except (KeyboardInterrupt):
                print(" ")
                print("Terminating Program...")
                heat_process.kill()
                heat_process.wait()
                hum_process.kill()
                hum_process.wait()
                fan_process.kill()
                fan_process.wait()
                light_process.kill()
                light_process.wait()
                camera_process.kill()
                camera_process.wait()
                water_process.kill()
                water_process.wait()
                sys.exit()
            except Exception as e:
                print(e)
                pass