Example #1
    def check_and_start(self):
        # Process and Pool are assumed to be imported from multiprocessing
        mode = "Process"
        if mode == "serial":
            for node in self._list:
                ret = node.check_and_start()
                print(self.check_and_start.__name__, "ret=", ret)
                if ret == 1:
                    return
        elif mode == "Process":
            plist = []
            for node in self._list:
                p = Process(target=node.check_and_start, args=())
                plist.append(p)
                p.start()

            # wait up to 10 seconds for each child process
            for p in plist:
                p.join(10.0)

        elif mode == "Pool":
            pool = Pool(processes=3)
            plist = []
            for node in self._list:
                plist.append(pool.apply_async(node.check_and_start, ()))
            print("pool wait()")
            for p in plist:
                print("p.ready=", p.ready())
                p.wait(1.0)  # AsyncResult.wait() blocks until the result arrives or the timeout elapses
                print("p.ready=", p.ready())
                print("--------------------------")
Example #2
import logging
import os
from multiprocessing import Pipe, Process

logger = logging.getLogger(__name__)

class Viewer:
    def __init__(self):
        # create a pipe to communicate with the child
        (self.pipe, self.child_pipe) = Pipe()

        # create the subprocess
        self.child = Process(target=self._handler)
        # set the child to run as a background process (i.e., exit when parent does)
        self.child.daemon = True

        self.child.start()

    def update(self, knowledge_base, robot_state):
        self.pipe.send((knowledge_base, robot_state))
        # path = '/tmp/transfer.pickle'
        # pickle.dump((knowledge_base, robot_state), open(path, 'wb'))
        # self.pipe.send(path)

    def close(self):
        # signal the child to close
        self.pipe.close()
        # wait a bit for the process to exit (multiprocessing.Process has join(), not wait())
        self.child.join(1)

        # kill the process if it doesn't exit normally (Process has terminate(), not cancel())
        if self.child.is_alive():
            logger.warning("Viewer escalating to kill child")
            self.child.terminate()

        self.child = None
        logger.info("Viewer closed")

    def _handler(self):
        logger.debug("child {} start".format(self.child.pid))

        viewer = RemoteKnowledgeBaseViewer(self.child_pipe)
        viewer.run()

    @property
    def heartbeat(self):
        logger.info("checking for heartbeat")
        if self.pipe.poll():
            logger.info("heartbeat present")
            self.pipe.recv()
            logger.info("heartbeat read")
            return True
        else:
            return False
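RemoteKnowledgeBaseViewer is not shown above; a hypothetical sketch of the child-side loop it might run, consistent with the parent's update() and heartbeat logic:

class RemoteKnowledgeBaseViewer:
    def __init__(self, pipe):
        self.pipe = pipe

    def run(self):
        while True:
            try:
                knowledge_base, robot_state = self.pipe.recv()
            except EOFError:
                break  # the parent closed its end of the pipe
            # ... redraw the view from the received state ...
            self.pipe.send("ok")  # the reply that heartbeat() polls for and reads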
Example #3
import re
from multiprocessing import Process
# FakeStdout and writeFakeCommandResultsToPipe are defined elsewhere in the test module

class FakeProcess:
    '''
    Runs an instance of multiprocessing.Process, which displays fake results based on PySystemMock.fakeCommandResult{},
    or based on a generic countdown using the command string, in the event that fakeCommandResult{} doesn't match.
    This class functions as an adapter from multiprocessing.Process() to subprocess.Popen(), which the caller will expect.
    '''
    stdout = FakeStdout()  # can be read by callers as if it's a Process.stdout object
    process = None

    MOCK_STEPS_ITERATIONS = 5

    def start(self, command, fakeCommandResults):
        fakeCommandResult = self.getFakeResultForCommand(command, fakeCommandResults)
        self.process = Process(target=writeFakeCommandResultsToPipe, args=(self.stdout.writer, fakeCommandResult))
        self.process.start()

    def getFakeResultForCommand(self, command, fakeCommandResults):
        for regex in fakeCommandResults:
            match = re.search(regex, str(command))
            if match:
                return fakeCommandResults[regex].split('\n')
        return ["processing %s [%d]..." % (command, i) for i in range(self.MOCK_STEPS_ITERATIONS, 0, -1)]

    def poll(self):
        return self.process.exitcode

    def wait(self):
        # multiprocessing.Process has no wait(); join() then exitcode mimics Popen.wait()
        self.process.join()
        return self.process.exitcode

    def terminate(self):
        self.process.terminate()
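A short usage sketch; the command and regex here are illustrative assumptions:

fake = FakeProcess()
fake.start(['make', 'all'], {r'make': 'building...\ndone'})
print(fake.wait())  # joins the child and returns its exit code, like Popen.wait()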
Example #4
    return aws_responses.flask_error_response(msg,
                                              code=code,
                                              error_type=error_type)


# NOTE: 'async' became a reserved word in Python 3.7; this snippet predates that change
def run_lambda_executor(cmd, env_vars={}, async=False):
    process = run(cmd,
                  async=True,
                  stderr=subprocess.PIPE,
                  outfile=subprocess.PIPE,
                  env_vars=env_vars)
    if async:
        result = '{"async": "%s"}' % async
        log_output = 'Lambda executed asynchronously'
    else:
        return_code = process.wait()
        result = to_str(process.stdout.read())
        log_output = to_str(process.stderr.read())

        if return_code != 0:
            raise Exception(
                'Lambda process returned error status code: %s. Output:\n%s' %
                (return_code, log_output))
    return result, log_output


def set_function_code(code, lambda_name):
    def generic_handler(event, context):
        raise Exception((
            'Unable to find executor for Lambda function "%s". ' +
            'Note that Node.js Lambdas currently require LAMBDA_EXECUTOR=docker'
Example #5
import os, sys

print('fork: \nProcess (%s) start...' % os.getpid())
pid = os.fork()  # fork() creates a child process cheaply (Unix only)
if pid == 0:  # fork() returns 0 in the child; in the parent it returns the child's pid, and getppid() gives the parent's pid
    print('I am child process (%s) and my parent is (%s).' % (os.getpid(), os.getppid()))
    os._exit(0)  # the child exits here and never runs the code below
else: print('I (%s) just created a child process (%s).' % (os.getpid(), pid))

##################################################################
## multiprocessing Process
from multiprocessing import Process  # fork() is not available on Windows; Process is cross-platform
def run_proc(name): print('Run child process %s (%s)...' % (name, os.getpid()))  # the code the child process runs
print('\nProcess: \nParent process (%s).' % os.getpid())
p = Process(target=run_proc, args=('test',))  # arguments are passed via args
p.start()  # start() launches the child; even simpler than fork()
p.join()  # join() waits for the child to finish before continuing; typically used to synchronize processes

##################################################################
## subprocess
import subprocess  # start a child process and control its input and output
print('\nsubprocess without controlled input/output: \n$ nslookup www.python.org')
r = subprocess.call(['nslookup', 'www.python.org']); print('Exit code:', r)
print('\nsubprocess with controlled input/output: $ nslookup')
p = subprocess.Popen(['nslookup'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate(b'set q=mx\npython.org\nexit\n')  # equivalent to typing three commands: set q=mx; python.org; exit
print(output.decode('utf-8'))
print('Exit code:', p.returncode)
p = subprocess.Popen(['nslookup'], stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)
p.wait()  # needed so the script waits while the terminal takes input...
p.kill()
print(p.returncode)  # ending the session manually never reaches this line; a KeyboardInterrupt is raised at kill()
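On Python 3.5+, the Popen + communicate() sequence above can also be written with subprocess.run; a brief equivalent:

import subprocess

completed = subprocess.run(['nslookup'], input=b'set q=mx\npython.org\nexit\n',
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(completed.stdout.decode('utf-8'))
print('Exit code:', completed.returncode)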
Example #6
print('\nProcess: \nParent process (%s).' % os.getpid())
p = Process(target=run_proc, args=('test',))  # arguments are passed via args
p.start()  # start() launches the child; even simpler than fork()
p.join()  # join() waits for the child to finish before continuing; typically used to synchronize processes
##################################################################
# Pool
from multiprocessing import Pool  # launch large numbers of child processes on top of Process
# def long_time_task(name):
#     print('Run task %s (%s)...' % (name, os.getpid()))
#     start = time.time(); time.sleep(random.random() * 3)
#     end = time.time(); print('Task %s runs %0.2f seconds.' % (name, (end - start)))
# print('\nPool: \nParent process %s.' % os.getpid())
# p = Pool(4)  # a pool of 4 worker processes, so the first 4 start right away; the 5th waits until one finishes
# for i in range(5): p.apply_async(long_time_task, args=(i,))
# p.close()  # no new Process can be added after calling close()
# p.join()  # join() on a Pool waits for all children to finish; close() must be called before join()
##################################################################
# subprocess
import subprocess  # start a child process and control its input and output
print('\nsubprocess without controlled input/output: \n$ nslookup www.python.org')
r = subprocess.call(['nslookup', 'www.python.org']); print('Exit code:', r)
print('\nsubprocess with controlled input/output: $ nslookup')
p = subprocess.Popen(['nslookup'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, err = p.communicate(b'set q=mx\npython.org\nexit\n')  # equivalent to typing three commands: set q=mx; python.org; exit
print(output.decode('utf-8'))
print('Exit code:', p.returncode)
p = subprocess.Popen(['nslookup'], stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)
p.wait()  # needed so the script waits while the terminal takes input...
p.kill()
print(p.returncode)  # ending the session manually never reaches this line; a KeyboardInterrupt is raised at kill()
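The Pool section above is commented out; a minimal runnable version of the same pattern, with the imports it needs, might look like this:

import os, time, random
from multiprocessing import Pool

def long_time_task(name):
    print('Run task %s (%s)...' % (name, os.getpid()))
    start = time.time(); time.sleep(random.random() * 3)
    print('Task %s runs %0.2f seconds.' % (name, time.time() - start))

if __name__ == '__main__':
    p = Pool(4)  # 4 workers: the first 4 tasks start at once, the 5th waits for a free worker
    for i in range(5):
        p.apply_async(long_time_task, args=(i,))
    p.close()  # no more tasks may be submitted after close()
    p.join()   # wait for all workers; close() must be called before join()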
Example #7
        process.append(p4)
        s5 = s4 + e1 + 1
        p5 = Process(target=hash_cal, args=(s5, len(files) - 1, files, hash))
        p5.start()
        process.append(p5)
        for p in process:
            p.join()
        d2 = {}  # default, so the diff below still works when no previous hashfile exists
        if os.path.isfile('hashfile.txt'):
            d2 = json.load(open("hashfile.txt"))
        while len(process) > 0:
            process = [job for job in process if job.is_alive()]
            print(".")
            time.sleep(1)

        for p in process:
            p.join()  # Process has no wait(); join() is the equivalent (a no-op here, since all workers have finished)

    json.dump(hash.copy(), open("hashfile.txt", 'w'))
    diff(hash, d2, ddiff)
    print(ddiff)
    print("Sending the POST to the server")
    if len(ddiff.keys()) > 0:
        PARAMS = json.dumps(ddiff)
        URL = "http://localhost/fim/php/loadHash.php"
        da = {'hashes': PARAMS, 'sysid': sysID}
        r1 = requests.post(url=URL, data=da)
        print(r1.text)
        body = "Results from the system " + x + "  "
        for key in ddiff.keys():
            body = body + key
        sendmail(body)
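For the workers' updates to hash to be visible in the parent (which the hash.copy() above relies on), hash must be a shared mapping. A minimal sketch with multiprocessing.Manager, using a made-up hash_cal in place of the original:

import hashlib
from multiprocessing import Manager, Process

def hash_cal(start, end, files, hash):  # hypothetical stand-in for the original worker
    for path in files[start:end + 1]:
        with open(path, 'rb') as f:
            hash[path] = hashlib.md5(f.read()).hexdigest()

if __name__ == '__main__':
    files = ['a.txt', 'b.txt']   # made-up file list
    hash = Manager().dict()      # shared dict: child updates are visible to the parent
    p = Process(target=hash_cal, args=(0, len(files) - 1, files, hash))
    p.start(); p.join()
    print(dict(hash))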
Example #8
    UCINEO_NANCY_CE = [ conf+'\\UCINEO_NANCY_CE\\ApiFctSup_bouchon.obj', conf+'\\UCINEO_NANCY_CE\\ConvertUTF.obj', conf+'\\UCINEO_NANCY_CE\\HttpInterface.obj', conf+'\\UCINEO_NANCY_CE\\InterfaceSupUcineo.obj', conf+'\\UCINEO_NANCY_CE\\io_pc.obj', conf+'\\UCINEO_NANCY_CE\\Io_plug_com.obj', conf+'\\UCINEO_NANCY_CE\\Io_plug_counter.obj', conf+'\\UCINEO_NANCY_CE\\Io_plug_gain.obj', conf+'\\UCINEO_NANCY_CE\\Io_plug_input.obj', conf+'\\UCINEO_NANCY_CE\\Io_plug_intit.obj', conf+'\\UCINEO_NANCY_CE\\Io_plug_matrice.obj', conf+'\\UCINEO_NANCY_CE\\Io_plug_output.obj', conf+'\\UCINEO_NANCY_CE\\Iodlg.obj', conf+'\\UCINEO_NANCY_CE\\main.obj', conf+'\\UCINEO_NANCY_CE\\noyau_win32.obj', conf+'\\UCINEO_NANCY_CE\\SaeSupInterface_Cfg.obj' ]

    while(liste):
      cur = liste.pop()
      objet = eval(cur).pop()
      while(eval(cur)):
        print(objet)
        while True:
          if( running.value < MAX_RUNNING_VALUE ):
              p = Process( target = nmake, args = (cur,running,objet,config) )
              p.start()
              running.value = running.value + 1
              objet = eval(cur).pop()
              print("There are", running.value, "running")
              break

    print(running.value)
    time.sleep(6)
    # while ( running.value <= 0 ):
    #   print(running.value)
    if(config == "1"):
      cfg = 'CFG=UCINEO_NANCY_CE - Win32 (WCE x86) Release'
    else:
      cfg = 'CFG=UCINEO_NANCY_CE - Win32 (WCE x86) Debug'
    p = subprocess.Popen(['nmake', '/F', 'UCINEO_NANCY_CE.vcn', cfg])
    p.wait()
    # elapsed_time = time.time() - start_time
    # print("Compilation finished in: " + str(elapsed_time) + " seconds")

Example #9
import json

with open('pairs_dict.json') as infile:
    o = json.load(infile)  # assumed to decode to a list; a dict cannot be sliced like this
    chunkSize = 3
    for i in range(0, len(o), chunkSize):  # xrange in Python 2
        with open('file_' + str(i//chunkSize) + '.json', 'w') as outfile:
            json.dump(o[i:i+chunkSize], outfile)
===
t1 = Process(target=f, args=(x,))
t2 = Process(target=f, args=('bob',))

t1.start()
t2.start()

t1.join()
t2.join()
exit_codes = [p.exitcode for p in (t1, t2)]  # Process has no wait(); after join(), exitcode holds each status

===
from itertools import zip_longest  # izip_longest in Python 2

def grouper(iterable, n, fillvalue=None):
    "Collect data into fixed-length chunks or blocks"
    # grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    args = [iter(iterable)] * n
    return zip_longest(*args, fillvalue=fillvalue)
This lets you iterate over your tweets in groups of 5000:

for i, group in enumerate(grouper(input_tweets, 5000)):
    with open('outputbatch_{}.json'.format(i), 'w') as outputfile:
        json.dump(list(group), outputfile)
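Note that the final group is padded with fillvalue (None) entries; if that matters for the output files, filter them before dumping:

json.dump([t for t in group if t is not None], outputfile)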
====