Example No. 1
    def execute_task(self, website: Website, busy: Value, post_id: str, comment_id: str):
        busy.value = 1
        if os.path.exists("data.json"):
            os.remove("data.json")
        print("Started crawling task")
        process = CrawlerProcess(get_project_settings())
        process.crawl("od_links", base_url=website.url)
        process.start()
        print("Done crawling")

        self.db.import_json("data.json", website)
        os.remove("data.json")
        print("Imported in SQLite3")

        if post_id:
            # Reply to post
            stats = self.db.get_website_stats(website.id)
            comment = self.reddit_bot.get_comment({"": stats}, website.id)
            print(comment)
            if "total_size" in stats and stats["total_size"] > 10000000:
                post = self.reddit_bot.reddit.submission(post_id)
                self.reddit_bot.reply(post, comment)
            else:
                self.reddit_bot.log_crawl(post_id)

        elif comment_id:
            # Reply to comment
            stats = self.db.get_website_stats(website.id)
            comment = self.reddit_bot.get_comment({"There you go!": stats}, website.id)
            print(comment)
            reddit_comment = self.reddit_bot.reddit.comment(comment_id)
            self.reddit_bot.reply(reddit_comment, comment)
        busy.value = 0
        print("Done crawling task")
Example No. 2
    def setup_heartbeat(self, client_controller):
        cond = multiprocessing.Condition()
        s_init_finish = Value('i', 0)

        do_sample = Value('i', 0)
        do_sample_lock = Lock()

        server_process = multiprocessing.Process(
                target=self.server_heart_beat, 
                args=(cond, s_init_finish, do_sample, do_sample_lock))
        server_process.daemon = False
        server_process.start()

        logger.info("Waiting for server init ...")
        cond.acquire()
        while (s_init_finish.value == 0):
            cond.wait()
        if s_init_finish.value == 5:
            logger.error("Waiting for server init ... FAIL")
            raise RuntimeError("server init failed.")
        cond.release()
        logger.info("Waiting for server init ... Done")
        
        # let all clients start running the benchmark
        client_controller.client_run(do_sample, do_sample_lock)
        cond.acquire()
        s_init_finish.value = 0
        cond.release()
        return server_process
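The server side of this handshake is not shown; a minimal sketch of the counterpart, assuming server_heart_beat signals readiness by writing the Value under the Condition (1 for success, 5 for failure, matching the checks above):

def server_heart_beat(cond, s_init_finish, do_sample, do_sample_lock):
    # ... bring the server up ...
    with cond:
        s_init_finish.value = 1    # or 5 on failure
        cond.notify_all()          # wake the waiter in setup_heartbeat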
Example No. 3
import os
import subprocess
import sys
from multiprocessing import Value


def call(args, stdout=None, stderr=None, stdin=None, daemonize=False,
         preexec_fn=None, shell=False, cwd=None, env=None):
    """
    Run an external command in a separate process and detach it from the current process. Except for
    `stdout`, `stderr`, and `stdin`, all file descriptors are closed after forking. If `daemonize`
    is True then the parent process exits. All stdio is redirected to `os.devnull` unless
    specified. The `preexec_fn`, `shell`, `cwd`, and `env` parameters are the same as their `Popen`
    counterparts. Return the PID of the child process if not daemonized.
    """
    stream = lambda s, m: os.open(os.devnull, m) if s is None else s
    stdout = stream(stdout, os.O_WRONLY)
    stderr = stream(stderr, os.O_WRONLY)
    stdin = stream(stdin, os.O_RDONLY)

    shared_pid = Value('i', 0)
    pid = os.fork()
    if pid > 0:
        os.waitpid(pid, 0)
        child_pid = shared_pid.value
        del shared_pid
        if daemonize:
            sys.exit(0)
        return child_pid
    else:
        os.setsid()
        proc = subprocess.Popen(args, stdout=stdout, stderr=stderr, stdin=stdin, close_fds=True,
                                preexec_fn=preexec_fn, shell=shell, cwd=cwd, env=env)
        shared_pid.value = proc.pid
        os._exit(0)
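A hedged usage sketch for the function above; the command is arbitrary. With daemonize left False, the caller gets back the PID of the detached grandchild:

if __name__ == "__main__":
    pid = call(["sleep", "60"])        # detached from the current session
    print("detached child pid:", pid)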
Example No. 4
def main():
    #Begin Init
    settings.setup()
    from common.safeprint import safeprint
    try:
        import miniupnpc
    except ImportError:
        safeprint("Dependency miniupnpc is not installed. Running in outbound only mode")
        settings.config['outbound'] = True
    safeprint("settings are:")
    safeprint(settings.config)
    queue = Queue()
    live = Value('b',True)
    ear = listener(settings.config['port'],settings.config['outbound'],queue,live,settings.config['server'])
    ear.daemon = True
    ear.items = sync()
    ear.start()
    mouth = propagator(settings.config['port'] + 1, live)
    mouth.daemon = True
    mouth.items = ear.items
    mouth.start()
    feedback = []
    stamp = time()
    while queue.empty():
        if time() - 5 > stamp:
            break #pragma: no cover
    try:
        feedback = queue.get(False)
    except: #pragma: no cover
        safeprint("No feedback received from listener")
    ext_ip = ""     #Does this affect peers?
    ext_port = -1   #Does this affect peers?
    if feedback != []:
        settings.outbound = feedback[0]
        if settings.outbound is not True:
            ext_ip = feedback[1]
            ext_port = feedback[2]
    initializePeerConnections(settings.config['port'], ext_ip, ext_port)
    #End Init
    
    #Begin main loop
    if settings.config.get('seed'):
        safeprint("Seed mode activated")
        try:
            while not settings.config.get('test'):
                sleep(0.1)
        except KeyboardInterrupt:
            safeprint("Keyboard Interrupt")
    elif settings.config.get('server'):
        safeprint("Server mode activated")
    else:
        safeprint("Client mode activated")
    #End main loop
    
    #Begin shutdown
    safeprint("Beginning exit process")
    live.value = False
    settings.saveSettings()
    saveToFile()
    bounty.saveToFile()
Example No. 5
def main():
    settings.setup()
    try:
        import miniupnpc
    except ImportError:
        safeprint("Dependency miniupnpc is not installed. Running in outbound only mode")
        settings.config['outbound'] = True
    safeprint("settings are:")
    safeprint(settings.config)
    queue = Queue()
    live = Value('b',True)
    ear = listener(settings.config['port'],settings.config['outbound'],queue,live,settings.config['server'])
    ear.daemon = True
    ear.start()
    feedback = []
    stamp = time()
    while queue.empty():
        if time() - 5 > stamp:
            break #pragma: no cover
    try:
        feedback = queue.get(False)
    except: #pragma: no cover
        safeprint("No feedback received from listener")
    ext_ip = ""     #Does this affect peers?
    ext_port = -1   #Does this affect peers?
    if feedback != []:
        settings.outbound = feedback[0]
        if settings.outbound is not True:
            ext_ip = feedback[1]
            ext_port = feedback[2]
    initializePeerConnections(settings.config['port'], ext_ip, ext_port)
    live.value = False
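Both here and in Example No. 4, the polling loop on queue.empty() can be expressed with the queue's own blocking timeout; an equivalent sketch reusing the names above (Queue.Empty is the Python 2 spelling):

    from Queue import Empty   # 'queue' on Python 3
    try:
        feedback = queue.get(timeout=5)   # block up to 5 s for listener feedback
    except Empty:
        feedback = []
        safeprint("No feedback received from listener")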
Example No. 6
def hanging_get(my_id, messages, initial_peer_id):
    remote_peer_id = Value("i", initial_peer_id)
    remote_sender = Process(target=to_remote_server, args=(5550, my_id, remote_peer_id))
    remote_sender.start()

    while True:
        r = requests.get('http://localhost:8888/wait?peer_id=' + str(my_id))
        if r.status_code == 200:        

            if int(r.headers['pragma']) == my_id:
                connected = r.text.split("\n")
                for l in connected:
                    info = l.strip().split(",")

                    if len(info) == 3 and info[0] == "receiver" and info[2] == '1':
                        remote_peer_id.value = int(info[1])
                    if len(info) == 3 and info[0] == "receiver" and info[2] == '0':
                        remote_peer_id.value = -1                        
            else:
                messages.put(Message(int(r.headers['pragma']), r.text))
Example No. 7
def main():
	running = Value(c_int, 1)
	readQueue = Queue()
	reader = Process(target=Reader("/dev/ttyUSB0", 9600), args=(running, readQueue))
	worker = Process(target=Worker(), args=(running, readQueue))
	reader.start()
	worker.start()
	time.sleep(5)
	running.value = 0
	reader.join()
	worker.join()
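Reader and Worker are instantiated objects handed to Process as targets, so they are presumably callable. A self-contained sketch of a worker loop that honors the shared run flag (names are hypothetical):

from ctypes import c_int
from multiprocessing import Process, Queue, Value
import queue
import time

def worker(running, q):
    # drain the queue until the parent clears the flag
    while running.value:
        try:
            print("got", q.get(timeout=0.1))
        except queue.Empty:
            continue

if __name__ == "__main__":
    running = Value(c_int, 1)
    q = Queue()
    p = Process(target=worker, args=(running, q))
    p.start()
    q.put("hello")
    time.sleep(1)
    running.value = 0    # signal shutdown
    p.join()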
Example No. 8
def camstream():
  print "CAMera STREAMer (OpenCV " + cv2.__version__ + ")"
  print "main(): OS: {}".format(os.name)
  
  # * Start CameraStreamer process
  print "main(): Starting CameraStreamer process..."
  if os.name == 'nt':  # [Windows]
    # ** Create shared objects (NOTE only necessary on Windows since it uses a different multiprocessing implementation)
    print "main(): [Windows] Creating shared objects..."
    # *** Stay alive flag
    stayAliveObj = Value(c_bool, True)
    
    # *** Frame counter
    frameCountObj = Value('i', 0)
    
    # *** Image array
    image = np.zeros((camera_frame_height, camera_frame_width, camera_frame_depth), dtype=np.uint8)
    imageShape = image.shape
    imageSize = image.size
    image.shape = imageSize  # flatten numpy array
    imageObj = Array(c_ubyte, image)  # create a synchronized shared array
    
    # *** Image shape
    imageShapeObj = Array('i', imageShape)
    cameraStreamerProcess = CameraStreamer(stayAliveObj, frameCountObj, imageObj, imageShapeObj)
  else:  # [POSIX]
    cameraStreamerProcess = CameraStreamer()
    # ** Grab generated shared objects to share with other child processes
    print "main(): [POSIX] Getting shared objects from CameraStreamer..."
    stayAliveObj = cameraStreamerProcess.stayAliveObj
    frameCountObj = cameraStreamerProcess.frameCountObj
    imageObj = cameraStreamerProcess.imageObj
    imageShapeObj = cameraStreamerProcess.imageShapeObj
  cameraStreamerProcess.start()
  
  # * Start StreamViewer process
  print "main(): Starting StreamViewer process..."
  streamViewerProcess = StreamViewer(stayAliveObj, frameCountObj, imageObj, imageShapeObj)
  streamViewerProcess.start()
  
  # * Wait for child processes to finish
  print "main(): Waiting for child processes to finish..."
  try:
    streamViewerProcess.join()
    cameraStreamerProcess.join()
  except KeyboardInterrupt:
    stayAliveObj.value = False
    streamViewerProcess.join()
    cameraStreamerProcess.join()
  print "main(): Done."
Example No. 9
    def start(self, reload_from=None):
        """Start this server process.

        :param int reload_from: Optional, the PID of a running game server
                                process that this process should reload from
        :returns None:

        """
        assert not self._process, "server instance already started"
        pid = Value("i")
        self._process = Process(target=self._start,
                                args=(pid, socket_queue),
                                kwargs={"reload_from": reload_from})
        self._process.start()
        pid.value = self._process.pid
Example No. 10
    def send_mldquery_regularly(self):
        self.logger.debug("")

        requraly_query_type = self.config[const.REGURALY_QUERY_TYPE]
        reguraly_query_interval = self.config[const.REGURALY_QUERY_INTERVAL]
        mc_query_interval = self.config[const.MC_QUERY_INTERVAL]

        # before the first send, wait a quarter of the regular query interval
        time.sleep(reguraly_query_interval / 4)

        # General Query
        if requraly_query_type == self.GENERAL_QUERY:
            self.logger.debug("create general query")
            mc_info = {const.MC_TAG_MC_ADDR: const.DELIMIT_DOUBLE_COLON,
                       const.MC_TAG_SERV_IP: None}
            while self.SEND_LOOP:
                self.send_mldquery([mc_info])
                # timeout check
                self.check_user_timeout()
                time.sleep(reguraly_query_interval - self.QUERY_QRV)

        # Specific Query
        elif requraly_query_type == self.SPECIFIC_QUERY:
            self.logger.debug("create specific query")
            next_interval = Value(ctypes.c_bool, False)

            while self.SEND_LOOP:
                query_proc = Process(
                    target=self.wait_query_interval,
                    args=(next_interval, reguraly_query_interval))
                query_proc.daemon = True
                query_proc.start()
                self.logger.debug(
                    "next_interval : %s", str(next_interval.value))
                self.send_mldquery(
                    self.mc_info_list, mc_query_interval, next_interval)
                # timeout check
                self.check_user_timeout()

                # if the regular query interval has not elapsed yet, wait for it
                if not next_interval.value:
                    self.logger.debug(
                        "waiting query interval(%d sec)...",
                        reguraly_query_interval)
                    query_proc.join()

                next_interval.value = False
                query_proc.terminate()
Example No. 11
def main():
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-f', dest='logfile', required=True)
    arg_parser.add_argument('-t', dest='sleep', type=int, default=1)

    args = arg_parser.parse_args()

    count = Value('i', 0)
    p = Process(target=log_counter, args=(args.logfile, count,))
    p.start()

    while True:
        time.sleep(args.sleep)
        print('{0} events/{1}s'.format(count.value, args.sleep))
        count.value = 0

    p.join()
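Resetting count.value from the parent while the child increments it is racy: neither += nor the read-then-reset is atomic across processes. A sketch of the same counter guarded by the Value's built-in lock (log_counter_sketch is a hypothetical stand-in for log_counter):

from multiprocessing import Process, Value

def log_counter_sketch(count):
    for _ in range(1000):
        with count.get_lock():     # guarded increment
            count.value += 1

if __name__ == "__main__":
    count = Value('i', 0)
    p = Process(target=log_counter_sketch, args=(count,))
    p.start()
    p.join()
    with count.get_lock():         # atomic read-and-reset
        events, count.value = count.value, 0
    print(events)                  # 1000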
Example No. 12
def main():

    guardian = Heimdall()
    # zoeh = Zoehmacarena()

    # guardian.add_bot(zoeh)

    # guardian.main_loop()

    cont = True
    ispaused = False
    # bobby = Bob()
    state = Value("i", 0)

    l = Lock()
    # proc = Process(target=bobby.main, args=(l, state,))

    print("starting")
    # proc.start()
    while cont:
        if ispaused:
            command = raw_input("Heimdall[paused]: ")
        else:
            command = raw_input("Heimdall[running]: ")


        if command == "quit":
            cont = False
            state.value = -1

        elif command == "pause":
            l.acquire()
            print("paused")
            ispaused = True

        elif command == "start":
            print("unpausing")
            l.release()
            ispaused = False

        else:
            print("unknown command")

    # proc.join()
    print("finished")
    return
Example No. 13
def server(backlog=5):
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server_socket.bind(('', PORT))
    server_socket.listen(backlog)
    nclients = Value('i', 0)
    processes = []
    schedule.every(1).seconds.do(update_active_clients, nclients)

    try:
        while 1:
            conn, addr = server_socket.accept()
            print "new client " + str(addr)
            proc = Process(target=client_process, args=(conn, server_socket, nclients))
            processes.append(proc)
            proc.start()
            conn.close()
            nclients.value = len(multiprocessing.active_children())
    finally:
        # Ctrl-C
        for proc in processes:
            proc.terminate()
        server_socket.close()
Example No. 14
def run_test(*args):
#    logins = get_bm_config('ADMINS') 
#    logins.extend([get_bm_config('API_USER_PREFIX') % i
#                   for i in xrange(args['proc'])])
#    msg_count = get_bm_config('MSG_COUNT')
#    host = args['host']
#    port = args['port']
#    visit_args = [(login, host, port, msg_count) for login in logins]
    visit_args = [[i] for i in [1, 2]]
    term = Value(c_int, 0)
    queue = Queue()
    logging.debug('users pool')
    pool = Pool(initializer=init,
                initargs=(term, queue),
                processes=10,
                maxtasksperchild=1)
    try:
        swarm = pool.map(visit, [i for i in range(10)])
        logging.debug('finishing...')
    except KeyboardInterrupt:
        logging.info('terminating...')
        term.value = 1
        pool.terminate()
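Shared objects reach Pool workers through initializer/initargs rather than through the mapped arguments, because a synchronized Value cannot be pickled into an ordinary task. A self-contained sketch of the init/visit pair assumed above (the bodies are hypothetical):

from ctypes import c_int
from multiprocessing import Pool, Queue, Value

_term = None

def init(term, queue):
    global _term       # runs once per worker: stash the shared objects
    _term = term

def visit(i):
    if _term.value:    # parent sets this to 1 on KeyboardInterrupt
        return None
    return i * i

if __name__ == "__main__":
    term = Value(c_int, 0)
    with Pool(initializer=init, initargs=(term, Queue()), processes=4) as pool:
        print(pool.map(visit, range(10)))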
Example No. 15
def main():
    with open("/media/extradikke/UbuntuData/wikipedia_data/data_dump/enwiki-20150112-pages-articles.xml",
              mode='r') as big_file:
        fsize = os.stat(
            "/media/extradikke/UbuntuData/wikipedia_data/data_dump/enwiki-20150112-pages-articles.xml").st_size
        stream = pulldom.parse(big_file)
        # sys.setrecursionlimit(10000)
        redirect_queue = Queue(100)
        pages_queue = Queue(100)
        rawnodes_queue = Queue(1000)

        counter = 1
        redirect_counter = 0

        running = Value('b', True)

        link_extractor1 = Process(target=link_extractor, args=(rawnodes_queue, redirect_queue, pages_queue, running, ))
        link_extractor2 = Process(target=link_extractor, args=(rawnodes_queue, redirect_queue, pages_queue, running, ))
        # link_extractor3 = Process(target=link_extractor, args=(rawnodes_queue, redirect_queue, pages_queue, running, ))
        # link_extractor4 = Process(target=link_extractor, args=(rawnodes_queue, redirect_queue, pages_queue, running, ))
        xml_openeger = Process(target=opener, args=(rawnodes_queue, pages_queue, redirect_queue))
        redirects = Process(target=redirect_saver, args=(redirect_queue, running,))
        page_saver = Process(target=save_pages, args=(pages_queue, running,))
        #

        xml_openeger.start()
        processes = [link_extractor1, link_extractor2, redirects, page_saver]
        for process in processes:
            process.start()
        xml_openeger.join()
        while True:
            print("here???")
            if redirect_queue.empty() and pages_queue.empty() and rawnodes_queue.empty():
                running.value = False
                for process in processes:
                    process.join()
                break
Example No. 16
 def _hash_result(self, cursor, timeout_unix_time):
   """Returns a hash that is independent of row order."""
   # A value of 1 indicates that the hash thread should continue to work.
   should_continue = Value("i", 1)
   def hash_result_impl():
     try:
       current_thread().result = 1
       while should_continue.value:
         LOG.debug("Fetching result for query with id %s"
             % cursor._last_operation_handle)
         rows = cursor.fetchmany(self.BATCH_SIZE)
         if not rows:
           return
         for row in rows:
           for idx, val in enumerate(row):
             # Floats returned by Impala may not be deterministic, the ending
             # insignificant digits may differ. Only the first 6 digits will be used
             # after rounding.
             if isinstance(val, float):
               sval = "%f" % val
               dot_idx = sval.find(".")
               val = round(val, 6 - dot_idx)
             current_thread().result += (idx + 1) * hash(val)
              # Modulo the result to keep it "small" otherwise the math ops can be slow
             # since python does infinite precision math.
             current_thread().result %= maxint
     except Exception as e:
       current_thread().error = e
   hash_thread = create_and_start_daemon_thread(hash_result_impl)
   hash_thread.join(max(timeout_unix_time - time(), 0))
   if hash_thread.is_alive():
     should_continue.value = 0
     raise QueryTimeout()
   if hash_thread.error:
     raise hash_thread.error
   return hash_thread.result
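The Value acts as a cooperative stop flag that the hashing thread checks once the join times out. A minimal sketch of that flag-plus-deadline pattern:

from multiprocessing import Value
from threading import Thread
import time

should_continue = Value("i", 1)

def work():
    while should_continue.value:
        time.sleep(0.1)            # stand-in for fetching rows

t = Thread(target=work, daemon=True)
t.start()
t.join(timeout=2.0)                # wait up to the deadline
if t.is_alive():
    should_continue.value = 0      # ask the worker to stop
    t.join()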
Example No. 17
def main():
    # getting password
    psswd = gethash(getpass(), mode="pass")

    # decrypting and loading settings
    settings = fdecrypt("files/vk.settings", psswd)
    # print(settings)
    login = "".join(re.findall(r"login=(.+)#endlogin", settings))
    password = "".join(re.findall(r"password=(.+)#endpass", settings))
    chatid = int("".join(re.findall(r"chatid=(\d+)#endchatid", settings)))
    albumid = int("".join(re.findall(r"album_id=(\d+)#endalbumid", settings)))
    userid = int("".join(re.findall(r"userid=(\d+)#enduserid", settings)))

    state_auth = False
    # getting session
    while not state_auth:
        try:
            vk_session = vk_api.VkApi(login, password, captcha_handler=captcha_handler)
            vk_session.authorization()
            vk = vk_session.get_api()
            state_auth = True
        except Exception:
            state_auth = False
            tsleep(30)
            exception("smth goes wrong at geting api\n")

    # getting url
    url = geturl(psswd)

    # Lists of messages
    msg_man = Manager()
    list_of_cmds, list_of_alles, list_of_imnts, tl_msgs = msg_man.list(), msg_man.list(), msg_man.list(), msg_man.list()

    # accounting the speed
    iterations_vk = Value("i", 0)
    iterations_tl = Value("i", 0)

    # safe concurrent access to the history file
    msgshistory = io.SharedFile("files/msgshistory.db")

    # stats Manager
    stat_man = Manager()
    curr_stat = stat_man.dict()
    curr_stat["temp"] = 0
    curr_stat["iter_tl"] = 0
    curr_stat["iter_vk"] = 0
    curr_stat["PID_VK"] = 0
    curr_stat["PID_TL"] = 0

    # starting bot
    print("Logged in, starting bot...")

    vk_process = Process(
        target=vk_run.run_vk_bot,
        args=(
            vk,
            chatid,
            albumid,
            userid,
            msgshistory,
            tl_msgs,
            list_of_alles,
            list_of_imnts,
            list_of_cmds,
            iterations_vk,
            curr_stat,
        ),
    )
    tl_process = Process(
        target=telegrambot.tlmain,
        args=(url, tl_msgs, msgshistory, list_of_alles, list_of_imnts, iterations_tl, curr_stat),
    )

    print("Starting VK bot...")
    vk_process.start()

    print("Starting TL bot...")
    tl_process.start()

    # checking if admin gave argument "--screen"
    if len(argv) > 1 and argv[1] == "--screen":
        import curses

        stdscr = curses.initscr()
        curses.noecho()
        stdscr.keypad(True)

    # crutch
    iterations_tl.value = 1
    iterations_vk.value = 1

    while True:
        out_string = """Temp: {0} C; \nSpeed_TL: {1}; \nSpeed_VK: {2};"""
        tempfile = open("/sys/class/thermal/thermal_zone0/temp", "r")
        # print('Temp: ' + str(float(tempfile.read().strip())/1000) + ' C ', end='')
        ctemp = str(float(tempfile.read().strip()) / 1000)
        tempfile.close()

        curr_stat["temp"] = ctemp
        curr_stat["iter_tl"] = iterations_tl.value
        curr_stat["iter_vk"] = iterations_vk.value

        if len(argv) > 1 and argv[1] == "--screen":
            stdscr.clear()
            stdscr.addstr(out_string.format(ctemp, iterations_tl.value, iterations_vk.value))
            stdscr.refresh()

        # checking if process is alive. If not, restarting process
        if iterations_tl.value == 0:
            tl_process.terminate()
            tl_process = Process(
                target=telegrambot.tlmain,
                args=(url, tl_msgs, msgshistory, list_of_alles, list_of_imnts, iterations_tl, curr_stat),
            )
            print("Restarting TL bot...")
            tl_process.start()
            # iterations_tl.value = 1

        if iterations_vk.value == 0:
            vk_process.terminate()
            vk_process = Process(
                target=vk_run.run_vk_bot,
                args=(
                    vk,
                    chatid,
                    albumid,
                    userid,
                    msgshistory,
                    tl_msgs,
                    list_of_alles,
                    list_of_imnts,
                    list_of_cmds,
                    iterations_vk,
                    curr_stat,
                ),
            )
            print("Restarting VK bot...")
            vk_process.start()
            # iterations_vk.value = 1

        # stdout.flush()
        iterations_tl.value = 0
        iterations_vk.value = 0
        tsleep(60)

    tl_process.join()
    vk_process.join()

    curses.nocbreak()
    stdscr.keypad(False)
    curses.echo()
    print("Exit...")
Example No. 18
# Task body: it runs as long as there is data left on the stack

if __name__ == '__main__':
    # ihm = subprocess.Popen(['nmake', '/F'])
    # ihm.wait()
# Addition: the projects also need the AppVersion.h file to be handled
    appversion_update()
# Keep waiting until there are no more tasks left in the pool
    conf = sys.argv[1]
    if conf == '1':
      config = 'Release'
    else:
      config = 'Debug'
    while liste or running.value > 0:
      if running.value < 5 and liste:
        cur = liste.pop()
        p = Process(target=nmake, args=(cur, running, config))
        p.start()
        running.value = running.value + 1
        # print "**Library currently being built:"
        # print cur
        # print "There are", running.value, "builds in progress"
        # raw_input()
    print "\nStarting the link step\n"
    cfg = 'CFG=\"UCINEO_NANCY_CE - Win32 (WCE x86) '+ config + '\"'
    os.system('nmake /F UCINEO_NANCY_CE.vcn ' + cfg)  # calling a system command blocks the script until it finishes



Example No. 19
def serve():
    """
    This is the server entry point, where a while loop
    receives messages, parses them, and acts accordingly.
    Actually data processing may be spawned from here as
    a separate Process.
    """

    # initialize our bookkeeping
    state = Value('i', READY)
    proj = None
    scanNum = None
    refScan = True
    refScanNum = None
    filename = None

    # setup the server REPLY socket
    port = PORT
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind("tcp://*:%s" % port)

    # initialize the Process object
    p = None

    # initialize our 'memory' of what scans we
    # have processed
    scans = {}

    # TBF: terminate with a server command as well?
    # run till a keyboard interrupt
    run = True

    while run:
        try:
            #logger.debug ("waiting for message")
            msg = socket.recv()
            #logger.debug ("got msg")
            #logger.debug (msg)
            msgStr = "".join( chr(x) for x in msg)

            # ************ Process Data
            if msg == b"process":
                if state.value == READY:
                    # record the current state of all parameters so
                    # we know what scans are being processed
                    filepath = getFITSFilePath(proj, filename)
                    if proj not in scans:
                        scans[proj] = {}
                    scans[proj][scanNum] = {
                        "scanNum": scanNum,
                        "timestamp": datetime.now(),
                        "refScan": refScan,
                        "refScanNum": refScanNum,
                        "filename": filename,
                        "filepath": filepath,
                        "filepathSmoothed": filepath.replace(".fits", ".smoothed.fits")
                    }

                    logger.debug("scans list: %s" % scans)
                    if not refScan:
                        # this is a signal scan, so get the filename of our 
                        # reference scan
                        refScanFile = getRefScanFileName(scans, proj, refScanNum)
                    else:
                        refScanFile = None
                        
                    logger.debug("processing!")
                    p = Process(target=process, 
                                args=(state,
                                      proj, 
                                      scanNum, 
                                      refScan,
                                      refScanNum, 
                                      refScanFile, 
                                      filename))
                    p.start()
                    #state.value = PROCESSING
                    # socket.send_string("Started Processing")
                    logger.debug("Started Processing")
                    socket.send_string("OK")
                else:
                    logger.debug("processing already!")
                    socket.send_string("Already processing")

            # ******** STOP processing data        
            elif msg == b"stop":
                if state.value not in PROCESS_STATES:
                    socket.send_string("Nothing to stop")
                else:
                    if p is not None:
                        logger.debug ("terminating process")
                        logger.debug (stateMap[state.value])
                        p.terminate()
                        state.value = READY
                        logger.debug ("process terminated")
                        socket.send_string("OK")
                    else:
                        logger.debug ("can't terminate, p is none")
                        socket.send_string("can't terminate, p is none")

            # ************ return our current STATE            
            elif msg == b"get_state": # or msg == "get_state":
                #socket.send_string("READY" if state.value is 0 else "PROCESSING")    
                socket.send_string(stateMap[state.value])

            # ************* SET a parameter    
            elif msgStr[:4] == "set:":
                # set what to what?
                # set: key=value
                ps = msgStr[4:].split("=")
                if len(ps) != 2:
                    socket.send_string("Cant understand: %s" % msgStr)
                else:
                    key = ps[0]
                    value = ps[1]
                    if key == 'proj':
                        proj = value
                    elif key == 'scanNum':
                        scanNum = int(value)
                    elif key == 'refScan':
                        # TBF: settle on bool or int type?
                        # refScan = value == 'True' 
                        refScan = int(value) == 1 
                    elif key == 'refScanNum':
                        refScanNum = int(value)
                    elif key == 'filename':
                        filename = value
                    else:
                        logger.debug("unknonw key: %s" % key)                    
                    # socket.send_string("setting %s to %s" % (key, value))    
                    logger.debug("setting %s to %s" % (key, value))
                    socket.send_string("OK")

            # ********* RAISE an ERROR!            
            else:
                logger.debug("what?")
                logger.debug(msg)
                socket.send_string("Dont' understand message")

            # no need for the server to spin too fast
            time.sleep(1)

        except KeyboardInterrupt:

            # exit gracefully
            logger.debug("KeyboardInterrupt")
            if p is not None:
                p.terminate()
            # socket.send_string("server exiting")
            run = False
        
    logger.debug("Exiting server")   
Example No. 20
                        help='interactive mode')
    parser.add_argument('-o', '--output', type=str, default="",
                        help="the output file")
    parser.add_argument('file', metavar="FILE", help="the input file to convert")
    parser.add_argument('-d', '--dependencies', type=str, default="",
                        help="the dependencies of the file to convert")
    args = parser.parse_args()

    if args.interactive:
        if not args.output:
            print("The ouput file is not optional when the interactive mode is activated")
            exit()
        dep = [f for f in args.dependencies.split(",") if f]
        print("Interactive mode activated "
              "(Press [enter] to quit).\n"
              "Waiting for modifications...")
        end_inter_mode = Value("i", False)
        p = Process(target=interactive, args=(args.file, dep, args.output, end_inter_mode))
        try:
            p.start()
            input("")
        finally:
            end_inter_mode.value = True
            p.join()
    else:
        txt, frames = convert(args.file, root=True)
        if args.output:
            with open(args.output, "w") as f:
                f.write(txt)
        else:
            print(txt)
Example No. 21
odo_cov_stack = []
gps_cov_stack = []
final_cov_stack = []

data_logger_odo = []
data_logger_gps = []
data_logger_eky = []

###################################################
# Global readiness
am_i_ready = False
# handles terminating other threads and process
global_finish = False
# used across multiple running process
global_finish_process = Value('i', 0)
global_finish_process.value = 0

weAreDoneHere = False
# for debugging
Program_DEBUG = True

### GPS Server related variables

LINETHRESHOLD = 800
reduceFactor = 0.35

# robot mode
# mode 0 - Manual control
# mode 1 - GPS control
# mode 2 - Line Following
robot_mode = 0
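Note the contrast above: plain module globals such as global_finish are copied into each child and later writes stay local, while global_finish_process is backed by shared memory. A small sketch of the difference (illustrative only):

from multiprocessing import Process, Value

plain_flag = False

def child(shared_flag):
    global plain_flag
    plain_flag = True        # mutates only this process's copy
    shared_flag.value = 1    # visible to the parent

if __name__ == "__main__":
    shared_flag = Value('i', 0)
    p = Process(target=child, args=(shared_flag,))
    p.start()
    p.join()
    print(plain_flag, shared_flag.value)   # prints: False 1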
Example No. 22
def read_barcode():
    num = Value('i', 1)
    result_queue = Queue(1)
    key_queue = Queue(1)
    frame_queue = Queue(1)
    cond = Condition()
    dbr_proc = Process(target=dbr_run, args=(
        frame_queue, key_queue, cond, num, result_queue))
    dbr_proc.start()

    vc = cv2.VideoCapture(0)
    vc.set(3, 640) #set width
    vc.set(4, 480) #set height

    if vc.isOpened():  # try to get the first frame
        rval, frame = vc.read()
    else:
        return

    windowName = "Robot View"
    
    try:
        while True:
            rval, frame = vc.read()
            cv2.imshow(windowName, frame)

            key = cv2.waitKey(1) & 0xFF 
            if key == ord('q'):
                key_queue.put(key)
                dbr_proc.join()
                break
            elif key == ord('c'):
                frame_queue.put(frame)
                key_queue.put(key)
            elif key == ord('a') or key == ord('d') or key == ord('w') or key == ord('s') or key == ord('p'):
                key_queue.put(key)

            try:
                ret = result_queue.get_nowait()
                results = ret.results
                image = ret.image

                thickness = 2
                color = (0,255,0)
                for result in results:
                    print("barcode format: " + result[0])
                    print("barcode value: " + result[1])
                    x1 = result[2]
                    y1 = result[3]
                    x2 = result[4]
                    y2 = result[5]
                    x3 = result[6]
                    y3 = result[7]
                    x4 = result[8]
                    y4 = result[9]

                    cv2.line(image, (x1, y1), (x2, y2), color, thickness)
                    cv2.line(image, (x2, y2), (x3, y3), color, thickness)
                    cv2.line(image, (x3, y3), (x4, y4), color, thickness)
                    cv2.line(image, (x4, y4), (x1, y1), color, thickness)

                    cv2.putText(image, result[1], (min([x1, x2, x3, x4]), min([y1, y2, y3, y4])), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), thickness)

                cv2.imshow("Localization", image)


            except Exception:  # result queue is empty; nothing to draw yet
                pass
    finally:
        num.value = 0
        
            
    vc.release()
    cv2.destroyAllWindows()
Example No. 23
def video_feed_counter(conf, mode, input, output, url, camera):
    # construct the argument parser and parse the arguments
    # load the configuration file
    conf = Conf(conf)
    count = 0
    # initialize the MOG foreground background subtractor object
    # mog = cv2.bgsegm.createBackgroundSubtractorMOG()
    mog = cv2.createBackgroundSubtractorMOG2()
    # initialize and define the dilation kernel
    dKernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))

    # initialize the video writer process
    writerProcess = None

    # initialize the frame dimensions (we'll set them as soon as we read
    # the first frame from the video)
    W = None
    H = None

    # instantiate our centroid tracker and initialize a dictionary to
    # map each unique object ID to a trackable object
    ct = CentroidTracker(conf["max_disappeared"], conf["max_distance"])
    trackableObjects = {}

    # if a video path was not supplied, grab a reference to the webcam
    # if not args.get("input", False):
    # if input:
    # 	print("[INFO] starting video stream...")
    # 	# vs = VideoStream(src=0).start()
    # 	vs = VideoStream(usePiCamera=True).start()
    # 	time.sleep(2.0)

    # otherwise, grab a reference to the video file
    # else:
    print("[INFO] opening video file...")
    vs = cv2.VideoCapture(url, cv2.CAP_FFMPEG)
    # vs = cv2.VideoCapture(args["input"])

    # check if the user wants to use the difference flag feature
    if conf["diff_flag"]:
        # initialize the start counting flag and mouse click callback
        start = False
        cv2.namedWindow("set_points")
        cv2.setMouseCallback("set_points", set_points, [mode])

    # otherwise, the user does not want to use it
    else:
        # set the start flag as true indicating to start traffic counting
        start = True

    # initialize the direction info variable (used to store information
    # such as up/down or left/right vehicle count) and the difference
    # point (used to differentiate between left and right lanes)
    directionInfo = None
    diffPt = None
    fps = FPS().start()
    # print('fbs')
    # loop over frames from the video stream
    while (vs.isOpened()):
        # grab the next frame and handle if we are reading from either
        # VideoCapture or VideoStream
        # frame = vs.read()
        ret, frame = vs.read()  # import image
        # if not ret:
        # 	frame = cv2.VideoCapture(url)
        #     continue
        # if ret:
        #     frame = cv2.VideoCapture(url)
        #     continue

        # if we are viewing a video and we did not grab a frame then we
        # have reached the end of the video
        if input is not None and frame is None:
            break
        #print("frame in while")

        # check if the start flag is set, if so, we will start traffic
        # counting

        if start:
            # if the frame dimensions are empty, grab the frame
            # dimensions, instantiate the direction counter, and set the
            # centroid tracker direction

            if W is None or H is None:
                # start the frames per second throughput estimator
                #fps = FPS().start()
                (H, W) = frame.shape[:2]
                dc = DirectionCounter(mode, W - conf["x_offset"],
                                      H - conf["y_offset"])
                ct.direction = mode

                # check if the difference point is set, if it is, then
                # set it in the centroid tracker object
                if diffPt is not None:
                    ct.diffPt = diffPt

            # begin writing the video to disk if required
            if output is not None and writerProcess is None:
                # set the value of the write flag (used to communicate when
                # to stop the process)
                writeVideo = Value('i', 1)

                # initialize a shared queue for the exchange of frames,
                # initialize a process, and start the process
                frameQueue = Queue()
                writerProcess = Process(target=write_video,
                                        args=(output, writeVideo, frameQueue,
                                              W, H))
                writerProcess.start()

            # initialize a list to store the bounding box rectangles
            # returned by background subtraction model
            rects = []

            # convert the frame to grayscale image and then blur it
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            gray = cv2.GaussianBlur(gray, (5, 5), 0)

            # apply the MOG background subtraction model which returns
            # a mask
            mask = mog.apply(gray)

            # apply dilation
            dilation = cv2.dilate(mask, dKernel, iterations=2)

            # find contours in the mask
            cnts = cv2.findContours(dilation.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
            cnts = imutils.grab_contours(cnts)

            # loop over each contour
            for c in cnts:
                # if the contour area is less than the minimum area
                # required then ignore the object
                if cv2.contourArea(c) < conf["min_area"]:
                    continue

                # get the (x, y)-coordinates of the contour, along with
                # height and width
                (x, y, w, h) = cv2.boundingRect(c)

                # check if direction is *vertical and the vehicle is
                # further away from the line, if so then, no need to
                # detect it
                if mode == "vertical" and y < conf["limit"]:
                    continue

                # otherwise, check if direction is horizontal and the
                # vehicle is further away from the line, if so then,
                # no need to detect it
                elif mode == "horizontal" and x > conf["limit"]:
                    continue

                # add the bounding box coordinates to the rectangles list
                rects.append((x, y, x + w, y + h))

            # check if the direction is vertical
            if mode == "vertical":
                # draw a horizontal line in the frame -- once an object
                # crosses this line we will determine whether they were
                # moving 'up' or 'down'
                cv2.line(frame, (0, H - conf["y_offset"]),
                         (W, H - conf["y_offset"]), (0, 255, 255), 2)

                # check if a difference point has been set, if so, draw
                # a line dividing the two lanes
                if diffPt is not None:
                    cv2.line(frame, (diffPt, 0), (diffPt, H), (255, 0, 0), 2)

            # otherwise, the direction is horizontal
            else:
                # draw a vertical line in the frame -- once an object
                # crosses this line we will determine whether they were
                # moving 'left' or 'right'
                # print('ddds')
                cv2.line(frame, (W - conf["x_offset"], 0),
                         (W - conf["x_offset"], H), (0, 255, 255), 2)

                # check if a difference point has been set, if so, draw a
                # line dividing the two lanes
                if diffPt is not None:
                    cv2.line(frame, (0, diffPt), (W, diffPt), (255, 0, 0), 2)

            # use the centroid tracker to associate the (1) old object
            # centroids with (2) the newly computed object centroids
            objects = ct.update(rects)

            # loop over the tracked objects
            for (objectID, centroid) in objects.items():
                # check to see if a trackable object exists for the
                # current object ID and initialize the color
                to = trackableObjects.get(objectID, None)
                color = (0, 0, 255)

                # create a new trackable object if needed
                if to is None:
                    to = TrackableObject(objectID, centroid)

                # otherwise, there is a trackable object so we can
                # utilize it to determine direction
                else:
                    # find the direction and update the list of centroids
                    dc.find_direction(to, centroid)
                    to.centroids.append(centroid)

                    # check to see if the object has been counted or not
                    if not to.counted:

                        # find the direction of motion of the vehicles
                        directionInfo = dc.count_object(to, centroid, camera)

                    # otherwise, the object has been counted and set the
                    # color to green indicate it has been counted
                    else:
                        color = (0, 255, 0)

                # store the trackable object in our dictionary
                trackableObjects[objectID] = to

                # draw both the ID of the object and the centroid of the
                # object on the output frame
                text = "ID {}".format(objectID)
                cv2.putText(frame, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, 2)
                cv2.circle(frame, (centroid[0], centroid[1]), 4, color, -1)

            # extract the traffic counts and write/draw them
            if directionInfo is not None:
                for (i, (k, v)) in enumerate(directionInfo):
                    text = "{}: {}".format(k, v)
                    cv2.putText(frame, text, (10, ((i * 20) + 20)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

            # put frame into the shared queue for video writing
            if writerProcess is not None:
                frameQueue.put(frame)

            # show the output frame
            # cv2.imshow("Frame", frame)
            frames = cv2.imencode('.jpg', frame)[1].tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frames + b'\r\n')
            key = cv2.waitKey(1) & 0xFF

            # if the `q` key was pressed, break from the loop
            if key == ord("q"):
                break

            # update the FPS counter
            fps.update()

        # otherwise, the user has to select a difference point
        else:
            # show the output frame
            # cv2.imshow("set_points", frame)
            frames = cv2.imencode('.jpg', frame)[1].tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frames + b'\r\n')
            key = cv2.waitKey(1) & 0xFF

            # if the `s` key was pressed, start traffic counting
            if key == ord("s"):
                # begin counting and eliminate the informational window
                start = True
                cv2.destroyWindow("set_points")

    # stop the timer and display FPS information
    fps.stop()
    print("[INFO] elapsed time: {:.2f}".format(fps.elapsed()))
    print("[INFO] approx. FPS: {:.2f}".format(fps.fps()))

    # terminate the video writer process
    if writerProcess is not None:
        writeVideo.value = 0
        writerProcess.join()

    # if we are not using a video file, stop the camera video stream
    # if not args.get("input", False):
    # 	vs.stop()

    # release the video file pointer
    vs.release()

    # close any open windows
    cv2.destroyAllWindows()
Example No. 24
def baumWelchP(bitext_sd, s_count, t_table,
               sd_count):  #L is the number of observations

    N = maxTargetSentenceLength(bitext_sd)
    print 'N', N
    N_max = N
    Y = bitext_sd

    yValues = s_count.keys()  #computes all possible english words
    (indexMap, biword) = map_bitext_to_int(sd_count)
    sd_size = len(indexMap)
    lastLikelihood = 0

    #a = zeros((N+1,N+1))
    '''
    a_array = Array(ct.c_double,((N+1)*(N+1)))
    a_array2 = np.frombuffer(a_array.get_obj()) # mp_arr and arr share the same memory
    a = a_array2.reshape((N+1,N+1)) # b and arr share the same memory
    '''
    manager = Manager()
    a = manager.dict()

    #pi = zeros(N+1)
    pi = Array('d', N + 1)

    logLikelihood = Value('d', 0.0)

    lastLogLikelihood = Value('d', 0.0)

    L = len(Y)
    #N = len(Y[0][1]) #first sentence x length
    #(a,pi) = initializeUniformly(N)

    for iterations in range(0, 10):
        #E step
        #c = defaultdict(int)
        startTime = time.time()
        print 'iteration', iterations
        logLikelihood.value = 0.0
        #totalGammaOverAllObservations = zeros(N+1)
        totalGammaOverAllObservations = Array('d', [0] * (N + 1))

        #totalGammaDeltaOverAllObservations_t = zeros((N+1,sd_size))
        totalGammaDeltaOverAllObservations_t = Array('d', [0] * ((N + 1) *
                                                                 (sd_size)))

        #totalGammaDeltaOverAllObservations_t_overall_states = zeros(sd_size)
        totalGammaDeltaOverAllObservations_t_overall_states = Array(
            'd', [0] * sd_size)

        totalGammaDeltaOverAllObservations_t_overall_states_over_dest = defaultdict(
            int)

        #totalGamma1OverAllObservations = zeros(N+1)
        totalGamma1OverAllObservations = Array('d', [0] * (N + 1))

        #totalC_j_Minus_iOverAllObservations = zeros((N+1,N+1))
        '''
        totalC_j_Minus_iOverAllObservations_array = Array(ct.c_double,(N+1)*(N+1))
        totalC_j_Minus_iOverAllObservations_array2 = np.frombuffer(totalC_j_Minus_iOverAllObservations_array.get_obj())
        totalC_j_Minus_iOverAllObservations = totalC_j_Minus_iOverAllObservations_array2.reshape((N+1,N+1))
        for i in range(N+1):
            for j in range(N+1):
                totalC_j_Minus_iOverAllObservations[i,j] = 0.0
        '''
        manager = Manager()

        totalC_j_Minus_iOverAllObservations = manager.dict()
        #totalC_l_Minus_iOverAllObservations = zeros(N+1)
        '''
        totalC_l_Minus_iOverAllObservations = Array('d',[0]*(N+1))
        '''
        manager2 = Manager()
        totalC_l_Minus_iOverAllObservations = manager2.dict()

        intervals = 10
        jobs = []
        lock = RLock()
        length_of_interval = L / intervals
        for i in range(0, intervals - 1):
            start = i * length_of_interval
            end = (i + 1) * length_of_interval
            #print start
            #print end
            p = Process(target=Expectation2,
                        args=(lock, t_table, N, Y, sd_size, indexMap,
                              iterations, totalGammaOverAllObservations,
                              totalGammaDeltaOverAllObservations_t,
                              totalGamma1OverAllObservations,
                              totalC_j_Minus_iOverAllObservations,
                              totalC_l_Minus_iOverAllObservations, start, end,
                              a, pi, logLikelihood, lastLogLikelihood))
            p.start()
            jobs.append(p)

        start = (intervals - 1) * length_of_interval
        end = L
        p = Process(target=Expectation2,
                    args=(lock, t_table, N, Y, sd_size, indexMap, iterations,
                          totalGammaOverAllObservations,
                          totalGammaDeltaOverAllObservations_t,
                          totalGamma1OverAllObservations,
                          totalC_j_Minus_iOverAllObservations,
                          totalC_l_Minus_iOverAllObservations, start, end, a,
                          pi, logLikelihood, lastLogLikelihood))
        p.start()
        jobs.append(p)
        for p in jobs:
            p.join()

        endTime = time.time()

        #print N
        print "%.2gs" % (endTime - startTime)
        #N = len(totalGamma1OverAllObservations)-1
        #print N
        print 'last , new ', lastLogLikelihood.value, logLikelihood.value
        #print 'likelihood difference ', (logLikelihood.value - lastLogLikelihood.value)
        lastLogLikelihood.value = logLikelihood.value

        totalGammaOverAllObservationsOverAllStates = 0.0
        startTime = time.time()
        for i in range(1, N + 1):
            totalGammaOverAllObservationsOverAllStates += totalGammaOverAllObservations[
                i]

        print 'likelihood ', logLikelihood.value
        #lastLikelihood = liklihood
        N = len(totalGamma1OverAllObservations) - 1
        #print N

        # Create expected_counts(d,c) to be consistent with the Berg-Kirkpatrick et al.
        # To make it more memory efficient just keep either totalGammaDeltaOverAllObservations_t_overall_states or expected_counts
        expected_counts = defaultdict(int)

        for i in range(1, N + 1):
            for k in range(sd_size):

                address = i * sd_size + k
                totalGammaDeltaOverAllObservations_t_overall_states[
                    k] += totalGammaDeltaOverAllObservations_t[address]
                (f, e) = biword[k]
                totalGammaDeltaOverAllObservations_t_overall_states_over_dest[
                    e] += totalGammaDeltaOverAllObservations_t[address]

        for k in range(sd_size):
            (f, e) = biword[k]
            expected_counts[(
                f, e)] = totalGammaDeltaOverAllObservations_t_overall_states[k]

        totalGammaOverAllObservationsOverAllStates = 0.0
        for i in range(1, N + 1):
            totalGammaOverAllObservationsOverAllStates += totalGammaOverAllObservations[
                i]

        # M Step
        # We can clear a, b and pi here and then set the values for them
        #a = zeros((N+1,N+1))
        manager = Manager()
        a = manager.dict()
        pi = zeros(N + 1)
        t_table = defaultdict(int)

        for i in range(1, N + 1):
            pi[i] = totalGamma1OverAllObservations[i] * (1.0 / L)
            '''
            for j in range(1,N+1):
                a[(i,j)] = totalC_j_Minus_iOverAllObservations[(i,j)]/totalC_l_Minus_iOverAllObservations[i]
            '''
        #a[(i,j,I)] = p(j|i,I)
        #print 'totalC_j_Minus_iOverAllObservations ', totalC_j_Minus_iOverAllObservations
        totalC_j_Minus_iOverAllObservations = dict(
            totalC_j_Minus_iOverAllObservations)
        for (i, j, I) in totalC_j_Minus_iOverAllObservations:
            #print x
            a[(i, j, I)] = totalC_j_Minus_iOverAllObservations[
                (i, j, I)] / totalC_l_Minus_iOverAllObservations[(i, I)]
        #print 'a ', a
        #check_probability(a, N)
        for k in range(sd_size):
            (f, e) = biword[k]
            t_table[(
                f, e
            )] = totalGammaDeltaOverAllObservations_t_overall_states[
                k] / totalGammaDeltaOverAllObservations_t_overall_states_over_dest[
                    e]

        print iterations

    return (a, t_table, pi)
Example No. 25
def transfer_coin():
    """
    transfer money
    Arg:
        {"kribzz":
            {"transactionId":"643427363","streeAddress":"700 Rodeo Drive","cityName":"Beverly Hills",
            "stateCode":"CA","zipCode":"90210","latitude":"34.079678","longitude":"-118.413515",
            "transactionDateTime":"1523855778",
            "transactionTotal":"9975000.00",
            "sellerName":"John Clark",
            "buyerName":"Emily Stevens",
            },
         "transfer": {
             "amount":12.54321,
             "destination_address":"ckbzzBGZpWBZiPyQmxN3NCK8DJ8S37Vhxb69a9pbYXgMAwKKPDGeZ93aZgSXfX1E3GMEbk6tgLGPK8gDAeGquLmASXKvRim7pzN",
         },
         "smart_contract": {
             "agent_signature":"ckbzzBGZpWBZiPyQmxN3NCK8DJ8S37Vhxb69a9pbYXgMAwKKPDGeZ93aZgSXfX1E3GMEbk6tgLGPK8gDAeGquLmASXKvRim7pzN",
             "agent_pkey":"ckbzzBGZpWBZiPyQmxN3NCK8DJ8S37Vhxb69a9pbYXgMAwKKPDGeZ93aZgSXfX1E3GMEbk6tgLGPK8gDAeGquLmASXKvRim7pzN",
             "investor_signature":"ckbzzBGZpWBZiPyQmxN3NCK8DJ8S37Vhxb69a9pbYXgMAwKKPDGeZ93aZgSXfX1E3GMEbk6tgLGPK8gDAeGquLmASXKvRim7pzN",
             "investor_pkey":"ckbzzBGZpWBZiPyQmxN3NCK8DJ8S37Vhxb69a9pbYXgMAwKKPDGeZ93aZgSXfX1E3GMEbk6tgLGPK8gDAeGquLmASXKvRim7pzN",
             "owner_signature":"ckbzzBGZpWBZiPyQmxN3NCK8DJ8S37Vhxb69a9pbYXgMAwKKPDGeZ93aZgSXfX1E3GMEbk6tgLGPK8gDAeGquLmASXKvRim7pzN",
             "owner_pkey":"ckbzzBGZpWBZiPyQmxN3NCK8DJ8S37Vhxb69a9pbYXgMAwKKPDGeZ93aZgSXfX1E3GMEbk6tgLGPK8gDAeGquLmASXKvRim7pzN",
         },
        }
    Return:
       reply(JSON) {"reply":
                 {"msgId":cursor.lastrowid,"error":{"errorCode":PSI.errorCode,"errorMessage":PSI.errorMessage}}
    }
    """

    psi_log_info(request.url)
    psi_log_info("POST: %s" % request.POST.dict)
    kr_data = None
    try:
        kr_data = parse_request("kribbz")
#        for key,val in kr_data.items():
#            d = kr_data[key]
#            psi_log_debug( str(d) + ' = ' + str(val))
    except Exception as errtxt:
        psi_log_error(str(errtxt))
    print(kr_data)

    d_address = None
    amnt = None
    kr_amnt = None
    wallet = None
    pwd = None
    fee = 100000
    mixin = 0
    try:
        kr_amnt = parse_request("transfer")
        #        for key,val in kr_data.items():
        #            d = kr_data[key]
        #            psi_log_debug( str(d) + ' = ' + str(val))
        amnt = kr_amnt["amount"]
        d_address = kr_amnt["destination_address"]
        fee = kr_amnt["fee"]
        mixin = kr_amnt["mixin"]
    except Exception as errtxt:
        psi_log_error(str(errtxt))
    print(amnt, d_address)

    sec = None
    try:
        sec = parse_request("wallet")
        for key, val in sec.items():
            d = sec[key]
#            psi_log_debug( str(d) + ' = ' + str(val))
        pwd = sec["password"]
        wallet = sec["wallet_name"]
    except Exception as errtxt:
        psi_log_error(str(errtxt))
    print(pwd, wallet)

    #    run_wallet(wallet, pwd)
    from subprocess import Popen, PIPE
    from tempfile import SpooledTemporaryFile as tempfile
    f = tempfile()
    f.write(b'exit\n')  # SpooledTemporaryFile opens in binary mode by default
    f.seek(0)

    # Start wallet
    #    print Popen(['/bin/grep','f'],stdout=PIPE,stdin=f).stdout.read()
    #./simple_wallet  --wallet-file ./wal2 --password Vatson2008  --rpc-bind-port 18082
    # ./simple_wallet --wallet-file /opt/kribbz/kribbz_wallet  --password Password12345 --rpc-bind-port 18082
    run_folder = APP_FOLDER
    cmd1 = "{0}simple_wallet".format(run_folder)
    cmd2 = "--wallet-file"
    cmd3 = "{0}{1}".format(WALLET_FOLDER, wallet)
    cmd4 = "--password"
    cmd5 = "{0}".format(pwd)
    cmd6 = "--rpc-bind-port"
    cmd7 = "18082"
    #    proc =  Popen([cmd1,cmd2,cmd3,cmd4,cmd5,cmd6,cmd7],stdout=PIPE,stdin=f).stdout.read()
    #    out =  prx.stdout.read()
    #    print(out)

    #    import sys
    #    from subprocess import *
    #    cmd = "{0}simple_wallet --wallet-file {1}{2}  --password={3} --rpc-bind-port 18082".format(run_folder, WALLET_FOLDER, wallet, pwd)
    #    proc = Popen(cmd, shell=True, stdout=PIPE)
    #    cnt = 0
    #    while True:
    #        cnt += 1
    #        if cnt > 40:
    #            break
    #        data = proc.stdout.readline()   # Alternatively proc.stdout.read(1024)
    #        if len(data) == 0:
    #            break
    #        sys.stdout.write(data)   # sys.stdout.buffer.write(data) on Python 3.x

    s_stop = Value('d', 0.0)

    ret = start_wallet(run_folder, wallet, pwd, s_stop)
    if not ret:
        rez2 = {
            "tx_hash": None,
            "msg": 'wallet error',
            "error": True,
            "success": False
        }
        return (json.dumps(rez2, indent=4))

    # simple wallet is running on the localhost and port of 18082
    url = WALLET_URL  # "http://localhost:18082/json_rpc"

    # standard json header
    headers = {'content-type': 'application/json'}
    #    destination_address = "ckbzzBr9AquP7taMtmySt5PLPRAAxoC9ueGu2bqgveWf62d9X8DawW4gHBxmjLBuYDF5vspjRoQU37SgFYbHAKTS83vfWonzRKq"  #wal5

    destination_address = d_address
    #    destination_address = "ckbzz7dYVt6T4sBWhjp8wqQUMgoE8hWyfFCcBjjdLrzEdJU9SQsXjm73rb4VzZt7RxGzuLCAs4ZtQQMJzeELHP841TbYDt4Rm9V" # wal6

    # amount of xmr to send
    print("amnt=", amnt)
    amount = float(amnt)
    #    amount = 12.54321

    # cryptonote amounts are expressed in atomic units rather than the
    # float amounts people normally use, so the float must be converted
    # to something cryptonote understands
    int_amount = int(get_amount(amount))

    # just to make sure that amount -> conversion -> back
    # gives the same amount as the initial number
    t_amount = float(get_money(str(int_amount)))
    print(t_amount, amount, int_amount)
    assert amount == float(get_money(
        str(int_amount))), "Amount conversion failed"

    # send the specified amount to the given destination_address
    recipients = [{"address": destination_address, "amount": int_amount}]

    # using given mixin
    # mixin = 0

    # get some random payment_id
    payment_id = get_payment_id()
    if kr_data:
        kr1 = kr_data
        kribbz_info = json.dumps(kr1)
        # simplewallet's procedure/method to call
        rpc_input = {
            "method": "transfer",
            "params": {
                "destinations": recipients,
                "mixin": mixin,
                "kribbz_info": kribbz_info,
                "get_tx_key": True,
                "fee": fee,
            },
        }
    else:
        rpc_input = {
            "method": "transfer",
            "params": {
                "destinations": recipents,
                "mixin": mixin,
                "get_tx_key": True,
                "fee": fee,
            },
        }

    # add standard rpc values
    rpc_input.update({"jsonrpc": "2.0", "id": "0"})
    try:
        # execute the rpc request
        response = requests.post(url,
                                 data=json.dumps(rpc_input),
                                 headers=headers)
        #    rez = json.dumps(response.json())
        rez = response.json()
        save_wallet()
        time.sleep(2)  # give the wallet a moment to settle before stopping it
        s_stop.value = 1
        stop_wallet()
        success = True
    except Exception as ex1:
        out = 'coin transfer not successful'
        error = "simple wallet not started"
        success = False
        tx_hash = None


#    {u'jsonrpc': u'2.0', u'id': u'0', u'result': {u'tx_hash': u'00000000000000001701000000000000320e5717afca5e5e0000000000000000'}}
    if success:
        try:
            tx_hash = rez[u'result'][u'tx_hash']
        except (KeyError, TypeError):
            tx_hash = None
        if tx_hash is not None:
            out = 'coin transfer successful'
            success = True
            error = "0"
        else:
            out = 'coin transfer not successful'
            error = rez
            success = False

    rez2 = {"tx_hash": tx_hash, "msg": out, "error": error, "success": success}

    print(rez2)
    return (json.dumps(rez2, indent=4))
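The round-trip assert above guards the float-to-atomic-unit conversion. A hedged sketch of what get_amount/get_money do, assuming 12 decimal places as in Monero-derived coins (the real helpers may differ):

ATOMIC_UNITS = 10 ** 12  # assumption: 12 decimal places

def to_atomic(amount_float):
    return int(round(amount_float * ATOMIC_UNITS))

def from_atomic(amount_int):
    return amount_int / float(ATOMIC_UNITS)

assert abs(from_atomic(to_atomic(12.54321)) - 12.54321) < 1e-9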
Ejemplo n.º 26
0
def main():
    # use fixed random state
    rand_state = np.random.RandomState(1).get_state()
    np.random.set_state(rand_state)
    tf_set_seeds(np.random.randint(1, 2**31 - 1))

    # Create UR5 Reacher2D environment
    env = ReacherEnv(setup="UR5_default",
                     host=None,
                     dof=2,
                     control_type="velocity",
                     target_type="position",
                     reset_type="zero",
                     reward_type="precision",
                     derivative_type="none",
                     deriv_action_max=5,
                     first_deriv_max=2,
                     accel_max=1.4,
                     speed_max=0.3,
                     speedj_a=1.4,
                     episode_length_time=4.0,
                     episode_length_step=None,
                     actuation_sync_period=1,
                     dt=0.04,
                     run_mode="multiprocess",
                     rllab_box=False,
                     movej_t=2.0,
                     delay=0.0,
                     random_state=rand_state)
    env = NormalizedEnv(env)
    # Start environment processes
    env.start()
    # Create baselines TRPO policy function
    sess = U.single_threaded_session()
    sess.__enter__()

    def policy_fn(name, ob_space, ac_space):
        return MlpPolicy(name=name,
                         ob_space=ob_space,
                         ac_space=ac_space,
                         hid_size=32,
                         num_hid_layers=2)

    # Create and start plotting process
    plot_running = Value('i', 1)
    shared_returns = Manager().dict({
        "write_lock": False,
        "episodic_returns": [],
        "episodic_lengths": [],
    })
    # Spawn plotting process
    pp = Process(target=plot_ur5_reacher,
                 args=(env, 2048, shared_returns, plot_running))
    pp.start()

    # Create callback function for logging data from baselines TRPO learn
    kindred_callback = create_callback(shared_returns)

    # Train baselines TRPO
    learn(env,
          policy_fn,
          max_timesteps=150000,
          timesteps_per_batch=2048,
          max_kl=0.05,
          cg_iters=10,
          cg_damping=0.1,
          vf_iters=5,
          vf_stepsize=0.001,
          gamma=0.995,
          lam=0.995,
          callback=kindred_callback)

    # Safely terminate plotter process
    plot_running.value = 0  # shut down the plotting process
    time.sleep(2)
    pp.join()

    env.close()
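A note on shared_returns above: a Manager dict cannot hold a real lock, so "write_lock" is a cooperative flag that the writer and the plotter toggle around their accesses. A hedged sketch of the writer side (the project's actual callback may differ):

def append_episode(shared_returns, ret, length):
    while shared_returns["write_lock"]:
        pass  # spin until the plotter clears the flag
    shared_returns["write_lock"] = True
    # "+= [x]" reassigns the list, so the Manager proxy sees the update
    shared_returns["episodic_returns"] += [ret]
    shared_returns["episodic_lengths"] += [length]
    shared_returns["write_lock"] = False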
Ejemplo n.º 27
0
    finish_count = Value('i', 1)
    if (node_number == 0):
        # Start spawning the server
        server_p = Process(target=server_process,
                           args=(
                               rabbitmq_go,
                               finish_count,
                           ))
        rabbit_p = Process(target=cmd_process, args=("rabbitmq-server", ))
        index_p = Process(target=cmd_process, args=("index_server .", ))
        server_p.start()
        rabbit_p.start()
        sleep(2)
        index_p.start()
        sleep(0.5)
        rabbitmq_go.value = 1

    else:
        print "I AM CLIENT " + hostname
        context = zmq.Context()
        socket = context.socket(zmq.REQ)
        socket.connect("tcp://" + servername + ":5566")

        response = False
        while not response:
            sleep(0.5)
            socket.send("Awake?")
            message = socket.recv()
            response = (message == "Yes")

    # Starting image2plate jobs
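The client loop above polls until the server answers "Yes". The matching server side is not shown; a hypothetical REP socket that completes the handshake could look like this:

def readiness_responder(context, port=5566):
    socket = context.socket(zmq.REP)
    socket.bind("tcp://*:%d" % port)
    while True:
        message = socket.recv()
        # reply "Yes" only once startup is complete
        socket.send("Yes" if message == "Awake?" else "No")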
Ejemplo n.º 28
0
        a = a['prices']
        a.pop()
        plt.plot(a)
        plt.show()
    print("end")


if __name__ == '__main__':
    print("Hello")
    running = Value('b', True)
    maxRequests = 4
    print('Initialization of the weather')
    a = Array('f', range(3))
    w = weather.Weather(a, running, 1)
    q = Queue(maxRequests)
    n = 12
    pol = CVOUKIVOI
    h = Process(target=homes.homes, args=(a, q, running, n, pol))
    m = market.Market(q, running, 2)
    m.start()
    w.start()
    h.start()
    print("gogogo")
    input("Press any key to stop")
    running.value = False
    print('STOP')
    w.join()
    h.join()
    m.join()
    graph()
Ejemplo n.º 29
0
    subject_cert = asymmetric.load_certificate(base64.b64decode(rawCert))
    builder = OCSPRequestBuilder(subject_cert, issuerCerts[url])
    ocsp_request = builder.build()
    url = urlparse(url)
    headers = {}
    conn = httplib.HTTPConnection(url.netloc)
    conn.request("POST", url.path, ocsp_request.dump(), headers)
    res = conn.getresponse().read()
    ocspResponseClass = ocsp.OCSPResponse.load(res)
    return (ocspResponseClass.response_data['responses'][0]['cert_status'].name
            != 'good')


if __name__ == '__main__':
    q = Queue(workers * 16)
    check_finish = Value('i', 0)
    for i in range(workers):
        p = Process(target=doWork, args=(i, q, check_finish))
        p.start()
    try:
        ctr = 0
        for cert in open(infile, 'r'):
            q.put(cert)
            ctr += 1
            if (ctr % 10000 == 0):
                print(str(ctr) + " certificates processed")
        check_finish.value = 1
        print("End of put certificates into queue")
    except KeyboardInterrupt:
        sys.exit(1)
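The producer above pairs the queue with a check_finish flag so workers can tell "queue momentarily empty" from "no more certificates". A hypothetical doWork consumer matching that protocol:

from Queue import Empty  # queue.Empty on Python 3

def doWork(worker_id, q, check_finish):
    while True:
        try:
            cert = q.get(timeout=1)
        except Empty:
            if check_finish.value == 1:
                break  # producer is done and the queue is drained
            continue
        check_certificate(cert)  # placeholder for the per-certificate OCSP check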
Ejemplo n.º 30
0
def run_server(tcp_ip, tcp_port, stream, password_range):
    q = JoinableQueue()
    found = Value('b', False)

    # Create a process responsible for generating password
    t = Process(target=generate,
                name="Password Generator",
                args=(q, password_range))
    t.daemon = True
    t.start()

    # Run a socket listening for clients' heartbeats
    t = threading.Thread(target=hearthbeat,
                         name="Hearthbeat",
                         args=(tcp_ip, found))
    t.daemon = True
    t.start()

    # A separate thread to remove inactive clients
    t = threading.Thread(target=remove_inactive_clients,
                         name="Client clean-up",
                         args=(q, ))
    t.daemon = True
    t.start()

    # Initiate the listening socket on given port
    try:
        server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server.bind((tcp_ip, tcp_port))
        server.listen(5)
    except socket.error as ex:
        print "Error opening socket:", ex
        return

    global counter
    counter = 0
    # Used to keep track of the passwords currently being processed by clients. If a client
    # is invalidated, its data has to be resent to another client
    global processed_passwords
    processed_passwords = {}
    start_time = time.time()

    # The server terminates once the password is found, once there are no more active clients
    # and no passwords left to try, or when a KeyboardInterrupt (Ctrl + C) is received
    while True:
        print "Number of clients: " + str(len(clients))
        print "Estimated speed: " + str(
            1 / (time.time() - start_time) * counter) + " H/sec"
        message = prepare_data_for_transfer(q, stream)
        # There are no more passwords to send, but we have to wait for all clients to finish
        if (not message and not clients):
            print "Password is not in brute-forced space."
            server.close()
            return
        try:
            client, address = server.accept()
        except KeyboardInterrupt:
            print "Stoping server..."
            server.close()
            return

        # It is possible the queue was filled with passwords from inactivated clients
        # while the server was waiting for new connections
        if (not message):
            message = prepare_data_for_transfer(q, stream)

        result, counter = handle_connection(client, address, message)
        if (result):
            with found.get_lock():
                found.value = True
            server.close()
            return
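Note the found.get_lock() above: assignments to a shared Value are not atomic across processes, so the example wraps the write in the Value's own lock. A reader can use the same lock:

def password_found(found):
    with found.get_lock():
        return bool(found.value)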
Ejemplo n.º 31
0
    def __init__(self, model, device_ids=1, n_workers=None,
                 max_batch_size=None, max_image_size=DEFAULT_MAX_IMAGE_SIZE,
                 modder=None):
        """
        Args:
        - model (PyMjModel): MuJoCo model to use for rendering
        - device_ids (int/list): list of device ids to use for rendering.
            One or more workers will be assigned to each device, depending
            on how many workers are requested.
        - n_workers (int): number of parallel processes in the pool. Defaults
            to the number of device ids.
        - max_batch_size (int): maximum number of states that can be rendered
            in batch using .render(). Defaults to the number of workers.
        - max_image_size (int): maximum number of pixels in images requested
            by .render()
        - modder (Modder): modder to use for domain randomization.
        """
        self._closed, self.pool = False, None

        if not (modder is None or inspect.isclass(modder)):
            raise ValueError("modder must be a class")

        if isinstance(device_ids, int):
            device_ids = list(range(device_ids))
        else:
            assert isinstance(device_ids, list), (
                "device_ids must be a list of integers")

        n_workers = n_workers or 1
        self._max_batch_size = max_batch_size or (len(device_ids) * n_workers)
        self._max_image_size = max_image_size

        array_size = self._max_image_size * self._max_batch_size

        self._shared_rgbs = Array(ctypes.c_uint8, array_size * 3)
        self._shared_depths = Array(ctypes.c_float, array_size)

        self._shared_rgbs_array = np.frombuffer(
            self._shared_rgbs.get_obj(), dtype=ctypes.c_uint8)
        assert self._shared_rgbs_array.size == (array_size * 3), (
            "Array size is %d, expected %d" % (
                self._shared_rgbs_array.size, array_size * 3))
        self._shared_depths_array = np.frombuffer(
            self._shared_depths.get_obj(), dtype=ctypes.c_float)
        assert self._shared_depths_array.size == array_size, (
            "Array size is %d, expected %d" % (
                self._shared_depths_array.size, array_size))

        worker_id = Value(ctypes.c_int)
        worker_id.value = 0

        if get_start_method() != "spawn":
            raise RuntimeError(
                "Start method must be set to 'spawn' for the "
                "render pool to work. That is, you must add the "
                "following to the _TOP_ of your main script, "
                "before any other imports (since they might be "
                "setting it otherwise):\n"
                "  import multiprocessing as mp\n"
                "  if __name__ == '__main__':\n"
                "    mp.set_start_method('spawn')\n")

        self.pool = Pool(
            processes=len(device_ids) * n_workers,
            initializer=MjRenderPool._worker_init,
            initargs=(
                model.get_mjb(),
                worker_id,
                device_ids,
                self._shared_rgbs,
                self._shared_depths,
                modder))
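The RuntimeError above spells out the requirement; a minimal main script that satisfies it might look like this (model construction elided):

import multiprocessing as mp

if __name__ == '__main__':
    mp.set_start_method('spawn')  # must run before anything forks
    # model = load_model_somehow()            # hypothetical
    # pool = MjRenderPool(model, device_ids=2)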
Ejemplo n.º 32
0
        end = time()
        fpsTime += end - start
        fpsCnt += 1
        sleep(max(0, 1.0/30 - (end - start)))
except KeyboardInterrupt:
    pass

print("joining")

if VIDEO_OUT:
    outframe.fill(0)
    hdmi_out.writeframe(outframe)

gpio.gpio_write(latch_pwr, 0)
run.value = False
procthread.join()
statusthread.join()
print("FPS: %.2f, PROC: %.2f" %
      (fpsCnt / fpsTime, procCnt.value / procTime.value))

if 'videoIn' in globals():
    videoIn.release()
    del videoIn

if VIDEO_OUT and 'hdmi_out' in globals():
    hdmi_out.stop()
    hdmi_out.close()
    del hdmi_out

if 'db' in globals():
Ejemplo n.º 33
0
def main():
    # Create DXL Reacher1D environment
    env = DxlReacher1DEnv(
        setup='dxl_gripper_default',
        idn=1,
        baudrate=1000000,
        obs_history=1,
        dt=0.04,
        gripper_dt=0.01,
        rllab_box=False,
        episode_length_step=None,
        episode_length_time=2,
        max_torque_mag=100,
        control_type='torque',
        target_type='position',
        reset_type='zero',
        reward_type='linear',
        use_ctypes_driver=True,
    )

    # The outputs of the policy function are sampled from a Gaussian. However, the actions in
    # terms of torque commands are in the range [-max_torque_mag, max_torque_mag]. The
    # NormalizedEnv wrapper scales actions accordingly. By default, it does not normalize
    # observations or rewards.
    env = NormalizedEnv(env)

    # Start environment processes
    env.start()

    # Create baselines trpo policy function
    sess = U.single_threaded_session()
    sess.__enter__()

    def policy_fn(name, ob_space, ac_space):
        return MlpPolicy(name=name,
                         ob_space=ob_space,
                         ac_space=ac_space,
                         hid_size=32,
                         num_hid_layers=2)

    # create and start plotting process
    plot_running = Value('i', 1)
    shared_returns = Manager().dict({
        "write_lock": False,
        "episodic_returns": [],
        "episodic_lengths": [],
    })
    # Plotting process
    pp = Process(target=plot_dxl_reacher,
                 args=(env, 2048, shared_returns, plot_running))
    pp.start()

    # Create callback function for logging data from baselines TRPO learn
    kindred_callback = create_callback(shared_returns)

    # Train baselines TRPO
    learn(
        env,
        policy_fn,
        max_timesteps=50000,
        timesteps_per_batch=2048,
        max_kl=0.05,
        cg_iters=10,
        cg_damping=0.1,
        vf_iters=5,
        vf_stepsize=0.001,
        gamma=0.995,
        lam=0.995,
        callback=kindred_callback,
    )

    # Safely terminate plotter process
    plot_running.value = 0  # shut down the plotting process
    time.sleep(2)
    pp.join()

    # Shutdown the environment
    env.close()
Ejemplo n.º 34
0
Archivo: OnOff.py Proyecto: Miaou/BBB
    def runperiodic(self, stop, periodOn, periodOff):
        "Invert current state periodicly"
        if self.debug:
            print("Starting periodic checking")
        while not stop.value:
            gpio.output(self.pin, not self.state)
            self.state = not self.state
            if self.debug:
                print(self.pin, "switched state")
            
            if self.state:
                time.sleep(periodOn)
            else:
                time.sleep(periodOff)

        if self.debug:
            print("Ending OnOff checking")

if __name__ == '__main__':
    from threading import Thread
    test = OnOff("P8_15", 0.5, mode=False, debug=True)
    stop = Value('b', False)
    worker = Thread(target=test.runperiodic, args=(stop, 3, 1))
    worker.start()
    try:
        # joining inside the try block lets Ctrl+C actually reach the handler
        worker.join()
    except KeyboardInterrupt:
        print("That's it")
        stop.value = True

Ejemplo n.º 35
0
from multiprocessing import Value

def calculate_impl(formula, result: Value, error: Value):
    try:
        result.value = str(calc(formula))
    except Exception as e:
        result.value = str(e)  # surface the failure message in place of a result
        error.value = True
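A plain Value cannot hold a Python string across processes, so one workable way to drive calculate_impl is through a Manager, whose proxy Values accept arbitrary objects (a hedged sketch, not the project's actual wiring):

from multiprocessing import Manager, Process

def calculate(formula):
    with Manager() as m:
        result = m.Value(str, "")
        error = m.Value(bool, False)
        p = Process(target=calculate_impl, args=(formula, result, error))
        p.start()
        p.join()
        return result.value, error.value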
Ejemplo n.º 36
0
    print("  Total number of requests: {}").format(
        config["limit"] if config["limit"] != 0 else "unlimited")
    print("")
    print("Work details:")
    print("  Priority: {}").format(config["priority"])
    print("  Weight: {}").format(config["weight"])
    print("  Start delay: {} s").format(config["when"])
    print("  Execution time: {} ms").format(config["delay"])
    print("  Average queue depth: {}").format(
        (effective_rate * config["delay"]) / 1000)
    print("")
    print("Starting up...")

    # Create a set of processes, then start them all
    procs = [
        Process(target=httploop, args=(
            ctr + 1,
            config,
            done,
        )) for ctr in range(config["numProcesses"])
    ]
    for proc in procs:
        proc.start()

    # Wait for user to cancel, then signal end and wait for processes to finish
    time.sleep(1)
    raw_input("\n\nEnd run (hit return)")
    done.value = 1
    for proc in procs:
        proc.join()

    print("\nRun complete")
Ejemplo n.º 37
0
    processes = [
        Process(name="thread %s" % x,
                target=thread_embed,
                args=(q_lock, q, v_lock, v)) for x in range(NUMBER_OF_THREADS)
    ]

    for process in processes:
        process.start()

    while not q.empty():
        pass

    print("Done with embedding!")

    v.value = 0

    for file in glob.glob(G_DATAPATH + "*"):
        q.put(file)

    processes = [
        Process(name="thread %s" % x,
                target=thread_upload,
                args=(q_lock, q, v_lock, v)) for x in range(NUMBER_OF_THREADS)
    ]

    for process in processes:
        process.start()

    while not q.empty():
        pass
Ejemplo n.º 38
0
        return finish('success')

cnt = 0
for class_wnid in classes_to_scrape:

    cnt += 1
    class_name = class_info_dict[class_wnid]["class_name"]
    logging.info('********************** (% 3d/% 4d)   Scraping images for class: %s' % (cnt, len(classes_to_scrape), class_name,) )
    url_urls = IMAGENET_API_WNID_TO_URLS(class_wnid)

    time.sleep(0.05)
    resp = requests.get(url_urls)

    classsub = class_wnid + '___' + class_name
    class_folder = os.path.join(imagenet_images_folder, classsub)
    if not os.path.exists(class_folder):
        os.mkdir(class_folder)

    class_images.value = 0

    urls = [url.decode('utf-8') for url in resp.content.splitlines()]
    logging.info('number of images for class: ' + str(len(urls)))
    with open( os.path.join(imglists, classsub+'.txt'), 'w') as f:
        for u in urls:
            f.write(u)
            f.write('\n')

    logging.info("  Multiprocessing workers: {w}".format(w=args.multiprocessing_workers))
    with Pool(processes=args.multiprocessing_workers) as p:
        p.map(get_image, urls)
Ejemplo n.º 39
0
from multiprocessing import Array, Process, Value
from time import sleep
import random

def runProcess(arr, remainActive):
	while remainActive.value:
		for idx in range(len(arr)):
			arr[idx] = random.randint(0,100)
		sleep(1)

def runMonitor(arr, remainActive):
	while remainActive.value:
		print(arr[:])
		sleep(2)

if __name__ == '__main__':
	random.seed()
	arr = Array('i', range(5))
	remainActive = Value('i', 1)

	process = Process(target = runProcess, args = (arr, remainActive))
	monitor = Process(target = runMonitor, args = (arr, remainActive))
	process.daemon = True
	monitor.daemon = True
	process.start()
	monitor.start()

	sleep(10)
	remainActive.value = 0

	process.join()
	monitor.join()
Ejemplo n.º 40
0
def stop_value():
    stop = Value('i', 0)
    yield stop
    stop.value = 1
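The generator above matches pytest's fixture protocol: the test receives the shared flag, and teardown raises it. Hypothetical usage, assuming stop_value is registered with @pytest.fixture:

def test_worker_observes_stop(stop_value):
    # while the test runs, the flag is still down...
    assert stop_value.value == 0
    # ...after the test, teardown sets stop_value.value = 1, letting any
    # worker that polls the flag exit cleanly.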
Ejemplo n.º 41
0
		server_name = "cluster" + os.sep + "server-" + str(server)
		if not os.path.exists(server_name):
			os.makedirs(server_name)
	filter_log_dir = 'cluster' + os.sep + 'server-0/filter'
	if not os.path.exists(filter_log_dir):
		os.makedirs(filter_log_dir)

if __name__ == '__main__':
	# configure the log format used to debug the application
	FORMAT = '%(asctime)-15s %(type)s %(thread)d %(message)s'
	logging.basicConfig(filename='log-server', format=FORMAT, level=logging.DEBUG)
	# total number of simulated servers
	total_servers = 4

	create_dir(total_servers)
	dead = Value('i', 0)
	servers = [Process(target=server_function,
	                   args=('cluster/server-%d' % i, 500000, dead))
	           for i in range(total_servers)]
	for server in servers:
		server.start()
	time.sleep(900)
	dead.value = 1
	for server in servers:
		server.join()

	logger = logging.getLogger('log')
	logger.debug('%s', "All servers have terminated", extra={'type': 'SERVER'})
Ejemplo n.º 42
0
                            # XXX handle over-sized requests
                    except socket.error:
                        pass
                    if EOL1 in requests[fileno] or EOL2 in requests[fileno]:
                        request_q.put({'id': fileno, 'raw': requests[fileno]})
                elif event & select.EPOLLOUT:
                    try:
                        while len(responses[fileno]) > 0:
                            byteswritten = connections[fileno].send(
                                responses[fileno])
                            responses[fileno] = responses[fileno][
                                byteswritten:]
                    except socket.error:
                        pass
                    if len(responses[fileno]) == 0:
                        epoll.modify(fileno, select.EPOLLET)
                        connections[fileno].shutdown(socket.SHUT_RDWR)
                elif event & select.EPOLLHUP:
                    epoll.unregister(fileno)
                    connections[fileno].close()
                    del connections[fileno]

    finally:
        epoll.unregister(serversocket.fileno())
        epoll.close()
        serversocket.close()

        is_running.value = 0
        for x in request_handlers:
            x.join()
Ejemplo n.º 43
0
    while run.value:
        pulse_rate = 60.0 / (bpm.value * 24)
        midi_output.send(clock_tick)
        t1 = perf_counter()
        if bpm.value <= 3000:
            sleep(pulse_rate * 0.8)
        t2 = perf_counter()
        while (t2 - t1) < pulse_rate:
            t2 = perf_counter()


if __name__ == '__main__':
    midi_ports = mido.get_output_names()
    print(midi_ports)

    shared_bpm = Value('i', 60)
    run_code = Value('i', 1)
    midi_clock_generator_proc = Process(target=midi_clock_generator, args=(midi_ports[0], shared_bpm, run_code))
    midi_clock_generator_proc.start()
    # sleep(.1)
    while run_code.value:
        bpm = input('Enter Tempo in BPM-> ')
        if bpm.isdigit():
            shared_bpm.value = int(bpm)
        else:
            run_code.value = False
    print('Shutting down process')
    midi_clock_generator_proc.join()
    midi_clock_generator_proc.close()
    print('exiting')
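The pulse rate above follows the MIDI beat clock convention of 24 pulses per quarter note, so at 60 BPM each tick is about 41.7 ms:

def tick_interval(bpm, ppqn=24):
    return 60.0 / (bpm * ppqn)

assert abs(tick_interval(60) - 0.0417) < 1e-3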
Ejemplo n.º 44
0
        replay_data_read_pipe, replay_data_write_pipe = Pipe(False)
        replay_data_distribution_pipes.append(
            (replay_data_read_pipe, replay_data_write_pipe))
        p = Process(target=RunActor, args=(i, gamma, \
                                           actor_cmd_write_pipe, weights_read_pipe, replay_data_write_pipe, \
                                           cancelation_token, training_active_flag))
        actor_processess.append(p)
        p.start()

    print("Awaiting buffer fill up")
    # 3. Fill up replay buffer
    while len(exp_buffer) < learner_batch_size * learner_prefetch_batches:
        sleep(1)

    # 4. Start learning
    buffer_ready.value = 1
    input("training networks.\nPress enter to finish\n\n")
    cancelation_token.value = 1

    actor_cmd_read_pipe.close()
    actor_cmd_write_pipe.close()

    actor_cmd_processor_thread.join()

    learner_cmd_read_pipe.close()
    learner_cmd_write_pipe.close()
    learner_replay_data_write_pipe.close()
    learner_replay_data_read_pipe.close()
    learner_priorities_read_pipe.close()
    learner_priorities_write_pipe.close()
Ejemplo n.º 45
0
                                     args=(keym, weatherValue, cont))
    homeList = [
        multiprocessing.Process(target=home_p.home,
                                args=(keyh, keym, weatherValue, cont))
        for i in range(nbHomes)
    ]
    weather = multiprocessing.Process(target=weather_p.weather,
                                      args=(weatherValue, cont))

    weather.start()

    market.start()
    for currentHome in homeList:
        currentHome.start()

    time.sleep(temps)
    cont.value = 0

    market.join()

    #for currentHome in homeList:
    #    currentHome.join()

    weather.join()

    mqm.remove()
    mqh.remove()
    print("--------------- Fin de la simulation ---------------")
Ejemplo n.º 46
0
    DAQ_handle = DAQ.Init()
    DAQ.DAC_Write(DAQ_handle, Laser_Port, 0)         # Laser is off
    DAQ.DAC_Write(DAQ_handle, Shutter_Port, 0)       # Shutter is closed
    
    
    # ############# Initializing the main loop for the optrode ##############
    DAC_Sampl_Index = 0    
    Spec_Sampl_Index = 0

    
    # ##################### Initializing the variables ###################
    Integration_list = [8000, 16000, 32000, 64000, 128000, 256000, 512000]
    No_DAC_Sample = 1400  # Number of photodiode samples per iteration of the laser exposure. Every sample takes ~3 ms.
    SB_Is_Done = Value('i', 0)
    SB_Current_Record = Array('f', np.zeros(len(Spec_handle.wavelengths()), dtype=float))
    SB_Is_Done.value = 0 
    SB_Full_Records = np.zeros(shape=(len(Spec_handle.wavelengths()), len(Integration_list) ), dtype = float )
    read_signal = np.zeros(No_DAC_Sample*len(Integration_list))
    read_time   = np.zeros(No_DAC_Sample*len(Integration_list))
    
    
    # ########### The file containing the records (HDF5 format) ###########
    File_name = "Optrode_Recording_At" + str('%i' % time.time()) + ".hdf5"
    f = h5py.File(File_name, "w")
    Spec_sub1 = f.create_group("Spectrumeter")
    Spec_specification = Spec_sub1.create_dataset("Spectrumeter", (10,), dtype='f')
    Spec_specification.attrs['Serial Number'] = np.string_(Spec_handle.serial_number)
    Spec_specification.attrs['Model'] = np.string_(Spec_handle.model)
    Spec_wavelength = f.create_dataset('Spectrumeter/Wavelength', data=Spec_handle.wavelengths())
    
Ejemplo n.º 47
0
import argparse
import csv
import json
import logging
import os
import time

import numpy as np
import requests
from requests.exceptions import (ConnectionError, InvalidURL, MissingSchema,
                                 ReadTimeout, TooManyRedirects)
from multiprocessing import Lock, Value
from multiprocessing.pool import ThreadPool
from tqdm import tqdm


def main():
    parser = argparse.ArgumentParser(description='ImageNet image scraper')
    parser.add_argument('-scrape_only_flickr', default=True, type=lambda x: (str(x).lower() == 'true'))
    parser.add_argument('-number_of_classes', default=10, type=int)
    parser.add_argument('-images_per_class', default=10, type=int)
    parser.add_argument('-data_root', default='', type=str)
    parser.add_argument('-use_class_list', default=False, type=lambda x: (str(x).lower() == 'true'))
    parser.add_argument('-class_list', default=[], nargs='*')
    parser.add_argument('-debug', default=False, type=lambda x: (str(x).lower() == 'true'))

    parser.add_argument('-multiprocessing_workers', default=8, type=int)

    args, args_other = parser.parse_known_args()

    if args.debug:
        logging.basicConfig(filename='imagenet_scraper.log', level=logging.DEBUG)

    if len(args.data_root) == 0:
        logging.error("-data_root is required to run downloader!")
        exit()

    def imagenet_api_wnid_to_urls(wnid):
        return f'http://www.image-net.org/api/imagenet.synset.geturls?wnid={wnid}'

    current_folder = os.path.dirname(os.path.realpath(__file__))

    class_info_json_filename = 'imagenet_class_info.json'
    class_info_json_filepath = os.path.join(current_folder, class_info_json_filename)

    with open(class_info_json_filepath) as class_info_json_f:
        class_info_dict = json.load(class_info_json_f)

    classes_to_scrape = []

    if args.use_class_list:
        for item in args.class_list:
            if item not in class_info_dict:
                logging.error(f'Class {item} not found in ImageNet')
                exit()
            classes_to_scrape.append(item)

    else:
        potential_class_pool = []
        for key, val in class_info_dict.items():

            if args.scrape_only_flickr:
                if int(val['flickr_img_url_count']) * 0.9 > args.images_per_class:
                    potential_class_pool.append(key)
            else:
                if int(val['img_url_count']) * 0.8 > args.images_per_class:
                    potential_class_pool.append(key)

        if len(potential_class_pool) < args.number_of_classes:
            logging.error(
                f"With {args.images_per_class} images per class there are only "
                f"{len(potential_class_pool)} classes to choose from.")
            logging.error("Decrease the number of classes or the images per class.")
            exit()

        picked_classes_idxes = np.random.choice(len(potential_class_pool), args.number_of_classes, replace=False)

        for idx in picked_classes_idxes:
            classes_to_scrape.append(potential_class_pool[idx])

    print("Picked the following clases: \nCount: %s" % len(classes_to_scrape))
    print([class_info_dict[class_wnid]['class_name'] for class_wnid in classes_to_scrape])

    if not os.path.isdir(args.data_root):
        os.mkdir(args.data_root)

    def add_debug_csv_row(row):
        with open('stats.csv', "a") as csv_f:
            csv_writer = csv.writer(csv_f, delimiter=",")
            csv_writer.writerow(row)

    class MultiStats:
        def __init__(self):
            self.lock = Lock()

            self.stats = dict(
                all=dict(
                    tried=Value('d', 0),
                    success=Value('d', 0),
                    time_spent=Value('d', 0),
                ),
                is_flickr=dict(
                    tried=Value('d', 0),
                    success=Value('d', 0),
                    time_spent=Value('d', 0),
                ),
                not_flickr=dict(
                    tried=Value('d', 0),
                    success=Value('d', 0),
                    time_spent=Value('d', 0),
                )
            )

        def inc(self, cls, stat, val):
            with self.lock:
                self.stats[cls][stat].value += val

        def get(self, cls, stat):
            with self.lock:
                ret = self.stats[cls][stat].value
            return ret

    multi_stats = MultiStats()

    if args.debug:
        row = [
            "all_tried",
            "all_success",
            "all_time_spent",
            "is_flickr_tried",
            "is_flickr_success",
            "is_flickr_time_spent",
            "not_flickr_tried",
            "not_flickr_success",
            "not_flickr_time_spent"
        ]
        add_debug_csv_row(row)

    def add_stats_to_debug_csv():
        row = [
            multi_stats.get('all', 'tried'),
            multi_stats.get('all', 'success'),
            multi_stats.get('all', 'time_spent'),
            multi_stats.get('is_flickr', 'tried'),
            multi_stats.get('is_flickr', 'success'),
            multi_stats.get('is_flickr', 'time_spent'),
            multi_stats.get('not_flickr', 'tried'),
            multi_stats.get('not_flickr', 'success'),
            multi_stats.get('not_flickr', 'time_spent'),
        ]
        add_debug_csv_row(row)

    def print_stats(cls, print_func):
        actual_all_time_spent = time.time() - scraping_t_start.value
        processes_all_time_spent = multi_stats.get('all', 'time_spent')

        if processes_all_time_spent == 0:
            actual_processes_ratio = 1.0
        else:
            actual_processes_ratio = actual_all_time_spent / processes_all_time_spent

        print_func(f'STATS For class {cls}:')
        print_func(f' tried {multi_stats.get(cls, "tried")} urls with'
                   f' {multi_stats.get(cls, "success")} successes')

        if multi_stats.get(cls, "tried") > 0:
            print_func(
                f'{100.0 * multi_stats.get(cls, "success") / multi_stats.get(cls, "tried")}% '
                f'success rate for {cls} urls ')
        if multi_stats.get(cls, "success") > 0:
            print_func(
                f'{multi_stats.get(cls, "time_spent") * actual_processes_ratio / multi_stats.get(cls, "success")}'
                f' seconds spent per {cls} successful image download')

    lock = Lock()
    url_tries = Value('d', 0)
    scraping_t_start = Value('d', time.time())
    class_folder = ''
    class_images = Value('d', 0)

    def get_image(img_url):
        def check():
            with lock:
                cls_imgs = class_images.value

            if cls_imgs >= args.images_per_class:
                return True

        if len(img_url) <= 1:
            return

        if check():
            return

        logging.debug(img_url)

        cls = ''

        if 'flickr' in img_url:
            cls = 'is_flickr'
        else:
            cls = 'not_flickr'
            if args.scrape_only_flickr:
                return

        t_start = time.time()

        def finish(status):
            t_spent = time.time() - t_start
            multi_stats.inc(cls, 'time_spent', t_spent)
            multi_stats.inc('all', 'time_spent', t_spent)

            multi_stats.inc(cls, 'tried', 1)
            multi_stats.inc('all', 'tried', 1)

            if status == 'success':
                multi_stats.inc(cls, 'success', 1)
                multi_stats.inc('all', 'success', 1)

            elif status == 'failure':
                pass
            else:
                logging.error(f'No such status {status}!!')
                exit()
            return

        with lock:
            url_tries.value += 1
            if url_tries.value % 250 == 0:
                print(f'\nScraping stats:')
                print_stats('is_flickr', print)
                print_stats('not_flickr', print)
                print_stats('all', print)
                if args.debug:
                    add_stats_to_debug_csv()

        try:
            img_resp = requests.get(img_url, timeout=1, verify=False)
        except ConnectionError:
            logging.debug(f"Connection Error for url {img_url}")
            return finish('failure')
        except ReadTimeout:
            logging.debug(f"Read Timeout for url {img_url}")
            return finish('failure')
        except TooManyRedirects:
            logging.debug(f"Too many redirects {img_url}")
            return finish('failure')
        except MissingSchema:
            return finish('failure')
        except InvalidURL:
            return finish('failure')

        if 'content-type' not in img_resp.headers:
            return finish('failure')

        if 'image' not in img_resp.headers['content-type']:
            logging.debug("Not an image")
            return finish('failure')

        if len(img_resp.content) < 1000:
            return finish('failure')

        logging.debug(img_resp.headers['content-type'])
        logging.debug(f'image size {len(img_resp.content)}')

        img_name = img_url.split('/')[-1]
        img_name = img_name.split("?")[0]

        if len(img_name) <= 1:
            return finish('failure')

        img_file_path = os.path.join(class_folder, img_name)
        logging.debug(f'Saving image in {img_file_path}')

        if check():
            return

        with open(img_file_path, 'wb') as img_f:
            img_f.write(img_resp.content)

            with lock:
                class_images.value += 1

            logging.debug(f'Scraping stats')
            print_stats('is_flickr', logging.debug)
            print_stats('not_flickr', logging.debug)
            print_stats('all', logging.debug)

            return finish('success')

    print(f"Multiprocessing workers: {args.multiprocessing_workers}")
    for class_wnid in tqdm(classes_to_scrape):

        class_name = class_info_dict[class_wnid]["class_name"]
        url_urls = imagenet_api_wnid_to_urls(class_wnid)

        time.sleep(0.05)
        resp = requests.get(url_urls, verify=False)

        class_folder = os.path.join(args.data_root, class_name)
        if not os.path.exists(class_folder):
            os.mkdir(class_folder)

        class_images.value = 0

        urls = [url.decode('utf-8') for url in resp.content.splitlines()]

        with ThreadPool(processes=args.multiprocessing_workers) as p:
            p.map(get_image, urls)
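get_image relies on module-level shared state (lock, class_images, class_folder), which works here because ThreadPool workers share the parent's memory. Swapping in a process Pool would require handing the shared objects over explicitly, for example via an initializer (a hedged sketch):

from multiprocessing import Pool

def _init_worker(shared_lock, shared_count):
    global lock, class_images
    lock, class_images = shared_lock, shared_count

# Pool(processes=args.multiprocessing_workers,
#      initializer=_init_worker, initargs=(lock, class_images))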
Ejemplo n.º 48
0
#             'lfc-lhcb.grid.sara.nl',
#             'lfclhcb.pic.es',
#             'lhcb-lfc.gridpp.rl.ac.uk']
lfcHosts = ['prod-lfc-lhcb-ro.cern.ch']


# path = "/lhcb/LHCb"
path = '/lhcb/user/c/chaen'
print "Queueing task for directory", path, lfcHosts[0]


writerProc = Process( target = writer, args = ( 'lfc_dfc.out', writerQueue, stopFlag ) )

writerProc.start()


result = pPool.createAndQueueTask( processDir, [path , writerQueue, False, lfcHosts[0]], callback = finalizeDirectory )
if not result['OK']:
  print "Failed queueing", path

for i in range(20):
  pPool.processResults()
  time.sleep(1)

pPool.processAllResults( timeout = 300 )

stopFlag.value = 1
writerQueue.put( "Exit" )
writerProc.join()

Ejemplo n.º 49
0
	def run(self, verbose = False, cmp_packet = None, scapy_packet = None):
		'''execute the test case.

		arguments:
		verbose -- if set to True, show debug output (default: False)
		cmp_packet -- packet of type CmpLayer (default: None)
		scapy_packet -- packet of type scapy.Packet (default: None)

		cmp_packet and scapy_packet are used by pre_run function, if such a function is given.

		return:
		True if the test case succeeded (access to the cmp_packet and the received packet via match_packets())
		False if the test case failed (no given cmp_packet matched the received packet(s))
		'''

		if self.pre_run_processing is not None and callable(self.pre_run_processing):
			self.pre_run_processing(self, cmp_packet, scapy_packet)

		if len(self._tx_packets) == 0 and len(self._rx_packets) == 0:
			# the empty test case always succeeds
			return True

		max_rx_timeout = 0
		rx_packets_by_if = {}
		rxps = []
		ret_queue = Queue()
		ret_queue_lock = Lock()
		rx_timeout_start = Value('d', 0.0)
		rx_timeout_start_lock = Lock()
		self._match_packets = None

		if len(self._rx_packets) > 0:
			for rxp in self._rx_packets:
				if rxp.timeout > max_rx_timeout:
					max_rx_timeout = rxp.timeout
				if rxp.interface['name'] not in rx_packets_by_if:
					rx_packets_by_if[rxp.interface['name']] = []
				rx_packets_by_if[rxp.interface['name']].append(rxp)
			
			for interface in rx_packets_by_if:
				rxps.append(Process(target=self._rx_worker, args=(interface, rx_packets_by_if[interface], rx_timeout_start, rx_timeout_start_lock, max_rx_timeout, ret_queue, ret_queue_lock)))

		if verbose is True:
			print('---')
			print('test case: %s' % (self.name))
			print('send-via-tcp-socket: %s' % (str(self._use_tcp)))
			print('tx-packets: %d' % (len(self._tx_packets)))
			print('rx-packets: %d' % (len(self._rx_packets)))
			print('max-rx-timeout: %d' % (max_rx_timeout))

		txp = None
		if len(self._tx_packets) > 0:
			txp = Process(target=self._tx_worker, args=(self,))		

		for rxp in rxps:
			rxp.start()

		''' wait a small amount of time to give the sniffers a chance to settle down 
		TODO: how to work around ?!? '''
		time.sleep(0.1)

		if txp is not None:
			txp.start()

			# wait until all packets are send or we got a packet that matches our criteria
			while True:
				txp.join(1)
				if txp.is_alive() is False or ret_queue.empty() is False:
					break

		# wait if one of the rx processes catches a packet or we run in timeout
		if ret_queue.empty() is True:

			with rx_timeout_start_lock:
				rx_timeout_start.value = scapy.time.time()
				tstart = rx_timeout_start.value

			while True:
				for rxp in rxps:
					rxp.join(1)

				if (ret_queue.empty() is False) or ((scapy.time.time() - tstart)*1000 >= max_rx_timeout):
					break

		# stop all processes
		if txp is not None and txp.is_alive():
			txp.terminate()

		with ret_queue_lock:
			for rxp in rxps:
				if rxp.is_alive():
					rxp.terminate()

		if (self._use_tcp == True) and (self.sock_conf['socket'] is not None):
			self.sock_conf['socket'].close()
			self.sock_conf['socket'] = None
			if DO_DEBUG:
				print('TCP socket closed.')

		if ret_queue.empty() and len(self._rx_packets) > 0:
			if verbose is True:
				print('test-result: failed')
			return False
		else:
			if verbose is True:
				print('test-result: success')

			if len(self._rx_packets) > 0:
				self._match_packets = ret_queue.get()
				pckts = self._match_packets

				if DO_DEBUG:

					while True:
						print('---')
						print('received packet: ' +  str(pckts['rx_packet']))
						print('matched packet: ' +  str(pckts['cmp_packet']))

						if ret_queue.empty():
							break
						pckts = ret_queue.get()

			return True
			
Ejemplo n.º 50
0
p2Speed = Value('d', 3)

# Start Process to start first array of lights
p1 = Process(target=lightsarray1, args=(p1Speed,))
p1.start()

# Start Process to start second array of lights
p2 = Process(target=lightsarray2, args=(p2Speed,))
p2.start()

Process(target=printValues, args=(p1Speed, p2Speed, )).start()

p1ButtonValue = 0
p2ButtonValue = 0
p3ButtonValue = 0

while True:
    if GPIO.input(21) == 1 and p1ButtonValue == 0:
        p1Speed.value = p1Speed.value - (p1Speed.value / 5)
    if GPIO.input(20) == 1 and p2ButtonValue == 0:
        p2Speed.value = p2Speed.value - (p2Speed.value / 5)
    if GPIO.input(16) == 1 and p3ButtonValue == 0:
        p2Speed.value = 1
        p1Speed.value = 3
    p1ButtonValue = GPIO.input(21)
    p2ButtonValue = GPIO.input(20)
    p3ButtonValue = GPIO.input(16)

# Exit the program
exit()
Ejemplo n.º 51
0
            Shutter_CloseDelay = Blue_Shutter_CloseDelay
            break
        else:
            print 'Wrong input!'


    # ##################### Initializing the variables ###################
    #Integration_list = [8000, 16000, 32000, 64000, 128000, 256000, 512000, 1024000, 2048000]
    Integration_list_sec = [0.008, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512]
    Integration_marging = 0.2       # (in seconds) how long before the external edge trigger is sent to the spectrometer, which has already started its integration period
    #Integration_base = Integration_list_sec[-1]*1000000 + Integration_marging*2000000    # integration time applied to all trials
    Integration_base = 2*1000000    # integration time applied to all trials, in microseconds
    No_DAC_Sample = 10000  # Number of photodiode samples per iteration of the laser exposure. Every sample takes ~0.6 ms.
    SB_Is_Done = Value('i', 0)
    SB_Current_Record = Array('d', np.zeros(len(Spec_handle.wavelengths()), dtype=float))
    SB_Is_Done.value = 0
    Timer_Is_Done = Value('i', 0)
    Timer_Is_Done.value = 0
    Timer_Is_Done2 = Value('i', 0)
    Timer_Is_Done2.value = 0
    SB_Full_Records = np.zeros(shape=(len(Spec_handle.wavelengths()), len(Integration_list_sec)+1 ), dtype = float )
    read_signal = np.zeros(No_DAC_Sample*len(Integration_list_sec))
    read_time   = np.zeros(No_DAC_Sample*len(Integration_list_sec))
    '''
    Open_delay = np.zeros(50)
    Close_delay = np.zeros(50)
    '''
    read_signal_ref = np.zeros(No_DAC_Sample*len(Integration_list_sec))
    read_time_ref   = np.zeros(No_DAC_Sample*len(Integration_list_sec))

Ejemplo n.º 52
0
def analyze():
    """ Analyze setup process to create children """
    start = datetime.datetime.now()
    filename = "data/dataset.csv"

    # UFF!!! ALL THESE DECLARATIONS!!!
    #   So integers are faster than dictionaries, since hash lookups
    #   take clock cycles.  Next, we need to declare a set of counter
    #   variables for each child process we're about to spawn.  We need
    #   return values from the processes, so we need to allocate shared
    #   memory for them.  That's what we're doing here.  I could have
    #   written a loop to do this in fewer lines of code, but it results
    #   in more clock cycles to complete the task.

    # I tried this with global variables before, with all processes
    #   operating on them, and while it appeared to work, once a process
    #   was gone so were its results.  I also tried passing a single set
    #   of shared memory variables to all consume functions; this gave
    #   very strange results, the result would come up just a bit
    #   different every run.  I'd be expecting 8362, and I'd get 8320-8362.

    found_1 = Value("i", 0)
    _2013_1 = Value("i", 0)
    _2014_1 = Value("i", 0)
    _2015_1 = Value("i", 0)
    _2016_1 = Value("i", 0)
    _2017_1 = Value("i", 0)
    _2018_1 = Value("i", 0)

    found_2 = Value("i", 0)
    _2013_2 = Value("i", 0)
    _2014_2 = Value("i", 0)
    _2015_2 = Value("i", 0)
    _2016_2 = Value("i", 0)
    _2017_2 = Value("i", 0)
    _2018_2 = Value("i", 0)

    found_3 = Value("i", 0)
    _2013_3 = Value("i", 0)
    _2014_3 = Value("i", 0)
    _2015_3 = Value("i", 0)
    _2016_3 = Value("i", 0)
    _2017_3 = Value("i", 0)
    _2018_3 = Value("i", 0)

    # Loop was used to run the program multiple times for better results on
    #   a higher-performing system.  A single test of say .5 sec was less
    #   reliable than 10 tests at 4.95 seconds.  Reset to 1 loop for submission
    #   in case grading is done on a potato.
    for _ in range(1):

        # Need to make sure our pile of variables are 0'd out.
        found_1.value = 0
        _2013_1.value = 0
        _2014_1.value = 0
        _2015_1.value = 0
        _2016_1.value = 0
        _2017_1.value = 0
        _2018_1.value = 0

        found_2.value = 0
        _2013_2.value = 0
        _2014_2.value = 0
        _2015_2.value = 0
        _2016_2.value = 0
        _2017_2.value = 0
        _2018_2.value = 0

        found_3.value = 0
        _2013_3.value = 0
        _2014_3.value = 0
        _2015_3.value = 0
        _2016_3.value = 0
        _2017_3.value = 0
        _2018_3.value = 0

        # Here we define and spawn each process.
        process1 = Process(
            target=consume_1,
            args=(
                found_1,
                _2013_1,
                _2014_1,
                _2015_1,
                _2016_1,
                _2017_1,
                _2018_1,
                FIRST,
                filename,
            ),
        )
        process1.start()

        process2 = Process(
            target=consume_2,
            args=(
                found_2,
                _2013_2,
                _2014_2,
                _2015_2,
                _2016_2,
                _2017_2,
                _2018_2,
                SECOND,
                FIRST,
                filename,
            ),
        )
        process2.start()

        process3 = Process(
            target=consume_2,
            args=(
                found_3,
                _2013_3,
                _2014_3,
                _2015_3,
                _2016_3,
                _2017_3,
                _2018_3,
                THIRD,
                FIRST + SECOND,
                filename,
            ),
        )
        process3.start()

        # Wait for the processes to finish
        process1.join()
        process2.join()
        process3.join()

        # Add up the values of all that shared ram
        found_total = found_1.value + found_2.value + found_3.value
        _2013_total = _2013_1.value + _2013_2.value + _2013_3.value
        _2014_total = _2014_1.value + _2014_2.value + _2014_3.value
        _2015_total = _2015_1.value + _2015_2.value + _2015_3.value
        _2016_total = _2016_1.value + _2016_2.value + _2016_3.value
        _2017_total = _2017_1.value + _2017_2.value + _2017_3.value
        _2018_total = _2018_1.value + _2018_2.value + _2018_3.value

        # And we get results
        display_results(
            found_total,
            _2013_total,
            _2014_total,
            _2015_total,
            _2016_total,
            _2017_total,
            _2018_total,
        )

    end = datetime.datetime.now()

    return (
        start,
        end,
        {
            "2013": _2013_total,
            "2014": _2014_total,
            "2015": _2015_total,
            "2016": _2016_total,
            "2017": _2017_total,
            "2018": _2018_total,
        },
        found_total,
    )
Ejemplo n.º 53
0
# NOTE: the original listing for this example arrived with its lines sorted
# out of order; only the module docstrings, imports, globals, and function
# signatures below are recoverable.
"""draw a visual AI window"""
"""2 processes: NedProcess, DrawProcess"""
from multiprocessing import Process, Value
from getmylogger import silent_logger, loud_logger
from Text import Text
from time import sleep
import exceptions
import gui_helpers as g
import os, sys
import pygame, pyconsole
import random
import ut

__version__ = "base"
USER_CONSOLE_ENABLED = True  # only checked when drawing

globals()["bigned"] = None
globals()["dlog"] = silent_logger("drawing_examples")  # silent drawing logger
globals()["llog"] = loud_logger("drawing_examples")    # loud logger (shows in console)
globals()["llog"].info("log setup! [ SUCCESS ] ")
globals()["selected_component_id"] = 0
globals()["main_breakout"] = Value("d", 0)  # shared flag; the draw loop runs while it is 1

def Draw(theNed): ...                   # main draw loop ("while main_breakout.value == 1")
def extinguish_and_deload(): ...        # signals bigned to extinguish and tears down
def get_color_dimmer(color, a=20): ...  # returns [color[0]-a, color[1]-a, color[2]-a]
def get_color_inverse(color): ...       # returns [abs(250-c) for c in color]
def init_color_tweeners(): ...
def init_console(screen, key_calls): ...
def setup_color_tweens(): ...           # a list of tweens set up by default
Ejemplo n.º 54
0
mouse.setVisible(False)

trialClock = core.Clock()

stim1 = visual.PatchStim(win=mywin, mask='gauss', size=0.5, pos=[0,0], sf=0, color='black')
stim2 = visual.PatchStim(win=mywin, mask='gauss', size=0.5, pos=[-20,0], sf=0, color='black') 
stim3 = visual.PatchStim(win=mywin, mask='gauss', size=0.5, pos=[0,10], sf=0, color='black') 
stim4 = visual.PatchStim(win=mywin, mask='gauss', size=0.5, pos=[20,0], sf=0, color='black') 
stim5 = visual.PatchStim(win=mywin, mask='gauss', size=0.5, pos=[0,-10], sf=0, color='black') 

stim = stim1
stims = [stim1,stim2,stim3,stim4]

n_stims = len(stims)
n_reps = 10
detect.value = 1
detector.start()
#for n in range(n_reps):
#	vel=[]
#	stim = stims[np.mod(n,n_stims)]
t = 0 
trialClock.reset()
while t < 3:
	stim.draw()
	mywin.flip()
	t=trialClock.getTime()
	
detect.value = 0
detector.join()

v=[]
Ejemplo n.º 55
0
    for i in range(0, int(CONFIG['BOT_CONFIG']['workers'])):
        worker_process.append(Process(target=process_updates))
        worker_process[i].start()
    time_worker = ThreadProcess(target=check_time_args)
    time_worker.start()
    while RUNNING.value:
        time.sleep(30)
        # Iterate over a snapshot: deleting from a list while enumerating it
        # skips the element that follows each deletion.
        for worker in list(worker_process):
            if not worker.is_alive():
                worker_process.remove(worker)
                worker_process.append(Process(target=process_updates))
                worker_process[-1].start()
        if not time_worker.is_alive():
            time_worker = ThreadProcess(target=check_time_args)
            time_worker.start()
        if not get_update_process.is_alive():
            get_update_process = Process(target=get_updates)
            get_update_process.start()
    get_update_process.join()
    time_worker.join()
    for worker in worker_process:
        worker.join()


if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print("\n\nShutting Down.....")
        RUNNING.value = 0
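The fragment leans on several names defined outside the excerpt (RUNNING, CONFIG, the worker functions, ThreadProcess). One guess at minimal stand-ins, all hypothetical:

import time
from multiprocessing import Process, Value
from threading import Thread as ThreadProcess   # assumption: a Thread under another name

RUNNING = Value('i', 1)                          # cleared by the KeyboardInterrupt handler
CONFIG = {'BOT_CONFIG': {'workers': '4'}}

def process_updates():
    while RUNNING.value:
        time.sleep(1)        # stand-in for handling queued updates

def get_updates():
    while RUNNING.value:
        time.sleep(1)        # stand-in for polling the API

def check_time_args():
    while RUNNING.value:
        time.sleep(1)        # stand-in for timed jobs

get_update_process = Process(target=get_updates)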
Ejemplo n.º 56
0
s.listen(1)
conn, addr = s.accept()

while True:
    try:
        # These three lines are useful for debugging whether to use MSB or LSB
        # as the first parameter of hx.set_reading_format("LSB", "MSB").
        # Comment out "val = hx.get_weight(5)" and "print val" and uncomment
        # these three lines to see what gets printed.

        # np_arr8_string = hx.get_np_arr8_string()
        # binary_string = hx.get_binary_string()
        # print binary_string + " " + np_arr8_string

        lock3.acquire()

        load1_final_val.value = 0
        load2_final_val.value = 0
        load3_final_val.value = 0
        load4_final_val.value = 0

        lock1.acquire()
        try:
            load1_final_val.value = load1_per.value
        finally:
            lock1.release()

        lock2.acquire()
        try:
            load2_final_val.value = load2_per.value
        finally:
            lock2.release()
        Power_Signal[Power_Index[0]], Power_Time[Power_Index[0]] = Power_meter.readPower()
        Power_Index[0] = Power_Index[0] + 1
    Power_Is_Read.value = 1


if __name__ == "__main__":

    PhotoDiod_Port = "AIN1"
    Spec1 = SBO.open()
    Integration_Time = 2                                        # Integration time in ms
    Spec1.setTriggerMode(0)                                      # 0 = free-running mode
    Spec1.setIntegrationTime(Integration_Time*1000)              # the library expects microseconds
    DAQ1 = DAQ.open()
    Power_meter = P100.open()
    # Shared status flags; Value('i', 0) is already initialised to zero.
    Spec_Is_Read = Value('i', 0)
    Spec_Is_Done = Value('i', 0)
    DAQ_Is_Read = Value('i', 0)
    Power_Is_Read = Value('i', 0)
    Timer_Is_Over = Value('i', 0)

    DurationOfReading = 3.12      # Duration of reading in seconds.
    No_DAC_Sample =   int(round(DurationOfReading*1000/1.7))                # Samples for the DAQ ADC (AINx): it reads AIN1-3 roughly every 1.5 ms and AIN0 every 2.4 ms.
    No_Power_Sample = int(round(DurationOfReading*1000/5.1))                # Samples for the P100D power meter, which delivers a reading roughly every 5.1 ms.
    No_Spec_Sample =  int(round(DurationOfReading*1000/(Integration_Time))) # Samples for the spectrometer: one reading per integration time.

    Current_Spec_Record = Array('d', len(Spec1.Handle.wavelengths()))       # flat, zero-initialised shared buffer (Array wants a length or a 1-D sequence)
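When numpy maths is needed on such a shared buffer, a common pattern (a sketch, not taken from this script) is to view the Array's memory as an ndarray:

import numpy as np
from multiprocessing import Array

n = 2048                                    # e.g. len(Spec1.Handle.wavelengths())
shared = Array('d', n)                      # zero-initialised, lock-protected buffer

with shared.get_lock():
    view = np.frombuffer(shared.get_obj())  # ndarray view over the same memory, no copy
    view[:] = 0.0                           # numpy writes are visible to all processes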
Ejemplo n.º 58
0
from multiprocessing import Value
from multiprocessing import Process
import time

# Create a shared-memory integer Value with an initial value of 10000.
# Without a lock, the two read-modify-write loops interleave and the
# final result becomes unpredictable.

shm_v = Value('i', 10000)


def process_task2():
    for _ in range(1000000):
        v = shm_v.value  # read the shared value
        v += 1
        shm_v.value = v  # write it back; this read-modify-write is not atomic


p = Process(target=process_task2)
p.start()

for _ in range(1000000):
    v = shm_v.value
    v -= 1
    shm_v.value = v

print("共享内存的值是:", shm_v.value)  # 10000

p.join()
print("程序正常退出")
Ejemplo n.º 59
0
            # Fragment starts mid-loop: shortdata, gyro_divider, node, acc,
            # DEG2RAD, bufferFlag and the shared plot buffers come from
            # earlier in the source.
            gyro[1] = (shortdata[4] / gyro_divider -
                       node.gyroBias[1]) * DEG2RAD
            gyro[2] = (shortdata[5] / gyro_divider -
                       node.gyroBias[2]) * DEG2RAD
            gyro = np.asmatrix(gyro)
            # Smooth the data (acc and gyro); an SMA with buffer size 30
            # proved not quite enough for the gyro.
            filteredData = SMA(np.concatenate((acc, gyro), axis=1), 30)
            if bufferFlag == 0:
                continue

            rawFilteredData = np.concatenate((acc, filteredData[0, 0:3]),
                                             axis=1)
            ret = np.concatenate((ret, rawFilteredData), axis=0)

            if filteredData.shape[0] != 0:
                if ret.shape[0] == numberOfPlot:
                    ret = ret.transpose()
                    # print ret
                    plot1[0:numberOfPlot] = ret[0, :].tolist()[0]
                    plot2[0:numberOfPlot] = ret[1, :].tolist()[0]
                    plot3[0:numberOfPlot] = ret[2, :].tolist()[0]
                    plot4[0:numberOfPlot] = ret[3, :].tolist()[0]
                    plot5[0:numberOfPlot] = ret[4, :].tolist()[0]
                    plot6[0:numberOfPlot] = ret[5, :].tolist()[0]
                    Idx.value = numberOfPlot
                    staticFlag.value = False
                    ret = np.zeros((0, 6))

        else:
            continue
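The SMA helper (and the bufferFlag it apparently sets once its window fills) is not part of this fragment. A minimal sketch of a simple moving average over the last N rows, purely an assumption about its behaviour:

import numpy as np

_sma_buffer = []


def SMA(row, window):
    # Keep the last `window` rows seen so far and return their mean as a 1xN matrix.
    _sma_buffer.append(np.asarray(row).ravel())
    if len(_sma_buffer) > window:
        _sma_buffer.pop(0)
    return np.asmatrix(np.mean(_sma_buffer, axis=0))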
Ejemplo n.º 60
0
        # wait for run() to finish
        while not self.is_done:
            time.sleep(0.01)
        # done
        print('child process finished')
        super(DataSource, self).close()

    def end(self):
        print('child process about to end')
        # tell run() to stop
        self.need_end = False


if __name__ == '__main__':

    # prepare to start
    need_end.value = False
    p1 = Process(target=one_channel, args=('1', need_end))
    #p1 = DataSource('asdf')
    p1.start()
    print('main process running')
    time.sleep(5)
    print('main process: stopping the child before exit')
    # signal the child to end
    need_end.value = True
    print('in main process, need_end =', need_end.value)
    #p1.need_end = False
    #p1.terminate()
    #print(p1.need_end)
    #p1.close()        # end the child process
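Neither one_channel nor the need_end Value is defined in the fragment. A plausible sketch of both, treated as assumptions rather than the original code:

import time
from multiprocessing import Process, Value

need_end = Value('b', False)    # shared stop flag, assumed to live at module level


def one_channel(name, need_end):
    # Poll the shared flag and exit cleanly once the parent flips it to True.
    while not need_end.value:
        time.sleep(0.1)         # stand-in for real per-channel work
    print('channel', name, 'stopping')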