Example No. 1
from multiprocessing import Value


class Counter(object):
    """
    A process-safe counter providing atomic incrementAndGet() and value() methods.
    """
    def __init__(self, initval=0):
        """
        Initialize this counter.
        Args:
            initval (int): the initial value of the counter
        """
        self.val = Value('i', initval)

    def incrementAndGet(self):
        """
        Atomically increment this counter, and return the new value stored.

        Returns:
            int: The updated value of this counter.
        """
        with self.val.get_lock():
            self.val.value += 1
            return self.val.value

    def value(self):
        """
        Atomically get the current value of this counter.

        Returns:
            int: The current value of this counter.
        """
        with self.val.get_lock():
            return self.val.value
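A minimal usage sketch (not part of the original snippet): several worker processes share one Counter; the worker function, process count, and loop bound are purely illustrative.

from multiprocessing import Process

def bump(counter, times):
    # Each increment goes through incrementAndGet(), so updates are atomic.
    for _ in range(times):
        counter.incrementAndGet()

if __name__ == '__main__':
    c = Counter(0)
    workers = [Process(target=bump, args=(c, 1000)) for _ in range(4)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print(c.value())  # expected: 4000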
Example No. 2
    def execute_task(self, website: Website, busy: Value, post_id: str, comment_id: str):
        busy.value = 1
        if os.path.exists("data.json"):
            os.remove("data.json")
        print("Started crawling task")
        process = CrawlerProcess(get_project_settings())
        process.crawl("od_links", base_url=website.url)
        process.start()
        print("Done crawling")

        self.db.import_json("data.json", website)
        os.remove("data.json")
        print("Imported in SQLite3")

        if post_id:
            # Reply to post
            stats = self.db.get_website_stats(website.id)
            comment = self.reddit_bot.get_comment({"": stats}, website.id)
            print(comment)
            if "total_size" in stats and stats["total_size"] > 10000000:
                post = self.reddit_bot.reddit.submission(post_id)
                self.reddit_bot.reply(post, comment)
            else:
                self.reddit_bot.log_crawl(post_id)

        elif comment_id:
            # Reply to comment
            stats = self.db.get_website_stats(website.id)
            comment = self.reddit_bot.get_comment({"There you go!": stats}, website.id)
            print(comment)
            reddit_comment = self.reddit_bot.reddit.comment(comment_id)
            self.reddit_bot.reply(reddit_comment, comment)
        busy.value = 0
        print("Done crawling task")
Example No. 3
def main():
    settings.setup()
    try:
        import miniupnpc
    except ImportError:
        safeprint("Dependency miniupnpc is not installed. Running in outbound only mode")
        settings.config['outbound'] = True
    safeprint("settings are:")
    safeprint(settings.config)
    queue = Queue()
    live = Value('b',True)
    ear = listener(settings.config['port'],settings.config['outbound'],queue,live,settings.config['server'])
    ear.daemon = True
    ear.start()
    feedback = []
    stamp = time()
    while queue.empty():
        if time() - 5 > stamp:
            break #pragma: no cover
    try:
        feedback = queue.get(False)
    except: #pragma: no cover
        safeprint("No feedback received from listener")
    ext_ip = ""     #Does this affect peers?
    ext_port = -1   #Does this affect peers?
    if feedback != []:
        settings.outbound = feedback[0]
        if settings.outbound is not True:
            ext_ip = feedback[1]
            ext_port = feedback[2]
    initializePeerConnections(settings.config['port'], ext_ip, ext_port)
    live.value = False
Example No. 4
def call(args, stdout=None, stderr=None, stdin=None, daemonize=False,
         preexec_fn=None, shell=False, cwd=None, env=None):
    """
    Run an external command in a separate process and detach it from the current process. Except
    for `stdout`, `stderr`, and `stdin`, all file descriptors are closed after forking. If
    `daemonize` is True, the parent process exits. All stdio is redirected to `os.devnull` unless
    specified otherwise. The `preexec_fn`, `shell`, `cwd`, and `env` parameters are the same as
    their `Popen` counterparts. Returns the PID of the child process if not daemonized.
    """
    stream = lambda s, m: os.open(os.devnull, m) if s is None else s
    stdout = stream(stdout, os.O_WRONLY)
    stderr = stream(stderr, os.O_WRONLY)
    stdin = stream(stdin, os.O_RDONLY)

    shared_pid = Value('i', 0)
    pid = os.fork()
    if pid > 0:
        os.waitpid(pid, 0)
        child_pid = shared_pid.value
        del shared_pid
        if daemonize:
            sys.exit(0)
        return child_pid
    else:
        os.setsid()
        proc = subprocess.Popen(args, stdout=stdout, stderr=stderr, stdin=stdin, close_fds=True,
                                preexec_fn=preexec_fn, shell=shell, cwd=cwd, env=env)
        shared_pid.value = proc.pid
        os._exit(0)
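A short usage sketch for the call() helper above (not part of the original source), assuming the surrounding module provides the os/sys/subprocess/Value imports the function relies on; the command is an arbitrary example.

if __name__ == '__main__':
    # Detach a long-running command; the returned PID belongs to the
    # grandchild created behind the double fork, not the intermediate child.
    pid = call(['sleep', '30'])
    print('detached process pid:', pid)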
Example No. 5
    def setup_heartbeat(self, client_controller):
        cond = multiprocessing.Condition()
        s_init_finish = Value('i', 0)

        do_sample = Value('i', 0)
        do_sample_lock = Lock()

        server_process = multiprocessing.Process(
                target=self.server_heart_beat, 
                args=(cond, s_init_finish, do_sample, do_sample_lock))
        server_process.daemon = False
        server_process.start()

        logger.info("Waiting for server init ...")
        cond.acquire()
        while (s_init_finish.value == 0):
            cond.wait()
        if s_init_finish.value == 5:
            logger.error("Waiting for server init ... FAIL")
            raise RuntimeError("server init failed.")
        cond.release()
        logger.info("Waiting for server init ... Done")
        
        # let all clients start running the benchmark
        client_controller.client_run(do_sample, do_sample_lock)
        cond.acquire()
        s_init_finish.value = 0
        cond.release()
        return server_process
Example No. 6
    def run_stop_test(self):
        """ Subclass StoppableThread and stop method `run` """
        class IncrementThread(StoppableThread):
            """ Used to test _stop in `run` """
            def __init__(self, *args, **kwargs):
                self.x = args[0]
                super(IncrementThread, self).__init__(*args[1:], **kwargs)

            def run(self):
                while not self._stop.is_set():
                    with self.x.get_lock():
                        self.x.value += 1

        x = Value('i', 0)
        st = IncrementThread(x)
        st.start()
        assert_equals(st.stopped, False)
        assert_equals(st.is_alive(), True)
        sleep(0.5)
        st.stop()
        assert_equals(st.stopped, True)
        st.join()
        assert_equals(st.is_alive(), False)
        with x.get_lock():
            assert_greater(x.value, 0)
Example No. 7
    def run_with_exception_except_test(self):
        """ Subclass StoppableExceptionThread and raise exception in method `run_with_exception` """
        class IncrementThread(StoppableExceptionThread):
            """ Used to test _stop in `run` """
            def __init__(self, *args, **kwargs):
                self.x = args[0]
                StoppableExceptionThread.__init__(self, *args[1:], **kwargs)

            def run_with_exception(self):
                while not self._stop.is_set():
                    with self.x.get_lock():
                        self.x.value += 1
                        if self.x.value > 5:
                            raise ValueError('x > 5')

        x = Value('i', 0)
        st = IncrementThread(x)
        st.start()
        sleep(1)
        assert_equals(st.stopped, False)
        with self.assertRaises(ValueError):
            st.join()
        assert_equals(st.is_alive(), False)
        with x.get_lock():
            assert_equals(x.value, 6)
Example No. 8
def main():
    #Begin Init
    settings.setup()
    from common.safeprint import safeprint
    try:
        import miniupnpc
    except ImportError:
        safeprint("Dependency miniupnpc is not installed. Running in outbound only mode")
        settings.config['outbound'] = True
    safeprint("settings are:")
    safeprint(settings.config)
    queue = Queue()
    live = Value('b',True)
    ear = listener(settings.config['port'],settings.config['outbound'],queue,live,settings.config['server'])
    ear.daemon = True
    ear.items = sync()
    ear.start()
    mouth = propagator(settings.config['port'] + 1, live)
    mouth.daemon = True
    mouth.items = ear.items
    mouth.start()
    feedback = []
    stamp = time()
    while queue.empty():
        if time() - 5 > stamp:
            break #pragma: no cover
    try:
        feedback = queue.get(False)
    except: #pragma: no cover
        safeprint("No feedback received from listener")
    ext_ip = ""     #Does this affect peers?
    ext_port = -1   #Does this affect peers?
    if feedback != []:
        settings.outbound = feedback[0]
        if settings.outbound is not True:
            ext_ip = feedback[1]
            ext_port = feedback[2]
    initializePeerConnections(settings.config['port'], ext_ip, ext_port)
    #End Init
    
    #Begin main loop
    if settings.config.get('seed'):
        safeprint("Seed mode activated")
        try:
            while True and not settings.config.get('test'):
                sleep(0.1)
        except KeyboardInterrupt:
            safeprint("Keyboard Interrupt")
    elif settings.config.get('server'):
        safeprint("Server mode activated")
    else:
        safeprint("Client mode activated")
    #End main loop
    
    #Begin shutdown
    safeprint("Beginning exit process")
    live.value = False
    settings.saveSettings()
    saveToFile()
    bounty.saveToFile()
Example No. 9
    def _init_visualization_and_io(self, sim):
        if self.config.output:
            output_cls = io.format_name_to_cls[self.config.output_format]
        else:
            output_cls = io.LBOutput

        if self.config.mode != 'visualization':
            return lambda subdomain: output_cls(self.config, subdomain.id)

        # basic_fields = sim.fields()
        # XXX compute total storage requirements

        for subdomain in self.subdomains:
            size = reduce(operator.mul, subdomain.size)
            vis_lock = mp.Lock()
            vis_buffer = Array(ctypes.c_float, size, lock=vis_lock)
            vis_geo_buffer = Array(ctypes.c_uint8, size, lock=vis_lock)
            subdomain.set_vis_buffers(vis_buffer, vis_geo_buffer)

        vis_lock = mp.Lock()
        vis_config = Value(io.VisConfig, lock=vis_lock)
        vis_config.iteration = -1
        vis_config.field_name = ''
        vis_config.all_blocks = False

        # Start the visualization engine.
        vis_class = None
        for engine in util.get_visualization_engines():
            if engine.name == self.config.vis_engine:
                vis_class = engine
                break

        if vis_class is None:
            self.config.logger.warning('Requested visualization engine not '
                                       'available.')
            try:
                vis_class = util.get_visualization_engines().next()
            except StopIteration:
                self.config.logger.warning(
                    'No visualization backends available. Falling back to '
                    'batch mode.')
                self.config.mode = 'batch'
                return lambda subdomain: output_cls(self.config, subdomain.id)

        # Event to signal that the visualization process should be terminated.
        self._vis_quit_event = Event()
        self._vis_process = Process(
                target=lambda: vis_class(
                    self.config, self.subdomains, self._vis_quit_event,
                    self._quit_event, vis_config).run(),
                name='VisEngine')
        self._vis_process.start()

        return lambda subdomain: io.VisualizationWrapper(
                self.config, subdomain, vis_config, output_cls)
Example No. 10
def main():
	running = Value(c_int, 1)
	readQueue = Queue()
	reader = Process(target = Reader("/dev/ttyUSB0", 9600), args = (running, readQueue))
	worker = Process(target = Worker(), args = (running, readQueue))
	reader.start()
	worker.start()
	time.sleep(5)
	running.value = 0
	reader.join()
	worker.join()
Example No. 11
    def __init__(self, nomenclature="", width=0., height=0.):

        Device.__init__(self, nomenclature, width, height)
        self.xpos_left = -0.5 * width
        self.xpos_right = 0.5 * width
        self.ypos_bottom = -0.5 * height
        self.ypos_top = 0.5 * height
        self.count_left = Value('i', 0)
        self.count_right = Value('i', 0)
        self.count_bottom = Value('i', 0)
        self.count_top = Value('i', 0)
Example No. 12
    def _init_visualization_and_io(self, sim):
        if self.config.output:
            output_cls = io.format_name_to_cls[self.config.output_format]
        else:
            # Dummy output class. Does not actually save data, but does provide
            # utility functions common to all output classes.
            output_cls = io.LBOutput

        if self.config.mode != 'visualization':
            return lambda subdomain: output_cls(self.config, subdomain.id)

        # XXX compute total storage requirements
        self._vis_geo_queues = []
        for subdomain in self.subdomain_specs:
            self._vis_geo_queues.append(subdomain.init_visualization())

        vis_lock = mp.Lock()
        vis_config = Value(io.VisConfig, lock=vis_lock)
        vis_config.iteration = -1
        vis_config.field_name = ''
        vis_config.all_subdomains = False

        # Start the visualization engine.
        vis_class = None
        for engine in util.get_visualization_engines():
            if engine.name == self.config.vis_engine:
                vis_class = engine
                break

        if vis_class is None:
            self.config.logger.warning('Requested visualization engine not '
                                       'available.')
            try:
                vis_class = util.get_visualization_engines().next()
            except StopIteration:
                self.config.logger.warning(
                    'No visualization backends available. Falling back to '
                    'batch mode.')
                self.config.mode = 'batch'
                return lambda subdomain: output_cls(self.config, subdomain.id)

        # Event to signal that the visualization process should be terminated.
        self._vis_quit_event = Event()
        self._vis_process = Process(
                target=lambda: vis_class(
                    self.config, self.subdomain_specs, self._vis_quit_event,
                    self._quit_event, vis_config, self._vis_geo_queues).run(),
                name='VisEngine')
        self._vis_process.start()

        return lambda subdomain: io.VisualizationWrapper(
                self.config, subdomain, vis_config, output_cls)
Example No. 13
class Control(object):
    """Shared (long) value for passing control information between main and
    worker threads.
    
    Args:
        initial_value: Initial value of the shared control variable.
    """
    def __init__(self, initial_value=CONTROL_ACTIVE):
        self.control = Value('l', initial_value)
    
    def check_value(self, value, lock=False):
        """Check that the current control value == `value`.
        
        Args:
            value: The value to check.
            lock: Whether to lock the shared variable before checking.
        
        Returns:
            True if the values are equal.
        """
        return self.get_value(lock=lock) == value
    
    def check_value_positive(self, lock=False):
        """Check that the current control value is positive.
        
        Args:
            lock: Whether to lock the shared variable before checking.
        """
        return self.get_value(lock=lock) > 0
    
    def get_value(self, lock=True):
        """Returns the current control value.
        
        Args:
            lock: Whether to lock the shared variable before checking.
        """
        if lock:
            with self.control.get_lock():
                return self.control.value
        else:
            return self.control.value
    
    def set_value(self, value):
        """Set the control value. The shared variable is always locked.
        
        Args:
            value: The value to set.
        """
        with self.control.get_lock():
            self.control.value = value
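A small sketch of how a Control instance might coordinate a worker process; CONTROL_ACTIVE is assumed to be the module-level constant referenced above, and CONTROL_STOP is a hypothetical stop code introduced only for this sketch.

import time
from multiprocessing import Process

CONTROL_ACTIVE = 1   # assumed value of the module-level constant
CONTROL_STOP = -1    # hypothetical stop code for this sketch

def worker(control):
    # Poll the shared control value until the main process changes it.
    while control.check_value(CONTROL_ACTIVE, lock=True):
        time.sleep(0.01)

if __name__ == '__main__':
    ctrl = Control(CONTROL_ACTIVE)
    p = Process(target=worker, args=(ctrl,))
    p.start()
    time.sleep(0.1)
    ctrl.set_value(CONTROL_STOP)
    p.join()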
Example No. 14
def camstream():
  print "CAMera STREAMer (OpenCV " + cv2.__version__ + ")"
  print "main(): OS: {}".format(os.name)
  
  # * Start CameraStreamer process
  print "main(): Starting CameraStreamer process..."
  if os.name == 'nt':  # [Windows]
    # ** Create shared objects (NOTE only necessary on Windows since it uses a different multiprocessing implementation)
    print "main(): [Windows] Creating shared objects..."
    # *** Stay alive flag
    stayAliveObj = Value(c_bool, True)
    
    # *** Frame counter
    frameCountObj = Value('i', 0)
    
    # *** Image array
    image = np.zeros((camera_frame_height, camera_frame_width, camera_frame_depth), dtype=np.uint8)
    imageShape = image.shape
    imageSize = image.size
    image.shape = imageSize  # flatten numpy array
    imageObj = Array(c_ubyte, image)  # create a synchronized shared array
    
    # *** Image shape
    imageShapeObj = Array('i', imageShape)
    cameraStreamerProcess = CameraStreamer(stayAliveObj, frameCountObj, imageObj, imageShapeObj)
  else:  # [POSIX]
    cameraStreamerProcess = CameraStreamer()
    # ** Grab generated shared objects to share with other child processes
    print "main(): [POSIX] Getting shared objects from CameraStreamer..."
    stayAliveObj = cameraStreamerProcess.stayAliveObj
    frameCountObj = cameraStreamerProcess.frameCountObj
    imageObj = cameraStreamerProcess.imageObj
    imageShapeObj = cameraStreamerProcess.imageShapeObj
  cameraStreamerProcess.start()
  
  # * Start StreamViewer process
  print "main(): Starting StreamViewer process..."
  streamViewerProcess = StreamViewer(stayAliveObj, frameCountObj, imageObj, imageShapeObj)
  streamViewerProcess.start()
  
  # * Wait for child processes to finish
  print "main(): Waiting for child processes to finish..."
  try:
    streamViewerProcess.join()
    cameraStreamerProcess.join()
  except KeyboardInterrupt:
    stayAliveObj.value = False
    streamViewerProcess.join()
    cameraStreamerProcess.join()
  print "main(): Done."
Example No. 15
import ctypes
from multiprocessing import Value


class Counter:
	def __init__(self):
		self.value = Value(ctypes.c_int)

	def __enter__(self):
		with self.value.get_lock():
			self.value.value += 1

	def __exit__(self, exc_type, exc_val, exc_tb):
		with self.value.get_lock():
			self.value.value -= 1

	def __repr__(self):
		return str(self.value.value)
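A brief sketch of the intended with-statement usage, where the counter tracks how many workers are currently inside the block; the worker function and process count are illustrative.

from multiprocessing import Process

def tracked_work(counter):
    # Entering the context increments the live-worker count; leaving decrements it.
    with counter:
        pass  # real work would go here

if __name__ == '__main__':
    live = Counter()
    procs = [Process(target=tracked_work, args=(live,)) for _ in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(live)  # back to 0 once every worker has exited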
Example No. 16
    def start(self, reload_from=None):
        """Start this server process.

        :param int reload_from: Optional, the PID of a running game server
                                process that this process should reload from
        :returns None:

        """
        assert not self._process, "server instance already started"
        pid = Value("i")
        self._process = Process(target=self._start,
                                args=(pid, socket_queue),
                                kwargs={"reload_from": reload_from})
        self._process.start()
        pid.value = self._process.pid
Example No. 17
    def send_mldquery_regularly(self):
        self.logger.debug("")

        requraly_query_type = self.config[const.REGURALY_QUERY_TYPE]
        reguraly_query_interval = self.config[const.REGURALY_QUERY_INTERVAL]
        mc_query_interval = self.config[const.MC_QUERY_INTERVAL]

        # Before the first send, wait a quarter of the periodic query interval
        time.sleep(reguraly_query_interval / 4)

        # General Query
        if requraly_query_type == self.GENERAL_QUERY:
            self.logger.debug("create general query")
            mc_info = {const.MC_TAG_MC_ADDR: const.DELIMIT_DOUBLE_COLON,
                       const.MC_TAG_SERV_IP: None}
            while self.SEND_LOOP:
                self.send_mldquery([mc_info])
                # Timeout check
                self.check_user_timeout()
                time.sleep(reguraly_query_interval - self.QUERY_QRV)

        # Specific Query
        elif requraly_query_type == self.SPECIFIC_QUERY:
            self.logger.debug("create specific query")
            next_interval = Value(ctypes.c_bool, False)

            while self.SEND_LOOP:
                query_proc = Process(
                    target=self.wait_query_interval,
                    args=(next_interval, reguraly_query_interval))
                query_proc.daemon = True
                query_proc.start()
                self.logger.debug(
                    "next_interval : %s", str(next_interval.value))
                self.send_mldquery(
                    self.mc_info_list, mc_query_interval, next_interval)
                # Timeout check
                self.check_user_timeout()

                # If the periodic query interval has not elapsed yet, wait for it
                if not next_interval.value:
                    self.logger.debug(
                        "waiting query interval(%d sec)...",
                        reguraly_query_interval)
                    query_proc.join()

                next_interval.value = False
                query_proc.terminate()
Example No. 18
 def __init__(self, initval=0):
     """
     Initialize this counter.
     Args:
         initval (int): the initial value of the counter
     """
     self.val = Value('i', initval)
Example No. 19
    def reset(self):
        self._pipes = {}
        self._pipe_processes = []
        self._first_pipe = None
        self._last_pipe = None
        self._func_read_stream = (lambda: range(0))

        self._cleanups = []
        self._already_cleanup = Value(ctypes.c_bool, False)

        self._running_status = Value(ctypes.c_int, Pipeline.RUNNING_STATUS_STANDBY)
        self._interrupted_by_exception = False

        self._thread_watching_running_status = None
        self._thread_watching_remaining_processes = None
        self._stream_reader_process = None
Example No. 20
    def __init__(self, name, timeout=0, onstart=None, ondone=None, params=None, stdout=sys.stdout, stderr=sys.stderr):
        """Initialize task, which is a group of jobs to be executed

		name  - task name
		timeout  - execution timeout. Default: 0, means infinity
		onstart  - callback which is executed on the task starting (before the execution
			started) in the CONTEXT OF THE CALLER (main process) with the single argument,
			the task. Default: None
			ATTENTION: must be lightweight
		ondone  - callback which is executed on successful completion of the task in the
			CONTEXT OF THE CALLER (main process) with the single argument, the task. Default: None
			ATTENTION: must be lightweight
		params  - additional parameters to be used in callbacks
		stdout  - None or file name or PIPE for the buffered output to be APPENDED
		stderr  - None or file name or PIPE or STDOUT for the unbuffered error output to be APPENDED
			ATTENTION: PIPE is a buffer in RAM, so do not use it if the output data is huge or unlimited

		tstart  - start time is filled automatically on the execution start (before onstart). Default: None
		tstop  - termination / completion time after ondone
		"""
        assert isinstance(name, str) and timeout >= 0, "Parameters validation failed"
        self.name = name
        self.timeout = timeout
        self.params = params
        self.onstart = types.MethodType(onstart, self) if onstart else None
        self.ondone = types.MethodType(ondone, self) if ondone else None
        self.stdout = stdout
        self.stderr = stderr
        self.tstart = None
        self.tstop = None  # SyncValue()  # Termination / completion time after ondone
        # Private attributes
        self._jobsnum = Value(ctypes.c_uint)
        # Graceful completion of all tasks or at least one of the tasks was terminated
        self._graceful = Value(ctypes.c_bool)
        self._graceful.value = True
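A tiny illustrative sketch of constructing such a Task with a lightweight ondone callback; the task name and the callback body are made up for the example.

def on_task_done(task):
    # Runs in the context of the caller (main process); keep it lightweight.
    print('Task finished:', task.name)

demo = Task('demo-task', timeout=0, ondone=on_task_done)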
Example No. 21
    def __init__(self):

        parser = argparse.ArgumentParser()
        parser.add_argument("thread", help="URL of thread to scrape")
        parser.add_argument("--directory", "-d", help="Specify dir to save to (Default: ~/4chan)")
        parser.add_argument("--name", "-n", help="Specify name of dir to download to (Default: Topic/OP Post number)")
        parser.add_argument("--workers", type=int, help="Number of threads to run (Default: 10)")
        parser.add_argument("--version", "-v", action="version", version=VERSION)
        self.args = parser.parse_args()

        save_path = self.args.directory or os.path.join(
            os.path.expanduser('~'), "4chan")

        self.header = {
            'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11\
            (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
            'Accept': 'text/html,application/xhtml+xml,\
                application/xml;q=0.9,*/*;q=0.8'}
        self.thread_url = self.args.thread
        self.board = self.thread_url.split('/')[3]
        self.thread_name = ""
        self.downloads = []
        self.filename = []
        self.save_path = save_path
        self.counter = Value('i', 0)
        self.total_count = Value('i', 0)
        self.workers = self.args.workers
        self.down_dir = ""
Example No. 22
class Transformator(Device):
    def __init__(self, nomenclature="", width=0., height=0.):

        Device.__init__(self, nomenclature, width, height)
        self.count = Value('i', 0)

    def __repr__(self):

        r = str(self) + "("
        r += "width=" + str(self.width) + "m, "
        r += "height=" + str(self.height) + "m, "
        r += "length=" + str(self.length) + "m, "
        r += "count=" + str(self.count.value) + ")"
        return r

    def transport(self, particle):
        if not self.is_particle_lost(particle):
            with self.count.get_lock():
                self.count.value += 1
            if self.next:
                return self.next.transport(particle)

    def reset(self):

        self.count.value = 0
        if self.next:
            self.next.reset()
Example No. 23
    def __init__(self, world_class, opt, agents):
        self.inner_world = world_class(opt, agents)

        self.queued_items = Semaphore(0)  # counts num exs to be processed
        self.epochDone = Condition()  # notifies when exs are finished
        self.terminate = Value('b', False)  # tells threads when to shut down
        self.cnt = Value('i', 0)  # number of exs that remain to be processed

        self.threads = []
        for i in range(opt['numthreads']):
            self.threads.append(HogwildProcess(i, world_class, opt,
                                               agents, self.queued_items,
                                               self.epochDone, self.terminate,
                                               self.cnt))
        for t in self.threads:
            t.start()
Example No. 24
def main():

    guardian = Heimdall()
    # zoeh = Zoehmacarena()

    # guardian.add_bot(zoeh)

    # guardian.main_loop()

    cont = True
    ispaused = False
    # bobby = Bob()
    state = Value("i", 0)

    l = Lock()
    # proc = Process(target=bobby.main, args=(l, state,))

    print("starting")
    # proc.start()
    while cont:
        if ispaused:
            command = raw_input("Heimdall[paused]: ")
        else:
            command = raw_input("Heimdall[running]: ")


        if command == "quit":
            cont = False
            state.value = -1

        elif command == "pause":
            l.acquire()
            print("paused")
            ispaused = True

        elif command == "start":
            print("unpausing")
            l.release()
            ispaused = False

        else:
            print("unknown command")

    # proc.join()
    print("finished")
    return
Example No. 25
def main():
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-f', dest='logfile', required=True)
    arg_parser.add_argument('-t', dest='sleep', type=int, default=1)

    args = arg_parser.parse_args()

    count = Value('i', 0)
    p = Process(target=log_counter, args=(args.logfile, count,))
    p.start()

    while True:
        time.sleep(args.sleep)
        print('{0} events/{1}s'.format(count.value, args.sleep))
        count.value = 0

    p.join()
Example No. 26
class Counter(object):
    def __init__(self):
        self.val = Value('i', 0)

    def increment(self, n=1):
        with self.val.get_lock():
            self.val.value += n

    @property
    def value(self):
        return self.val.value
Example No. 27
  def __init__(self, config):
    configFile   = config
    try:
      self.readConfig(configFile)
    except Exception as e:
      print ('\n%s' % e)
      print('The Configuration is incomplete, exiting')
      exit(2)

    self.jobs   = Value('i', 0)
    self.logger.info('Slave %s initialized' % self.host)
Example No. 28
class Control(object):
    def __init__(self, initial_value):
        self.control = Value('l', initial_value)
    
    def check_value(self, value, lock=False):
        return self.get_value(lock=lock) == value
    
    def check_value_positive(self, lock=False):
        return self.get_value(lock=lock) > 0
    
    def get_value(self, lock=True):
        if lock:
            with self.control.get_lock():
                return self.control.value
        else:
            return self.control.value
    
    def set_value(self, value):
        with self.control.get_lock():
            self.control.value = value
Example No. 29
    def __init__(self, alias=None):
        self.logger = _get_logger(__name__)
        self._alias = alias
        self._pipe_builders = []

        self._pipes = {}
        self._pipe_processes = []
        self._first_pipe = None
        self._last_pipe = None
        self._func_read_stream = (lambda: range(0))

        self._cleanups = []
        self._already_cleanup = Value(ctypes.c_bool, False)

        self._running_status = Value(ctypes.c_int, Pipeline.RUNNING_STATUS_STANDBY)
        self._interrupted_by_exception = False

        self._thread_watching_running_status = None
        self._thread_watching_remaining_processes = None
        self._stream_reader_process = None
Example No. 30
def hanging_get(my_id, messages, initial_peer_id):
    remote_peer_id = Value("i", initial_peer_id)
    remote_sender = Process(target=to_remote_server, args=(5550, my_id, remote_peer_id))
    remote_sender.start()

    while True:
        r = requests.get('http://localhost:8888/wait?peer_id=' + str(my_id))
        if r.status_code == 200:        

            if int(r.headers['pragma']) == my_id:
                connected = r.text.split("\n")
                for l in connected:
                    info = l.strip().split(",")

                    if len(info) == 3 and info[0] == "receiver" and info[2] == '1':
                        remote_peer_id.value = int(info[1])
                    if len(info) == 3 and info[0] == "receiver" and info[2] == '0':
                        remote_peer_id.value = -1                        
            else:
                messages.put(Message(int(r.headers['pragma']), r.text))
Example No. 31
from flask import Flask, request, redirect, url_for, flash, jsonify
import numpy as np
import pickle as p
import pandas as pd
import json
import base64
from multiprocessing import Value
counter = Value('i', 0)

model_counter = 0

app = Flask(__name__)

@app.route('/', methods=['GET'])
def show_model_counter():
    with counter.get_lock():
        counter.value += 1
        out = counter.value

    return jsonify(count=out)


@app.route('/model_predict/', methods=['POST'])
def makecalc():
    j_data = request.get_json()
    j_data = j_data['data']
    print('j_data: ', j_data)
    prediction = np.array2string(model.predict(j_data))
    predict_proba = np.array2string(model.predict_proba(j_data))
    print(model.predict(j_data))
    print(prediction)
Example No. 32
class imagequeue:
    """
    This class keeps a queue of images which may be worked on in threads.
    
    :param SAXS.calibration Cal: The SAXS Calibration to use for the processing
    :param optparser options: The object with the command-line options of the saxsdog
    :param list args: List of command line options
    
    """
    def __init__(self, Cals, options, directory, conf):

        self.pool = []
        self.cals = Cals
        self.conf = conf
        self.options = options
        self.picturequeue = Queue()
        self.histqueue = Queue(maxsize=10000)
        self.plotdataqueue = Queue(maxsize=1)
        self.directory = directory
        self.allp = Value('i', 0)
        self.stopflag = Value('i', 0)
        self.dirwalker = None
        self.observer = None
        self.filelist_output = np.array(["filename", 0, 0, 0])
        if not options.plotwindow:
            plt.switch_backend("Agg")
        self.fig = plt.figure()
        if options.plotwindow:
            plt.ion()

    def getlastdata(self):
        print "getdatata" + str(self.lastfile)
        return self.lastfile, self.lastdata

    def fillqueuewithexistingfiles(self):
        """
        Fill the queue with the list of images that is already there.
        """

        if self.options.walkdirinthreads:
            self.dirwalker = Thread(target=filler,
                                    args=(self.picturequeue, self.directory))
            self.dirwalker.start()
        else:

            filler(self.picturequeue, self.directory)

    def procimage(self, picture, threadid):
        filelist = {}
        max = 60
        data = []
        integparams = {}
        '''Setting output directory paths'''
        if self.options.outdir != "":
            basename = self.options.outdir + os.sep + ('_'.join(
                picture.replace('./', '').split(os.sep))[:-3]).replace(
                    '/', "_")
            basename = basename.replace(':', '').replace('.', '')
        else:
            reldir = os.path.join(os.path.dirname(picture),
                                  self.options.relpath)
            if not os.path.isdir(reldir):
                try:
                    os.mkdir(reldir)
                except:
                    print "Problem creating WORK directory!!!"
                    return
            basename = os.path.join(reldir, os.path.basename(picture)[:-4])
        '''Check if image exists or we are in Gisaxs mode'''
        skipfile = False
        if self.options["OverwriteFiles"] == False:
            for calnum, cal in enumerate(self.cals):
                if len(list(enumerate(self.cals))) == 1 or calnum == 0:
                    filename = basename
                else:
                    filename = basename + "_c" + cal.kind[0] + str(calnum)
                chifilename = filename + ".chi"
                if os.path.isfile(chifilename):
                    filelist[cal.kind + str(calnum)] = chifilename
                    skipfile = True
                    if self.options["livefilelist"] != "xxx":
                        with open(self.options["livefilelist"],
                                  'a') as f_handle:
                            file_path = os.path.normpath(chifilename)
                            file_path = str.split(
                                str(file_path),
                                str(
                                    os.path.split(
                                        self.options["watchdir"])[0]))[1]
                            output = file_path + ", " + str(0) + ", " + str(
                                0) + ", " + str(0) + "\n"
                            f_handle.write(output)
                            f_handle.close()
        if self.options.GISAXSmode == True and calnum == 0:  #pass on GISAXSmode information to calibration.integratechi
            skipfile = True
        '''Check if image can be opened'''
        if skipfile == False:
            if not self.options.silent:
                print "[", threadid, "] open: ", picture
            for i in range(max):
                try:
                    image = misc.imread(picture)
                except KeyboardInterrupt:
                    return
                except IOError as e:
                    try:
                        print "cannot open ", picture, ", lets wait.", max - i, " s"
                        print e.message, sys.exc_info()[0]
                        time.sleep(1)
                        continue
                    except KeyboardInterrupt:
                        return
                except:
                    print "############"
                    print sys.exc_info()
                    continue
                if image.shape == tuple(
                        self.cals[0].config["Geometry"]["Imagesize"]):
                    break
                print "cannot open ", picture, ", lets wait.", max - i, " s"
                time.sleep(1)

            else:
                print "image ", picture, " has wrong format"
                return

        if skipfile == False:
            imgMetaData = datamerge.readtiff(picture)
            if "date" in imgMetaData:
                imgTime = imgMetaData["date"]
            else:
                imgTime = ""
        else:
            imgTime = ""

        if skipfile == False:
            for calnum, cal in enumerate(self.cals):
                if len(list(enumerate(self.cals))) == 1 or calnum == 0:
                    filename = basename
                else:
                    filename = basename + "_c" + cal.kind[0] + str(calnum)
                chifilename = filename + ".chi"
                filelist[cal.kind + str(calnum)] = chifilename
                if not self.options.resume or not os.path.isfile(chifilename):
                    result = cal.integratechi(image, chifilename, picture)
                    #print(chifilename, " has been integrated!")
                    result["Image"] = picture
                    if "Integparam" in result:
                        integparams[cal.kind[0] +
                                    str(calnum)] = result["Integparam"]
                    data.append(result)
                    if self.options["livefilelist"] != "xxx":
                        #print result["Integparam"]["I0"]
                        with open(self.options["livefilelist"],
                                  'a') as f_handle:
                            file_path = os.path.normpath(chifilename)
                            file_path = str.split(
                                str(file_path),
                                str(
                                    os.path.split(
                                        self.options["watchdir"])[0]))[1]
                            if "Integparam" in result:
                                output = file_path +", "+str(result["Integparam"]["I0"])+ \
                                    ", "+str(result["Integparam"]["I1"])+", "+str(result["Integparam"]["I2"])+"\n"
                            else:
                                output = file_path +", "+str(0)+ \
                                    ", "+str(0)+", "+str(0)+"\n"
                            f_handle.write(output)
                            f_handle.close()
                            #np.savetxt(f_handle,self.filelist_output, delimiter=',', fmt="%s ")

                    if threadid == 0 and self.options.plotwindow:
                        # this is a hack; it really should be a proper GUI

                        cal.plot(image, fig=self.fig)
                        plt.draw()

                if self.options.writesvg:
                    if not self.options.resume or not os.path.isfile(filename +
                                                                     '.svg'):
                        cal.plot(image, filename + ".svg", fig=self.fig)
                if self.options.writepng:
                    if not self.options.resume or not os.path.isfile(filename +
                                                                     '.svg'):
                        misc.imsave(filename + ".png", image)
                if self.options.silent:
                    if np.mod(self.allp.value, 100) == 0:
                        print "[", threadid, "] ", self.allp.value
                else:
                    print "[", threadid, "] write: ", filename + ".chi"

        with self.allp.get_lock():
            self.allp.value += 1

        filelist["JSON"] = basename + ".json"

        try:
            self.histqueue.put(
                {
                    "Time": float(time.time()),
                    "ImgTime": imgTime,
                    "FileList": filelist,
                    "BaseName": basename,
                    "IntegralParameters": integparams
                },
                block=False)
        except Full:
            print "Full"
        return basename, data

    def clearqueue(self):
        while self.histqueue.empty() == False:
            self.histqueue.get()
        print "History Queue cleared"

    def start(self):
        """
        Start threads and directory observer.
        """
        #start threads

        for threadid in range(1, self.options.threads):
            print "start proc [", threadid, "]"

            worker = Process(target=funcworker, args=(self, threadid))
            worker.daemon = True
            self.pool.append(worker)
            worker.start()
            #self.processimage(picture,options)
        self.starttime = time.time()
        if self.options.watch:
            eventhandler = addtoqueue(self.picturequeue)
            self.observer = Observer()
            self.observer.schedule(eventhandler, self.args[0], recursive=True)
            self.observer.start()
        # We let the master process do some work because it's useful for matplotlib.
        if not self.options.nowalk:
            self.fillqueuewithexistingfiles()
        if self.options.servermode:

            from Leash import addauthentication
        try:
            while (self.options.servermode or (not self.picturequeue.empty())
                   or (self.dirwalker and self.dirwalker.is_alive())
                   or self.options.watch):
                try:
                    picture = self.picturequeue.get(timeout=1)
                except Empty:
                    continue

                # in case something goes wrong
                try:
                    lastfile, data = self.procimage(picture, 0)
                except:
                    continue

                if self.options.servermode:
                    request = {
                        "command": "putplotdata",
                        "argument": {
                            "data": {
                                "result": "plot",
                                "data": {
                                    "filename": lastfile,
                                    "graphs": data,
                                    "stat": {}
                                }
                            }
                        }
                    }

                    self.plotdataqueue.put(request)
                if np.mod(self.allp.value, 500) == 0:
                    self.timreport()
        except KeyboardInterrupt:
            pass

        self.stop()
        self.timreport()
        return self.allp.value, time.time() - self.starttime

    def stop(self):
        print "\n\nWaiting for the processes to terminate."
        if self.observer:
            self.observer.stop()
            self.observer.observer.join(1)

        self.stopflag.value = 1
        for worker in self.pool:
            print "join worker"
            worker.join(1)
        if self.dirwalker:

            self.dirwalker.join(1)
        print "empty pic queue"
        while True:
            try:
                self.picturequeue.get(False)
            except Empty:
                break
        print "empty hist queue"
        while True:
            try:
                self.histqueue.get(False)
            except Empty:
                break
        print "empty plot queue"
        while True:
            try:
                self.plotdataqueue.get(False)
            except Empty:
                break
        if os.sys.platform != "win32":
            try:
                self.histqueue.close()
                self.plotdataqueue.close()
            except Exception as e:
                print e

    def timreport(self):
        tottime = time.time() - self.starttime
        count = self.allp.value
        #print count
        if count == 0:
            print "We didn't do any pictures "
        else:
            print "\n\nelapsed time: ", tottime
            print "\nProcessed: ", count, " pic"
            print " time per pic: ", tottime / count, "[s]"
            print " pic per second: ", count / tottime, "[/s]"
        time.sleep(1)
Example No. 33
    try:
        # Send data
        nums = numbers[:]
        sock.sendall(bytes(str(nums), encoding='utf8'))

        data = sock.recv(4096)
        log.info("Received: %s" % str(data))
    except socket.error as e:
        log.error("Socket error: %s" % str(e))
    except Exception as e:
        log.error("Other exception: %s" % str(e))
    finally:
        log.info("Closing connection to the server")
        sock.close()


if __name__ == '__main__':

    port = 2000
    numbers = []
    howmanynumbers = click.prompt(
        'Enter number of integers you want to process', type=int)
    for _ in range(howmanynumbers):
        numbers.append(click.prompt('Please enter a valid integer', type=int))
    num = Value('i', port)
    arr = Array('i', numbers)
    p = Process(target=echo_client, args=(num, arr))

    p.start()
    p.join()
Example No. 34
import multiprocessing as mp
from multiprocessing import Value
import array
import os
import time
import memory_profiler

init = " " * int(1e10)
arr = array.array('c', init)


def f(a, v):
    print "running", os.getpid(), len(a)
    print "in process", memory_profiler.memory_usage()
    #a[0] = 'x'
    print "did arr get changed in process?", a[:5]
    v.value = len(a)
    time.sleep(5)


value = Value('L')
print value
print "before making process", memory_profiler.memory_usage()
p = mp.Process(target=f, args=(arr, value))
print "after making process", memory_profiler.memory_usage()
p.start()
print "after making process 2", memory_profiler.memory_usage()
p.join()  # a sleep would work but this is neater
print "Value is:", value.value
print "did arr get changed?", arr[:5]
Example No. 35
"""
value.py: allocate a single shared-memory value
Note: the shared memory holds only one value
"""
from multiprocessing import Process, Value
import time
import random

# Create the shared memory
money = Value('i', 5000)


# Operate on the shared memory
def man():
    for i in range(30):
        time.sleep(0.2)
        money.value += random.randint(1, 1000)


def girl():
    for i in range(30):
        time.sleep(0.15)
        money.value -= random.randint(100, 800)


p1 = Process(target=man)
p2 = Process(target=girl)
p1.start()
p2.start()
p1.join()
p2.join()
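The unsynchronized `money.value += ...` updates above can lose increments under contention; a minimal variant of the same demo that takes the Value's lock around each read-modify-write could look like this (same imports and shared `money` as above):

def man_locked():
    for _ in range(30):
        time.sleep(0.2)
        with money.get_lock():
            money.value += random.randint(1, 1000)


def girl_locked():
    for _ in range(30):
        time.sleep(0.15)
        with money.get_lock():
            money.value -= random.randint(100, 800)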
Example No. 36
def main():
    """Gets input hash and salt from command line,
       spawns worker threads, initializes queue,
       calls worker function"""

    # Check user input
    if len(sys.argv) != 2:
        sys.exit("Usage: python crack.py hash")

    # Extract hash and salt from user input
    user_hash = sys.argv[1]
    salt = user_hash[:2]

    # Input lists
    inputs = [HASH_INPUTS_1, HASH_INPUTS_2, HASH_INPUTS_3, HASH_INPUTS_4]

    # Shared memory map found to share state between processes
    found = Value('i', 0)

    # DOES NOT WORK BECAUSE SHARED MEM CANNOT BE PASSED TO pool.apply_async()
    # !!!!!!!!

    # The callback for handling results of cracking the DES hashed password.
    #  It is the callback for main's calls to worker_crack
    #  via pool.apply_async. When the pool of child processes returns results,
    #  the main process handles them via this callback, print_n_return.
    def print_n_return(result):
        """Callback function for worker_crack
            that prints the password found by
            one of the callers. The other callbacks 
            do not print their None values.
        """
        # Only let the caller print, who has found the password
        if result:
            print(f"password: {result}")

        return

    # Spawn a pool of child processes calling the worker
    #  function worker_crack in parallel and
    #  applying async, with callback function print_n_return
    #  on the result returned from the workers.
    start_time = time.time()
    pool = Pool(NUM_PHYS_CORES)
    for aninput in inputs:
        pool.apply_async(worker_crack,
                         args=(aninput, user_hash, salt, found),
                         callback=print_n_return)
    pool.close()
    pool.join()

    # DOES NOT WORK BECAUSE SHARED MEM CANNOT BE PASSED TO pool.apply_async()
    # !!!!!!!!

    duration = time.time() - start_time
    print(f"Cracking password took {duration:.2f} s")

    logging.debug(f"Found: {found.value}")

    # Success
    sys.exit(0)
Example No. 37
            elif v[1] == 0:  #v[1] ==fq
                frame1 = v[0]

            #print(result)
            #process v
        except queue.Empty:
            continue
        except Exception as e:
            raise e
    cv2.destroyAllWindows()


if __name__ == "__main__":

    # Retrieve singleton reference to system object
    sharedK = Value('i', 2)
    sharedBaseExposure = Value('d', 4)
    system = PySpin.System.GetInstance()

    # Retrieve list of cameras from the system
    cam_list = system.GetCameras()

    num_cameras = cam_list.GetSize()

    print("Number of cameras detected:", num_cameras)
    # Finish if there are no cameras
    if num_cameras == 0:
        # Clear camera list before releasing system
        cam_list.Clear()

        # Release system
Example No. 38
def test_cbsc002_callbacks_generating_children(dash_duo):
    """Modify the DOM tree by adding new components in the callbacks."""

    # some components don't exist in the initial render
    app = dash.Dash(__name__, suppress_callback_exceptions=True)
    app.layout = html.Div(
        [dcc.Input(id="input", value="initial value"), html.Div(id="output")]
    )

    @app.callback(Output("output", "children"), [Input("input", "value")])
    def pad_output(input):
        return html.Div(
            [
                dcc.Input(id="sub-input-1", value="sub input initial value"),
                html.Div(id="sub-output-1"),
            ]
        )

    call_count = Value("i", 0)

    @app.callback(
        Output("sub-output-1", "children"), [Input("sub-input-1", "value")]
    )
    def update_input(value):
        call_count.value = call_count.value + 1
        return value

    dash_duo.start_server(app)

    dash_duo.wait_for_text_to_equal("#sub-output-1", "sub input initial value")

    assert call_count.value == 1, "called once at initial stage"

    pad_input, pad_div = dash_duo.dash_innerhtml_dom.select_one(
        "#output > div"
    ).contents

    assert (
        pad_input.attrs["value"] == "sub input initial value"
        and pad_input.attrs["id"] == "sub-input-1"
    )
    assert pad_input.name == "input"

    assert (
        pad_div.text == pad_input.attrs["value"]
        and pad_div.get("id") == "sub-output-1"
    ), "the sub-output-1 content reflects to sub-input-1 value"

    dash_duo.percy_snapshot(name="callback-generating-function-1")

    assert dash_duo.redux_state_paths == {
        "input": ["props", "children", 0],
        "output": ["props", "children", 1],
        "sub-input-1": [
            "props",
            "children",
            1,
            "props",
            "children",
            "props",
            "children",
            0,
        ],
        "sub-output-1": [
            "props",
            "children",
            1,
            "props",
            "children",
            "props",
            "children",
            1,
        ],
    }, "the paths should include these new output IDs"

    # editing the input should modify the sub output
    dash_duo.find_element("#sub-input-1").send_keys("deadbeef")

    assert (
        dash_duo.find_element("#sub-output-1").text
        == pad_input.attrs["value"] + "deadbeef"
    ), "deadbeef is added"

    # the total updates is initial one + the text input changes
    dash_duo.wait_for_text_to_equal(
        "#sub-output-1", pad_input.attrs["value"] + "deadbeef"
    )

    rqs = dash_duo.redux_state_rqs
    assert rqs, "request queue is not empty"
    assert all((rq["status"] == 200 and not rq["rejected"] for rq in rqs))

    dash_duo.percy_snapshot(name="callback-generating-function-2")
    assert dash_duo.get_logs() == [], "console is clean"
Example No. 39
class HTAPController:
    # Keep the shared-memory primitives static (class attributes), as otherwise the
    # multiprocessing inheritance scheme doesn't work; this lets us use
    # "simple" synchronized primitives.
    latest_timestamp = Value('d', 0)  # database record timestamp
    next_tsx_timestamp = Value(
        'd', 0)  # rtc time at which we can do the next oltp tsx
    stats_queue = Queue()  # queue for communicating statistics

    def __init__(self, args):
        self.args = args
        self.next_tsx_timestamp.value = time.time()
        self.tsx_timestamp_increment = 1.0 / self.args.target_tps if self.args.target_tps is not None else 0
        self.num_warehouses = self._query_num_warehouses()
        self.range_delivery_date = self._query_range_delivery_date()

        # update the shared value to the actual last ingested timestamp
        self.latest_timestamp.value = self.range_delivery_date[1].timestamp()
        self.csv_interval = args.csv_interval if 'csv' in args.output else None
        self.stats = Stats(self.args.dsn, self.args.oltp_workers,
                           self.args.olap_workers, self.csv_interval,
                           self.args.ignored_queries)
        self.monitor = Monitor(self.stats, self.args.oltp_workers,
                               self.args.olap_workers, self.num_warehouses,
                               self.range_delivery_date[0])

        print(f'Warehouses: {self.num_warehouses}')

    def oltp_sleep(self):
        with self.next_tsx_timestamp.get_lock():
            self.next_tsx_timestamp.value = self.next_tsx_timestamp.value + self.tsx_timestamp_increment
            sleep_until = self.next_tsx_timestamp.value
        time_now = time.time()
        if time_now < sleep_until:
            time.sleep(sleep_until - time_now)

    def oltp_worker(self, worker_id):
        # do NOT introduce timeouts for the oltp queries! that would make the
        # workload imbalanced and eventually stall the whole benchmark
        with DBConn(self.args.dsn) as conn:
            oltp_worker = TransactionalWorker(worker_id, self.num_warehouses,
                                              self.latest_timestamp, conn,
                                              self.args.dry_run)
            next_reporting_time = time.time() + 0.1
            while True:
                self.oltp_sleep()
                oltp_worker.next_transaction()
                if next_reporting_time <= time.time():
                    # it's beneficial to send in chunks, so batch the stats by accumulating 0.1 s of samples
                    self.stats_queue.put(('oltp', oltp_worker.stats()))
                    next_reporting_time += 0.1

    def olap_worker(self, worker_id):
        stream = AnalyticalStream(worker_id, self.args,
                                  self.range_delivery_date[0],
                                  self.latest_timestamp, self.stats_queue)
        while True:
            stream.run_next_query()

    def analyze_worker(self):
        tables = [
            'customer', 'district', 'history', 'item', 'nation', 'new_orders',
            'order_line', 'orders', 'region', 'stock', 'supplier', 'warehouse'
        ]

        os.makedirs('results', exist_ok=True)
        with open('results/analyze.csv', 'w+') as csv:
            with DBConn(self.args.dsn) as conn:
                while True:
                    for table in tables:
                        start = time.time()
                        conn.cursor.execute(f'ANALYZE {table}')
                        runtime = time.time() - start
                        csv.write(
                            f'{datetime.now()}, {table}, {runtime:.2f}\n')
                        csv.flush()
                    time.sleep(600)

    def _sql_error(self, msg):
        import sys
        print(f'ERROR: {msg} Did you run `prepare_benchmark`?')
        sys.exit(-1)

    def _query_range_delivery_date(self):
        with DBConn(self.args.dsn) as conn:
            try:
                conn.cursor.execute(
                    'SELECT min(ol_delivery_d), max(ol_delivery_d) FROM order_line'
                )
                return conn.cursor.fetchone()
            except ProgrammingError:
                self._sql_error('Could not query the latest delivery date.')

    def _query_num_warehouses(self):
        with DBConn(self.args.dsn) as conn:
            try:
                conn.cursor.execute(
                    'SELECT count(distinct(w_id)) from warehouse')
                return conn.cursor.fetchone()[0]
            except ProgrammingError:
                self._sql_error('Could not query number of warehouses.')

    def _prepare_stats_db(self):
        dsn_url = urlparse(self.args.stats_dsn)
        dbname = dsn_url.path[1:]

        with DBConn(f'{dsn_url.scheme}://{dsn_url.netloc}/postgres') as conn:
            try:
                conn.cursor.execute(
                    f"CREATE DATABASE {dbname} TEMPLATE template0 ENCODING 'UTF-8'"
                )
            except DuplicateDatabase:
                pass

        with DBConn(self.args.stats_dsn) as conn:
            stats_schema_path = os.path.join('benchmarks', 'htap',
                                             'stats_schema.sql')
            with open(stats_schema_path, 'r') as schema:
                schema_sql = schema.read()
                try:
                    conn.cursor.execute(schema_sql)
                except DuplicateTable:
                    pass

    def run(self):
        begin = datetime.now()
        elapsed = timedelta()
        burnin_duration = None if not self.args.dont_wait_until_enough_data else timedelta()

        if self.args.stats_dsn is not None:
            print(f"Statistics will be collected in '{self.args.stats_dsn}'.")
            self._prepare_stats_db()
            stats_conn_holder = DBConn(self.args.stats_dsn)
        else:
            print('Database statistics collection is disabled.')
            stats_conn_holder = nullcontext()

        def worker_init():
            signal.signal(signal.SIGINT, signal.SIG_IGN)

        num_total_workers = self.args.oltp_workers + self.args.olap_workers + 1
        with stats_conn_holder as stats_conn:
            with Pool(num_total_workers, worker_init) as pool:
                oltp_workers = pool.map_async(self.oltp_worker,
                                              range(self.args.oltp_workers))
                olap_workers = pool.map_async(self.olap_worker,
                                              range(self.args.olap_workers))
                analyze_worker = pool.apply_async(self.analyze_worker)

                try:
                    update_interval = timedelta(seconds=min(
                        self.args.monitoring_interval, self.args.csv_interval))
                    display_interval = timedelta(
                        seconds=self.args.monitoring_interval)
                    next_display = datetime.now() + display_interval
                    next_update = datetime.now() + update_interval
                    while True:
                        # the workers are never supposed to stop on their own, so detect
                        # errors by checking ready() and, if an async result is ready,
                        # propagate any exception by calling .get()
                        if self.args.oltp_workers > 0 and oltp_workers.ready():
                            oltp_workers.get()
                        if self.args.olap_workers > 0 and olap_workers.ready():
                            olap_workers.get()
                        if analyze_worker.ready():
                            analyze_worker.get()

                        while datetime.now() < next_update:
                            self.stats.process_queue(self.stats_queue)
                            time.sleep(0.1)

                        time_now = datetime.now()
                        available_data = datetime.fromtimestamp(
                            self.latest_timestamp.value
                        ) - self.range_delivery_date[0]
                        if burnin_duration is None and available_data >= WANTED_RANGE:
                            burnin_duration = time_now - begin

                        elapsed = time_now - begin
                        if elapsed.total_seconds() >= self.args.duration:
                            break

                        self.stats.update()
                        next_update = time_now + update_interval
                        if 'print' in self.args.output and next_display <= time_now:
                            next_display += display_interval
                            self.monitor.update_display(
                                elapsed, burnin_duration, time_now, stats_conn,
                                datetime.fromtimestamp(
                                    self.latest_timestamp.value))
                except KeyboardInterrupt:
                    pass
                finally:
                    if burnin_duration is None:
                        burnin_duration = elapsed
                    self.monitor.display_summary(elapsed, burnin_duration)
                    self.stats.write_summary(self.args.csv_file, elapsed)
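
The pacing above hinges on a single shared Value('d') holding the wall-clock time of the next allowed transaction: every OLTP worker atomically claims a slot under get_lock() and then sleeps until that slot arrives, which caps the aggregate TPS without a central coordinator. A minimal, self-contained sketch of that idea (the target rate and iteration count are illustrative, not taken from the benchmark):

import time
from multiprocessing import Process, Value


def paced_worker(next_tsx_timestamp, increment, iterations):
    # Claim the next slot atomically, then sleep until its wall-clock time arrives.
    for _ in range(iterations):
        with next_tsx_timestamp.get_lock():
            next_tsx_timestamp.value += increment
            sleep_until = next_tsx_timestamp.value
        delay = sleep_until - time.time()
        if delay > 0:
            time.sleep(delay)
        # ... one transaction would run here ...


if __name__ == '__main__':
    target_tps = 50  # illustrative aggregate rate across all workers
    next_tsx = Value('d', time.time())
    workers = [Process(target=paced_worker, args=(next_tsx, 1.0 / target_tps, 100))
               for _ in range(4)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()
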
Ejemplo n.º 40
0
 def _create_factor_instance(cls, factor=1.0) -> "SyncedFloat":
     value = Value("d", factor)
     return SyncedFloat(value)
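
The SyncedFloat class itself is not part of this snippet; assuming it only needs an atomic get/set around the shared double, one plausible shape for such a wrapper would be:

from multiprocessing import Value


class SyncedFloat:
    """Process-safe float backed by a shared Value('d') (assumed interface)."""

    def __init__(self, value):
        self._value = value  # a multiprocessing.Value('d', ...)

    def get(self):
        with self._value.get_lock():
            return self._value.value

    def set(self, factor):
        with self._value.get_lock():
            self._value.value = factor

    @classmethod
    def _create_factor_instance(cls, factor=1.0) -> "SyncedFloat":
        return cls(Value("d", factor))
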
Ejemplo n.º 41
0

def create_process(fromaddr, toaddrs, msg, counter, username, password, host,
                   port, usessl):
    if not options.multiprocessing:
        return send_mail(fromaddr, toaddrs, msg, counter, username, password,
                         host, port, usessl)
    else:
        p = Process(target=send_mail,
                    args=(fromaddr, toaddrs, msg, counter, username, password,
                          host, port, usessl))
        p.start()
        return p


counter = Value('d', 0)
if options.sender:
    fromaddr = options.sender
else:
    fromaddr = '*****@*****.**'

if options.username:
    username = options.username
else:
    username = fromaddr

port = None
if options.host:
    host = options.host
    if len(host.split(":")) == 2:
        host, port = host.split(":")
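
send_mail is not shown here, but the point of passing counter into every child process is that all of them bump the same shared number. A reduced sketch of that pattern, with a stand-in body for send_mail and illustrative addresses:

from multiprocessing import Process, Value


def send_mail(counter, toaddr):
    # ... the real SMTP work would happen here ...
    with counter.get_lock():  # serialize the read-modify-write across processes
        counter.value += 1
        print('sent %d message(s), last to %s' % (int(counter.value), toaddr))


if __name__ == '__main__':
    counter = Value('d', 0)
    procs = [Process(target=send_mail, args=(counter, 'user%d@example.com' % i))
             for i in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
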
Ejemplo n.º 42
0
if __name__ == "__main__":
    # Processor info
    cpu = cpuinfo.get_cpu_info()
    # Colorama
    init(autoreset=True)
    title(getString("duco_python_miner") + str(minerVersion) + ")")

    try:
        from multiprocessing import (Manager, Process, Value, cpu_count,
                                     current_process)
        manager = Manager()
        # Multiprocessing fix for pyinstaller
        freeze_support()
        # Multiprocessing globals
        khashcount = Value("i", 0)
        accepted = Value("i", 0)
        rejected = Value("i", 0)
        hashrates_list = manager.dict()
        totalhashrate_mean = manager.list()
    except Exception:
        prettyPrint(
            "sys0", " Multiprocessing is not available. " +
            "Please check permissions and/or your python installation. " +
            "Exiting in 15s.", "error")
        sleep(15)
        _exit(1)

    try:
        # Load config file or create new one
        loadConfig()
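
The miner keeps its scalar statistics in Value('i', 0) counters and everything non-scalar in Manager containers. A stripped-down sketch of how such counters are typically updated from worker processes (the mining loop body and hashrate figure are placeholders):

from multiprocessing import Manager, Process, Value


def miner(accepted, hashrates_list, worker_id):
    # Placeholder for a real mining loop: record one accepted share and a fake hashrate.
    with accepted.get_lock():
        accepted.value += 1
    hashrates_list[worker_id] = 123.4  # Manager dict proxies are process-safe per operation


if __name__ == '__main__':
    manager = Manager()
    accepted = Value('i', 0)
    hashrates_list = manager.dict()
    procs = [Process(target=miner, args=(accepted, hashrates_list, i)) for i in range(2)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(accepted.value, dict(hashrates_list))
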
Ejemplo n.º 43
0
def load_from_bam(bam_path,
                  target_contig,
                  start_pos,
                  end_pos,
                  vcf_handler,
                  use_end_sentinels=False,
                  n_threads=1,
                  debug_reads=False,
                  debug_pos=False,
                  stepper="samtools"):
    """
    Load variants observed in a :py:class:`pysam.AlignmentFile` to
    an instance of :py:class:`hansel.hansel.Hansel`.

    Parameters
    ----------
    bam_path : str
        Path to the BAM alignment

    target_contig : str
        The name of the contig for which to recover haplotypes.

    start_pos : int
        The 1-indexed genomic position from which to begin considering variants.

    end_pos : int
        The 1-indexed genomic position at which to stop considering variants.

    vcf_handler : dict{str, any}
        Variant metadata, as provided by :py:func:`gretel.gretel.process_vcf`.

    use_end_sentinels : boolean, optional(default=False)
        Whether or not to append an additional pairwise observation between
        the final variant on a read towards a sentinel.

        .. note:: Experimental
          This feature is for testing purposes, currently it is recommended
          that the flag be left at the default of `False`. However, some
          data sets report minor performance improvements for some haplotypes
          when set to `True`.
          This flag may be removed at any time without warning.

    n_threads : int, optional(default=1)
        Number of threads to spawn for reading the BAM

    debug_reads : list{str}, optional
        A list of read names for which to print out debugging information

    debug_pos : list{int}, optional
        A list of positions for which to print out debugging information

    stepper : str, optional(default=samtools)
        The pysam pileup stepper to use

    Returns
    -------
    Hansel : :py:class:`hansel.hansel.Hansel`
    """

    hansel = Hansel.init_matrix(['A', 'C', 'G', 'T', 'N', "-", "_"],
                                ['N', "_"], vcf_handler["N"])

    if not debug_reads:
        debug_reads = set([])
    if not debug_pos:
        debug_pos = set([])

    import random

    def progress_worker(progress_q, n_workers, slices, total_snps, crumbs):
        worker_pos = []
        worker_done = []
        for _ in range(0, n_workers):
            worker_pos.append(0)
            worker_done.append(0)

        while sum(worker_done) < n_workers:
            work_block = progress_q.get()
            worker_pos[work_block["worker_i"]] = work_block["pos"]
            if work_block["pos"] is None:
                worker_done[work_block["worker_i"]] = 1

                crumbs.value += work_block["crumbs"]
                slices.value += work_block["slices"]
                total_snps.value += work_block["covered_snps"]
                sys.stderr.write("%s\n" % ([
                    worker_pos[i] if status != 1 else None
                    for (i, status) in enumerate(worker_done)
                ]))
            if random.random() < 0.1:
                sys.stderr.write("%s\n" % ([
                    worker_pos[i] if status != 1 else None
                    for (i, status) in enumerate(worker_done)
                ]))
        return (slices, total_snps, crumbs)

    def bam_worker(bam_q, progress_q, worker_i):

        worker = worker_i

        slices = 0
        crumbs = 0
        covered_snps = 0

        bam = pysam.AlignmentFile(bam_path)

        while True:
            work_block = bam_q.get()
            if work_block is None:
                progress_q.put({
                    "pos": None,
                    "worker_i": worker_i,
                    "slices": slices,
                    "crumbs": crumbs,
                    "covered_snps": covered_snps,
                })
                break

            reads = {}
            dreads = set([])

            for p_col in bam.pileup(reference=target_contig,
                                    start=work_block["start"] - 1,
                                    stop=work_block["end"],
                                    ignore_overlaps=False,
                                    min_base_quality=0,
                                    stepper=stepper):

                if p_col.reference_pos + 1 > end_pos:
                    # Ignore positions beyond the end_pos
                    break

                if vcf_handler["region"][p_col.reference_pos + 1] != 1:
                    # Ignore non-SNPs
                    continue

                for p_read in p_col.pileups:

                    curr_read_1or2 = 0
                    if p_read.alignment.is_paired:
                        if p_read.alignment.is_read1:
                            curr_read_1or2 = 1
                        elif p_read.alignment.is_read2:
                            curr_read_1or2 = 2
                        else:
                            #TODO Probably indicative of bad data
                            pass

                    curr_read_name = "%s_%s_%d" % (p_read.alignment.query_name,
                                                   str(p_read.alignment.flag),
                                                   curr_read_1or2)

                    LEFTMOST_1pos = p_read.alignment.reference_start + 1  # Convert 0-based reference_start to 1-based position (to match region array and 1-based VCF)

                    # Special case: Consider reads that begin before the start_pos, but overlap the 0th block
                    if work_block["i"] == 0:
                        if LEFTMOST_1pos < start_pos:
                            # Read starts before the start_pos
                            if p_read.alignment.reference_start + 1 + p_read.alignment.query_alignment_length < start_pos:
                                # Read ends before the start_pos
                                continue
                            LEFTMOST_1pos = start_pos
                    else:
                        # This read begins before the start of the current (non-0) block
                        # and will have already been covered by the block that preceded it
                        if LEFTMOST_1pos < work_block["start"]:
                            continue

                    sequence = None
                    qual = None
                    if p_read.is_del:
                        # TODO Unsure how best to estimate the quality of a deletion
                        sequence = "-" * (abs(p_read.indel) + 1)
                        qual = p_read.alignment.query_qualities[
                            p_read.query_position_or_next] * (
                                abs(p_read.indel) + 1)
                    elif p_read.indel > 0:
                        # p_read.indel peeks at the next CIGAR operation to determine whether the base FOLLOWING this one is an insertion
                        sequence = p_read.alignment.query_sequence[
                            p_read.query_position:p_read.query_position +
                            p_read.indel + 1]
                        qual = p_read.alignment.query_qualities[
                            p_read.query_position:p_read.query_position +
                            p_read.indel + 1]
                    else:
                        sequence = p_read.alignment.query_sequence[
                            p_read.query_position]
                        qual = p_read.alignment.query_qualities[
                            p_read.query_position]

                    if not sequence:
                        print(
                            "[WARN] Sequence data seems to not be correctly salvaged from read %s"
                            % p_read.alignment.query_name)
                        continue

                    if curr_read_name not in reads:
                        reads[curr_read_name] = {
                            "rank": np.sum(
                                vcf_handler["region"]
                                [1:LEFTMOST_1pos]),  # non-inclusive 1pos end
                            "seq": [],
                            "quals": [],
                            "refs_1pos": [],
                            "read_variants_0pos": [],
                        }
                        if p_read.alignment.query_name in debug_reads:
                            dreads.add(curr_read_name)
                    reads[curr_read_name]["seq"].append(sequence)
                    reads[curr_read_name]["quals"].append(qual)
                    reads[curr_read_name]["refs_1pos"].append(
                        p_col.reference_pos + 1)
                    reads[curr_read_name]["read_variants_0pos"].append(
                        p_read.query_position)

            for dread in sorted(dreads):
                r = reads[dread]
                if r:
                    for snp_i, ref_pos in enumerate(r["refs_1pos"]):
                        print(dread, ref_pos, r["seq"][snp_i])
                    print("RANK", dread, r["rank"])

            if debug_pos:
                for read in reads:
                    for d_pos in set(reads[read]["refs_1pos"]) & debug_pos:
                        i = reads[read]["refs_1pos"].index(d_pos)
                        print(read, d_pos, reads[read]["seq"][i])

            num_reads = len(reads)
            for qi, qname in enumerate(reads):
                progress_q.put({
                    "pos": num_reads - (qi + 1),
                    "worker_i": worker_i
                })

                if not len(reads[qname]["seq"]) > 1:
                    # Ignore reads without evidence
                    continue
                slices += 1

                rank = reads[qname]["rank"]
                support_len = len(reads[qname]["seq"])

                support_seq = "".join(
                    [b[0] for b in reads[qname]["seq"]]
                )  # b[0] has the effect of capturing the base before any insertion
                covered_snps += len(
                    support_seq.replace("N", "").replace("_", ""))

                # For each position in the supporting sequence (that is, each covered SNP)
                for i in range(0, support_len):
                    snp_a = support_seq[i]

                    #if support_len == 1:
                    #    if rank == 0:
                    #        hansel.add_observation('_', snp_a, 0, 1)
                    #        hansel.add_observation(snp_a, '_', 1, 2)
                    #    else:
                    #        hansel.add_observation(snp_a, '_', rank+1, rank+2)

                    # For each position in the supporting sequence following i
                    for j in range(i + 1, support_len):
                        snp_b = support_seq[j]

                        # Ignore observations that come from an invalid transition
                        if snp_a in ['_', 'N']:
                            continue

                        # Sentinel->A
                        if i == 0 and j == 1 and rank == 0:
                            # If this is the first position in the support (i == 0),
                            # rank == 0 (that is, the read starts on the first SNP)
                            # and SNPs a, b are adjacent (j == 1)
                            hansel.add_observation('_', snp_a, 0, 1)
                            hansel.add_observation(snp_a, snp_b, 1, 2)
                            crumbs += 1

                        # B->Sentinel
                        elif (j + rank +
                              1) == vcf_handler["N"] and abs(i - j) == 1:
                            # Last observation (abs(i-j)==1),
                            # that ends on the final SNP (j+rank+1 == N)
                            hansel.add_observation(snp_a, snp_b,
                                                   vcf_handler["N"] - 1,
                                                   vcf_handler["N"])
                            hansel.add_observation(snp_b, '_',
                                                   vcf_handler["N"],
                                                   vcf_handler["N"] + 1)
                            crumbs += 1

                        # A regular observation (A->B)
                        else:
                            hansel.add_observation(snp_a, snp_b, i + rank + 1,
                                                   j + rank + 1)
                            crumbs += 1

                            if use_end_sentinels:
                                if j == (support_len - 1) and abs(i - j) == 1:
                                    # The last SNP on a read, needs a sentinel afterward
                                    hansel.add_observation(
                                        snp_b, '_', j + rank + 1, j + rank + 2)

    bam_queue = Queue()
    progress_queue = Queue()

    # Queue the work blocks for the workers
    # TODO Evenly divide, but in future, consider the distribution
    # TODO Also consider that, in general, block 0 has more work to do
    window_l = round((end_pos - start_pos) / float(n_threads))
    for window_i, window_pos in enumerate(
            range(start_pos, end_pos + 1, window_l)):
        bam_queue.put({
            "start": window_pos,
            "end": window_pos + window_l -
            1,  # add -1 to stop end of window colliding with next window
            "i": window_i,
            "region_end": end_pos,
        })

    processes = []
    for _ in range(n_threads):
        p = Process(target=bam_worker, args=(bam_queue, progress_queue, _))
        processes.append(p)

    # ...and a progress process
    n_reads = Value('i', 0)
    n_observations = Value('i', 0)
    total_covered_snps = Value('i', 0)
    p = Process(target=progress_worker,
                args=(progress_queue, n_threads, n_reads, total_covered_snps,
                      n_observations))
    processes.append(p)

    for p in processes:
        p.start()

    # Add sentinels
    for _ in range(n_threads):
        bam_queue.put(None)

    # Wait for processes to complete work
    for p in processes:
        p.join()

    hansel.n_slices = n_reads.value
    hansel.n_crumbs = n_observations.value
    sys.stderr.write("[NOTE] Loaded %d breadcrumbs from %d bread slices.\n" %
                     (hansel.n_crumbs, hansel.n_slices))

    hansel.L = int(ceil(float(total_covered_snps.value) / n_reads.value))
    sys.stderr.write("[NOTE] Setting Gretel.L to %d\n" % hansel.L)
    return hansel
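
Note how the totals get back to the parent: each bam_worker pushes its final counts onto progress_queue, progress_worker folds them into the shared Value counters, and the parent reads n_reads, n_observations and total_covered_snps only after join(). A minimal sketch of that fan-in pattern, detached from pysam, with fabricated per-worker counts:

from multiprocessing import Process, Queue, Value


def worker(q, worker_i):
    # Pretend this worker processed some reads and collected some observations.
    q.put({'worker_i': worker_i, 'slices': 10 + worker_i, 'crumbs': 100 + worker_i})


def collector(q, n_workers, slices, crumbs):
    done = 0
    while done < n_workers:
        block = q.get()
        slices.value += block['slices']  # only the collector writes, so no lock is needed
        crumbs.value += block['crumbs']
        done += 1


if __name__ == '__main__':
    q = Queue()
    n_slices = Value('i', 0)
    n_crumbs = Value('i', 0)
    workers = [Process(target=worker, args=(q, i)) for i in range(4)]
    coll = Process(target=collector, args=(q, len(workers), n_slices, n_crumbs))
    for p in workers + [coll]:
        p.start()
    for p in workers + [coll]:
        p.join()
    print(n_slices.value, n_crumbs.value)
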
Ejemplo n.º 44
0
        packet_buff = buff[header_position:header_position + RX_SIZE]
        # check crc
        crc = struct.unpack(CRC_PACKSTR, packet_buff[-CRC_SIZE:])[0]
        if crc_fun(packet_buff[:-CRC_SIZE]) == crc:
            packet[:] = packet_buff

            with init_flag.get_lock():
                init_flag.value = 1
        else:
            print('crc discrepancy')
        time.sleep(max(0, latest_time + 1.0 / freq - time.time()))


packet = Array('c', RX_SIZE)
init_flag = Value('i', 0)
exit_flag = Value('i', 0)
established_flag = False
ser = None


def terminate_process():
    exit_flag.value = 1


atexit.register(terminate_process)


# share serial instance / process among multiple environments
def establish_connection():
    global established_flag, ser
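
The reader above publishes the latest packet through an Array('c', RX_SIZE) and signals readiness and shutdown through Value('i') flags, taking init_flag.get_lock() while flipping the flag so a consumer never observes a half-finished handshake. A reduced sketch of the same handshake, with a fabricated payload instead of a serial port:

import time
from multiprocessing import Array, Process, Value

RX_SIZE = 8  # illustrative packet size


def reader(packet, init_flag, exit_flag):
    while not exit_flag.value:
        payload = b'ABCDEFGH'  # stands in for a CRC-checked serial packet
        packet[:] = payload
        with init_flag.get_lock():
            init_flag.value = 1  # tell consumers that packet now holds valid data
        time.sleep(0.05)


if __name__ == '__main__':
    packet = Array('c', RX_SIZE)
    init_flag = Value('i', 0)
    exit_flag = Value('i', 0)
    p = Process(target=reader, args=(packet, init_flag, exit_flag))
    p.start()
    while not init_flag.value:
        time.sleep(0.01)
    print(bytes(packet[:]))
    exit_flag.value = 1
    p.join()
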
Ejemplo n.º 45
0
 def __init__(self):
     self.lock = Lock()
     self.n = Value('i', 0)
Ejemplo n.º 46
0
def main(ip):
    # use fixed random state
    rand_state = np.random.RandomState(1).get_state()
    np.random.set_state(rand_state)
    tf_set_seeds(np.random.randint(1, 2**31 - 1))

    # Create UR5 Reacher2D environment
    env = ReacherEnv(setup="UR5_6dof",
                     host=ip,
                     dof=6,
                     control_type="velocity",
                     target_type="position",
                     reset_type="zero",
                     reward_type="precision",
                     derivative_type="none",
                     deriv_action_max=5,
                     first_deriv_max=2,
                     accel_max=1.4,
                     speed_max=0.3,
                     speedj_a=1.4,
                     episode_length_time=4.0,
                     episode_length_step=None,
                     actuation_sync_period=1,
                     dt=0.04,
                     run_mode="multiprocess",
                     rllab_box=False,
                     movej_t=2.0,
                     delay=0.0,
                     random_state=rand_state)
    env = NormalizedEnv(env)
    # Start environment processes
    env.start()
    # Create baselines TRPO policy function
    sess = U.single_threaded_session()
    sess.__enter__()

    def policy_fn(name, ob_space, ac_space):
        return MlpPolicy(name=name,
                         ob_space=ob_space,
                         ac_space=ac_space,
                         hid_size=64,
                         num_hid_layers=2)

    # Create and start plotting process
    plot_running = Value('i', 1)
    shared_returns = Manager().dict({
        "write_lock": False,
        "episodic_returns": [],
        "episodic_lengths": [],
    })
    # Spawn plotting process
    pp = Process(target=plot_ur5_reacher,
                 args=(env, 2048, shared_returns, plot_running))
    pp.start()

    # Create callback function for logging data from baselines TRPO learn
    kindred_callback = create_callback(shared_returns)

    # Train baselines TRPO
    learn(env,
          policy_fn,
          max_timesteps=200000,
          timesteps_per_batch=2048,
          max_kl=0.05,
          cg_iters=10,
          cg_damping=0.1,
          vf_iters=5,
          vf_stepsize=0.001,
          gamma=0.995,
          lam=0.995,
          callback=kindred_callback)

    # Safely terminate plotter process
    plot_running.value = 0  # shut down the plotting process
    time.sleep(2)
    pp.join()

    env.close()
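
plot_running here is simply a shared boolean-as-int: the plotting process polls it and exits once the trainer clears it. A minimal sketch of that shutdown handshake, with a placeholder loop standing in for plot_ur5_reacher:

import time
from multiprocessing import Manager, Process, Value


def plotter(shared_returns, plot_running):
    # Placeholder for a real plotting loop; exits when the flag is cleared.
    while plot_running.value:
        time.sleep(0.1)  # a real implementation would redraw from shared_returns here


if __name__ == '__main__':
    plot_running = Value('i', 1)
    shared_returns = Manager().dict({'episodic_returns': []})
    pp = Process(target=plotter, args=(shared_returns, plot_running))
    pp.start()
    time.sleep(0.5)  # training would happen here
    plot_running.value = 0  # ask the plotter to exit
    pp.join()
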
Ejemplo n.º 47
0
            f3.write("" + str(rstack[i]) + " ")
        f3.close()

        path6 = 'stack.txt'
        f6 = open(path6, 'w')
        for i in range(0, len(value), 1):
            f6.write("" + str(value[i]) + " ")
        f6.close()


if __name__ == '__main__':
    start_time = time.time()
    start = []
    end = []
    tabledata = []
    tablecount = Value('i', 0)
    address = Array('i', 10)
    value = Array('i', 10)
    rstack = Array('i', 100000)
    lstack = Array('i', 100000)
    rtop = Value('i', 0)
    ltop = Value('i', 0)
    endflag = {}
    endflag0 = Value('i', 0)
    notlabelflag = 0
    lock = {}
    variable_region = []

    mlock = Lock()
    lockfree = Lock()
    a = '1'
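
This interpreter shares its whole state as flat Array('i', ...) buffers with Value('i') top pointers, guarded by explicit Locks. A small sketch of a process-safe push built from the same pieces (the capacity and pushed values are illustrative):

from multiprocessing import Array, Lock, Process, Value


def push(stack, top, lock, item):
    # The lock makes the "write slot, then advance top" pair atomic across processes.
    with lock:
        stack[top.value] = item
        top.value += 1


def worker(stack, top, lock, base):
    for i in range(5):
        push(stack, top, lock, base + i)


if __name__ == '__main__':
    rstack = Array('i', 100)
    rtop = Value('i', 0)
    mlock = Lock()
    procs = [Process(target=worker, args=(rstack, rtop, mlock, b)) for b in (0, 100)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(rtop.value, sorted(rstack[:rtop.value]))
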
Ejemplo n.º 48
0
    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)

        if not hasattr(self, 'datatype'):
            self.datatype = opt['datatype']
        if not hasattr(self, 'random'):
            self.random = self.datatype == 'train'
        if not hasattr(self, 'training'):
            self.training = self.datatype.startswith('train')
        if not hasattr(self, 'datafile'):
            self.datafile = opt.get('datafile', opt.get('pytorch_datafile'))
        # set up support for multithreaded data loading
        self.data_queue = queue.Queue()
        if shared:
            self.index = shared['index']
            if 'data_loader' in shared:
                self.data_loader = shared['data_loader']
            if 'threadindex' in shared:
                self.threadindex = shared['threadindex']
            if 'examples' in shared:
                self.examples = shared['examples']
        else:
            self.index = AttrDict(value=-1)

        if not hasattr(self, 'data_loader'):
            self.data_loader = DataLoader(opt)
            self.data_loader.start()

        # set up batching
        self.bsz = opt.get('batchsize', 1)
        self.batchindex = opt.get('batchindex', 0)

        dt = opt.get('datatype', '').split(':')
        self.use_batch_act = (opt.get('batch_sort', False) and self.bsz > 1
                              and 'stream' not in dt)

        if self.use_batch_act:
            if shared:
                self.lastYs = shared['lastYs']
                if 'sorted_data' in shared:
                    self.sorted_data = shared['sorted_data']
                    self.batches = shared['batches']
            else:
                self.lastYs = [None] * self.bsz
                ordered_opt = opt.copy()
                ordered_opt['datatype'] = ':'.join((dt[0], 'ordered'))
                ordered_opt['batchsize'] = 1
                ordered_opt['numthreads'] = 1
                ordered_opt['hide_labels'] = False
                ordered_teacher = create_task_agent_from_taskname(ordered_opt)[0]

                clen = opt.get('context_length', -1)
                incl = opt.get('include_labels', True)

                if ordered_teacher.num_examples() > 1000000:  # one million
                    print('WARNING: this dataset is large, and batch sorting '
                          'may use too much RAM or take too long to set up. '
                          'Consider disabling batch sorting, setting '
                          'context-length to a small integer (if this dataset '
                          'has episodes of multiple examples), or streaming '
                          'the data using a streamed data mode if supported.')

                flatdata = flatten(ordered_teacher,
                                   context_length=clen, include_labels=incl)
                self.sorted_data = sort_data(flatdata)
                self.batches = make_batches(self.sorted_data, self.bsz)
                # one fixed-seed shuffle keeps determinism but makes sure that
                # examples aren't presented in sorted order (bad for `-vme`)
                random.Random(42).shuffle(self.batches)
Ejemplo n.º 49
0
class FixedDialogTeacher(Teacher):
    """A teacher agent for all teachers involved in tasks with fixed data.

    This class provides the following functionality for its subclasses:

    - Resets a teacher
    - Provides an observe method
    - Computes and retrieves the next episode index for a teacher
    - Provides a threadpool option for loading data (especially useful for
      large data, e.g. images)

    In order to take advantage of the first few features, all a subclass has to
    implement is three functions: ``num_episodes``, ``num_examples``, and
    ``get`` (which returns a specific example from a specific episode).

    To utilize the DataLoader for threadpool loading, a teacher should
    implement the ``submit_load_request`` function to send a load request
    to the DataLoader by calling ``self.data_loader.request_load`` with the
    appropriate arguments (``receive_fn, load_fn, args``). The DataLoader then
    returns the data to the teacher's ``data_queue``, which the teacher can
    poll in its ``act`` method.

    The following is an example of the DataLoader usage in the VQA-V1 teacher.

    1. In the teacher's ``init`` function, the teacher calls its
       ``submit_load_request`` function to preload an image.
    2. The ``submit_load_request`` function gets the next ``episode_idx``,
       and computes the image path for the load request.
    3. At the end of ``submit_load_request``, the teacher calls
       ``self.data_loader.request_load`` with three args:

        - ``self.receive_data`` - the function that the DataLoader calls to
          return the loaded object
        - ``self.image_loader.load`` - the function used to load the image
          from the image path
        - ``[img_path]`` - a list of arguments for the load function, which
          in this case is the path of the image.

    4. In the teacher's ``act`` function, the teacher loads the data from
       its data queue.
    5. At the end of the ``act`` function, the teacher calls
       ``submit_load_request`` to preload an image for the next example.

    To see this in action, take a look at this teacher in ``tasks.vqa_v1.agents``.
    """
    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)

        if not hasattr(self, 'datatype'):
            self.datatype = opt['datatype']
        if not hasattr(self, 'random'):
            self.random = self.datatype == 'train'
        if not hasattr(self, 'training'):
            self.training = self.datatype.startswith('train')
        if not hasattr(self, 'datafile'):
            self.datafile = opt.get('datafile', opt.get('pytorch_datafile'))
        # set up support for multithreaded data loading
        self.data_queue = queue.Queue()
        if shared:
            self.index = shared['index']
            if 'data_loader' in shared:
                self.data_loader = shared['data_loader']
            if 'threadindex' in shared:
                self.threadindex = shared['threadindex']
            if 'examples' in shared:
                self.examples = shared['examples']
        else:
            self.index = AttrDict(value=-1)

        if not hasattr(self, 'data_loader'):
            self.data_loader = DataLoader(opt)
            self.data_loader.start()

        # set up batching
        self.bsz = opt.get('batchsize', 1)
        self.batchindex = opt.get('batchindex', 0)

        dt = opt.get('datatype', '').split(':')
        self.use_batch_act = (opt.get('batch_sort', False) and self.bsz > 1
                              and 'stream' not in dt)

        if self.use_batch_act:
            if shared:
                self.lastYs = shared['lastYs']
                if 'sorted_data' in shared:
                    self.sorted_data = shared['sorted_data']
                    self.batches = shared['batches']
            else:
                self.lastYs = [None] * self.bsz
                ordered_opt = opt.copy()
                ordered_opt['datatype'] = ':'.join((dt[0], 'ordered'))
                ordered_opt['batchsize'] = 1
                ordered_opt['numthreads'] = 1
                ordered_opt['hide_labels'] = False
                ordered_teacher = create_task_agent_from_taskname(ordered_opt)[0]

                clen = opt.get('context_length', -1)
                incl = opt.get('include_labels', True)

                if ordered_teacher.num_examples() > 1000000:  # one million
                    print('WARNING: this dataset is large, and batch sorting '
                          'may use too much RAM or take too long to set up. '
                          'Consider disabling batch sorting, setting '
                          'context-length to a small integer (if this dataset '
                          'has episodes of multiple examples), or streaming '
                          'the data using a streamed data mode if supported.')

                flatdata = flatten(ordered_teacher,
                                   context_length=clen, include_labels=incl)
                self.sorted_data = sort_data(flatdata)
                self.batches = make_batches(self.sorted_data, self.bsz)
                # one fixed-seed shuffle keeps determinism but makes sure that
                # examples aren't presented in sorted order (bad for `-vme`)
                random.Random(42).shuffle(self.batches)

    def _lock(self):
        if hasattr(self.index, 'get_lock'):
            return self.index.get_lock()
        else:
            return no_lock()

    def reset(self):
        """Reset the dialog so that it is at the start of the epoch,
        and all metrics are reset.
        """
        super().reset()
        self.metrics.clear()
        self.lastY = None
        self.episode_done = True
        self.epochDone = False
        self.data_queue = queue.Queue()

        self.episode_idx = -1
        with self._lock():
            self.index.value = -1
        if self.use_batch_act and self.random and hasattr(self, 'batches'):
            random.shuffle(self.batches)

    def submit_load_request(self):
        """An agent should implement this method to submit requests to the
        data loader. At the end of this method, the agent should call
        ``self.data_loader.request_load()`` with the appropriate args.

        By default, this method does nothing.
        """
        pass

    def receive_data(self, future):
        """Function for receiving data from the data loader.

        :param future: result from the load request.
        """
        data = future.result()
        self.data_queue.put(data)

    def share(self):
        """Shares data structures between other instances created for batching
        or hogwild.
        """
        shared = super().share()

        if hasattr(self, 'lastYs'):
            # share lastYs to communicate between batch_act and observe
            shared['lastYs'] = self.lastYs

        if hasattr(self, 'examples'):
            shared['examples'] = self.examples

        if self.opt.get('numthreads', 1) > 1:
            if type(self.index) is not multiprocessing.sharedctypes.Synchronized:
                # for multithreading need to move index into threadsafe memory
                self.index = Value('l', -1)
            if hasattr(self, 'sorted_data'):
                shared['sorted_data'] = self.sorted_data
                shared['batches'] = self.batches
        else:
            shared['data_loader'] = self.data_loader
        shared['index'] = self.index

        return shared

    def next_episode_idx(self, num_eps=None, loop=None):
        """Returns the next episode index.

        :param num_eps: default None uses ``num_episodes`` value.
        :param loop: default None loops during training but not evaluation.
        """
        if num_eps is None:
            num_eps = self.num_episodes()
        if loop is None:
            loop = self.training
        if self.random:
            new_idx = random.randrange(num_eps)
        else:
            with self._lock():
                self.index.value += 1
                if loop:
                    self.index.value %= num_eps
                new_idx = self.index.value
        return new_idx

    def next_example(self):
        """Returns the next example.
        If there are multiple examples in the same episode, returns the next
        one in that episode. If that episode is over, gets a new episode index
        and returns the first example of that episode.
        """
        if self.episode_done:
            self.episode_idx = self.next_episode_idx()
            self.entry_idx = 0
        else:
            self.entry_idx += 1

        if self.episode_idx >= self.num_episodes():
            return {'episode_done': True}, True

        ex = self.get(self.episode_idx, self.entry_idx)
        self.episode_done = ex.get('episode_done', False)

        if (not self.random and self.episode_done
                and self.episode_idx + self.opt.get("batchsize", 1) >= self.num_episodes()):
            epoch_done = True
        else:
            epoch_done = False

        return ex, epoch_done

    def next_batch(self):
        """Returns the next batch of examples."""
        # get next batch
        with self._lock():
            self.index.value += 1
            if self.training:
                self.index.value %= len(self.batches)
            batch_idx = self.index.value

            if batch_idx + 1 >= len(self.batches):
                if self.random:
                    random.shuffle(self.batches)
                self.epochDone = True
            else:
                self.epochDone = False

        if batch_idx >= len(self.batches):
            return [{'episode_done': True, 'id': self.getID()}] * self.bsz

        return self.batches[batch_idx]

    def num_episodes(self):
        """Get the number of episodes in this dataset."""
        if self.use_batch_act:
            # when using batch_act, this is length of sorted data
            return len(self.sorted_data)
        raise RuntimeError('"num_episodes" must be overriden by children.')

    def num_examples(self):
        """Get the total number of examples in this dataset."""
        if self.use_batch_act:
            # when using batch_act, this is length of sorted data
            return len(self.sorted_data)
        raise RuntimeError('"num_examples" must be overriden by children.')

    def get(self, episode_idx, entry_idx=0):
        """Get the specified episode and the specified entry in that episode.
        Children must override this method in order to inherit the
        `next_example` method.

        :param episode_idx: which episode to return examples from
        :param entry_idx: which example to return from the episode.
                          Many datasets have only single-entry episodes,
                          so this defaults to zero.
        """
        raise RuntimeError('"Get" method must be overriden by children.')

    def observe(self, observation):
        """Process observation for metrics."""
        if self.use_batch_act:
            self.lastY = self.lastYs[self.batchindex]
            self.lastYs[self.batchindex] = None

        if hasattr(self, 'lastY') and self.lastY is not None:
            self.metrics.update(observation, self.lastY)
            self.lastY = None
        return observation

    def batch_act(self, observations):
        """Returns an entire batch of examples instead of just one."""
        # we ignore observations
        if not hasattr(self, 'epochDone'):
            # reset if haven't yet
            self.reset()

        batch = self.next_batch()
        # pad batch
        if len(batch) < self.bsz:
            batch += [{'episode_done': True, 'id': self.getID()}] * (self.bsz - len(batch))

        # remember correct answer if available (for padding, None)
        for i, ex in enumerate(batch):
            self.lastYs[i] = ex.get('labels', ex.get('eval_labels'))

        return batch

    def act(self):
        """Send new dialog message."""
        if not hasattr(self, 'epochDone'):
            # reset if haven't yet
            self.reset()

        # get next example, action is episode_done dict if already out of exs
        action, self.epochDone = self.next_example()
        action['id'] = self.getID()

        # remember correct answer if available
        self.lastY = action.get('labels', None)
        if ((not self.datatype.startswith('train') or 'evalmode' in self.datatype)
            and 'labels' in action):
            # move labels to the eval field so they are not used for training,
            # but the model can still use them to compute perplexity or loss
            labels = action.pop('labels')
            if not self.opt.get('hide_labels', False):
                action['eval_labels'] = labels

        return action
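
The Value usage that matters here is in share() and next_episode_idx(): when running with multiple threads or hogwild processes, the episode index is promoted to a Value('l', -1) so every copy of the teacher advances the same cursor under get_lock(). A stand-alone sketch of that shared-cursor idea (episode and worker counts are illustrative):

from multiprocessing import Process, Queue, Value


def consume(index, num_eps, results, worker_id):
    # Each worker claims the next episode index atomically, wrapping around at num_eps.
    claimed = []
    for _ in range(5):
        with index.get_lock():
            index.value += 1
            index.value %= num_eps
            claimed.append(index.value)
    results.put((worker_id, claimed))


if __name__ == '__main__':
    index = Value('l', -1)
    results = Queue()
    workers = [Process(target=consume, args=(index, 8, results, i)) for i in range(2)]
    for p in workers:
        p.start()
    for _ in workers:
        print(results.get())
    for p in workers:
        p.join()
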
Ejemplo n.º 50
0
import cv2
import os

from Step2.preProcessing.PreProcess import GrayImg, lightRemove, gamma_trans, letter_box

import numpy as np
import math

# The moving area
delta_x = 960
delta_y = 540
m = 0
n = 0

# global value to stop/start process
alive = Value('b', True)
flag = False


# Click the button then the window will move
def moveit():
    global delta_x, delta_y, flag, m, n
    window2 = tk.Tk()
    # On the top
    window2.attributes("-topmost", True)
    window2.overrideredirect(True)
    # Fill with red
    Full_color = tk.Label(window2, bg='red', width=10, height=10)
    Full_color.pack()
    n = 0
    # from (300, 150), in steps of (330, 195), 20 times in total
Ejemplo n.º 51
0
import math
from DeteccionObj_detob import *
import threading
import cv2
import serial  # Importing the serial library to communicate with Arduino
from multiprocessing import Value, Process
from ctypes import c_bool
import numpy as np
from interpreter import Automata
import time
from map_grilla_detob import map_grilla
from solucion import generar_solucion

flag = True
i = 30
angulo = Value('i', 0)
x = Value('i', 0)
y = Value('i', 0)

t_ref = Value('i', 0)  # the reference angle
x_ref = Value('i', 0)
y_ref = Value('i', 0)

coord_x_ref = Value('i', 0)
coord_y_ref = Value('i', 0)

coord_x_ref_ant = Value('i', 0)
coord_y_ref_ant = Value('i', 0)

ready = Value(c_bool, False)
flag = Value(c_bool, True)
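
Besides the one-character typecodes ('i', 'd', 'b'), Value also accepts ctypes types directly, which is what Value(c_bool, False) does above for the ready/flag handshakes. A compact sketch of that handshake between a vision process and a controller (the detected angle is fabricated):

import time
from ctypes import c_bool
from multiprocessing import Process, Value


def vision(angulo, ready, flag):
    while flag.value:
        angulo.value = 42  # would come from the camera / object detector
        ready.value = True  # tell the controller a fresh angle is available
        time.sleep(0.05)


if __name__ == '__main__':
    angulo = Value('i', 0)
    ready = Value(c_bool, False)
    flag = Value(c_bool, True)
    p = Process(target=vision, args=(angulo, ready, flag))
    p.start()
    while not ready.value:
        time.sleep(0.01)
    print('angle:', angulo.value)
    flag.value = False  # ask the vision process to exit
    p.join()
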
Ejemplo n.º 52
0
import os
import sys
from functools import partial
from multiprocessing import Pool, cpu_count, Value
from pathlib import Path
from typing import Dict

from google.appengine.api import datastore
from google.appengine.api.datastore_types import EmbeddedEntity
from google.appengine.datastore import entity_bytes_pb2 as entity_pb2

from converter import records
from converter.exceptions import BaseError, ValidationError
from converter.utils import embedded_entity_to_dict, get_dest_dict, serialize_json

num_files: Value = Value("i", 0)
num_files_processed: Value = Value("i", 0)


def main(args=None):
    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser(
        prog="fs_to_json", description="Firestore DB export to JSON"
    )

    parser.add_argument(
        "source_dir",
        help="Destination directory to store generated JSON",
        type=str,
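
Module-level Values like num_files and num_files_processed are inherited by Pool workers on fork-based platforms; the portable variant is to hand the counter to each worker through the pool initializer. A sketch of that variant (the file names and conversion body are placeholders):

from multiprocessing import Pool, Value

num_files_processed = None  # set in each worker by _init


def _init(counter):
    # Runs once per worker process; stores the shared counter in the worker's globals.
    global num_files_processed
    num_files_processed = counter


def convert(path):
    # ... real conversion work on `path` would happen here ...
    with num_files_processed.get_lock():
        num_files_processed.value += 1
    return path


if __name__ == '__main__':
    counter = Value('i', 0)
    files = ['export-%d.bin' % i for i in range(10)]  # placeholder file names
    with Pool(4, initializer=_init, initargs=(counter,)) as pool:
        pool.map(convert, files)
    print('processed:', counter.value)
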
Ejemplo n.º 53
0
def run(rank, workers, model, save_path, train_data, test_data, global_lr):
    # receive the initial model parameters sent from the parameter server (ps)
    print(workers)

    _group = [w for w in workers] + [0]  # include rank 0 (the parameter server) in the group
    group = dist.new_group(_group)

    for p in model.parameters():
        tmp_p = torch.zeros_like(p)
        dist.scatter(tensor=tmp_p, src=0, group=group)
        p.data = tmp_p
    print('Model recved successfully!')

    temp_lr = global_lr.get()

    # all supported models currently use the same optimizer and learning rate
    optimizer = MySGD(model.parameters(), lr=temp_lr)

    if args.model in ['MnistCNN', 'AlexNet']:
        criterion = torch.nn.NLLLoss()
    else:
        criterion = torch.nn.CrossEntropyLoss()

    print('Begin!')

    # the parameters that will be transferred to the thread
    model_cache = [p.data + 0.0 for p in model.parameters()]
    global_update = [torch.zeros_like(p) for p in model.parameters()]
    local_update = [torch.zeros_like(p) for p in model.parameters()]
    it_count = Value(c_float, 0.)  # number of local updates accumulated in the current iteration
    data_lock = Lock()
    update_lock = Queue()
    update_lock.put(1)

    loss_t = torch.tensor(0.0)
    receive_end = Value(c_bool, False)
    batch_communication_interval = Value(c_float, 0.0)
    stale_in_iteration = Value(c_float, 0.)

    sender_td = Thread(target=sender,
                       args=(
                           model_cache,
                           global_update,
                           local_update,
                           it_count,
                           loss_t,
                           update_lock,
                           data_lock,
                           group,
                           receive_end,
                           batch_communication_interval,
                           stale_in_iteration,
                       ),
                       daemon=True)
    sender_td.start()

    time_logs = open("./record" + str(rank), 'w')
    osp_logs = open("./log" + str(rank), 'w')
    Stale_Threshold = args.stale_threshold
    for epoch in range(args.epochs):
        batch_interval = 0.0
        batch_comp_interval = 0.0
        s_time = time.time()
        model.train()

        # For AlexNet, decrease the learning rate (LR) at the specified epochs.
        # The learning rate is decreased on the server because the local worker and the server update at different speeds.
        if not global_lr.empty():
            g_lr = global_lr.get()
            if args.model == 'AlexNet':
                for param_group in optimizer.param_groups:
                    param_group['lr'] = g_lr
                    print('LR Decreased! Now: {}'.format(param_group['lr']))

        for batch_idx, (data, target) in enumerate(train_data):
            batch_start_time = time.time()
            data, target = Variable(data), Variable(target)
            optimizer.zero_grad()
            output = model(data)
            loss = criterion(output, target)
            loss.backward()
            delta_ws = optimizer.get_delta_w()

            optimizer.step()

            # Aggregate local update
            data_lock.acquire()
            # aggregate loss
            loss_t.data += loss.data
            it_count.value += 1
            for g_idx, update in enumerate(local_update):
                update.data += delta_ws[g_idx].data
            data_lock.release()

            batch_computation_time = time.time()

            # Open the lock once the local update has at least one gradient
            if it_count.value == 1:
                update_lock.put(1)
            while it_count.value >= Stale_Threshold:
                pass

            if receive_end.value:
                receive_end.value = False
                for idx, param in enumerate(model.parameters()):
                    param.data = model_cache[idx]  # without local update
                    # param.data = model_cache[idx] - global_update[idx] # with local update

            batch_end_time = time.time()
            batch_interval += batch_end_time - batch_start_time
            batch_comp_interval += batch_computation_time - batch_start_time
            osp_logs.write(
                str(batch_end_time - batch_start_time) + "\t" +
                str(batch_computation_time - batch_start_time) + "\n")
            osp_logs.flush()

        print('Rank {}, Epoch {}, Loss:{}'.format(rank, epoch,
                                                  loss.data.item()))

        e_time = time.time()
        # run the test after training finishes
        #test_loss, acc = test_model(rank, model, test_data, criterion=criterion)
        acc = 0.0
        batch_interval /= batch_idx
        batch_comp_interval /= batch_idx
        logs = torch.tensor([
            acc, batch_interval, batch_comp_interval,
            batch_communication_interval.value, stale_in_iteration.value
        ])
        time_logs.write(str(logs) + '\n')
        time_logs.flush()
        # dist.gather(tensor=logs, dst = 0, group = group)
    time_logs.close()
    sender_td.join()
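
In this worker the shared Values (it_count, receive_end, batch_communication_interval) coordinate the training loop with the background sender thread, while the separate data_lock protects the multi-tensor update buffer. A reduced sketch of that producer/sender split, with plain floats standing in for gradient tensors:

import time
from ctypes import c_bool, c_float
from multiprocessing import Lock, Value
from threading import Thread


def sender(local_update, it_count, data_lock, stop):
    while not stop.value:
        with data_lock:
            if it_count.value > 0:
                print('sending', local_update[0], 'accumulated over', int(it_count.value), 'steps')
                local_update[0] = 0.0  # reset the accumulated update
                it_count.value = 0.0
        time.sleep(0.05)


if __name__ == '__main__':
    local_update = [0.0]  # stands in for a list of gradient tensors
    it_count = Value(c_float, 0.0)
    stop = Value(c_bool, False)
    data_lock = Lock()
    sender_td = Thread(target=sender, args=(local_update, it_count, data_lock, stop), daemon=True)
    sender_td.start()
    for _ in range(20):  # stands in for the training loop
        with data_lock:
            local_update[0] += 0.1  # accumulate this step's "gradient"
            it_count.value += 1
        time.sleep(0.01)
    time.sleep(0.1)
    stop.value = True
    sender_td.join()
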
Ejemplo n.º 54
0
 def create_should_run_instance(cls) -> Value:
     return Value(c_bool, True)
Ejemplo n.º 55
0
	def __init__(self):
		self.val = Value('i', 0)
Ejemplo n.º 56
0
        Power_Index[0] = Power_Index[0] + 1
    Power_Is_Read.value = 1


if __name__ == "__main__":

    PhotoDiod_Port = "AIN1"
    Spec1 = SBO.DetectSpectrometer()
    Integration_Time = 10  # Integration time in ms
    Spec1.setTriggerMode(0)  # It is set for free running mode
    Spec1.setIntegrationTime(
        Integration_Time *
        1000)  # Integration time is in microseconds when using the library
    DAQ1 = DAQ.DetectDAQT7()
    Power_meter = P100.DetectPM100D()
    Spec_Is_Read = Value('i', 0)
    Spec_Is_Read.value = 0
    Spec_Is_Done = Value('i', 0)
    Spec_Is_Done.value = 0
    DAQ_Is_Read = Value('i', 0)
    DAQ_Is_Read.value = 0
    Power_Is_Read = Value('i', 0)
    Power_Is_Read.value = 0
    Timer_Is_Over = Value('i', 0)
    Timer_Is_Over.value = 0

    DurationOfReading = 1.12  # Duration of reading in seconds.
    No_DAC_Sample = int(
        round(DurationOfReading * 1000 / 0.5)
    )  # Number of samples for DAQ analogue to digital converter (AINx). Roughly DAQ can read AINx every 0.4 ms
    No_Power_Sample = int(
Ejemplo n.º 57
0
import os
import time
from datetime import datetime
from multiprocessing import Process, Value
import sys
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

from constant import MAX_TIME
# logging output
from logger import logger as logging
from util import check_game_state, start_game, initDevice

waitTime = Value('i', 0)


def startGame(waitTime1):
    initDevice()
    global waitTime
    waitTime = waitTime1
    start_game()
    check_game_state(waitTime)


def updateTime(event):
    if "screen.png" in event.src_path:
        global lastChangeTime
        lastChangeTime = datetime.now()
        logging.debug('{0}:{1}'.format(event.event_type, event.src_path))

Ejemplo n.º 58
0
            # clean expired sessions once on first use, in case the lifetime was changed
            def cleaner():
                with cleaned.get_lock():
                    if not cleaned.value:
                        cleaned.value = True
                        app.logger.info('cleaning session store')
                        MailuSessionExtension.cleanup_sessions(app)

            app.before_first_request(cleaner)

        app.session_config = MailuSessionConfig(app)
        app.session_interface = MailuSessionInterface()


cleaned = Value('i', False)
session = MailuSessionExtension()


# this is used by the webmail to authenticate IMAP/SMTP
def verify_temp_token(email, token):
    try:
        if token.startswith('token-'):
            if sessid := app.session_store.get(token):
                session = MailuSession(sessid, app)
                if session.get('_user_id', '') == email:
                    return True
    except:
        pass
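
cleaned is a check-and-set guard: whichever worker wins the get_lock() race flips it and performs the cleanup, so the work happens exactly once even when several forked workers hit before_first_request at the same time. A minimal sketch of that run-once guard:

from multiprocessing import Process, Value


def worker(cleaned, worker_id):
    # Only the first worker to take the lock while the flag is still 0 does the cleanup.
    with cleaned.get_lock():
        if not cleaned.value:
            cleaned.value = True
            print('worker %d: cleaning session store' % worker_id)


if __name__ == '__main__':
    cleaned = Value('i', False)
    procs = [Process(target=worker, args=(cleaned, i)) for i in range(4)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()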

Ejemplo n.º 59
0
    conversation.start()
    video_engine.start()
    control_engine.start()
    video_engine.join()
    control_engine.join()


def start_api(bot_state, is_face_detected):
    # flask server
    application.run()


if __name__ == "__main__":
    # globals
    global is_face_detected
    is_face_detected = Value('d')
    is_face_detected.value = 0

    global bot_state
    bot_state = Value('d')
    bot_state.value = 1

    engines = Process(target=start_engines, args=(bot_state, is_face_detected))
    api = Process(target=start_api, args=(bot_state, is_face_detected))

    engines.start()
    api.start()

    engines.join()
    api.join()
Ejemplo n.º 60
0
from flask import Flask, request, send_from_directory, jsonify, make_response
from flask_cors import CORS
import os
from flask_jwt_extended import JWTManager, jwt_required
from kazoo.client import KazooClient
from multiprocessing import Value
from datetime import datetime

count_post_requests = Value('i', 0)
count_get_requests = Value('i', 0)
count_delete_requests = Value('i', 0)

app = Flask(__name__)

app.config.from_envvar('APP_CONFIG_FILE')
app.config['STORAGE_ID'] = os.environ.get('STORAGE_ID')
app.config['UPLOAD_FOLDER'] = "/app/images"  #+app.config['STORAGE_PORT']
app.config['ZK_HOST'] = os.environ.get('ZK_HOST')

LOG_FILE = 'storage_accesses.txt'

jwt = JWTManager(app)
CORS(app)

client = KazooClient(hosts=app.config['ZK_HOST'])

client.start()
client.ensure_path("/storage")

if not client.exists('/storage/' + app.config['STORAGE_ID']):
    client.create('/storage/' + app.config['STORAGE_ID'],