def __init__(self, subscriber_type, *args):
    """Build the underlying data streamer and the locks guarding it.

    :param subscriber_type: forwarded to ``self._create_data_streamer``.
    :param args: extra positional arguments for the streamer factory.
    """
    # Subscribers keyed by id -- presumably filled by later register
    # calls; confirm against the rest of the class.
    self._subscriber_dict = {}
    # Bug fix: removed a leftover debug ``print(args)`` that spammed stdout.
    self._data_streamer = self._create_data_streamer(
        subscriber_type, *args)
    self._subscribers_lock = RLock()
    self._data_stream_lock = RLock()
Exemplo n.º 2
0
 def __init__(self, backend, main, kwargs, mode='interpreted', verbosity='normal'):
     """Build the runtime: evaluate the user policy and set up shared state.

     :param backend: transport object; gains a back-reference to this
         runtime via ``backend.runtime``.
     :param main: policy factory, called once with ``**kwargs`` to
         produce ``self.policy``.
     :param kwargs: keyword arguments forwarded to ``main``.
     :param mode: execution mode string (default ``'interpreted'``).
     :param verbosity: verbosity name, converted to a numeric level by
         ``self.verbosity_numeric``.
     """
     self.verbosity = self.verbosity_numeric(verbosity)
     self.log = logging.getLogger('%s.Runtime' % __name__)
     # Current network view plus a copy -- presumably kept to diff
     # against later network updates; confirm in the update path.
     self.network = ConcreteNetwork(self)
     self.prev_network = self.network.copy()
     # The user policy is produced by calling the supplied factory.
     self.policy = main(**kwargs)
     self.mode = mode
     self.backend = backend
     self.backend.runtime = self
     # Re-entrant lock for policy access; plain locks for the rest.
     self.policy_lock = RLock()
     self.network_lock = Lock()
     self.switch_lock = Lock()
     # Bidirectional VLAN <-> extended-values mappings plus their lock.
     self.vlan_to_extended_values_db = {}
     self.extended_values_to_vlan_db = {}
     self.extended_values_lock = RLock()
     # Dynamic sub-policy set must exist before the update call below.
     self.dynamic_sub_pols = set()
     self.update_dynamic_sub_pols()
     # Flags guarding re-entrant update paths.
     self.in_network_update = False
     self.in_bucket_apply = False
     self.network_triggered_policy_update = False
     self.bucket_triggered_policy_update = False
     self.global_outstanding_queries_lock = Lock()
     self.global_outstanding_queries = {}
     # Manager-backed list so worker processes can share the old rules.
     self.manager = Manager()
     self.old_rules_lock = Lock()
     self.old_rules = self.manager.list()
     self.update_rules_lock = Lock()
     self.update_buckets_lock = Lock()
Exemplo n.º 3
0
    def __init__(self):
        """Set up task/observer registries, their locks, the message and
        process queues, and start the listener for incoming messages."""
        # Separate re-entrant locks so the running-task table and the
        # observer list can be updated independently.
        self._r_lock = RLock()
        self._obs_lock = RLock()
        self._running_tasks = {}
        self._observers = []

        self._message_queue = Queue()
        self._process_queue = Queue()

        # Begin consuming the message queue with self._process_message.
        self._start_listener(self._process_message, self._message_queue)
Exemplo n.º 4
0
def main():
    """Entry point for the distributed-storage master.

    Creates the shared tables (files, ports, unique files) under a
    multiprocessing Manager, spawns the master/alive/undertaker/replicate
    processes, and blocks until they all finish.
    """
    with Manager() as manager:

        # Create (truncate) the log files up front.  Bug fix: the handles
        # were never used or closed here, so close them immediately
        # instead of leaking two open file objects.
        for log_name in ("init", "datakeepers_death"):
            open("logs/%s.txt" % log_name, "w+").close()

        num_of_replicates = int(input("minimum number of replicates: "))

        # creating shared variables and their locks
        files_table_lock = RLock()
        ports_table_lock = RLock()
        unique_files_lock = RLock()

        files_table = manager.list()
        ports_table = manager.list()
        unique_files = manager.list()

        # initialize ports table
        initialize_ports_table(ports_table)

        # initialize files table
        initialize_files_table(manager, files_table, unique_files)
        print(len(files_table))

        # One master process per configured master port (the copy-pasted
        # p1/p2/p3 trio collapsed into a loop over the first three ports).
        masters = [
            Process(target=process,
                    args=(files_table, files_table_lock, unique_files,
                          unique_files_lock, ports_table, ports_table_lock,
                          port))
            for port in master_ports[:3]
        ]

        alive_process = Process(target=alive, args=(files_table, ports_table, files_table_lock, ports_table_lock))
        dead_process = Process(target=undertaker, args=(files_table, ports_table, files_table_lock, ports_table_lock))

        replicate_process = Process(target=replicate, args=(files_table, files_table_lock, unique_files, unique_files_lock, ports_table, ports_table_lock, num_of_replicates))

        workers = masters + [alive_process, dead_process, replicate_process]

        # start processes
        for p in workers:
            p.start()

        # join processes
        for p in workers:
            p.join()
Exemplo n.º 5
0
 def incoming_connection(self, con, address, req_count):
     """Serve one client connection: read a length-prefixed CBOR message
     and dispatch it as a push or a pull request.

     :param con: connected socket-like object.
     :param address: (host, port) tuple of the peer, used only in logs.
     :param req_count: request sequence number echoed in log lines.
     :return: ``req_count`` (so callers can correlate completions).
     :raises Exception: re-raises anything thrown while handling; the
         connection is then NOT closed (close happens after the try).
     """
     try:
         # Unpack message length
         self.logger.debug(
             "processing connection from %s, %d (%d)", address[0], address[1], req_count)
         # Get content length: a 4-byte network-order unsigned long prefix.
         content_length = unpack("!L", con.recv(4))[0]
         # Receive data
         raw_data = receive_data(con, content_length)
         self.logger.debug(
             "Recv raw data from %s, %d (%d)", address[0], address[1], req_count)
         data = cbor.loads(raw_data)
         self.logger.debug(
             "Converted data from %s, %d (%d)", address[0], address[1], req_count)
         # Get app name
         req_app = data[enums.TransferFields.AppName]
         # Versions
         versions = data[enums.TransferFields.Versions]
         # Received push request.
         # NOTE(review): identity (`is`) comparison assumes cbor.loads
         # yields the exact RequestType enum member, not a plain int --
         # confirm how enums serialize through cbor.
         if data[enums.TransferFields.RequestType] is enums.RequestType.Push:
             self.logger.debug("Processing push request. (%d)", req_count)
             # Actual payload
             package = data[enums.TransferFields.Data]
             # Send bool status back.
             # NOTE(review): setdefault builds a throwaway RLock on every
             # call once the key exists -- harmless but wasteful.
             with self.app_lock.setdefault(req_app, RLock()):
                 succ = self.push_call_back(req_app, versions, package)
             con.send(pack("!?", succ))
             self.logger.debug("Push complete. sent back ack. (%d)", req_count)
         # Received pull request.
         elif data[enums.TransferFields.RequestType] is enums.RequestType.Pull:
             self.logger.debug("Processing pull request. (%d)", req_count)
             with self.app_lock.setdefault(req_app, RLock()):
                 dict_to_send, new_versions = self.pull_call_back(req_app, versions)
                 self.logger.debug("Pull call back complete. sending back data. (%d)", req_count)
                 # Reply: length prefix, then the CBOR payload.
                 data_to_send = cbor.dumps({
                     enums.TransferFields.AppName: self.port,
                     enums.TransferFields.Data: dict_to_send,
                     enums.TransferFields.Versions: new_versions})
                 con.send(pack("!L", len(data_to_send)))
                 self.logger.debug("Pull complete. sent back data. (%d)", req_count)
                 send_all(con, data_to_send)
                 # Only confirm the pull after the peer acks with a true byte.
                 if unpack("!?", con.recv(1))[0]:
                     self.confirm_pull_req(req_app, new_versions)
                     self.logger.debug("Pull completed successfully. Recved ack. (%d)", req_count)
     except Exception as e:
         print (e)
         print(traceback.format_exc())
         raise
     con.close()
     self.logger.debug("Incoming Connection closed. (%d)", req_count)
     return req_count
Exemplo n.º 6
0
 def __init__(self,
              rigol_backend="usbtmc",
              checkqueuedelay=.09,
              addqueuetime=.2):
     """
     checkqueuedelay -> How quickly python will check the queues for new voltage
         data to plot.  This value must be less than addqueuetime to ensure that
         every waveform is being plotted.
     addqueuetime -> python waits <addqueuetime> seconds between each query about
         the voltages from the oscilloscope.
     q1 and q2 are the queues which hold the voltage data for channel 1 and 2
         on the oscilloscope.
     """
     # Presumably guards shared plotting/acquisition state -- confirm
     # how start() uses it.
     self.lock = RLock()
     self.checkqueuedelay = int(checkqueuedelay * 1000)  # in ms
     self.addqueuetime = addqueuetime  # in sec
     # Connect to the scope and switch waveform readout to NORM mode.
     self.dev = rigol.Rigol(rigol_backend)
     self.dev.waveformPointsMode("NORM")
     # Full-scale voltage per channel: volts/div * 4 (assumes a 4-division
     # range -- confirm against the instrument).
     self.vpp = self.dev.askChannelScale(1) * 4
     self.vpp2 = self.dev.askChannelScale(2) * 4
     self.x = self.dev.getTimebase()
     # Per-channel voltage queues; cancel_join_thread so interpreter
     # shutdown is not blocked waiting on unflushed queue data.
     q1 = Queue()
     q1.cancel_join_thread()
     q2 = Queue()
     q2.cancel_join_thread()
     self.ch1 = False
     self.ch2 = False
     self.start(q1, q2)
Exemplo n.º 7
0
    def __init__(self, scheduler, max_events_per_job=100):
        """Watch *scheduler* and mirror its state.

        Registers this object as an event listener on the scheduler and
        then snapshots the scheduler's jobstores, executors, info and
        jobs so later events can be applied incrementally.

        Args:
            scheduler (apscheduler.schedulers.base.BaseScheduler):
                The scheduler instance to observe.

            max_events_per_job (int):
                Upper bound on the number of events kept in memory per job
                for replay to newly connected clients.
        """
        self.scheduler = scheduler
        self.max_events_per_job = max_events_per_job
        self.listeners = []

        # Mirrored scheduler state, populated by _inspect_scheduler below.
        self.jobstores = {}
        self.executors = {}
        self.scheduler_info = {}
        self.jobs = {}

        self.write_lock = RLock()

        with self.write_lock:
            # Listener registration must happen before inspection so no
            # event can slip between the two steps.
            self.scheduler.add_listener(self._process_event, mask=apscheduler.events.EVENT_ALL)
            self._inspect_scheduler()
    def __init__(self,
                 dataname_tuples,
                 pdgIDs,
                 nWorkers,
                 num_loaders,
                 filters=[]):
        """Shared-memory dataset loader state.

        Sorts the per-class file tuples, allocates shared counters, events
        and barriers for the loader/worker processes, and over-allocates
        shared float buffers (sized from a sample HDF5 file) onto which
        numpy arrays are mapped.

        NOTE(review): ``filters=[]`` is a shared mutable default -- safe
        only as long as no caller mutates the stored list.
        """
        self.dataname_tuples = sorted(dataname_tuples)
        self.nClasses = len(dataname_tuples[0])
        self.total_files = len(dataname_tuples)  # per class
        self.num_per_file = len(dataname_tuples) * [0]
        self.num_loaders = num_loaders
        # Shared counters tracking which file/indices are resident in memory.
        self.lock = RLock()
        self.fileInMemory = Value('i', 0, lock=self.lock)
        self.fileInMemoryFirstIndex = Value('i', 0, lock=self.lock)
        self.fileInMemoryLastIndex = Value('i', -1, lock=self.lock)
        self.mem_index = Value('i',
                               1)  # either 0 or 1. used for mem management.
        self.loadNext = Event()
        self.loadFile = Event()
        # Loaders rendezvous here (+1 for the coordinating process).
        self.load_barrier = Barrier(self.num_loaders + 1)
        self.batch_barrier = Barrier(nWorkers - (self.num_loaders + 1))
        # One shared char buffer per loader to pass a file name (+50 slack).
        self.worker_files = [
            RawArray(ctypes.c_char,
                     len(dataname_tuples[0][0]) + 50)
            for _ in range(self.num_loaders)
        ]
        self.data = {}
        ###########################################
        # prepare memory to share with workers #
        # take a sample file and get keys and over allocate
        # what if we overallocate for both classes?
        # we should overallocate for both classes
        # if user runs into memory problems, use fewer num_loaders.
        # NOTE(review): no mode given; recent h5py defaults to 'r' but older
        # versions defaulted to 'a' -- confirm and pin explicitly.
        with h5py.File(dataname_tuples[0][0]) as sample:
            for key in sample.keys():
                #                 print(key)
                old_shape = sample[key].shape
                size = self.nClasses * self.num_loaders
                self.new_shape = list(old_shape)
                for dim in old_shape:
                    size *= dim
                self.new_shape[
                    0] = self.nClasses * self.num_loaders * old_shape[0]
                buff = RawArray(ctypes.c_float,
                                size)  # prepare mem for num_loaders
                self.data[key] = np.frombuffer(buff, dtype=np.float32).reshape(
                    self.new_shape)  # map numpy array on buffer
            classID_buff = RawArray(
                ctypes.c_int, (2 * self.nClasses * self.num_loaders * 200))
            #             print(classID_buff)
            # NOTE(review): np.int was removed in NumPy 1.24 -- this line
            # raises AttributeError on modern NumPy.  Also the buffer holds
            # 2*N*200 c_int values but is read as platform ints; on 64-bit
            # builds that yields N*200 elements.  The commented reshape
            # suggests the 2x allocation deliberately compensates -- confirm
            # before changing the dtype.
            self.data['classID'] = np.frombuffer(
                classID_buff,
                dtype=np.int)  #.reshape(self.nClasses*self.num_loaders*200)
#             print(self.data['classID'].shape)
###########################################
        # Map each PDG particle id to a compact class index.
        self.pdgIDs = {}
        self.filters = filters
        for i, ID in enumerate(pdgIDs):
            self.pdgIDs[ID] = i
        self.countEvents()
Exemplo n.º 9
0
def main():
    """Compute per-worker block size for *file* and allocate the shared array.

    Bug fixes: ``Array('1', ...)`` used an invalid typecode ('1' is not a
    ctypes/array code) -- replaced with 'l' (signed long); the Python 2
    ``print`` statement became a function call; and the block size uses
    floor division so it stays an integer on Python 3.
    """
    global BLOCK_SIZE
    get_file_size(file)
    # Integer block size per worker.
    BLOCK_SIZE = FILE_SIZE // WORKERS
    rlock = RLock()
    # One shared signed-long slot per worker.
    array = Array('l', WORKERS)
    print(array)
Exemplo n.º 10
0
def fork_child(request, comms):
    """Fork a child process to serve *request*, with a two-phase handshake.

    The parent sets ``val`` to 1 and waits until the child acknowledges by
    setting it to 2; the child waits for 1, sets 2, then serves.  The child
    never returns normally: its ``finally`` block SIGKILLs its own pid.

    :param request: carries the raw request (``request.req``), the client
        address and the server used for error handling/shutdown.
    :param comms: channel shared with the parent; closed by the child.
    :return: the child's pid (in the parent only).
    """
    # Shared int + condition implement the parent/child handshake.
    val = Value('i', 0)
    lock = RLock()
    cond = Condition(lock)

    pid = os.fork()
    if pid:
        # parent
        with lock:
            val.value = 1
            cond.notify_all()
            # Block until the child has confirmed it saw the go signal.
            cond.wait_for(lambda: val.value == 2)
        return pid
    else:
        # child
        # noinspection PyBroadException
        try:
            handler = CaptureHTTPHandler(request, comms)
            with lock:
                cond.wait_for(lambda: val.value == 1)
                val.value = 2
                cond.notify_all()
            handler.serve()
        except Exception:
            request.server.handle_error(request.req, request.client_address)
            # Complete the handshake even on failure so the parent is
            # never left waiting.
            with lock:
                cond.wait_for(lambda: val.value == 1)
                val.value = 2
                cond.notify_all()
        finally:
            request.server.shutdown_request(request.req)
            comms.close()
            # child does not exit normally
            import signal
            os.kill(os.getpid(), signal.SIGKILL)
Exemplo n.º 11
0
 def __init__(self, resolver=None, host: str = "nonhost") -> None:
     """Set up an empty actor registry, optionally seeded with one resolver.

     :param resolver: optional initial host resolver.
     :param host: address this registry is bound to.
     """
     self._hostResolvers = []
     if resolver is not None:
         self._hostResolvers.append(resolver)
     # python dict structure is atomic for primitive actions. Need to be checked
     self.__local_actor_refs = {}
     self.__sequence_id = 0
     self.__address = host
     self.__lock = RLock()
Exemplo n.º 12
0
def main():
    """Generate simulated channel data in parallel.

    Parses the CLI arguments, loads the channel matrices, then fans the
    data-generation work out over a multiprocessing pool in chunks of
    ``processes`` indices at a time.
    """
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--datapath", type=str, default="data")
    parser.add_argument("--data_type", type=str, default="train")
    parser.add_argument("--pilot_version", type=int, choices=[1, 2], default=1)
    parser.add_argument("--processes", type=int, default=4)
    parser.add_argument("--data_nums", type=int, default=64)
    parser.add_argument("--seed", type=int, default=43)
    parser.add_argument("--mode", type=int, choices=[0, 1, 2], default=None)
    parser.add_argument("--SNRdb", type=float, default=None)
    parser.add_argument("--with_pure_y", action='store_true')
    parser.add_argument("--debug", action='store_true')
    args = parser.parse_args()

    H, Htest = read_data(args.datapath)
    using_H = H if args.data_type == "train" else Htest

    generate_data_fix = partial(generate_data, args=args, H=using_H)

    # Give tqdm a shared lock so worker progress bars do not interleave.
    tqdm.set_lock(RLock())
    with Pool(processes=args.processes,
              initializer=tqdm.set_lock,
              initargs=(tqdm.get_lock(), )) as pool:
        # Cleanup: the original built a throwaway list of pool.map results
        # purely for side effects; a plain loop states the intent.
        for i in range(args.data_nums // args.processes):
            pool.map(generate_data_fix,
                     range(args.processes * i, args.processes * (i + 1)))
Exemplo n.º 13
0
    def start_cache_process(self, rules=None):
        """
        Starts the child process that maintains the URL cache.

        Arguments:
            rules (URLFrontierRules): The rules to be applied to the cache.
                Defaults to a fresh ``URLFrontierRules()`` per call.
        """
        # Bug fix: the original default was ``rules=URLFrontierRules()``,
        # which is evaluated once at definition time and then shared (and
        # potentially mutated) across every call -- the classic mutable
        # default-argument pitfall.
        if rules is None:
            rules = URLFrontierRules()
        with self._start_term_lock:
            cs = rules.checksum
            # First request for this rule set: build its queues, locks,
            # conditions and the monitor process.
            if cs not in self._cache_procs:
                self._url_queues[cs] = Queue(maxsize=self._max_size)
                self._job_queues[cs] = Queue()
                self._next_url_locks[cs] = RLock()
                self._fill_conds[cs] = Condition()
                self._empty_conds[cs] = Condition()
                self._mid_empty_conds[cs] = Condition()
                self._job_conds[cs] = Condition()
                self._cache_procs[cs] = Process(target=_monitor_cache,
                                                args=(self.dao,
                                                      self._max_size,
                                                      self._url_queues[cs],
                                                      self._job_queues[cs],
                                                      self._job_conds[cs],
                                                      self._fill_conds[cs],
                                                      self._empty_conds[cs],
                                                      rules.required_domains,
                                                      rules.blocked_domains,
                                                      rules.sort_list,
                                                      self._logger_lock))
                self._proc_counts[cs] = 0
            # (Re)start the monitor if it is not currently running.
            if not self._cache_procs[cs].is_alive():
                with self._logger_lock:
                    logger.info('Starting the cache process for rule=%s' % cs)
                self._cache_procs[cs].start()
            # Reference count of users of this cache process.
            self._proc_counts[cs] += 1
Exemplo n.º 14
0
 def __init__(self, email, password, update_interval):
     """Log in to the vehicle API and build one wrapper object per device
     per car.

     :param email: account e-mail for the Connection.
     :param password: account password.
     :param update_interval: stored for use by the update logic.
     """
     self.__connection = Connection(email, password)
     self.__vehicles = []
     self.update_interval = update_interval
     # Per-category state caches -- presumably keyed and filled by
     # update(); confirm against that method.
     self.__climate = {}
     self.__charging = {}
     self.__state = {}
     self.__driving = {}
     self.__gui = {}
     self.__last_update_time = {}
     self.__lock = RLock()
     # Fetch the car list once, then register every device wrapper.
     cars = self.__connection.get('vehicles')['response']
     for car in cars:
         self.__last_update_time[car['id']] = 0
         self.update(car['id'])
         # NOTE(review): ``Lock(car, self)`` below appears to be a vehicle
         # device class, not a threading/multiprocessing lock -- confirm
         # the imports at the top of this module.
         self.__vehicles.append(Climate(car, self))
         self.__vehicles.append(Battery(car, self))
         self.__vehicles.append(Range(car, self))
         self.__vehicles.append(TempSensor(car, self))
         self.__vehicles.append(Lock(car, self))
         self.__vehicles.append(ChargerConnectionSensor(car, self))
         self.__vehicles.append(ChargerSwitch(car, self))
         self.__vehicles.append(RangeSwitch(car, self))
         self.__vehicles.append(ParkingSensor(car, self))
         self.__vehicles.append(GPS(car, self))
         self.__vehicles.append(Odometer(car, self))
Exemplo n.º 15
0
    def __init__(self,
                 js_topic,
                 obs_topic,
                 integration_factor=0.05,
                 iterations=30,
                 visualize=False,
                 use_timer=True):
        """Set up the kinematics-model pose tracker.

        :param js_topic: topic name the joint-state ValueMapMsg is published on.
        :param obs_topic: topic name subscribed to for PSAMsg observations.
        :param integration_factor: integration step factor (stored here only).
        :param iterations: iteration count (stored here only).
        :param visualize: when True, create a ROSVisualizer on /tracker_vis.
        :param use_timer: flag stored for later use -- behaviour not
            visible in this method.
        """
        self.km_client = GeometryModel()  # ModelClient(GeometryModel)

        self._js_msg = ValueMapMsg()
        self._integration_factor = integration_factor
        self._iterations = iterations
        self._use_timer = use_timer

        # NOTE(review): attribute name is misspelled ("unintialized") but
        # renaming it would break other methods that reference it.
        self._unintialized_poses = set()
        self.aliases = {}
        self.tracked_poses = {}
        self.integrator = None
        self.lock = RLock()
        self.soft_constraints = {}
        self.joints = set()
        self.joint_aliases = {}

        self.visualizer = ROSVisualizer('/tracker_vis',
                                        'world') if visualize else None

        # Publisher for joint states; the subscriber feeds cb_process_obs.
        self.pub_js = rospy.Publisher(js_topic, ValueMapMsg, queue_size=1)
        self.sub_obs = rospy.Subscriber(obs_topic,
                                        PSAMsg,
                                        self.cb_process_obs,
                                        queue_size=5)
Exemplo n.º 16
0
class TaskQueue():
    """A mutex-guarded FIFO queue of (function, args) tasks.

    NOTE(review): ``_TaskList`` and ``_Mutex`` are class attributes, so
    every TaskQueue instance shares the same queue and lock -- confirm
    this is intentional before creating more than one instance.
    """
    _TaskList = []
    _Mutex = RLock()

    def hasNext(self) -> bool:
        """Return True if at least one task is queued."""
        with self._Mutex:
            return len(self._TaskList) > 0

    def Next(self) -> tuple:
        """Pop and return the oldest task, or None when the queue is empty."""
        with self._Mutex:
            return self._TaskList.pop(0) if self.hasNext() else None

    def Add(self, fn, arg):
        """Queue *fn* with *arg*, an iterable of positional arguments."""
        with self._Mutex:
            self._TaskList.append((fn, arg))

    def _execute_task(self, t):
        # Unpack a single (fn, args) pair and invoke it.
        fn, args = t
        fn(*args)

    def ExecuteNext(self):
        """Run the oldest queued task, if any.

        Bug fix: the original called ``_execute_task(None)`` when the
        queue was empty, raising TypeError; an empty queue is now a no-op.
        """
        with self._Mutex:
            t = self.Next()
            if t is not None:
                self._execute_task(t)

    def ExecuteAll(self):
        """Run every queued task in FIFO order."""
        with self._Mutex:
            while self.hasNext():
                self._execute_task(self.Next())
0
def main():
    """Convert every .mzML file in ``args.dir`` with boxcar merging.

    Validates the CLI mode flags, prepares the output directory,
    optionally drops excluded files, then converts serially or via a
    worker pool, finally reporting the elapsed time.
    """
    assert (args.p in ['non-parallel', 'parallel'])
    assert (args.merge_method
            in ['sliding_window', 'merge_and_sort', 'momentum_merge'])
    files = glob.glob(f"{args.dir}/*.mzML")
    files = list(files)
    new_directory = os.path.dirname(files[0]) + '/boxcar_converted'
    if not os.path.exists(new_directory):
        os.mkdir(new_directory)
    if args.exclude != '':
        # Drop any file whose name contains the exclusion substring
        # (equivalent to, but simpler than, the original two-pass filter).
        files = [i for i in files if args.exclude not in i]
    files = [(id + 1, file) for (id, file) in enumerate(files)]
    start_time = time.time()
    if args.p == 'non-parallel':
        for id, file in files:
            start_convert((id, file))
    elif args.p == 'parallel':
        if args.verbose:
            p = Pool(args.c,
                     initializer=tqdm.tqdm.set_lock,
                     initargs=(RLock(), ))
        else:
            p = Pool(args.c)
        # Bug fix: the pool was never closed or joined, leaking worker
        # processes if anything ran after the map.
        try:
            p.map(start_convert, files)
        finally:
            p.close()
            p.join()
    end_time = time.time()
    print('\n')
    print("It took {0} seconds".format(round(end_time - start_time, 2)))
Exemplo n.º 18
0
def outer_function(**config):
    """Run inner_function for every task in the supplied package config.

    Fans the tasks out over a 3-worker thread pool while a single outer
    tqdm bar advances as each task completes; returns the task results
    in completion order.
    """
    freeze_support()  # for Windows support
    tqdm.set_lock(RLock())

    tasks = config['package']['tasks']
    with concurrent.futures.ThreadPoolExecutor(initializer=tqdm.set_lock,
                                               initargs=(tqdm.get_lock(), ),
                                               max_workers=3) as executor:
        results = []
        bar_options = {
            'total': len(tasks),
            'desc': 'Outer',
            'ascii': True,
            'position': len(tasks),
            'leave': True
        }

        with tqdm(**bar_options) as outer_bar:
            futures = [
                executor.submit(inner_function, **node) for node in tasks
            ]

            # Advance the outer bar and collect results as tasks finish.
            for done in concurrent.futures.as_completed(futures):
                outer_bar.update()
                results.append(done.result())

        return results
Exemplo n.º 19
0
 def __init__(self):
     """Subscribe to the Kinect body-tracking topic and initialise the
     recording state (no recording in progress yet)."""
     # Incoming BodyArray messages are handled by process_message.
     self.sub = rospy.Subscriber('/head/kinect2/k2_bodies/bodies',BodyArray,self.process_message)
     self.recording_status=False
     self.writing_process=None
     self.last_message_time=[]
     self.timer=None
     # Re-entrant lock -- presumably guards the recording state above;
     # confirm usage in the callback.
     self.lock=RLock()
Exemplo n.º 20
0
 def __init__(self, addr=None, manager=None, mutex=None, format_list=None,
              size=0, ratio=2):
     """Attach to (or create) a shared-memory backed list.

     :param addr: name of an existing ShareableList to attach to; when
         None, a new one is created from *format_list*.
     :param manager: SharedMemoryManager, DummyManager, or a raw manager
         to wrap; defaults to a new DummyManager.
     :param mutex: externally supplied lock; a fresh RLock when None.
     :param format_list: template for the new ShareableList (required
         when *addr* is None).
     :param size: current logical size; capacity is ``size * ratio``.
     :param ratio: over-allocation factor (also the minimum capacity).
     :raises ValueError: if neither *addr* nor *format_list* is given.
     """
     if mutex is None:
         self.mutex = RLock()
     else:
         self.mutex = mutex
     if manager is None:
         self._manager = DummyManager()
     elif isinstance(manager, SharedMemoryManager) or isinstance(manager, DummyManager):
         self._manager = manager
     else:
         self._manager = SharedMemoryManager(manager)
     # Over-allocate by *ratio*; never zero so the segment is allocatable.
     capacity = int(size*ratio)
     if capacity == 0:
         capacity = ratio
     with self.mutex:
         if addr is None:
             if format_list is None:
                 raise ValueError("Either addr or format_list must be provided")
             # Fresh segments: header slot 0 holds the data block's name,
             # slot 1 the logical size, slot 2 the capacity.
             self._shl = self._manager.ShareableList(format_list)
             self._shl_addr = self._shl.shm.name
             self._shm = self._manager.SharedMemory(capacity)
             self._shm_addr = self._shm.name
             self._shl[0] = self._shm_addr
             self._shl[1] = int(size)
             self._shl[2] = int(capacity)
         else:
             # Attach to an existing header list, then open the data
             # block whose name is stored in header slot 0.
             self._shl_addr = addr
             self._shl = shared_memory.ShareableList(name=addr)
             self._shm_addr = self._shl[0]
             self._shm = shared_memory.SharedMemory(name=self._shm_addr)
Exemplo n.º 21
0
    def calculate_sharpness_video_capture(
            self,
            cv_video_capture: CVVideoCapture,
            frame_start=0,
            frame_end=None,
            batch_size=200,
            gray_scale_conversion_code=cv2.COLOR_BGR2GRAY,
            progress_tracker: CVProgressTracker = None):
        """Compute a per-frame sharpness score for a capture, in parallel.

        Splits the frame range across one worker process per CPU, tracks
        progress through a shared double (mirrored into *progress_tracker*
        by a repeating timer), and returns the scores as a numpy array.

        Cleanup: the original set ``progress_tracker.running = True`` a
        second time just before starting the workers; once is enough.
        """
        frame_count = int(cv_video_capture.get_frame_count())
        if frame_end:
            cv_video_capture.set_position_frame(frame_start)
            # Clamp to the requested window, never below zero.
            frame_count = min(frame_end - frame_start, frame_count)
            frame_count = max(frame_count, 0)

        if progress_tracker:
            progress_tracker.running = True

        # Shared result buffer, progress cell and capture lock for workers.
        frame_sharpness_ctype = multiprocessing.Array('d', frame_count)
        progress_value = multiprocessing.Value('d')
        progress_value.value = 0
        lock_video_capture = RLock()

        worker_count = multiprocessing.cpu_count()
        task_per_worker = int(frame_count / worker_count)
        # The first worker_count - 1 workers take equal slices; the last
        # worker also absorbs the remainder, up to frame_count.
        args_list = [
            (task_per_worker * i, task_per_worker * (i + 1), frame_start,
             frame_count, batch_size, self.kernel_x, self.kernel_y,
             frame_sharpness_ctype, cv_video_capture.file_handle,
             progress_value, lock_video_capture, gray_scale_conversion_code)
            for i in range(0, worker_count - 1)
        ]
        args_list.append(
            (task_per_worker * (worker_count - 1), frame_count, frame_start,
             frame_count, batch_size, self.kernel_x, self.kernel_y,
             frame_sharpness_ctype, cv_video_capture.file_handle,
             progress_value, lock_video_capture, gray_scale_conversion_code))

        processes = [
            Process(target=_calculate_sharpness_video_capture_worker,
                    args=arg_tuple) for arg_tuple in args_list
        ]

        def update_progress_tracker():
            # Mirror the shared progress value into the tracker object.
            progress_tracker.progress = progress_value.value

        progress_timer = RepeatingTimer(0.5, update_progress_tracker)

        if progress_tracker:
            progress_timer.start()

        for p in processes:
            p.start()
        for p in processes:
            p.join()
        if progress_tracker:
            progress_timer.cancel()
            progress_tracker.complete()
        return np.array(frame_sharpness_ctype)
Exemplo n.º 22
0
 def __init__(self, config):
     '''*config* can be obtained from the function :func:`cloudfusion.store.sugarsync.sugarsync_store.SugarsyncStore.get_config`,
     but you need to add user and password::
     
         config = SugarsyncStore.get_config()
         config['user'] = '******' #your account username/e-mail address
         config['password'] = '******' #your account password
     
     Or you can use a configuration file that already has password and username set by specifying a path::
     
         path_to_my_config_file = '/home/joe/MySugarsync.ini'       
         config = get_config(path_to_my_config_file)
     
     :param config: dictionary with key value pairs'''
     #self.dir_listing_cache = {}
     # Route this store's log records through the multiprocessing-safe
     # logger wrapper.
     self._logging_handler = 'sugarsync'
     self.logger = logging.getLogger(self._logging_handler)
     self.logger = db_logging_thread.make_logger_multiprocessingsave(self.logger)
     # Manager-backed caches so they are shared across processes.
     manager = Manager()
     self.path_cache = manager.dict()
     # use a lock for synchronized appends
     self._dir_listing_cache = manager.dict()
     self._dir_listing_cache_lock = RLock()
     self._last_partial_cache = manager.list()
     self._time_of_last_partial_cache = 0
     #error handling for authorization error
     self.root = config["root"]
     # NOTE(review): ``except Exception, e`` is Python 2-only syntax;
     # this module will not parse on Python 3.
     try:
         self.client = SugarsyncClient(config)
     except Exception, e:
         raise StoreAutorizationError(repr(e), 0)
Exemplo n.º 23
0
 def __init__(self) -> None:
     """Initialise an empty actor registry bound to the default host."""
     # No host resolvers registered yet.
     self._hostResolvers = []
     # python dict structure is atomic for primitive actions. Need to be checked
     self._local_actor_refs = {}
     self._sequence_id = 0
     self._address = "nonhost"
     self._lock = RLock()
Exemplo n.º 24
0
        def __call__(self):
            """Run detection over every image in ``self.img_dir`` and score
            the results against the ground-truth annotations.

            Uses a 2-process pool when the per-GPU batch size allows it;
            collects and reports the images that produced no detections.
            """
            # Cleanup: os.listdir already returns a list -- the original
            # comprehension only copied it.
            img_list = os.listdir(self.img_dir)
            miss = []

            if opt.batch_size_per_gpu > 1:
                # NOTE(review): set_start_method raises if the start method
                # was already set elsewhere -- confirm this is the only call.
                set_start_method('forkserver')
                lock_ = RLock()
                processes = 2
                pool = Pool(processes=processes, initializer=self.init_lock, initargs=(lock_,))
                with tqdm(total=len(img_list), desc='Detect') as pbar:
                    # Cleanup: the original wrapped this in enumerate() and
                    # discarded the index; iterate the results directly.
                    for r in pool.imap_unordered(self.process, img_list):
                        if r[0] == 0:
                            miss.append(r[1])
                        pbar.update()
                pool.close()
                pool.join()
            else:
                for img_name in tqdm(img_list):
                    r = self.process(img_name)
                    if r[0] == 0:
                        miss.append(r[1])

            print(f"{len(miss)} images no detection.")
            print(miss)
            input_json_path = res2json(self.result_dir)
            gt_json_path = opt.gt_json_path
            eval_func(input_json_path, gt_json_path, opt.iou_threshold)
Exemplo n.º 25
0
def main():
    """Scan a fixed ISO image with WORKERS processes sharing an Array.

    NOTE(review): Python 2-only code (print statements); it will not parse
    on Python 3.  The unconditional ``sys.exit(1)`` below makes everything
    after it dead code -- apparently a deliberate debug early-exit; confirm
    before removing.
    """
    global FILE_SIZE
    print datetime.datetime.now().strftime("%Y/%d/%m %H:%M:%S")

    # Hard-coded Windows path to the input image.
    file = "D:\\debian-8.2.0-amd64-DVD-1.iso"
    getFilesize(file)
    print FILE_SIZE

    file_size = FILE_SIZE

    # Shared array of signed longs, one slot per worker, guarded by rlock.
    rlock = RLock()
    array = Array('l', WORKERS, lock=rlock)

    for t in array:
        print t

    sys.exit(1)

    threads = []
    for i in range(WORKERS):
        p = Process(target=process_found,
                    args=[i, array, file, rlock, file_size])
        threads.append(p)

    for i in range(WORKERS):
        threads[i].start()

    for i in range(WORKERS):
        threads[i].join()

    print datetime.datetime.now().strftime("%Y/%d/%m %H:%M:%S")
0
class ObjectLock(object):
    """
    Re-entrant, per-object locking usable as a "with" context manager.

    Each locked object lazily receives its own RLock, stored on the
    instance under a private attribute name.
    """
    # Guards the lazy creation of per-instance locks.
    _lock = RLock()

    # Attribute name for instance locks
    __LOCK_NAME = '__ObjectLock__'

    def __init__(self, obj):
        # Fetch (or create) the lock belonging to *obj*.
        self.lock = ObjectLock._get_lock(obj)

    def __enter__(self):
        self.lock.acquire()

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()

    @classmethod
    def _get_lock(cls, obj):
        """Return obj's lock, creating and attaching one if missing."""
        with cls._lock:
            attr = cls.__LOCK_NAME
            try:
                return getattr(obj, attr)
            except AttributeError:
                fresh = RLock()
                setattr(obj, attr, fresh)
                return fresh
Exemplo n.º 27
0
 def __init__(self):
     """Initialise the collector runner in its idle (not running) state."""
     self._running: bool = False
     # Active collector instance -- presumably set once started; confirm.
     self._instance: Optional[DataCollector] = None
     # Guards start/stop state transitions.
     self._runLock: RLock = RLock()
     self._stopEvent: Event = Event()
     self._taskQueue: Queue = Queue()
     # Worker process handle; None until a run is started.
     self._process: Optional[Process] = None
Exemplo n.º 28
0
    def __init__(self,
                 serial_connection,
                 discard_echo: bool = False,
                 on_enter_power_on: bool = False,
                 on_exit_power_off: bool = True,
                 verify_checksum: bool = True) -> None:
        """
        :param serial_connection: An object representing the serial connection of the computer to the servo bus, such as
            an instance of serial.Serial from the pyserial library. Must implement methods ``read(cnt: int) -> bytes``,
            ``write(data: bytes)``, and ``flush()``. If method ``close()`` is defined, it will be called when exiting a
            "with ..." statement.
        :param discard_echo: If the hardware connection from the computer to the servo bus is implemented in such a way
            that any bytes transmitted by the computer are echoed on the receiving line, set this to True; that will
            cause a byte to be read and discarded for every byte written by the computer.
        :param on_enter_power_on: If the servos should be powered on when entering a "with ..." statement.
        :param on_exit_power_off: If the servos should be powered off when exiting a "with ..." statement.
        :param verify_checksum: If the checksum byte of received packets should be checked.
        """

        # Store the behaviour flags verbatim for later use.
        self.discard_echo = discard_echo
        self.on_enter_power_on = on_enter_power_on
        self.on_exit_power_off = on_exit_power_off
        self.verify_checksum = verify_checksum

        # Re-entrant lock -- presumably serialises bus transactions over
        # the connection; confirm against the read/write methods.
        self._conn = serial_connection
        self._conn_lock = RLock()
Exemplo n.º 29
0
 def main(self, file):
     """Split *file* into fixed-size line blocks and process them in parallel.

     Counts the file's lines, spawns one daemon worker per BLOCKLINE-sized
     chunk, and collects per-chunk statistics into a shared manager dict,
     which is printed at the end.

     :param file: path of the file to process
     :return: None (the aggregated *result* dict is printed)
     """
     # Bug fix: ``global`` must precede the first assignment -- the
     # original assigned BLOCKLINE before declaring it global, which is a
     # SyntaxError on Python 3.
     global BLOCKLINE
     BLOCKLINE = 10000
     print(datetime.datetime.now().strftime("%Y/%d/%m %H:%M:%S"))
     count = self.func_countfileline(file)
     # Bug fix: floor division keeps WORKERS an int so range() accepts it
     # on Python 3 (true division yields a float there).
     WORKERS = count // BLOCKLINE + 1 if count % BLOCKLINE else count // BLOCKLINE
     rlock = RLock()
     manager = Manager()
     result = manager.dict()
     processes = []
     for i in range(WORKERS):
         p = Process(target=self.process_found_new,
                     args=[i * BLOCKLINE, i, result, file, rlock])
         # Daemonise so stuck workers don't outlive the parent.
         p.daemon = True
         processes.append(p)
     for p in processes:
         p.start()
     for p in processes:
         p.join()
     print(result)
     print(datetime.datetime.now().strftime("%Y/%d/%m %H:%M:%S"))
Exemplo n.º 30
0
 def __init__(self):
     """Create the trading endpoint: build the web server and set up the
     instrument identifiers and order bookkeeping."""
     self.webserver = None
     # build_webserver is expected to populate self.webserver -- confirm.
     self.build_webserver()
     self.identifier = 'future'
     self.general_symbol = 'eos_this_week'
     # Open orders -- key scheme not visible here; presumably guarded by
     # self.lock below.
     self.order_dict = {}
     self.lock = RLock()