class SpeechRecoModule(ALModule):
    """NAOqi module wrapping ALSpeechRecognition for word spotting.

    NOTE(review): Python 2 syntax (`except RuntimeError, e`) — targets the
    legacy NAOqi/Choregraphe runtime; do not run under Python 3.
    """

    def __init__(self, name):
        ALModule.__init__(self, name)
        try:
            self.asr = ALProxy("ALSpeechRecognition")
        except Exception as e:
            # ASR engine can be absent (e.g. simulated robot); keep running
            # without it — onUnload guards on `self.asr` before popContexts().
            self.asr = None
        self.memory = ALProxy("ALMemory")

    def onLoad(self):
        """Reset per-run state and bind the word-recognized callback."""
        from threading import Lock
        self.bIsRunning = False
        # Serialises onUnload against concurrent state changes.
        self.mutex = Lock()
        self.hasPushed = False
        self.hasSubscribed = False
        self.BIND_PYTHON("SpeechReco", "onWordRecognized")

    def onUnload(self):
        """Tear down the subscription/context set up while running.

        NOTE(review): the lock is only released on RuntimeError; any other
        exception type would leave the mutex held — confirm intended.
        """
        from threading import Lock
        self.mutex.acquire()
        try:
            if (self.bIsRunning):
                if (self.hasSubscribed):
                    self.memory.unsubscribeToEvent("WordRecognized", "SpeechReco")
                if (self.hasPushed and self.asr):
                    self.asr.popContexts()
        except RuntimeError, e:
            self.mutex.release()
            raise e
        self.bIsRunning = False;
        self.mutex.release()
def _create_lock(locked, *args): from threading import Lock lock = Lock() if locked: if not lock.acquire(False): raise UnpicklingError("Cannot acquire lock") return lock
def handle(self):
    """Parse an inbound NetBIOS browser datagram and log its fields.

    NOTE(review): Python 2 (print statements). Field offsets assume the
    fixed layout of NBT name-service / browser announcement packets.
    """
    data, socket = self.request
    # NOTE(review): this Lock is local to the call, so it cannot actually
    # serialise concurrent handlers — presumably it was meant to be shared
    # state on the server; confirm against the enclosing class.
    lock = Lock()
    lock.acquire()
    # Browser payload starts at a 16-bit little-endian offset stored at 139.
    DataOffset = struct.unpack('<H',data[139:141])[0]
    BrowserPacket = data[82+DataOffset:]
    ReqType = RequestType(BrowserPacket[0])
    # NetBIOS-encoded names and role bytes at fixed offsets.
    Domain = Decode_Name(data[49:81])
    Name = Decode_Name(data[15:47])
    Role1 = NBT_NS_Role(data[45:48])
    Role2 = NBT_NS_Role(data[79:82])
    Fprint = WorkstationFingerPrint(data[190:192])
    Roles = ParseRoles(data[192:196])
    print text("[BROWSER] Request Type : %s" % ReqType)
    print text("[BROWSER] Address     : %s" % self.client_address[0])
    print text("[BROWSER] Domain      : %s" % Domain)
    print text("[BROWSER] Name        : %s" % Name)
    print text("[BROWSER] Main Role   : %s" % Role1)
    print text("[BROWSER] 2nd Role    : %s" % Role2)
    print text("[BROWSER] Fingerprint : %s" % Fprint)
    print text("[BROWSER] Role List   : %s" % Roles)
    RAPThisDomain(self.client_address[0], Domain)
    lock.release()
class TempDirs(object):
    """Tempdir manager: tracks created temp dirs and removes them at exit."""

    def __init__(self, tmpdir, prefix="rez_"):
        """
        :param tmpdir: parent directory for created temp dirs (None = system default)
        :param prefix: prefix for created directory names
        """
        self.tmpdir = tmpdir
        self.prefix = prefix
        self.dirs = set()
        self.lock = Lock()

        # previous version overloaded TempDirs.__del__ in an unsafe manner;
        # the __del__ method is not guaranteed to be called before sys.modules begins
        # breaking down, so the os.path call was failing with a muted AttributeError,
        # leaving the directory on disk even when the program exited normally; by
        # registering an atexit callback we ensure the directories are cleared at shutdown
        atexit.register(self.clear)

    def mkdtemp(self, cleanup=True):
        """Create a temp dir; when *cleanup* is True it is removed by clear()."""
        path = mkdtemp(dir=self.tmpdir, prefix=self.prefix)
        if not cleanup:
            return path
        with self.lock:
            self.dirs.add(path)
        return path

    def clear(self):
        """Remove every tracked directory.

        Fix: snapshot and empty the tracked set under the lock — the old
        version iterated the live set without holding the lock (racing
        concurrent mkdtemp calls) and never emptied it, so a second clear()
        re-walked already-deleted paths.
        """
        with self.lock:
            dirs, self.dirs = self.dirs, set()
        for path in dirs:
            if os.path.exists(path):
                shutil.rmtree(path)
class _CoreScheduleThread(Thread):
    """Scheduler thread: pops the earliest task, dispatches it to the pool
    when due, otherwise waits on the condition until the task's due time.
    """

    def __init__(self,threadpool):
        self.scheduletasks = [];          # tasks sorted by nexttime
        self.tasklock = Lock();           # guards copy-on-write of the task list
        self.condition = Condition(Lock())
        self.threadpool = threadpool
        Thread.__init__(self)

    def run(self):
        while True:
            self.condition.acquire()
            if len(self.scheduletasks) == 0:
                # Nothing scheduled; sleep until (presumably) someone notifies.
                # NOTE(review): addtask() never notifies the condition, so a
                # newly added task is only noticed after a timed wait expires
                # — confirm intended.
                self.condition.wait();
            else:
                task = self.scheduletasks.pop(0)
                if dates.current_timestamps()>=task.nexttime:
                    # Task is due: hand it to the pool and schedule next run.
                    self.threadpool.execute(task.function,*task.args,**task.kwargs)
                    task.nexttime = dates.current_timestamps()+task.period;
                else:
                    # Not due yet: sleep until its due time.
                    self.condition.wait(task.nexttime-dates.current_timestamps())
                # Re-queue the (periodic) task in sorted order.
                self.addtask(task)
            self.condition.release()

    def addtask(self,task):
        # copy on write: build a new sorted list and swap it in atomically,
        # so run() can read self.scheduletasks without holding tasklock.
        self.tasklock.acquire()
        tasks = [ t for t in self.scheduletasks ]
        tasks.append(task)
        tasks.sort(key=lambda task:task.nexttime)
        self.scheduletasks = tasks
        self.tasklock.release()
class Skeleton(object):
    """Producer/consumer task runner driven by a config file.

    One producer thread and N consumer threads are spawned per configured
    tasker; work items flow through a per-tasker Queue.
    """

    def __init__(self, config_file):
        self.logger = logging.getLogger(self.__class__.__name__)
        config = Config(config_file)
        config.get_configs()
        self.config = config
        self.lock = Lock()        # serialises handler.human_readable calls
        self.queues = dict()      # tasker_name -> Queue of work items

    def produce_task(self, tasker_name, tasker):
        """Periodically push tasker.size onto the tasker's queue (daemon loop)."""
        while True:
            try:
                queue = self.queues.get(tasker_name)
                self.logger.info("old {0} queue size: {1}".format(tasker_name, queue.qsize()))
                queue.put(tasker.size)
                self.logger.info("new {0} queue size: {1}".format(tasker_name, queue.qsize()))
            except Exception as error:
                self.logger.exception("{0} {1}".format(tasker_name, error))
            finally:
                time.sleep(self.config.scan_task_interval)
        return

    def consume_task(self, n, tasker_name, tasker):
        """Drain the tasker's queue, processing each item under the shared lock.

        :param n: worker index (used only for log messages)
        """
        while True:
            handler = Handler()
            queue = self.queues.get(tasker_name)
            if queue.empty():
                time.sleep(self.config.wait_time)
                continue
            try:
                while not queue.empty():
                    size = queue.get()
                    # NOTE(review): lock is not released if human_readable
                    # raises — the except path would leave it held; confirm.
                    self.lock.acquire()
                    handler.human_readable(size)
                    self.lock.release()
            except Exception as error:
                self.logger.exception('Thread-{0}: error {1}'.format(n, error))
            finally:
                del(handler)

    def do_work(self):
        """Spawn producer/consumer daemon threads, then idle until killed."""
        for tasker_name, tasker in self.config.taskers.items():
            self.queues[tasker_name] = Queue()
            # Spawn produce_task thread
            t = Thread(target=self.produce_task, args=(tasker_name, tasker))
            t.setDaemon(True)
            t.start()
            # Spawn consume_task threads
            for n in range(tasker.max_workers):
                t = Thread(target=self.consume_task, args=(n, tasker_name, tasker))
                t.setDaemon(True)
                t.start()
        while True:
            # Re-registers the SIGTERM handler each cycle (idempotent).
            signal.signal(signal.SIGTERM, sigterm_handler)
            # Round robin and Sleep some seconds.
            time.sleep(self.config.scan_task_interval)
        return
class ProgressBarLogger:
    """Thread-safe console progress bar; each log() call advances one step."""

    def __init__(self, msg, total):
        """
        :param msg: prefix text shown before the bar
        :param total: total number of expected log() calls
        """
        self.msg = msg
        self.total = total
        self.status = 0
        self.lock = Lock()

    def log(self, *_):
        """Advance the bar by one step (extra args accepted and ignored).

        Fix: hold the lock via a context manager so it is released even if
        rendering raises (e.g. ZeroDivisionError when total == 0); the old
        acquire/release pair leaked the lock on error, deadlocking all
        subsequent log() calls.
        """
        with self.lock:
            self.status += 1
            self._print_progress_bar(self.status, self.total,
                                     prefix=self.msg, bar_length=50)

    # from here http://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console
    # Print iterations progress
    @staticmethod
    def _print_progress_bar(iteration, total, prefix='', suffix='', decimals=2,
                            bar_length=100):
        """
        Call in a loop to create terminal progress bar
        @params:
            iteration   - Required  : current iteration (Int)
            total       - Required  : total iterations (Int)
            prefix      - Optional  : prefix string (Str)
            suffix      - Optional  : suffix string (Str)
            decimals    - Optional  : number of decimals in percent complete (Int)
            bar_length  - Optional  : character length of bar (Int)
        """
        filled_length = int(round(bar_length * iteration / float(total)))
        percents = round(100.00 * (iteration / float(total)), decimals)
        bar_char = '#' * filled_length + '-' * (bar_length - filled_length)
        sys.stdout.write('\r%s |%s| %s%s %s' % (prefix, bar_char, percents, '%', suffix))
        sys.stdout.flush()
        if iteration == total:
            sys.stdout.write('\n')
            sys.stdout.flush()
class Tips(object):
    """ Manage Tips Events.

    When enabled, spawns a Fetcher and a Cleaner that share the tips dict,
    the lock, and the set of new tip keys with this object.
    """
    def __init__(self, enable):
        self.enable = enable
        self._tips = {}          # tip key -> tip object (shared with workers)
        self._new_tips = set()   # keys added since the last new_tips() call
        self.lock = Lock()       # guards _tips/_new_tips across threads
        if self.enable:
            self.fetcher = Fetcher(self._tips, self.lock, self._new_tips)
            self.cleaner = Cleaner(self._tips, self.lock, self._new_tips)
            self.fetcher.start()
            self.cleaner.start()

    def tips(self):
        """Return all known tips (unlocked read — snapshot may be stale)."""
        return self._tips.values()

    def new_tips(self):
        """Return and consume the tips added since the previous call."""
        if self._new_tips:
            # wait_free_acquire presumably spins/waits on the shared lock;
            # released manually below.
            wait_free_acquire(self.lock)
            res = [self._tips[x] for x in self._new_tips]
            self._new_tips.clear()
            self.lock.release()
            return res
        else:
            return []

    def stop(self):
        """Ask the worker threads to finish (no-op when disabled)."""
        if self.enable:
            self.fetcher.finnish()
            self.cleaner.finnish()
class InMemoryItemValue(object):
    """A cached value with an optional time-to-live.

    The expiry deadline is (re)computed each time the value is assigned,
    using the TTL most recently set via update_expire_time().
    """

    def __init__(self, value=None, expire_in=None):
        """
        :param value: initial cached value
        :param expire_in: TTL in seconds (None = never expires)
        """
        self._lock = Lock()
        self._value = value
        # Absolute expiry deadline; only computed when the value is set.
        self._expire_in = None
        # TTL in seconds; applied on the next value assignment.
        self._expire_in_time = None
        self.update_expire_time(expire_in)

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, val):
        # Fix: use the lock as a context manager so it is released even if
        # computing the deadline raises (e.g. a non-numeric TTL passed to
        # float()); the old acquire/release pair leaked the lock on error.
        with self._lock:
            self._value = val
            self._expire_in = (
                datetime.now() + timedelta(seconds=float(self._expire_in_time))
                if self._expire_in_time else None
            )

    def update_expire_time(self, t):
        """Set the TTL (seconds); takes effect on the next value assignment."""
        self._expire_in_time = t

    @property
    def is_expired(self):
        # Never expired until a deadline has been computed by a set.
        return (self._expire_in - datetime.now()).days < 0 if self._expire_in else False
def start_manager(self):
    """Spawn a fixed pool of download threads, feed them the work queue,
    and wait for the queue to drain before signalling shutdown."""
    exit_flags[self.tab_id] = 0
    log.info('START | Layers Download Manager')
    thread_list = ['Alpha', 'Bravo', 'Charlie', 'Delta', 'Echo', 'Foxtrot', 'Golf', 'Hotel', 'India', 'Juliet']
    queue_lock = Lock()
    work_queue = Queue.Queue(len(self.file_paths_and_sizes))
    threads = []
    for thread_name in thread_list:
        key = str(uuid.uuid4())
        thread = LayerDownloadThread(self.source, thread_name, work_queue, queue_lock, key, self.target_dir, self.tab_id)
        thread.start()
        # NOTE(review): `threads_map_key` is not defined in this function —
        # presumably a module/instance-level key; verify it resolves.
        if not threads_map_key in thread_manager_processes:
            thread_manager_processes[threads_map_key] = {}
        thread_manager_processes[threads_map_key][key] = thread
        threads.append(thread)
    # Fill the queue under the shared lock so workers see a full queue at once.
    queue_lock.acquire()
    for word in self.file_paths_and_sizes:
        work_queue.put(word)
    queue_lock.release()
    # NOTE(review): busy-wait burns a CPU core until the queue drains;
    # a join/sentinel scheme would be gentler — flagged, not changed.
    while not work_queue.empty():
        pass
    # Signal workers to exit, then wait for them.
    exit_flags[self.tab_id] = 1
    for t in threads:
        t.join()
    log.info('DONE | Layers Download Manager')
def __init__(self, component_builder=None, project=None, project_dir=None,
             remote_storage=None, fingerprint=None, pull_models=None):
    """Initialise project/model bookkeeping; when both *project* and
    *project_dir* are given, derive the project path and scan it for
    existing models.

    :param component_builder: builder passed through to model loading
    :param project: project name (also used as the path component)
    :param project_dir: parent directory containing the project
    :param remote_storage: remote storage backend identifier
    :param fingerprint: model fingerprint to pin
    :param pull_models: whether models should be pulled from remote storage
    """
    self._component_builder = component_builder
    self._models = {}
    self.status = STATUS_READY
    self.current_training_processes = 0
    # Reader/writer synchronisation: readers are counted, a writer excludes.
    self._reader_lock = Lock()
    self._loader_lock = Lock()
    self._writer_lock = Lock()
    self._readers_count = 0
    self._path = None
    self._project = project
    self.remote_storage = remote_storage
    self.fingerprint = fingerprint
    self.pull_models = pull_models
    self.error_message = None
    if project and project_dir:
        self._path = os.path.join(project_dir, project)
        self._search_for_models()
class Manager:
    """Tracks which networks are up and collects id-stamped logs from hosts."""

    def __init__(self):
        self.networks = set()
        self.logs = []
        self.next_log_id = 0
        self.log_lock = Lock()   # guards logs/next_log_id

    def notifyNetUp(self, pno, net_name):
        """Record that *net_name* is up (*pno* is unused, kept for callers)."""
        self.networks.add(net_name)
        sys.stdout.write(">> network: %s is up (%d)\n" % (net_name, len(self.networks)))

    def getNetworks(self):
        """Return the set of known-up network names."""
        return self.networks

    def getLogs(self, since=-1):
        """Return log entries with id > *since* (all entries when since < 0).

        Fix: always return a list — the old code returned a lazy `filter`
        object on one branch and a plain list on the other, so callers got
        an inconsistent, single-use iterable depending on the argument.
        """
        if since >= 0:
            return [l for l in self.logs if l['id'] > since]
        return self.logs

    def putLog(self, host, log):
        """Append a log entry from *host*, assigning a monotonically
        increasing id. Lock is held via `with` so it cannot leak if the
        stdout write or JSON encoding raises."""
        with self.log_lock:
            l = {'id': self.next_log_id, 'host': host, 'log': log}
            self.next_log_id = self.next_log_id + 1
            sys.stdout.write(">> log: %s\n" % json.dumps(l))
            self.logs.append(l)
class Queue:
    """Command queue class

    Serialises command execution per server: the first positional argument
    identifies the server, and each server gets its own lock.
    """

    def __init__(self):
        self.lock = Lock()    # guards creation of per-server locks
        self.locks = {}       # server -> Lock

    def queue(self, command, *args):
        """Run *command* with *args* under that server's lock.

        Returns the command's truthy result, or None on a falsy result,
        wrong arity, or error.

        Fixes: inspect.getargspec was removed in Python 3.11 — use
        getfullargspec; release the per-server lock in a `finally` so a
        non-Exception raise (e.g. KeyboardInterrupt) cannot leak it; use
        logging.warning (warn is deprecated) and correct the
        "unknown_cmd" typo in the log message.
        """
        spec = inspect.getfullargspec(command)
        cmdname = command.__name__ if command.__name__ else "unknown_cmd"
        if len(spec.args) != len(args):
            logging.warning("Queue command '%s' expected %u args, got %u!"
                            % (cmdname, len(spec.args), len(args)))
        # If we have enough args, try running the command
        if len(args) >= len(spec.args):
            args = args[:len(spec.args)]  # Resize arg list if needed
            ret = None
            server = args[0]
            with self.lock:
                if server not in self.locks:
                    self.locks[server] = Lock()
            self.locks[server].acquire()
            # Run in an enclosure, so as to be able to release lock if it fails
            try:
                ret = command(*args)
            except Exception as err:
                logging.warning("Queue command returned error: %s" % err)
            finally:
                self.locks[server].release()
            if ret:
                return ret
        return None
class DPMClient():
    """Client that sends a buffer to a remote peer over a fresh TCP socket,
    optionally authenticated/encrypted via an RSA public key."""

    def __init__(self, uid=None, key=None):
        """
        :param uid: caller identity forwarded to the Stream when a key is set
        :param key: PKCS#1 PEM public key bytes, or None for plain streams
        """
        self._lock = Lock()   # serialises request() calls on this client
        self._uid = uid
        self._key = None
        if key:
            self._key = rsa.PublicKey.load_pkcs1(key)

    def request(self, addr, port, buf):
        """Send *buf* to (addr, port) and return the response payload.
        Only one request is in flight per client at a time."""
        self._lock.acquire()
        try:
            return self._request(addr, port, buf)
        finally:
            self._lock.release()

    def _request(self, addr, port, buf):
        """Open a socket, write the request, read the full reply, close."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect((addr, port))
        try:
            if self._key:
                stream = Stream(sock, uid=self._uid, key=self._key)
            else:
                stream = Stream(sock)
            stream.write(buf)
            # With a key, the reply is read over a fresh plain Stream on the
            # same socket — presumably only the request is key-wrapped; confirm.
            if self._key:
                stream = Stream(sock)
            _, _, res = stream.readall()
            return res
        finally:
            sock.close()
def __init__(self, item_number, person_capacity):
    """Initialise counters for a bounded store.

    :param item_number: total number of items initially available
    :param person_capacity: how many people fit in the store at once
    """
    # Each counter has its own lock so they can be updated independently.
    self.items_remaining_lock = Lock()
    self.space_available_lock = Lock()
    # Immutable capacities.
    self.item_number = item_number
    self.person_capacity = person_capacity
    # Mutable counters, starting at full capacity.
    self.items_remaining = self.item_number
    self.space_available = self.person_capacity
class shm:
    """Small mutex-guarded value cell ("shared memory").

    Reads and writes take an internal lock unless the caller opts out with
    lock=False; acquire()/release() expose the lock for manual sections.
    """

    def __init__(self, data=None):
        self._data = data
        self._lock = Lock()

    def set(self, data, lock=True):
        """Store *data*, taking the internal lock unless *lock* is falsy."""
        if not lock:
            self._data = data
            return
        self._lock.acquire()
        try:
            self._data = data
        finally:
            self._lock.release()

    def get(self, lock=True):
        """Return the stored value, taking the lock unless *lock* is falsy."""
        if not lock:
            return self._data
        self._lock.acquire()
        try:
            return self._data
        finally:
            self._lock.release()

    def acquire(self):
        """Manually take the internal lock."""
        self._lock.acquire()

    def release(self):
        """Manually release the internal lock."""
        self._lock.release()

    # Attribute-style access: `s.data` reads via get(), `s.data = x` writes via set().
    data = property(get, set)
def __init__(self, service, channel, config = {}, _lazy = False):
    """Set up an RPC-style connection over *channel*, exposing *service*.

    :param service: factory called with a weak proxy of this connection to
        build the local root object
    :param channel: transport the connection sends/receives frames on
    :param config: overrides merged onto a deepcopy of DEFAULT_CONFIG
        (NOTE(review): mutable default argument — safe only because it is
        never mutated here, only read; confirm callers)
    :param _lazy: when True, defer _init_service() to the caller
    """
    # Mark closed while we set the connection up.
    self._closed = True
    # Merge caller overrides onto a private copy of the defaults.
    self._config = deepcopy(DEFAULT_CONFIG)
    self._config.update(config)
    if self._config["connid"] is None:
        # Generate a process-unique connection id.
        self._config["connid"] = "conn{conn_id}".format(conn_id=self._connection_id_generator.__next__())
    self._channel = channel
    # Weak proxy avoids a reference cycle between connection and root service.
    self._local_root = service(weakref.proxy(self))
    # Set later by the root-exchange handshake.
    self._remote_root = None
    self._local_objects = Locking_dict()          # {oid: native_obj} — objects we own
    self._proxy_cache = WeakValueDictionary()     # {oid: proxy_obj} — proxies we hold, weakly
    self._netref_classes_cache = {}               # netref classes built on demand
    self._netref_proxy_builtin_cls = netref.PROXY_BUILTIN_TYPE_DICT  # prebuilt in netref
    self._seqcounter = itertools.count()          # message sequence numbers
    # Separate locks: one thread may send while another receives.
    self._recvlock = Lock()
    self._sendlock = Lock()
    self._sync_replies = {}        # seq -> reply for synchronous requests
    self._async_callbacks = {}     # seq -> callback for async requests
    self._last_traceback = None
    if not _lazy:
        self._init_service()
    # Setup finished; open for traffic.
    self._closed = False
class Syncer(object):
    """Fans entries out to worker threads that rsync batches to a slave.

    Entries are appended to the current PostBox; each worker atomically
    swaps the full box for an empty one, closes it, and syncs its contents.
    """

    def __init__(self, slave):
        self.slave = slave
        self.lock = Lock()    # guards the swap of self.pb
        self.pb = PostBox()   # batch currently accepting entries
        for i in range(int(gconf.sync_jobs)):
            t = Thread(target=self.syncjob)
            t.start()

    def syncjob(self):
        """Worker loop: claim a non-empty batch, close it, rsync, signal."""
        while True:
            pb = None
            while True:
                self.lock.acquire()
                if self.pb:
                    # Claim the current batch and install a fresh one.
                    pb, self.pb = self.pb, PostBox()
                self.lock.release()
                if pb:
                    break
                time.sleep(0.5)
            pb.close()
            # Wake waiters on the batch with the rsync result.
            pb.wakeup(self.slave.rsync(pb))

    def add(self, e):
        """Append *e* to the current batch; retry if the box was just
        claimed and closed by a worker mid-append."""
        while True:
            try:
                self.pb.append(e)
                return self.pb
            except BoxClosedErr:
                pass
class BlaLock(object):
    """
    Thin wrapper around threading's lock class. Releasing an unlocked lock
    is silently ignored unless the instance was created with strict=True,
    in which case the ThreadError propagates to the caller.
    """

    def __init__(self, strict=False, blocking=True):
        self.__strict = strict
        self.__blocking = blocking
        self.__lock = Lock()

    def acquire(self):
        """Take the lock, using the blocking mode chosen at construction."""
        self.__lock.acquire(self.__blocking)

    def release(self):
        """Release the lock; swallow ThreadError unless strict mode is on."""
        try:
            self.__lock.release()
        except ThreadError:
            if not self.__strict:
                return
            raise

    def locked(self):
        """True when the underlying lock is currently held."""
        return self.__lock.locked()

    def __enter__(self, *args):
        self.acquire()

    def __exit__(self, *args):
        self.release()
class ProcessThread(Thread):
    """Polls for status data handed over via self.data and writes it to the
    module-level `outputfile` twice a second until stopped.

    NOTE(review): Python 2 code (`file(...)`, `except Exception, e`); the
    open mode 'wu' is non-standard — presumably 'w' was intended; confirm.
    """

    # Initialize this thread
    def __init__(self):
        Thread.__init__(self)
        self.stopped = Event() # Cancel Event
        self.mutex = Lock()    # guards handoff of self.data
        self.data = None       # pending status text, or None

    # Threaded code
    def run(self):
        while not self.stopped.isSet():
            # Check if status data is available and process it
            data = None
            self.mutex.acquire()
            if self.data:
                # Take ownership of the pending data and clear the slot.
                data = self.data
                self.data = None
            self.mutex.release()
            if data:
                global outputfile
                try:
                    fp = file(outputfile, 'wu')
                    fp.write(data.encode('utf-8'))
                    fp.close()
                except Exception, e:
                    console.log(LOG_PYTHON, "Couldn't write status to '%s': %s.\n" % (outputfile, str(e)))
            # Sleep up to 0.5s, waking early if stop is signalled.
            self.stopped.wait(0.5)
def __init__(self,limit,post_inputs,nodes):
    """Initialise per-node coordination state for a distributed evaluation.

    :param limit: maximum number of evaluations
    :param post_inputs: NOTE(review): accepted but never stored here —
        presumably consumed by a subclass or later code; confirm.
    :param nodes: iterable of tuples whose first element is the node id
    """
    self.limit = limit
    self.evaluations = 0
    self.evaluation_lock = Lock()
    self.last_evaluation = None
    self.start_tuples_sent = False
    self.nodes = [x[0] for x in nodes]
    self.all_started = False
    self.starting_lock = Lock()
    self.resumes_sent = 0
    self.to_send = len(nodes)
    self.resume_lock = Lock()
    # Per-node state maps, each guarded by its own lock.
    self.suspended_flags = {}
    self.suspended_lock = Lock()
    self.last_state_lock = Lock()
    self.last_state = {}
    self.waiting = {}
    self.waiting_lock = Lock()
    for node in self.nodes:
        self.suspended_flags[node] = False
        self.last_state[node] = None
        self.waiting[node] = False
    self.pending_lock = Lock()
    self.pending_dict = {}
def __init__(
        self,
        all_cb=None,
        new_data_cb=None,
        ack_packet_finished_cb=None,
        error_cb=None,
        client_unreachable=None ):
    """Initialise serial-port state, callbacks and the two worker threads
    (reader and ack-payload sender). Threads are created but not started.

    :param all_cb: master callback invoked for every event
    :param new_data_cb: called when new payload data arrives
    :param ack_packet_finished_cb: called when an ack packet completes
    :param error_cb: called on transport errors
    :param client_unreachable: called when the peer stops responding
    """
    self._com = ""                   # comport name, set later
    self._serial = serial.Serial     # serial class, instantiated on open
    self._idx = 0
    self._buffer = []
    self._crc = crc8()
    ## @brief comport reading thread
    self._thread = Thread(target=self._loop, name="comport")
    # self._thread.setDaemon(True)
    self._comportContinue = threading.Event()
    self._comportContinue.set()
    self._newdata_cb = new_data_cb
    self._ack_packet_finished_cb = ack_packet_finished_cb
    self._client_unreachable = client_unreachable
    self._error_cb = error_cb
    self._master_cb = all_cb
    self._lock = Lock()
    ## @brief wireless transmition packet queue processing thread
    self._send_thread = Thread(target=self._sending_loop2, name="ack payload sender")
    # self._send_thread.setDaemon(True)
    self._sendContinue = threading.Event()
    self._sendContinue.set()
    self._send_queue = Queue.Queue()
    self._packet_inside = False
    # NOTE(review): second assignment of self._buffer — redundant with the
    # one above; harmless but confirm nothing was meant to differ.
    self._buffer = []
    self._log = logging.getLogger("root.serialHardware")
    self._tx_finished_lock = Lock()
    self._loop_idx = 0
    self._loop_reciver_timeout_lock = Lock()
class PandoraPool(object):
    """Fixed-size pool of PandoraAgent connections with expiry refresh.

    NOTE(review): Python 2 (`xrange`). getAgent() pops from the pool without
    taking the mutex while releaseAgent() does take it — presumably an
    accepted race; confirm against callers.
    """

    def __init__(self, poolSize, proxy=None, expireTime=3600):
        """
        :param poolSize: number of agents kept in the pool
        :param proxy: proxy handed to each created agent
        :param expireTime: agent lifetime in seconds before re-auth
        """
        self.size = poolSize
        self.proxy = proxy
        self.expire = expireTime
        self.pool = [self.createPandoraAgent() for i in xrange(self.size)]
        self.mutex = Lock()   # guards returns to the pool

    def createPandoraAgent(self):
        """Build a fresh agent whose expiry is `expire` seconds from now."""
        return PandoraAgent(datetime.now() + timedelta(0, self.expire), self.proxy)

    def refreshPandoraAgent(self, agent):
        """Re-authenticate an expired agent and push its expiry forward."""
        if agent.isExpired():
            agent.authenticate_connection()
            agent.setExpireDate(datetime.now() + timedelta(0, self.expire))
        return agent

    def getAgent(self):
        """Pop a pooled agent (refreshed if stale) or create a new one."""
        try:
            return self.refreshPandoraAgent(self.pool.pop())
        except IndexError:
            return self.createPandoraAgent()

    def hasAvailableConnections(self):
        """True when at least one agent is waiting in the pool."""
        return len(self.pool) > 0

    def releaseAgent(self, agent):
        """Return an agent to the pool unless the pool is already full."""
        self.mutex.acquire()
        if len(self.pool) < self.size:
            self.pool.append(agent)
        self.mutex.release()
class DataWindow(Thread):
    """Maintains a rolling 100x3 window of samples read from a data adapter.

    NOTE(review): `N` is presumably numpy imported as N, and `Queue.Empty`
    is the Python 2 queue module — confirm file imports.
    """

    def __init__(self,data_adapter):
        Thread.__init__(self)
        self.win = N.zeros((100,3))      # rolling window, newest row last
        self.winlock = Lock()            # guards reads/writes of self.win
        self.data_adapter = data_adapter

    def run(self):
        """Drain the adapter queue, shifting each new sample into the window."""
        self.data_adapter.start()
        self.running = True
        while self.running:
            self.winlock.acquire()
            try:
                # Consume everything currently queued; Empty breaks us out.
                while 1:
                    newdata = self.data_adapter.q.get(block=False)
                    # Shift the window up one row, append the new sample
                    # (dropping its first element, presumably a timestamp).
                    self.win[:-1,:] = self.win[1:,:]
                    self.win[-1,:] = newdata[1:]
            except Queue.Empty:
                pass
            finally:
                self.winlock.release()
        self.data_adapter.stop()

    def stop(self):
        """Ask the loop to exit after its current iteration."""
        self.running = False
class DebuggingLock:
    """Lock wrapper that logs every acquire/release attempt (with a stack
    trace) to the module-level `logfile`, serialised by `loglock`.

    NOTE(review): uses the deprecated currentThread()/getName() spellings;
    `loglock` and `logfile` must exist at module scope.
    """

    def __init__(self, name):
        self.lock = Lock()
        self.name = name    # label used in every log line

    def acquire(self, blocking = 1):
        """Log the attempt, take the lock, then log the acquisition."""
        self.print_tb("Acquire lock")
        self.lock.acquire(blocking)
        self.logmsg("===== %s: Thread %s acquired lock\n"% (self.name, currentThread().getName()))

    def release(self):
        """Log the attempt and release the lock."""
        self.print_tb("Release lock")
        self.lock.release()

    def logmsg(self, msg):
        """Append *msg* to the shared logfile under the global log lock."""
        loglock.acquire()
        logfile.write(msg + "\n")
        logfile.flush()
        loglock.release()

    def print_tb(self, msg):
        """Log *msg* together with the current call stack."""
        self.logmsg(".... %s: Thread %s attempting to %s\n"% \
                    (self.name, currentThread().getName(), msg) + \
                    "\n".join(traceback.format_list(traceback.extract_stack())))
def __init__(self):
    """Initialise chat-server state: per-resource locks, empty user and
    message lists, then open the listening socket via get_socket()."""
    self.lock_mess=Lock()            # guards message_list
    self.lock_userlist = Lock()      # guards users
    self.users = []
    self.message_list = []
    # Initialise the base class before opening the socket.
    super().__init__()
    self.server = self.get_socket()
def run(conn): """Function to handle running implosion generation in separate :py:class:`multithreading.Process` :param conn: A connection, i.e. one end of a `Pipe()` """ # Need duck-checking instead of real type-checking... assert hasattr(conn, 'send') and hasattr(conn, 'recv') # Get the implosion object from the pipe: imp = conn.recv() assert isinstance(imp, Implosion) connLock = Lock() # Run in a separate thread in this process: def impRun(): nonlocal imp, conn try: imp.generate() except Exception as e: connLock.acquire() conn.send(e) connLock.release() t = Thread(target=impRun) t.start() while t.is_alive(): connLock.acquire() conn.send(imp.progress()) connLock.release() time.sleep(0.01) # When the thread is done, send the Implosion object back: conn.send(imp)
def __init__(self, url, bucket, password="", verbose=False):
    """Initialise a vBucket-aware memcached client.

    Parses server ip/port out of a "http://host:port/pools/default" URL,
    fetches the vBucket map over REST, opens memcached connections, and
    starts the dispatcher and map-streaming daemon threads.

    :param url: REST endpoint of the form http://host:port/pools/default
    :param bucket: bucket name (also used as the REST username)
    :param password: REST password
    :param verbose: enable verbose logging in callers
    """
    self.log = logger.logger("VBucketAwareMemcachedClient")
    self.bucket = bucket
    self.rest_username = bucket
    self.rest_password = password
    self._memcacheds = {}                 # server -> memcached connection
    self._vBucketMap = {}                 # vbucket id -> server
    self._vBucketMap_lock = Lock()
    self._vBucketMapFastForward = {}      # map used mid-rebalance
    self._vBucketMapFastForward_lock = Lock()
    #TODO: use regular expressions to parse the url
    server = {}
    if not bucket:
        raise InvalidArgumentException("bucket can not be an empty string", parameters="bucket")
    if not url:
        raise InvalidArgumentException("url can not be an empty string", parameters="url")
    # Crude substring parse of "http://ip:port/pools/default".
    if url.find("http://") != -1 and url.rfind(":") != -1 and url.find("/pools/default") != -1:
        server["ip"] = url[url.find("http://") + len("http://"):url.rfind(":")]
        server["port"] = url[url.rfind(":") + 1:url.find("/pools/default")]
        server["username"] = self.rest_username
        server["password"] = self.rest_password
    self.servers = [server]
    self.servers_lock = Lock()
    self.rest = RestConnection(server)
    self.reconfig_vbucket_map()
    self.init_vbucket_connections()
    self.dispatcher = CommandDispatcher(self)
    self.dispatcher_thread = Thread(name="dispatcher-thread", target=self._start_dispatcher)
    self.dispatcher_thread.daemon = True
    self.dispatcher_thread.start()
    self.streaming_thread = Thread(name="streaming", target=self._start_streaming, args=())
    self.streaming_thread.daemon = True
    self.streaming_thread.start()
    self.verbose = verbose
class BatchInsertCollector():
    """Buffers rows and emits one multi-values INSERT when the accumulated
    SQL length crosses the threshold.

    NOTE(review): Python 2 (print statements, `unicode`). Values are
    interpolated into the SQL string rather than parameterised — an
    injection hazard if row data is untrusted; flagged, not changed.
    The bare `print 'lyc here'` style lines look like leftover debugging.
    """

    def __init__(self, cur, table_name, header=None, threshold=1000000):
        """
        :param cur: MyCursor wrapper providing .cur (DB cursor) and .con
        :param table_name: target table
        :param header: column list; fetched from the cursor when None
        :param threshold: max accumulated SQL length before a flush
        """
        if not isinstance(cur, MyCursor):
            raise TypeError
        self.cur = cur
        self.table_name = table_name
        print 'lyc here'
        print table_name
        if header is None:
            print 'this way'
            self.header = cur.get_header(table_name)
        else:
            print 'that way'
            self.header = header
        print 'I have a header'
        self.sql_header = ''
        self.cur_len = 0
        self.reset_header()
        self.threshold = threshold
        self.values = []        # pending "(v1,v2,...)" fragments
        self.stat_total = 0     # rows appended over the object's lifetime
        self.mutex = Lock()     # serialises append() across threads
        print 'initial finish'

    def __del__(self):
        # Best-effort final flush; __del__ timing is interpreter-dependent.
        self.flush()
        self.cur.con.commit()

    def reset_header(self):
        """Rebuild the INSERT prefix and reset the running length."""
        self.sql_header = 'insert into %s (%s) values ' % (self.table_name, ','.join(self.header))
        self.cur_len = len(self.sql_header)

    def flush(self):
        """Execute the buffered INSERT (no-op when nothing is pending)."""
        if len(self.values) == 0:
            return
        self.cur.cur.execute(self.sql_header + ','.join(self.values))
        self.cur_len = len(self.sql_header)
        self.cur.con.commit()
        print 'flush called: %d records, total %d records' % (len(self.values), self.stat_total)
        self.values = []

    def append(self, data):
        """Queue one DictItem row, flushing first if it would overflow."""
        assert isinstance(data, DictItem)
        self.mutex.acquire()
        def find(val):
            # Missing fields become empty-string literals.
            if val not in data.fields:
                return u"''"
            else:
                return u"'%s'" % unicode(data[val])
        cvalues = u','.join(map(find, self.header))
        val1 = u"(%s)" % cvalues
        if self.cur_len + len(val1) > self.threshold:
            self.flush()
        self.values.append(val1)
        self.cur_len += len(val1) + 1
        self.stat_total += 1
        self.mutex.release()
def _setup_to_do_n_cycles(self, number_of_cycles: int, updates_each_cycle: UpdateCollection=None):
    """
    Sets up the test so that the retriever will only do n cycles.

    The mocked get_all_since releases a semaphore (so this method can count
    cycles) and then blocks on `lock_until_counted` until the count has been
    taken, giving deterministic cycle counting.
    :param number_of_cycles: the number of cycles to do
    :param updates_each_cycle: updates returned by every mocked cycle
        (defaults to an empty collection)
    """
    if updates_each_cycle is None:
        updates_each_cycle = UpdateCollection([])
    semaphore = Semaphore(0)
    # Pre-acquired lock: the mock blocks on it until we re-release below.
    lock_until_counted = Lock()
    lock_until_counted.acquire()

    def increase_counter(*args) -> UpdateCollection:
        semaphore.release()
        lock_until_counted.acquire()
        return updates_each_cycle

    self.retrieval_manager.update_mapper.get_all_since.side_effect = increase_counter
    self.retrieval_manager.start()
    run_counter = 0
    while run_counter < number_of_cycles:
        # Wait for the mock to signal a cycle, then let it proceed.
        semaphore.acquire()
        run_counter += 1
        lock_until_counted.release()
        # Stop before the retriever begins an (n+1)-th cycle.
        if run_counter == number_of_cycles:
            self.retrieval_manager.stop()
    self.retrieval_manager.update_mapper.get_all_since.side_effect = None
def __init__(self):
    """Build the embedded/external mpv player, wire every key binding,
    clipboard helper and end-of-playback observer, and construct the OSD
    menu. Spans config load -> mpv.MPV construction -> callback setup."""
    mpv_config = conffile.get(APP_NAME,"mpv.conf", True)
    input_config = conffile.get(APP_NAME,"input.conf", True)
    extra_options = {}
    self._video = None
    self._lock = RLock()
    # Non-blocking flag lock: first end-of-playback event wins (see below).
    self._finished_lock = Lock()
    self.last_update = Timer()
    self.__part = 1
    self.timeline_trigger = None
    self.action_trigger = None
    self.external_subtitles = {}
    self.external_subtitles_rev = {}
    self.url = None
    self.evt_queue = Queue()
    self.is_in_intro = False
    self.intro_has_triggered = False
    if is_using_ext_mpv:
        extra_options = {
            "start_mpv": settings.mpv_ext_start,
            "ipc_socket": settings.mpv_ext_ipc,
            "mpv_location": settings.mpv_ext_path,
            "player-operation-mode": "cplayer"
        }
    # todo figure out how to put these in a file
    # NOTE(review): this unconditional reassignment discards the external-mpv
    # options built just above — looks suspicious given the collapsed
    # indentation of the source; confirm against upstream.
    extra_options = {
        'script-opts': 'osc-layout=slimbox,osc-deadzonesize=.9,osc-valign=1.05',
    }
    self._player = mpv.MPV(input_default_bindings=True, input_vo_keyboard=True,
                           input_media_keys=True, include=mpv_config,
                           input_conf=input_config, log_handler=mpv_log_handler,
                           loglevel=settings.mpv_log_level, **extra_options)
    self.menu = OSDMenu(self)
    self.auto_insert = False

    def on_new_sub(name, text):
        # Auto-copy each new subtitle line to the clipboard when enabled.
        if not self.auto_insert:
            return
        if not text or not text.strip():
            return
        pyperclip.copy(text.replace('\n', ' '))
    self._player.observe_property('sub-text', on_new_sub)

    if hasattr(self._player, 'osc'):
        self._player.osc = settings.enable_osc
    else:
        log.warning("This mpv version doesn't support on-screen controller.")

    # Wrapper for on_key_press that ignores None.
    def keypress(key):
        def wrapper(func):
            if key is not None:
                self._player.on_key_press(key)(func)
            return func
        return wrapper

    @self._player.on_key_press('CLOSE_WIN')
    @self._player.on_key_press('STOP')
    @keypress(settings.kb_stop)
    def handle_stop():
        self.stop()
        self.timeline_handle()

    @keypress(settings.kb_prev)
    def handle_prev():
        self.put_task(self.play_prev)

    @keypress(settings.kb_next)
    def handle_next():
        self.put_task(self.play_next)

    @self._player.on_key_press('PREV')
    @self._player.on_key_press('XF86_PREV')
    def handle_media_prev():
        # Media keys either seek or change episode, per user setting.
        if settings.media_key_seek:
            self._player.command("seek", -15)
        else:
            self.put_task(self.play_prev)

    @self._player.on_key_press('NEXT')
    @self._player.on_key_press('XF86_NEXT')
    def handle_media_next():
        if settings.media_key_seek:
            if self.is_in_intro:
                self.skip_intro()
            else:
                self._player.command("seek", 30)
        else:
            self.put_task(self.play_next)

    @keypress(settings.kb_watched)
    def handle_watched():
        self.put_task(self.watched_skip)

    @keypress(settings.kb_unwatched)
    def handle_unwatched():
        self.put_task(self.unwatched_quit)

    @keypress(settings.kb_menu)
    def menu_open():
        if not self.menu.is_menu_shown:
            self.menu.show_menu()
        else:
            self.menu.hide_menu()

    @keypress(settings.kb_menu_esc)
    def menu_back():
        if self.menu.is_menu_shown:
            self.menu.menu_action('back')
        else:
            self._player.command('set', 'fullscreen', 'no')

    @keypress(settings.kb_menu_ok)
    def menu_ok():
        self.menu.menu_action('ok')

    # Directional keys double as seek controls when the menu is hidden.
    @keypress(settings.kb_menu_left)
    def menu_left():
        if self.menu.is_menu_shown:
            self.menu.menu_action('left')
        else:
            self._player.command("seek", settings.seek_left)

    @keypress(settings.kb_menu_right)
    def menu_right():
        if self.menu.is_menu_shown:
            self.menu.menu_action('right')
        else:
            if self.is_in_intro:
                self.skip_intro()
            else:
                self._player.command("seek", settings.seek_right)

    @keypress(settings.kb_menu_up)
    def menu_up():
        if self.menu.is_menu_shown:
            self.menu.menu_action('up')
        else:
            if self.is_in_intro:
                self.skip_intro()
            else:
                self._player.command("seek", settings.seek_up)

    @keypress(settings.kb_menu_down)
    def menu_down():
        if self.menu.is_menu_shown:
            self.menu.menu_action('down')
        else:
            self._player.command("seek", settings.seek_down)

    @keypress(settings.kb_pause)
    def handle_pause():
        if self.menu.is_menu_shown:
            self.menu.menu_action('ok')
        else:
            self.toggle_pause()

    # This gives you an interactive python debugger prompt.
    @keypress(settings.kb_debug)
    def handle_debug():
        import pdb
        pdb.set_trace()

    @self._player.on_key_press('ctrl+c')
    def copy_current_sub():
        try:
            sub = self._player.sub_text
            pyperclip.copy(sub)
        except AttributeError:
            pass # no subtitle available.

    def copy_screenshot(subtitles=True):
        # Windows-only: places the current frame on the clipboard as a DIB.
        includes = 'subtitles' if subtitles else 'video'
        from io import BytesIO
        import win32clipboard
        image = self._player.screenshot_raw(includes=includes)
        output = BytesIO()
        image.convert("RGB").save(output, "BMP")
        data = output.getvalue()[14:]   # strip the 14-byte BMP file header
        output.close()
        win32clipboard.OpenClipboard()
        win32clipboard.EmptyClipboard()
        win32clipboard.SetClipboardData(win32clipboard.CF_DIB, data)
        win32clipboard.CloseClipboard()

    # NOTE(review): both handlers below share the name copy_current_image;
    # harmless since registration happens at decoration time, but confusing.
    @self._player.on_key_press('ctrl+s')
    def copy_current_image():
        copy_screenshot(subtitles=True)

    @self._player.on_key_press('ctrl+shift+s')
    def copy_current_image():
        copy_screenshot(subtitles=False)

    @self._player.on_key_press('ctrl+v')
    def output_audio():
        # Export the currently displayed subtitle's audio span as an mp3,
        # plus a screenshot and the subtitle text, named after the title.
        import subprocess
        import string
        import unicodedata
        sub_delay = round(self._player.sub_delay, 4) # round b/c of weird mpv precision
        sub_start = self._player.sub_start + sub_delay
        if sub_start:
            print("Outputting current subtitle...")
            valid_fn_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
            fn_dirty = "%s - %s" % (self._player.media_title, str(int(sub_start * 1000)))
            fn = unicodedata.normalize('NFKD', fn_dirty).encode('ASCII', 'ignore')
            fn = ''.join(chr(c) for c in fn if chr(c) in valid_fn_chars)
            aid = [x for x in self._player.track_list if x.get("type") == "audio" and x.get("selected")][0].get("id")
            subprocess.Popen([
                'mpv', self.url, '-o', '%s.mp3' % fn,
                '--no-video',
                '--start=%s' % sub_start,
                '--end=%s' % (self._player.sub_end + sub_delay),
                '--aid=%s' % aid,
            ])
            self._player.screenshot_to_file("%s.png" % fn, includes='video')
            with open('%s.txt' % fn, 'w+', encoding='utf-8') as f:
                f.write(self._player.sub_text)

    @self._player.on_key_press('ctrl+a')
    def toggle_auto_insert():
        self.auto_insert = not self.auto_insert
        self._player.show_text('Auto insert %s' % ("on" if self.auto_insert else "off"))

    # Fires between episodes.
    @self._player.property_observer('eof-reached')
    def handle_end(_name, reached_end):
        if self._video and reached_end:
            # Non-blocking acquire: only the first end event gets the lock.
            has_lock = self._finished_lock.acquire(False)
            self.put_task(self.finished_callback, has_lock)

    # Fires at the end.
    @self._player.event_callback('idle')
    def handle_end_idle(event):
        if self._video:
            has_lock = self._finished_lock.acquire(False)
            self.put_task(self.finished_callback, has_lock)
def __init__(self):
    """Initialise an empty element store with its guarding lock."""
    # Serialises mutations of self.elements across threads.
    self.lock = Lock()
    self.elements = []
class Queue(object):
    """
    Thread-safe queue with the basic enqueue and dequeue functionalities
    implemented.
    """

    def __init__(self):
        self.lock = Lock()   # serialises mutations of self.elements
        self.elements = []

    def empty(self):
        '''
        Check if the queue is empty at the current timestamp.
        '''
        return len(self.elements) == 0

    def enqueue(self, entry):
        '''
        Push an element at the rear end of the queue.
        '''
        # `with` guarantees release even if append raises.
        with self.lock:
            self.elements.append(entry)

    def dequeue(self):
        '''
        Dequeue an element if the queue is not empty. Raise exception
        otherwise.

        Fix: the old manual acquire/except/release bookkeeping would, if
        acquire() itself raised, release a lock it never held (masking the
        original error with a RuntimeError). A context manager releases the
        lock exactly once on both the success and the error path.
        '''
        with self.lock:
            if self.empty():
                raise Exception('\nCan not dequeue from an empty queue.')
            return self.elements.pop(0)

    def size(self):
        '''
        Size of queue
        '''
        return len(self.elements)

    def printQueue(self):
        '''
        Print the queue
        '''
        print('\nCurrently, Following are the elements in the queue : ', end='')
        List = self.elements
        print(*List)
def __init__(self):
    """Initialise an empty keyed store with its guarding lock."""
    # Serialises access to the backing dict across threads.
    self._lock = Lock()
    self._data = dict()
def __init__(self):
    """Initialise failure-tracking state with its guarding lock."""
    self.__lock = Lock()         # guards all fields below
    self.__failList = dict()     # ip -> failure data
    self.__maxRetry = 3          # failures before an address is bannable
    self.__maxTime = 600         # seconds a failure stays relevant
    self.__failTotal = 0         # lifetime failure count
class FailManager:
    """Thread-safe bookkeeping of login failures per IP address.

    One FailData record is kept per IP in ``__failList``, plus a running
    total of every failure ever recorded.  All accessors serialize on a
    single lock so the manager can be shared between the thread feeding
    failures in and the thread extracting bannable tickets via toBan().
    """

    def __init__(self):
        self.__lock = Lock()       # guards every attribute below
        self.__failList = dict()   # ip -> FailData
        self.__maxRetry = 3        # retries before an IP is bannable
        self.__maxTime = 600       # seconds before an entry expires
        self.__failTotal = 0       # lifetime failure counter

    def setFailTotal(self, value):
        """Overwrite the lifetime failure counter."""
        try:
            self.__lock.acquire()
            self.__failTotal = value
        finally:
            self.__lock.release()

    def getFailTotal(self):
        """Return the lifetime failure counter."""
        try:
            self.__lock.acquire()
            return self.__failTotal
        finally:
            self.__lock.release()

    def setMaxRetry(self, value):
        """Set the retry threshold used by toBan()."""
        try:
            self.__lock.acquire()
            self.__maxRetry = value
        finally:
            self.__lock.release()

    def getMaxRetry(self):
        """Return the retry threshold."""
        try:
            self.__lock.acquire()
            return self.__maxRetry
        finally:
            self.__lock.release()

    def setMaxTime(self, value):
        """Set the expiry window (seconds) used by cleanup()."""
        try:
            self.__lock.acquire()
            self.__maxTime = value
        finally:
            self.__lock.release()

    def getMaxTime(self):
        """Return the expiry window in seconds."""
        try:
            self.__lock.acquire()
            return self.__maxTime
        finally:
            self.__lock.release()

    def addFailure(self, ticket):
        """Record one failure from *ticket* (anything with getIP()/getTime())."""
        try:
            self.__lock.acquire()
            ip = ticket.getIP()
            unixTime = ticket.getTime()
            # ``in`` replaces dict.has_key(), which was removed in Python 3
            # and behaves identically on Python 2.
            if ip in self.__failList:
                fData = self.__failList[ip]
                fData.inc()
                fData.setLastTime(unixTime)
            else:
                fData = FailData()
                fData.inc()
                fData.setLastTime(unixTime)
                self.__failList[ip] = fData
            self.__failTotal += 1
        finally:
            self.__lock.release()

    def size(self):
        """Number of distinct IPs currently tracked."""
        try:
            self.__lock.acquire()
            return len(self.__failList)
        finally:
            self.__lock.release()

    def cleanup(self, time):
        """Drop every entry whose last failure is older than maxTime."""
        try:
            self.__lock.acquire()
            # Iterate a copy so __delFailure may mutate the real dict.
            tmp = self.__failList.copy()
            for item in tmp:
                if tmp[item].getLastTime() < time - self.__maxTime:
                    self.__delFailure(item)
        finally:
            self.__lock.release()

    def __delFailure(self, ip):
        # Caller must hold __lock.
        if ip in self.__failList:
            del self.__failList[ip]

    def toBan(self):
        """Pop one IP that reached maxRetry as a FailTicket.

        Raises FailManagerEmpty when no tracked IP qualifies.
        """
        try:
            self.__lock.acquire()
            for ip in self.__failList:
                data = self.__failList[ip]
                if data.getRetry() >= self.__maxRetry:
                    # Returning immediately keeps the in-loop deletion safe.
                    self.__delFailure(ip)
                    # Create a FailTicket from BanData
                    failTicket = FailTicket(ip, data.getLastTime())
                    failTicket.setAttempt(data.getRetry())
                    return failTicket
            raise FailManagerEmpty
        finally:
            self.__lock.release()
(select distinct 'LEVEL2' as LEVEL, name_2||','||name_1||','||name_0 as NAME from location_locationlevel l where name_2=UPPER(%s) limit 5) union (select distinct 'LEVEL1' as LEVEL, name_1||','||name_0 as NAME from location_locationlevel l where name_1=UPPER(%s) limit 5) union (select distinct 'LEVEL0' as LEVEL, name_0 as NAME from location_locationlevel l where name_0=UPPER(%s) limit 5) order by LEVEL;""" cursor.execute( sql, [lowest_level, lowest_level, lowest_level, lowest_level, lowest_level]) rows = cursor.fetchall() location_hierarchy = [] for level, location in rows: location_hierarchy.append(location.split(',')) return location_hierarchy[0] if len( location_hierarchy) > 0 else lowest_level.split(',') _tree = None _tree_lock = Lock() def get_location_tree(): global _tree with _tree_lock: if _tree is None: _tree = LocationTree() return _tree class LocationTree(object): def get_location_hierarchy_for_geocode(self, lat, long): row = self._get_location_level_row_for_geo_code(lat, long) lowest_level = self._get_lowest_level(row) location = []
# Demo: two threads sharing the module-level counters ``a`` and ``b``.
# The writer increments both under the lock; the reader checks, also
# under the lock, that it never observes them out of step.
from threading import Thread,Lock

a = b = 0
lock = Lock()


def value():
    # Reader thread: reports any moment where the two counters disagree.
    # Because both threads take the same lock, nothing should ever print.
    while True:
        lock.acquire()
        if a != b:
            print("a = %d,b = %d"%(a,b))
        lock.release()


t = Thread(target = value)
t.start()

# Writer (main thread): bump both counters atomically under the lock.
while True:
    with lock:
        a += 1
        b += 1

# NOTE(review): unreachable — the loop above never terminates, so join()
# never runs; the non-daemon reader thread keeps the process alive until
# it is killed externally.
t.join()
def __init__(self, token=None, base_url=None, workers=4, bot=None,
             private_key=None, private_key_password=None,
             user_sig_handler=None, request_kwargs=None, persistence=None,
             defaults=None, use_context=False, dispatcher=None,
             base_file_url=None):
    """Build an Updater in one of two mutually exclusive modes.

    Mode 1 (``dispatcher is None``): construct the Bot (from ``token`` or a
    pre-built ``bot``), the update queue, the job queue and a fresh
    Dispatcher. Mode 2: adopt every component from an existing
    ``dispatcher``; in that mode ``bot``, ``persistence``, ``workers`` and a
    mismatched ``use_context`` are rejected.

    Raises:
        ValueError: on any of the mutually exclusive argument combinations
            checked below.
    """
    if dispatcher is None:
        # Mode 1 validation: need exactly one credential source, and a
        # pre-built bot already carries its own key material.
        if (token is None) and (bot is None):
            raise ValueError('`token` or `bot` must be passed')
        if (token is not None) and (bot is not None):
            raise ValueError('`token` and `bot` are mutually exclusive')
        if (private_key is not None) and (bot is not None):
            raise ValueError('`bot` and `private_key` are mutually exclusive')
    else:
        # Mode 2 validation: everything must come from the dispatcher.
        if bot is not None:
            raise ValueError('`dispatcher` and `bot` are mutually exclusive')
        if persistence is not None:
            raise ValueError('`dispatcher` and `persistence` are mutually exclusive')
        # NOTE(review): ``workers`` defaults to 4, so callers passing a
        # dispatcher must also pass ``workers=None`` explicitly or this
        # guard raises — confirm that is the intended API.
        if workers is not None:
            raise ValueError('`dispatcher` and `workers` are mutually exclusive')
        if use_context != dispatcher.use_context:
            raise ValueError('`dispatcher` and `use_context` are mutually exclusive')

    self.logger = logging.getLogger(__name__)

    if dispatcher is None:
        con_pool_size = workers + 4

        if bot is not None:
            self.bot = bot
            # Can only warn here: the caller owns the Request object.
            if bot.request.con_pool_size < con_pool_size:
                self.logger.warning(
                    'Connection pool of Request object is smaller than optimal value (%s)',
                    con_pool_size)
        else:
            # we need a connection pool the size of:
            # * for each of the workers
            # * 1 for Dispatcher
            # * 1 for polling Updater (even if webhook is used, we can spare a connection)
            # * 1 for JobQueue
            # * 1 for main thread
            if request_kwargs is None:
                request_kwargs = {}
            if 'con_pool_size' not in request_kwargs:
                request_kwargs['con_pool_size'] = con_pool_size
            self._request = Request(**request_kwargs)
            self.bot = Bot(token, base_url, base_file_url=base_file_url,
                           request=self._request, private_key=private_key,
                           private_key_password=private_key_password,
                           defaults=defaults)
        self.update_queue = Queue()
        self.job_queue = JobQueue()
        self.__exception_event = Event()
        self.persistence = persistence
        self.dispatcher = Dispatcher(self.bot,
                                     self.update_queue,
                                     job_queue=self.job_queue,
                                     workers=workers,
                                     exception_event=self.__exception_event,
                                     persistence=persistence,
                                     use_context=use_context)
        self.job_queue.set_dispatcher(self.dispatcher)
    else:
        # Adopt all shared state from the supplied dispatcher.
        con_pool_size = dispatcher.workers + 4

        self.bot = dispatcher.bot
        if self.bot.request.con_pool_size < con_pool_size:
            self.logger.warning(
                'Connection pool of Request object is smaller than optimal value (%s)',
                con_pool_size)
        self.update_queue = dispatcher.update_queue
        self.__exception_event = dispatcher.exception_event
        self.persistence = dispatcher.persistence
        self.job_queue = dispatcher.job_queue
        self.dispatcher = dispatcher

    self.user_sig_handler = user_sig_handler
    self.last_update_id = 0
    self.running = False
    self.is_idle = False
    self.httpd = None
    self.__lock = Lock()      # serializes start/stop transitions (presumably; used elsewhere)
    self.__threads = []
    # Just for passing to WebhookAppClass
    self._default_quote = defaults.quote if defaults else None
class PlayerManager(object):
    """
    Manages the relationship between a ``Player`` instance and a ``Media`` item.

    This is designed to be used as a singleton via the ``playerManager``
    instance in this module. All communication between a caller and either the
    current ``player`` or ``media`` instance should be done through this class
    for thread safety reasons as all methods that access the ``player`` or
    ``media`` are thread safe.
    """
    def __init__(self):
        mpv_config = conffile.get(APP_NAME,"mpv.conf", True)
        input_config = conffile.get(APP_NAME,"input.conf", True)
        extra_options = {}
        self._video = None
        self._lock = RLock()
        # Held (non-blocking) by the end-of-file handlers below and released
        # in _play_media; finished_callback uses the acquire result to make
        # sure only one "finished" event advances playback.
        self._finished_lock = Lock()
        self.last_update = Timer()
        self.__part = 1
        self.timeline_trigger = None
        self.action_trigger = None
        self.external_subtitles = {}
        self.external_subtitles_rev = {}
        self.url = None
        self.evt_queue = Queue()
        self.is_in_intro = False
        self.intro_has_triggered = False
        if is_using_ext_mpv:
            extra_options = {
                "start_mpv": settings.mpv_ext_start,
                "ipc_socket": settings.mpv_ext_ipc,
                "mpv_location": settings.mpv_ext_path,
                "player-operation-mode": "cplayer"
            }
        # todo figure out how to put these in a file
        # NOTE(review): this assignment unconditionally REPLACES the
        # external-mpv options built just above, discarding start_mpv /
        # ipc_socket / mpv_location — confirm this clobber is intended.
        extra_options = {
            'script-opts': 'osc-layout=slimbox,osc-deadzonesize=.9,osc-valign=1.05',
        }
        self._player = mpv.MPV(input_default_bindings=True,
                               input_vo_keyboard=True,
                               input_media_keys=True,
                               include=mpv_config,
                               input_conf=input_config,
                               log_handler=mpv_log_handler,
                               loglevel=settings.mpv_log_level,
                               **extra_options)
        self.menu = OSDMenu(self)
        self.auto_insert = False

        def on_new_sub(name, text):
            # Copies each new subtitle line to the clipboard when auto
            # insert mode is enabled (toggled with ctrl+a below).
            if not self.auto_insert:
                return
            if not text or not text.strip():
                return
            pyperclip.copy(text.replace('\n', ' '))
        self._player.observe_property('sub-text', on_new_sub)

        if hasattr(self._player, 'osc'):
            self._player.osc = settings.enable_osc
        else:
            log.warning("This mpv version doesn't support on-screen controller.")

        # Wrapper for on_key_press that ignores None.
        def keypress(key):
            def wrapper(func):
                if key is not None:
                    self._player.on_key_press(key)(func)
                return func
            return wrapper

        @self._player.on_key_press('CLOSE_WIN')
        @self._player.on_key_press('STOP')
        @keypress(settings.kb_stop)
        def handle_stop():
            self.stop()
            self.timeline_handle()

        @keypress(settings.kb_prev)
        def handle_prev():
            self.put_task(self.play_prev)

        @keypress(settings.kb_next)
        def handle_next():
            self.put_task(self.play_next)

        @self._player.on_key_press('PREV')
        @self._player.on_key_press('XF86_PREV')
        def handle_media_prev():
            # Media keys either seek or change episode, per user setting.
            if settings.media_key_seek:
                self._player.command("seek", -15)
            else:
                self.put_task(self.play_prev)

        @self._player.on_key_press('NEXT')
        @self._player.on_key_press('XF86_NEXT')
        def handle_media_next():
            if settings.media_key_seek:
                if self.is_in_intro:
                    self.skip_intro()
                else:
                    self._player.command("seek", 30)
            else:
                self.put_task(self.play_next)

        @keypress(settings.kb_watched)
        def handle_watched():
            self.put_task(self.watched_skip)

        @keypress(settings.kb_unwatched)
        def handle_unwatched():
            self.put_task(self.unwatched_quit)

        @keypress(settings.kb_menu)
        def menu_open():
            if not self.menu.is_menu_shown:
                self.menu.show_menu()
            else:
                self.menu.hide_menu()

        @keypress(settings.kb_menu_esc)
        def menu_back():
            if self.menu.is_menu_shown:
                self.menu.menu_action('back')
            else:
                self._player.command('set', 'fullscreen', 'no')

        @keypress(settings.kb_menu_ok)
        def menu_ok():
            self.menu.menu_action('ok')

        # The arrow keys double as menu navigation and seek/skip controls,
        # depending on whether the OSD menu is currently shown.
        @keypress(settings.kb_menu_left)
        def menu_left():
            if self.menu.is_menu_shown:
                self.menu.menu_action('left')
            else:
                self._player.command("seek", settings.seek_left)

        @keypress(settings.kb_menu_right)
        def menu_right():
            if self.menu.is_menu_shown:
                self.menu.menu_action('right')
            else:
                if self.is_in_intro:
                    self.skip_intro()
                else:
                    self._player.command("seek", settings.seek_right)

        @keypress(settings.kb_menu_up)
        def menu_up():
            if self.menu.is_menu_shown:
                self.menu.menu_action('up')
            else:
                if self.is_in_intro:
                    self.skip_intro()
                else:
                    self._player.command("seek", settings.seek_up)

        @keypress(settings.kb_menu_down)
        def menu_down():
            if self.menu.is_menu_shown:
                self.menu.menu_action('down')
            else:
                self._player.command("seek", settings.seek_down)

        @keypress(settings.kb_pause)
        def handle_pause():
            if self.menu.is_menu_shown:
                self.menu.menu_action('ok')
            else:
                self.toggle_pause()

        # This gives you an interactive python debugger prompt.
        @keypress(settings.kb_debug)
        def handle_debug():
            import pdb
            pdb.set_trace()

        @self._player.on_key_press('ctrl+c')
        def copy_current_sub():
            try:
                sub = self._player.sub_text
                pyperclip.copy(sub)
            except AttributeError:
                pass # no subtitle available.

        def copy_screenshot(subtitles=True):
            # Grabs the current frame and puts it on the Windows clipboard
            # as a DIB (the 14-byte BMP file header is stripped).
            includes = 'subtitles' if subtitles else 'video'
            from io import BytesIO
            import win32clipboard
            image = self._player.screenshot_raw(includes=includes)
            output = BytesIO()
            image.convert("RGB").save(output, "BMP")
            data = output.getvalue()[14:]
            output.close()
            win32clipboard.OpenClipboard()
            win32clipboard.EmptyClipboard()
            win32clipboard.SetClipboardData(win32clipboard.CF_DIB, data)
            win32clipboard.CloseClipboard()

        @self._player.on_key_press('ctrl+s')
        def copy_current_image():
            copy_screenshot(subtitles=True)

        # NOTE(review): this redefines the local name ``copy_current_image``;
        # harmless because both handlers are registered by the decorator,
        # but the shadowing looks unintentional.
        @self._player.on_key_press('ctrl+shift+s')
        def copy_current_image():
            copy_screenshot(subtitles=False)

        @self._player.on_key_press('ctrl+v')
        def output_audio():
            # Exports the current subtitle line as mp3 + png + txt files
            # named after the media title and subtitle start time.
            import subprocess
            import string
            import unicodedata
            sub_delay = round(self._player.sub_delay, 4) # round b/c of weird mpv precision
            sub_start = self._player.sub_start + sub_delay
            if sub_start:
                print("Outputting current subtitle...")
                valid_fn_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
                fn_dirty = "%s - %s" % (self._player.media_title, str(int(sub_start * 1000)))
                # ASCII-fold the title so it is safe as a filename.
                fn = unicodedata.normalize('NFKD', fn_dirty).encode('ASCII', 'ignore')
                fn = ''.join(chr(c) for c in fn if chr(c) in valid_fn_chars)
                aid = [x for x in self._player.track_list
                       if x.get("type") == "audio" and x.get("selected")][0].get("id")
                subprocess.Popen([
                    'mpv', self.url, '-o', '%s.mp3' % fn, '--no-video',
                    '--start=%s' % sub_start,
                    '--end=%s' % (self._player.sub_end + sub_delay),
                    '--aid=%s' % aid,
                ])
                self._player.screenshot_to_file("%s.png" % fn, includes='video')
                with open('%s.txt' % fn, 'w+', encoding='utf-8') as f:
                    f.write(self._player.sub_text)

        @self._player.on_key_press('ctrl+a')
        def toggle_auto_insert():
            self.auto_insert = not self.auto_insert
            self._player.show_text('Auto insert %s' % ("on" if self.auto_insert else "off"))

        # Fires between episodes.
        @self._player.property_observer('eof-reached')
        def handle_end(_name, reached_end):
            if self._video and reached_end:
                # Non-blocking acquire: first end-event wins the lock.
                has_lock = self._finished_lock.acquire(False)
                self.put_task(self.finished_callback, has_lock)

        # Fires at the end.
        @self._player.event_callback('idle')
        def handle_end_idle(event):
            if self._video:
                has_lock = self._finished_lock.acquire(False)
                self.put_task(self.finished_callback, has_lock)

    # Put a task to the event queue.
    # This ensures the task executes outside
    # of an event handler, which causes a crash.
    def put_task(self, func, *args):
        self.evt_queue.put([func, args])
        if self.action_trigger:
            self.action_trigger.set()

    # Trigger the timeline to update all
    # clients immediately.
    def timeline_handle(self):
        if self.timeline_trigger:
            self.timeline_trigger.set()

    def skip_intro(self):
        # Jump straight to the end of the intro segment.
        self._player.playback_time = self._video.intro_end
        self.timeline_handle()
        self.is_in_intro = False

    @synchronous('_lock')
    def update(self):
        # Periodic tick: intro detection, queued-task draining, and
        # last-activity bookkeeping.
        if ((settings.skip_intro_always or settings.skip_intro_prompt)
                and self._video is not None
                and self._video.intro_start is not None
                and self._player.playback_time is not None
                and self._player.playback_time > self._video.intro_start
                and self._player.playback_time < self._video.intro_end):
            if not self.is_in_intro:
                # auto-skip only once per video (intro_has_triggered).
                if settings.skip_intro_always and not self.intro_has_triggered:
                    self.intro_has_triggered = True
                    self.skip_intro()
                    self._player.show_text("Skipped Intro", 3000, 1)
                elif settings.skip_intro_prompt:
                    self._player.show_text("Seek to Skip Intro", 3000, 1)
            self.is_in_intro = True
        else:
            self.is_in_intro = False

        # Drain tasks queued by event handlers (see put_task).
        while not self.evt_queue.empty():
            func, args = self.evt_queue.get()
            func(*args)

        if self._video and not self._player.playback_abort:
            if not self.is_paused():
                self.last_update.restart()

    def play(self, video, offset=0):
        url = video.get_playback_url()
        if not url:
            log.error("PlayerManager::play no URL found")
            return
        self._play_media(video, url, offset)

    @synchronous('_lock')
    def _play_media(self, video, url, offset=0):
        self.url = url
        self.menu.hide_menu()
        if settings.log_decisions:
            log.debug("Playing: {0}".format(url))
        self._player.play(self.url)
        self._player.wait_for_property("duration")
        if settings.fullscreen:
            self._player.fs = True
        self._player.force_media_title = video.get_proper_title()
        self._video = video
        self.is_in_intro = False
        self.intro_has_triggered = False
        self.update_subtitle_visuals(False)
        self.upd_player_hide()
        self.external_subtitles = {}
        self.external_subtitles_rev = {}
        if win_utils:
            win_utils.raise_mpv()
        if offset > 0:
            self._player.playback_time = offset
        if not video.is_transcode:
            # Direct play: select audio/subtitle streams on the player.
            audio_idx = video.get_audio_idx()
            if audio_idx is not None:
                log.debug("PlayerManager::play selecting audio stream index=%s" % audio_idx)
                self._player.audio = audio_idx
            sub_idx = video.get_subtitle_idx()
            xsub_id = video.get_external_sub_id()
            if sub_idx is not None:
                log.debug("PlayerManager::play selecting subtitle index=%s" % sub_idx)
                self._player.sub = sub_idx
            elif xsub_id is not None:
                log.debug("PlayerManager::play selecting external subtitle id=%s" % xsub_id)
                self.load_external_sub(xsub_id)
            else:
                self._player.sub = 'no'
        self._player.pause = False
        self.timeline_handle()
        # Re-arm the end-of-playback latch taken by the eof/idle handlers.
        if self._finished_lock.locked():
            self._finished_lock.release()

    def exec_stop_cmd(self):
        if settings.stop_cmd:
            os.system(settings.stop_cmd)

    @synchronous('_lock')
    def stop(self, playend=False):
        if not playend and (not self._video or self._player.playback_abort):
            self.exec_stop_cmd()
            return
        if not playend:
            log.debug("PlayerManager::stop stopping playback of %s" % self._video)
        self._video.terminate_transcode()
        self._video = None
        self._player.command("stop")
        self._player.pause = False
        self.timeline_handle()
        if not playend:
            self.exec_stop_cmd()

    @synchronous('_lock')
    def get_volume(self, percent=False):
        # Returns None when no player exists (original behavior).
        if self._player:
            if not percent:
                return self._player.volume / 100
            return self._player.volume

    @synchronous('_lock')
    def toggle_pause(self):
        if not self._player.playback_abort:
            self._player.pause = not self._player.pause
        self.timeline_handle()

    @synchronous('_lock')
    def seek(self, offset):
        """
        Seek to ``offset`` seconds
        """
        if not self._player.playback_abort:
            # A forward seek while inside the intro skips it entirely.
            if self.is_in_intro and offset > self._player.playback_time:
                self.skip_intro()
            else:
                self._player.playback_time = offset
        self.timeline_handle()

    @synchronous('_lock')
    def set_volume(self, pct):
        if not self._player.playback_abort:
            self._player.volume = pct
        self.timeline_handle()

    @synchronous('_lock')
    def get_state(self):
        # One of "stopped", "paused", "playing".
        if self._player.playback_abort:
            return "stopped"
        if self._player.pause:
            return "paused"
        return "playing"

    @synchronous('_lock')
    def is_paused(self):
        if not self._player.playback_abort:
            return self._player.pause
        return False

    @synchronous('_lock')
    def finished_callback(self, has_lock):
        # ``has_lock`` is the result of the non-blocking acquire in the
        # eof/idle handlers: only the holder may advance to the next item.
        if not self._video:
            return
        self._video.set_played()
        if self._video.is_multipart():
            if has_lock:
                log.debug("PlayerManager::finished_callback media is multi-part, checking for next part")
                # Try to select the next part
                next_part = self.__part+1
                if self._video.select_part(next_part):
                    self.__part = next_part
                    log.debug("PlayerManager::finished_callback starting next part")
                    self.play(self._video)
            else:
                log.debug("PlayerManager::finished_callback No lock, skipping...")
        elif self._video.parent.has_next and settings.auto_play:
            if has_lock:
                log.debug("PlayerManager::finished_callback starting next episode")
                self.play(self._video.parent.get_next().get_video(0))
            else:
                log.debug("PlayerManager::finished_callback No lock, skipping...")
        else:
            if settings.media_ended_cmd:
                os.system(settings.media_ended_cmd)
            log.debug("PlayerManager::finished_callback reached end")
            self.stop(playend=True)

    @synchronous('_lock')
    def watched_skip(self):
        if not self._video:
            return
        self._video.set_played()
        self.play_next()

    @synchronous('_lock')
    def unwatched_quit(self):
        if not self._video:
            return
        self._video.set_played(False)
        self.stop()

    @synchronous('_lock')
    def play_next(self):
        if self._video.parent.has_next:
            self.play(self._video.parent.get_next().get_video(0))
            return True
        return False

    @synchronous('_lock')
    def skip_to(self, key):
        media = self._video.parent.get_from_key(key)
        if media:
            self.play(media.get_video(0))
            return True
        return False

    @synchronous('_lock')
    def play_prev(self):
        if self._video.parent.has_prev:
            self.play(self._video.parent.get_prev().get_video(0))
            return True
        return False

    @synchronous('_lock')
    def restart_playback(self):
        # Replay the current video from the current position (used when a
        # transcode decision changes).
        current_time = self._player.playback_time
        self.play(self._video, current_time)
        return True

    @synchronous('_lock')
    def get_video_attr(self, attr, default=None):
        if self._video:
            return self._video.get_video_attr(attr, default)
        return default

    @synchronous('_lock')
    def set_streams(self, audio_uid, sub_uid):
        if not self._video.is_transcode:
            if audio_uid is not None:
                log.debug("PlayerManager::play selecting audio stream index=%s" % audio_uid)
                self._player.audio = self._video.audio_seq[audio_uid]
            if sub_uid == '0':
                # '0' is the sentinel for "no subtitles".
                log.debug("PlayerManager::play selecting subtitle stream (none)")
                self._player.sub = 'no'
            elif sub_uid is not None:
                log.debug("PlayerManager::play selecting subtitle stream index=%s" % sub_uid)
                if sub_uid in self._video.subtitle_seq:
                    self._player.sub = self._video.subtitle_seq[sub_uid]
                else:
                    log.debug("PlayerManager::play selecting external subtitle id=%s" % sub_uid)
                    self.load_external_sub(sub_uid)
        self._video.set_streams(audio_uid, sub_uid)
        if self._video.is_transcode:
            # Stream changes require restarting the transcode session.
            self.restart_playback()
        self.timeline_handle()

    @synchronous('_lock')
    def load_external_sub(self, sub_id):
        if sub_id in self.external_subtitles:
            self._player.sub = self.external_subtitles[sub_id]
        else:
            try:
                sub_url = self._video.get_external_sub(sub_id)
                if settings.log_decisions:
                    log.debug("Load External Subtitle: {0}".format(sub_url))
                self._player.sub_add(sub_url)
                # Cache the mapping in both directions for get_track_ids.
                self.external_subtitles[sub_id] = self._player.sub
                self.external_subtitles_rev[self._player.sub] = sub_id
            except SystemError:
                log.debug("PlayerManager::could not load external subtitle")

    def get_track_ids(self):
        # Returns (audio_uid, subtitle_uid) for the active streams.
        if self._video.is_transcode:
            return self._video.get_transcode_streams()
        else:
            aid, sid = None, None
            if self._player.sub != 'no':
                if self._player.sub in self.external_subtitles_rev:
                    sid = self.external_subtitles_rev.get(self._player.sub, '')
                else:
                    sid = self._video.subtitle_uid.get(self._player.sub, '')
            if self._player.audio != 'no':
                aid = self._video.audio_uid.get(self._player.audio, '')
            return aid, sid

    def update_subtitle_visuals(self, restart_transcode=True):
        if self._video.is_transcode:
            # Burned-in subtitles: only a transcode restart can change them.
            if restart_transcode:
                self.restart_playback()
        else:
            self._player.sub_pos = SUBTITLE_POS[settings.subtitle_position]
            self._player.sub_scale = settings.subtitle_size / 100
            self._player.sub_color = settings.subtitle_color
        self.timeline_handle()

    def upd_player_hide(self):
        # Keep the window open between episodes only when something follows.
        self._player.keep_open = self._video.parent.has_next

    def terminate(self):
        self.stop()
        if is_using_ext_mpv:
            self._player.terminate()
def __init__(self, cap=10):
    """Initialise a bounded buffer holding at most ``cap`` items (default 10).

    Classic producer/consumer setup: ``empty`` counts free slots (starts at
    ``cap``), ``full`` counts occupied slots (starts at 0), and ``qLock``
    protects the list itself.
    """
    self.queue = []                # underlying storage
    self.qLock = Lock()            # mutual exclusion on ``queue``
    self.full = Semaphore(0)       # occupied slots; consumers wait on this
    self.empty = Semaphore(cap)    # free slots; producers wait on this
# Copyright (C) 2007-2018 The NOC Project # See LICENSE for details # --------------------------------------------------------------------- # Python modules from __future__ import absolute_import from threading import Lock # Third-party modules import creole # NOC modules from ..macros.loader import loader as macro_loader from .base import BaseParser mw_lock = Lock() class MacroWrapper(object): pass class CreoleParser(BaseParser): name = "Creole" macro_wrapper = None @classmethod def to_html(cls, kb_entry): def custom_link_emit(node): if node.children: text = html_emitter.emit_children(node)
def __init__(self):
    # ---------------------------------------------------------------------------------------------
    # define the mutexes to avoid concurrency issues on the I/O queues
    # ---------------------------------------------------------------------------------------------
    self.inputLock = threading.Lock()
    self.outputLock = threading.Lock()
    # ---------------------------------------------------------------------------------------------
    # message sequences (each is a FIFO list of pending instructions)
    # ---------------------------------------------------------------------------------------------
    # catheter control commandes in speed mode
    self.catheterMoveInstructionSequence = []
    # guidewire control commandes in speed mode
    self.guidewireProgressInstructionSequence = []
    self.guidewireRotateInstructionSequence = []
    # guidewire control commandes in position mode
    self.guidewireMovingDistance = []  # to be verified...
    self.contrastMediaPushInstructionSequence = []
    self.injectionCommandSequence = []
    self.retractInstructionSequence = []
    # forcefeedback
    self.forcefeedbackSequence = []
    # push catheter and guidewire together
    self.catheter_guidewire_push_sequence = []
    # system control
    self.closeSessionSequence = []
    self.sensingParameterSequence = []
    # ---------------------------------------------------------------------------------------------
    # system status variable
    # ---------------------------------------------------------------------------------------------
    self.systemStatus = True
    # ------------------------------------------------------------------------------------------------------------
    # control variables:
    #
    # guidewireControlState
    # where
    #  0: uncontrolled,
    #  1: valid,
    #  2: nonvalid_prepare_for_push,
    #  3: nonvalid_prepare_for_drawn,
    #  4: exception
    #
    # catheterControlState
    # where
    #  0: uncontrolled,
    #  1: valid
    #  2: nonvalid_beyond_guidewire
    #  3: exception
    # contrastMediaControlState
    # where
    #  0: uncontrolled,
    #  1: valid
    #  2: exception
    self.guidewireControlState = 0
    self.catheterControlState = 0
    self.contrastMediaControlState = 0
    # live telemetry values, updated by the worker threads below
    self.globalContrastMediaVolumn = 0
    self.globalForceFeedback = 0.0
    self.globalTorqueFeedback = 0.0
    self.globalDistanceFromChuckToCatheter = 0.0
    self.globalTelescopicRodLength = 0.0
    self.globalDistanceFromCatheterToGuidewire = 0.0
    self.globalGuidewireAngle = 0.0
    self.globalTranslationVelocity = 0.0
    self.globalRotationVelocity = 0.0
    self.globalDecisionMade = 1
    # NOTE(review): three non-daemon worker threads are started directly
    # from the constructor; they will keep the process alive after the
    # main thread exits — confirm this lifetime is intended.
    informationAnalysisTask = threading.Thread(
        None, self.coreInformationAnalysis)
    informationAnalysisTask.start()
    decisionMaking_task = threading.Thread(None, self.decisionMaking)
    decisionMaking_task.start()
    # guards sensingParameterSequence hand-off to the storage thread
    self.storingDataLock = Lock()
    storingDataTask = threading.Thread(None, self.storingData)
    storingDataTask.start()
from mindspore._checkparam import check_input_data, Validator from mindspore.compression.export import quant_export from mindspore.parallel._tensor import _load_tensor from mindspore.parallel._utils import _infer_rank_list, _remove_repeated_slices tensor_to_ms_type = {"Int8": mstype.int8, "Uint8": mstype.uint8, "Int16": mstype.int16, "Uint16": mstype.uint16, "Int32": mstype.int32, "Uint32": mstype.uint32, "Int64": mstype.int64, "Uint64": mstype.uint64, "Float16": mstype.float16, "Float32": mstype.float32, "Float64": mstype.float64, "Bool": mstype.bool_} tensor_to_np_type = {"Int8": np.int8, "Uint8": np.uint8, "Int16": np.int16, "Uint16": np.uint16, "Int32": np.int32, "Uint32": np.uint32, "Int64": np.int64, "Uint64": np.uint64, "Float16": np.float16, "Float32": np.float32, "Float64": np.float64, "Bool": np.bool_} _ckpt_mutex = Lock() SLICE_SIZE = 512 * 1024 * 1024 def _special_process_par(par, new_par): """ Processes the special condition. Like (12,2048,1,1)->(12,2048), this case is caused by GE 4 dimensions tensor. """ par_shape_len = len(par.data.shape) new_par_shape_len = len(new_par.data.shape) delta_len = new_par_shape_len - par_shape_len delta_i = 0 for delta_i in range(delta_len): if new_par.data.shape[par_shape_len + delta_i] != 1:
class FEPool(Borg):
    """
    A singleton that manages a pool of Frontends; i.e. key cascades with attached
    swift backends only one cascade exists per container/user combination
    """

    def __init__(self):
        Borg.__init__(self)
        # Borg shares state across instances, so these attributes may already
        # exist from an earlier construction; only create them the first time.
        # The original used bare ``except:`` here, which also swallowed
        # KeyboardInterrupt/SystemExit -- narrowed to the AttributeError the
        # attribute probe can actually raise.
        try:
            self.__lock
        except AttributeError:
            self.__lock = Lock()
        try:
            self.__pool
        except AttributeError:
            self.__pool = dict()

    def addFE(self, container, swiftTenant, swiftToken, fe):
        """Register *fe* in the pool under (container, tenant)."""
        # self.__pool[(container, swiftTenant, swiftToken)] = fe
        # TODO: multi-backend in the Key Cascade is necessary.
        # currently, we would re-use the first users token for all requests...
        self.__pool[(container, swiftTenant)] = fe

    def getFE(self, container, swiftTenant, swiftToken):
        """Return the pooled frontend for (container, tenant), creating one on miss.

        On a hit the cached frontend gets its swift backend refreshed with a
        connection built from the caller's current token.
        """
        logging.info(
            "looking for Frontend for: container {}, swiftTenant {}, swiftToken {}"
            .format(container, swiftTenant, swiftToken))
        logging.debug("Lock \/ acquiring")
        self.__lock.acquire()
        logging.debug("Lock || locked")
        try:
            sp = SwiftPool()
            swift_backend_current = sp.getConn(swiftTenant, swiftToken)
            sdos_frontend = self.__pool[(container, swiftTenant)]
            # refresh with the current token so expired connections are replaced
            sdos_frontend.refresh_swift_backend(
                swift_backend_new=swift_backend_current)
            return sdos_frontend
        except KeyError:
            # cache miss: build a new frontend from the container's properties
            logging.info(
                "Frontend not found in pool, creating new for: container {}, swiftTenant {}, swiftToken {}"
                .format(container, swiftTenant, swiftToken))
            props = swift_backend_current.get_sdos_properties(container)
            # print(props)
            h = props[
                2] - 1  # tree height is without root internally, but with root externally
            cascadeProperties = CascadeProperties(container_name=container,
                                                  partition_bits=props[1],
                                                  tree_height=h,
                                                  master_key_type=props[3],
                                                  use_batch_delete=props[4],
                                                  tpm_key_id=props[5])
            fe = Frontend.SdosFrontend(container,
                                       swiftBackend=swift_backend_current,
                                       cascadeProperties=cascadeProperties,
                                       useCache=True)
            self.addFE(container, swiftTenant, swiftToken, fe)
            return fe
        finally:
            self.__lock.release()
            logging.debug("Lock /\ release CREATED NEW FE (probably...)")
class RCPContext:
    """Shared runtime context for the RCP system.

    Holds the inbound command queues and outbound feedback queues (each
    guarded by a lock), the global control/sensing state, and the
    background worker threads that sample sensing data, make decisions
    and persist haptic data to CSV.
    """

    def __init__(self):
        # ------------------------------------------------------------------
        # mutexes to avoid concurrency issues: inputLock guards the inbound
        # command queues, outputLock guards the outbound feedback queues.
        # ------------------------------------------------------------------
        self.inputLock = threading.Lock()
        self.outputLock = threading.Lock()

        # ------------------------------------------------------------------
        # message sequences
        # ------------------------------------------------------------------
        self.catheterMoveInstructionSequence = []       # catheter commands (speed mode)
        self.guidewireProgressInstructionSequence = []  # guidewire commands (speed mode)
        self.guidewireRotateInstructionSequence = []
        self.guidewireMovingDistance = []               # guidewire commands (position mode)
        self.contrastMediaPushInstructionSequence = []
        self.injectionCommandSequence = []
        self.retractInstructionSequence = []
        self.forcefeedbackSequence = []                 # force feedback messages
        self.catheter_guidewire_push_sequence = []      # push catheter and guidewire together
        self.closeSessionSequence = []                  # system control
        self.sensingParameterSequence = []

        # system status flag; set False by close_system()
        self.systemStatus = True

        # ------------------------------------------------------------------
        # control state variables:
        #   guidewireControlState: 0 uncontrolled, 1 valid,
        #       2 nonvalid_prepare_for_push, 3 nonvalid_prepare_for_drawn,
        #       4 exception
        #   catheterControlState: 0 uncontrolled, 1 valid,
        #       2 nonvalid_beyond_guidewire, 3 exception
        #   contrastMediaControlState: 0 uncontrolled, 1 valid, 2 exception
        # ------------------------------------------------------------------
        self.guidewireControlState = 0
        self.catheterControlState = 0
        self.contrastMediaControlState = 0

        # latest global sensing / actuation values
        self.globalContrastMediaVolumn = 0
        self.globalForceFeedback = 0.0
        self.globalTorqueFeedback = 0.0
        self.globalDistanceFromChuckToCatheter = 0.0
        self.globalTelescopicRodLength = 0.0
        self.globalDistanceFromCatheterToGuidewire = 0.0
        self.globalGuidewireAngle = 0.0
        self.globalTranslationVelocity = 0.0
        self.globalRotationVelocity = 0.0
        self.globalDecisionMade = 1

        # background worker threads (run forever, as in the original design)
        informationAnalysisTask = threading.Thread(
            None, self.coreInformationAnalysis)
        informationAnalysisTask.start()
        decisionMaking_task = threading.Thread(None, self.decisionMaking)
        decisionMaking_task.start()
        self.storingDataLock = Lock()
        storingDataTask = threading.Thread(None, self.storingData)
        storingDataTask.start()

    # ----------------------------------------------------------------------
    # internal helpers: every queue accessor follows the same locked pattern
    # ----------------------------------------------------------------------
    def _locked_append(self, lock, seq, msg):
        """Append msg to seq while holding lock."""
        with lock:
            seq.append(msg)

    def _locked_pop_latest(self, lock, seq):
        """Pop and return the newest element of seq while holding lock.

        Raises IndexError on an empty sequence (same as the original
        ``seq.pop(len(seq) - 1)``).
        """
        with lock:
            return seq.pop()

    def _locked_length(self, lock, seq):
        """Return len(seq) while holding lock."""
        with lock:
            return len(seq)

    # ----------------------------------------------------------------------
    # worker loops
    # ----------------------------------------------------------------------
    def coreInformationAnalysis(self):
        """Worker loop: samples sensing values into sensingParameterSequence.

        NOTE(review): most fields are filled with the placeholder value 10,
        as in the original — presumably stubs until real sensors are wired in.
        """
        while True:
            parameter = SensingParameter()
            parameter.setTimestamps(10)
            parameter.setForceFeedback(self.globalForceFeedback)
            parameter.setTorqueFeedback(self.globalTorqueFeedback)
            parameter.setDistanceFromChuckToCatheter(10)
            parameter.setTelescopicRodLength(10)
            parameter.setDistanceFromCatheterToGuidewire(10)
            parameter.setGuidewireAngle(10)
            parameter.setTranslationVelocity(10)
            parameter.setRotationVelocity(10)
            # FIX: hold the lock while appending — storingData() trims this
            # list concurrently under the same lock.
            with self.storingDataLock:
                self.sensingParameterSequence.append(parameter)
            time.sleep(0.03)

    def decisionMaking(self):
        """Worker loop: keeps the decision flag asserted (stub logic)."""
        while True:
            self.globalDecisionMade = 1
            time.sleep(0.01)

    def decision_made(self):
        """Return the current decision flag.

        FIX: the original returned ``self.decision_made`` — the bound method
        itself — which is always truthy; the intended value is the flag.
        """
        return self.globalDecisionMade

    def storingData(self):
        """Worker loop: persists sensing data in batches of 100 rows to CSV."""
        while True:
            data = list()
            with self.storingDataLock:
                if len(self.sensingParameterSequence) >= 100:
                    data = self.sensingParameterSequence[0:100]
                    del self.sensingParameterSequence[0:100]
            path = "./hapticData/hapticFeedback.csv"
            for var in data:
                tmpData = [
                    str(var.getTimestamps()),
                    str(var.getForceFeedback()),
                    str(var.getTorqueFeedback()),
                    str(var.getDistanceFromChuckToCatheter()),
                    str(var.getTelescopicRodLength()),
                    str(var.getDistanceFromCatheterToGuidewire()),
                    str(var.getGuidewireAngle()),
                    str(var.getTranslationVelocity()),
                    str(var.getRotationVelocity()),
                ]
                with open(path, 'a+') as f:
                    csv.writer(f).writerow(tmpData)
            time.sleep(1)

    # ----------------------------------------------------------------------
    # control-state accessors
    # ----------------------------------------------------------------------
    def clear_guidewire_message(self):
        self.guidewireProgressInstructionSequence = []

    def get_guidewire_control_state(self):
        return self.guidewireControlState

    def set_guidewire_control_state(self, guidewire_state):
        self.guidewireControlState = guidewire_state

    def get_catheter_control_state(self):
        return self.catheterControlState

    def set_catheter_control_state(self, catheter_state):
        self.catheterControlState = catheter_state

    def get_contrast_media_control_state(self):
        # FIX: original read the misspelled attribute
        # `contrastMediaControlstate`, raising AttributeError.
        return self.contrastMediaControlState

    def set_contrast_media_control_state(self, contrast_media_control_state):
        self.contrastMediaControlState = contrast_media_control_state

    # ----------------------------------------------------------------------
    # global sensing value accessors
    # ----------------------------------------------------------------------
    def getGlobalForceFeedback(self):
        return self.globalForceFeedback

    def setGlobalForceFeedback(self, globalForceFeedback):
        self.globalForceFeedback = globalForceFeedback

    def getGlobalTorqueFeedback(self):
        return self.globalTorqueFeedback

    def setGlobalTorqueFeedback(self, globalTorqueFeedback):
        self.globalTorqueFeedback = globalTorqueFeedback

    def getGlobalDistanceFromChuckToCatheter(self):
        return self.globalDistanceFromChuckToCatheter

    def setGlobalDistanceFromChuckToCatheter(
            self, globalDistanceFromChuckToCatheter):
        self.globalDistanceFromChuckToCatheter = globalDistanceFromChuckToCatheter

    def getGlobalTelescopicRodLength(self):
        return self.globalTelescopicRodLength

    def setGlobalTelescopicRodLength(self, globalTelescopicRodLength):
        self.globalTelescopicRodLength = globalTelescopicRodLength

    def getGlobalDistanceFromCatheterToGuidewire(self):
        return self.globalDistanceFromCatheterToGuidewire

    def setGlobalDistanceFromCatheterToGuidewire(
            self, globalDistanceFromCatheterToGuidewire):
        self.globalDistanceFromCatheterToGuidewire = globalDistanceFromCatheterToGuidewire

    def getGlobalGuidewireAngle(self):
        return self.globalGuidewireAngle

    def setGlobalGuidewireAngle(self, globalGuidewireAngle):
        self.globalGuidewireAngle = globalGuidewireAngle

    def getGlobalTranslationVelocity(self):
        # FIX: original returned the bare name `globalTranslationVelocity`
        # (NameError); the instance attribute is intended.
        return self.globalTranslationVelocity

    def setGlobalTranslationVelocity(self, globalTranslationVelocity):
        self.globalTranslationVelocity = globalTranslationVelocity

    def getGlobalRotationVelocity(self):
        return self.globalRotationVelocity

    # (duplicate definition of setGlobalRotationVelocity removed)
    def setGlobalRotationVelocity(self, globalRotationVelocity):
        self.globalRotationVelocity = globalRotationVelocity

    def getGlobalDecisionMade(self):
        return self.globalDecisionMade

    def setGlobalParameter(self, ID, parameter):
        """Dispatch a sensed value to the matching global setter by type ID."""
        if ID is GlobalParameterType.FORCEFEEDBACK:
            self.setGlobalForceFeedback(parameter)
        elif ID is GlobalParameterType.TORQUEFEEDBACK:
            self.setGlobalTorqueFeedback(parameter)
        elif ID is GlobalParameterType.DISTANCEFROMCHUCKTOCATHETER:
            self.setGlobalDistanceFromChuckToCatheter(parameter)
        elif ID is GlobalParameterType.TELESCOPICRODLENGTH:
            self.setGlobalTelescopicRodLength(parameter)
        elif ID is GlobalParameterType.DISTANCEFROMCATHETERTOGUIDEWIRE:
            self.setGlobalDistanceFromCatheterToGuidewire(parameter)
        elif ID is GlobalParameterType.GUIDEWIREANGLE:
            self.setGlobalGuidewireAngle(parameter)
        # FIX: original compared against the bare name TRANSLATIONVELOCITY
        # (NameError); all other branches qualify it with GlobalParameterType.
        elif ID is GlobalParameterType.TRANSLATIONVELOCITY:
            self.setGlobalTranslationVelocity(parameter)
        elif ID is GlobalParameterType.ROTATIONVELOCITY:
            self.setGlobalRotationVelocity(parameter)
        else:
            print("ParameterType error")

    # ----------------------------------------------------------------------
    # close-session queue (inputLock)
    # ----------------------------------------------------------------------
    def append_close_session_msg(self, close_session_msg):
        # lock added for consistency with the fetch/length accessors
        self._locked_append(self.inputLock, self.closeSessionSequence,
                            close_session_msg)

    def fetch_close_session_msg(self):
        return self._locked_pop_latest(self.inputLock, self.closeSessionSequence)

    def get_close_session_sequence_length(self):
        return self._locked_length(self.inputLock, self.closeSessionSequence)

    # ----------------------------------------------------------------------
    # injection queue (inputLock)
    # ----------------------------------------------------------------------
    def append_new_injection_msg(self, msg):
        self._locked_append(self.inputLock, self.injectionCommandSequence, msg)

    def fetch_latest_injection_msg_msg(self):
        return self._locked_pop_latest(self.inputLock,
                                       self.injectionCommandSequence)

    def get_injection_command_sequence_length(self):
        return self._locked_length(self.inputLock, self.injectionCommandSequence)

    # ----------------------------------------------------------------------
    # system lifecycle
    # ----------------------------------------------------------------------
    def close_system(self):
        """Mark the system closed and drop all pending command queues."""
        self.systemStatus = False
        self.clear()

    def open_system(self):
        self.systemStatus = True

    def get_system_status(self):
        return self.systemStatus

    def clear(self):
        """Drop all pending command queues."""
        self.catheterMoveInstructionSequence = []
        self.guidewireProgressInstructionSequence = []
        self.guidewireRotateInstructionSequence = []
        self.contrastMediaPushInstructionSequence = []
        self.retractInstructionSequence = []
        self.guidewireMovingDistance = []
        self.closeSessionSequence = []

    # ----------------------------------------------------------------------
    # guidewire moving distance (position mode, outputLock)
    # ----------------------------------------------------------------------
    def set_distance(self, dis):
        # lock added for consistency with the fetch/length accessors
        self._locked_append(self.outputLock, self.guidewireMovingDistance, dis)

    def fetch_latest_guidewire_moving_distance(self):
        """Return (without removing) the newest distance."""
        with self.outputLock:
            return self.guidewireMovingDistance[-1]

    def fetch_latest_guidewire_moving_distance_msg(self):
        """Pop and return the newest distance."""
        return self._locked_pop_latest(self.outputLock,
                                       self.guidewireMovingDistance)

    def get_latest_guidewire_moving_distance_sequence_length(self):
        return self._locked_length(self.outputLock, self.guidewireMovingDistance)

    # ----------------------------------------------------------------------
    # catheter move queue (inputLock)
    # ----------------------------------------------------------------------
    def append_new_catheter_move_message(self, msg):
        self._locked_append(self.inputLock,
                            self.catheterMoveInstructionSequence, msg)

    def fetch_latest_catheter_move_msg(self):
        return self._locked_pop_latest(self.inputLock,
                                       self.catheterMoveInstructionSequence)

    def get_catheter_move_instruction_sequence_length(self):
        return self._locked_length(self.inputLock,
                                   self.catheterMoveInstructionSequence)

    # ----------------------------------------------------------------------
    # guidewire progress queue (inputLock)
    # ----------------------------------------------------------------------
    def append_new_guidewire_progress_move_message(self, msg):
        self._locked_append(self.inputLock,
                            self.guidewireProgressInstructionSequence, msg)

    def fetch_latest_guidewire_progress_move_msg(self):
        return self._locked_pop_latest(
            self.inputLock, self.guidewireProgressInstructionSequence)

    def get_guidewire_progress_instruction_sequence_length(self):
        return self._locked_length(self.inputLock,
                                   self.guidewireProgressInstructionSequence)

    # ----------------------------------------------------------------------
    # guidewire rotate queue (inputLock)
    # ----------------------------------------------------------------------
    def append_new_guidewire_rotate_move_message(self, msg):
        self._locked_append(self.inputLock,
                            self.guidewireRotateInstructionSequence, msg)

    def fetch_latest_guidewire_rotate_move_msg(self):
        return self._locked_pop_latest(self.inputLock,
                                       self.guidewireRotateInstructionSequence)

    def get_guidewire_rotate_instruction_sequence_length(self):
        return self._locked_length(self.inputLock,
                                   self.guidewireRotateInstructionSequence)

    # ----------------------------------------------------------------------
    # contrast media push queue (inputLock)
    # ----------------------------------------------------------------------
    def append_new_contrast_media_push_move_message(self, msg):
        self._locked_append(self.inputLock,
                            self.contrastMediaPushInstructionSequence, msg)

    def fetch_latest_contrast_media_push_move_msg(self):
        return self._locked_pop_latest(
            self.inputLock, self.contrastMediaPushInstructionSequence)

    def get_contrast_media_push_instruction_sequence_length(self):
        return self._locked_length(self.inputLock,
                                   self.contrastMediaPushInstructionSequence)

    # ----------------------------------------------------------------------
    # retract queue (inputLock)
    # ----------------------------------------------------------------------
    def append_latest_retract_message(self, msg):
        self._locked_append(self.inputLock, self.retractInstructionSequence, msg)

    def fetch_latest_retract_msg(self):
        return self._locked_pop_latest(self.inputLock,
                                       self.retractInstructionSequence)

    def get_retract_instruction_sequence_length(self):
        return self._locked_length(self.inputLock,
                                   self.retractInstructionSequence)

    # ----------------------------------------------------------------------
    # force feedback queue (outputLock)
    # ----------------------------------------------------------------------
    def append_latest_forcefeedback_msg(self, msg):
        self._locked_append(self.outputLock, self.forcefeedbackSequence, msg)

    def fetch_latest_feedback_msg(self):
        return self._locked_pop_latest(self.outputLock,
                                       self.forcefeedbackSequence)

    def get_feedback_sequence_length(self):
        return self._locked_length(self.outputLock, self.forcefeedbackSequence)

    # ----------------------------------------------------------------------
    # catheter and guidewire push together (inputLock)
    # ----------------------------------------------------------------------
    def get_catheter_guidewire_push_sequence_length(self):
        return self._locked_length(self.inputLock,
                                   self.catheter_guidewire_push_sequence)

    def get_fetch_latest_catheter_guidewire_push_msg(self):
        # FIX: the original released `self.input` (nonexistent attribute),
        # raising AttributeError while inputLock was still held.
        return self._locked_pop_latest(self.inputLock,
                                       self.catheter_guidewire_push_sequence)
import sys import os import re import socket import urllib2 import urlparse import pickle import random from time import time, sleep from threading import Thread, Lock, Semaphore progs = {} progsAnalizados = 0 totalProgramas = 0 lockCB = Lock() lockProgs = Lock() conexiones = Semaphore(10) threads = [] def dl(url): #print url for i in range(2): #n intentos conexiones.acquire() try: try: f = urllib2.urlopen(url) html = f.read() return html except IOError:
from redis.client import Script, StrictRedis from redis.connection import ConnectionPool, Encoder from redis.exceptions import ConnectionError, BusyLoadingError from rediscluster import StrictRedisCluster from sentry import options from sentry.exceptions import InvalidConfiguration from sentry.utils import warnings from sentry.utils.warnings import DeprecatedSettingWarning from sentry.utils.versioning import Version, check_versions from sentry.utils.compat import map logger = logging.getLogger(__name__) _pool_cache = {} _pool_lock = Lock() def _shared_pool(**opts): if "host" in opts: key = "%s:%s/%s" % (opts["host"], opts["port"], opts["db"]) else: key = "%s/%s" % (opts["path"], opts["db"]) pool = _pool_cache.get(key) if pool is not None: return pool with _pool_lock: pool = _pool_cache.get(key) if pool is not None: return pool pool = ConnectionPool(**opts)
return total def frame_probe(source: Path): """Get frame count.""" cmd = [ "ffmpeg", "-hide_banner", "-i", source.absolute(), "-map", "0:v:0", "-f", "null", "-" ] r = subprocess.run(cmd, stdout=PIPE, stderr=PIPE) matches = re.findall(r"frame=\s*([0-9]+)\s", r.stderr.decode("utf-8") + r.stdout.decode("utf-8")) return int(matches[-1]) doneFileLock = Lock() def frame_check(source: Path, encoded: Path, temp, check): """Checking is source and encoded video frame count match.""" try: status_file = Path(temp / 'done.json') if check: s1 = frame_probe(source) doneFileLock.acquire() with status_file.open() as f: d = json.load(f) d['done'][source.name] = s1 with status_file.open('w') as f: json.dump(d, f)
class DRPReactiveController(object):
    """ROS node: reactive PID controller that drives the vehicle so a diver's
    relative position (DRP) stays at a target point in the camera image at a
    target pseudo-distance.

    Subscribes to /drp/drp_target, publishes /loco/command, exposes
    start/stop Trigger services and dynamic_reconfigure parameters.
    """

    def __init__(self):
        rospy.init_node('drp_reactive_controller')
        self.params_map = {}
        # One PID per controlled axis; the gains here are placeholders that
        # set_pid_params() overwrites from dynamic_reconfigure below.
        self.vx_pid = PID(kp=3, ki=0, deriv_prediction_dt=0.3, max_deriv_noise_gain=3)
        self.yaw_pid = PID(kp=3, ki=0, deriv_prediction_dt=0.3, max_deriv_noise_gain=3)
        self.pitch_pid = PID(kp=3, ki=0, deriv_prediction_dt=0.3, max_deriv_noise_gain=3)
        self.controller_active = False
        self.current_state = None
        self.current_observation = None  # [target_x, target_y, pseudo_distance] or None
        self.observation_ts = None       # ROS time (sec) of the latest observation
        self.current_state_mutex = Lock()
        self.current_observation_mutex = Lock()
        self.current_pid_mutex = Lock()
        self.rate = 20  # Hz; presumably consumed by the caller's spin loop — confirm
        # Block until the first DRP message arrives so the image dimensions
        # needed by compute_errors_from_estimate() are known.
        print ("Waiting for /drp/drp_target to come up")
        msg = rospy.wait_for_message('/drp/drp_target', DiverRelativePosition)
        self.image_w = msg.image_w
        self.image_h = msg.image_h
        print ("/drp/drp_target has come up")
        self.controller_params_cfg = None
        # NOTE(review): Server() invokes dynamic_reconfigure_callback (and so
        # set_pid_params) during construction; the explicit call below repeats
        # it once the attribute is assigned.
        self.dynamic_reconfigure_srv = Server(DRPControllerParamsConfig, self.dynamic_reconfigure_callback)
        self.set_pid_params()
        self.observation_sub = rospy.Subscriber("/drp/drp_target", DiverRelativePosition, self.observation_callback, queue_size=3)
        self.rpy_pub = rospy.Publisher('/loco/command', Command, queue_size=3)
        self.cmd_msg = Command()
        rospy.Service('drp_reactive_controller/start', Trigger, self.start_service_handler)
        rospy.Service('drp_reactive_controller/stop', Trigger, self.stop_service_handler)

    def start_service_handler(self, request):
        """Trigger service: enable publishing of control commands."""
        self.controller_active = True
        t = TriggerResponse()
        t.success=True
        t.message="DRP Controller started"
        return t

    def stop_service_handler(self, request):
        """Trigger service: disable publishing of control commands."""
        self.controller_active = False
        t = TriggerResponse()
        t.success=True
        t.message="DRP Controller stopped"
        return t

    def dynamic_reconfigure_callback(self, config, level):
        """Store the new parameter set and push it into the PIDs."""
        self.controller_params_cfg = config
        self.set_pid_params()
        return self.controller_params_cfg

    def observation_callback(self, msg):
        """Cache the latest DRP observation (image point + pseudo-distance)."""
        self.current_observation_mutex.acquire()
        self.current_observation = None
        self.current_observation = [msg.target_x, msg.target_y, msg.pseudo_distance]
        self.observation_ts = rospy.Time.now().to_sec()
        self.current_observation_mutex.release()

    def set_pid_params(self):
        """Copy dynamic_reconfigure values into params_map and the three PIDs."""
        self.params_map['flat_vel_kp'] = self.controller_params_cfg.flat_vel_kp
        self.params_map['flat_vel_ki'] = self.controller_params_cfg.flat_vel_ki
        self.params_map['flat_vel_deriv_prediction_dt'] = self.controller_params_cfg.flat_vel_deriv_prediction_dt
        self.params_map['flat_vel_max_deriv_noise_gain'] = self.controller_params_cfg.flat_vel_max_deriv_noise_gain
        #print (self.params_map['flat_vel_kp'], self.params_map['flat_vel_ki'], self.params_map['flat_vel_deriv_prediction_dt'])
        self.params_map['flat_yaw_kp'] = self.controller_params_cfg.flat_yaw_kp
        self.params_map['flat_yaw_ki'] = self.controller_params_cfg.flat_yaw_ki
        self.params_map['flat_yaw_deriv_prediction_dt'] = self.controller_params_cfg.flat_yaw_deriv_prediction_dt
        self.params_map['flat_yaw_max_deriv_noise_gain'] = self.controller_params_cfg.flat_yaw_max_deriv_noise_gain
        #print (self.params_map['flat_yaw_kp'], self.params_map['flat_yaw_ki'], self.params_map['flat_yaw_deriv_prediction_dt'])
        self.params_map['flat_pitch_kp'] = self.controller_params_cfg.flat_pitch_kp
        self.params_map['flat_pitch_ki'] = self.controller_params_cfg.flat_pitch_ki
        self.params_map['flat_pitch_deriv_prediction_dt'] = self.controller_params_cfg.flat_pitch_deriv_prediction_dt
        self.params_map['flat_pitch_max_deriv_noise_gain'] = self.controller_params_cfg.flat_pitch_max_deriv_noise_gain
        self.params_map['magnify_speed'] = self.controller_params_cfg.magnify_speed
        self.params_map['deadzone_abs_vel_error'] = self.controller_params_cfg.deadzone_abs_vel_error
        self.params_map['deadzone_abs_yaw_error'] = self.controller_params_cfg.deadzone_abs_yaw_error
        self.params_map['deadzone_abs_pitch_error'] = self.controller_params_cfg.deadzone_abs_pitch_error
        self.params_map['sec_before_giving_up'] = self.controller_params_cfg.sec_before_giving_up
        self.vx_pid.set_params(self.params_map['flat_vel_kp'],
                               self.params_map['flat_vel_ki'],
                               self.params_map['flat_vel_deriv_prediction_dt'],
                               self.params_map['flat_vel_max_deriv_noise_gain'])
        self.yaw_pid.set_params(self.params_map['flat_yaw_kp'],
                                self.params_map['flat_yaw_ki'],
                                self.params_map['flat_yaw_deriv_prediction_dt'],
                                self.params_map['flat_yaw_max_deriv_noise_gain'])
        self.pitch_pid.set_params(self.params_map['flat_pitch_kp'],
                                  self.params_map['flat_pitch_ki'],
                                  self.params_map['flat_pitch_deriv_prediction_dt'],
                                  self.params_map['flat_pitch_max_deriv_noise_gain'])

    def compute_errors_from_estimate(self):
        """Return (error_forward, error_x, error_y) from the cached observation.

        Caller must hold current_observation_mutex (compute_control does).
        """
        tx, ty, pd = self.current_observation
        #Our image target point is centered horizontally, and 1/3 of the way down vertically.
        # NOTE(review): the comment above says 1/3 down, but the code centers
        # vertically (image_h/2.0) — confirm which is intended.
        image_setpoint_x = self.image_w/2.0
        image_setpoint_y = self.image_h/2.0
        #########
        #Since PD is 0 if very far away and 1.0 if at ideal position, error should decrease as we get closer.
        error_forward = 1.0 - pd
        error_x = (tx - image_setpoint_x)/ float(self.image_w)  #Pixel difference between target point and DRP point, normalized by image size.
        error_y = (ty - image_setpoint_y)/ float(self.image_h)
        return (error_forward, error_x, error_y)

    def _clip(self, value, min_value, max_value):
        """Clamp value into [min_value, max_value]."""
        if value < min_value:
            return min_value
        elif value > max_value:
            return max_value
        else:
            return value

    def _acquire_all_mutexes(self):
        # acquisition order is the reverse of release order below
        self.current_observation_mutex.acquire()
        self.current_pid_mutex.acquire()

    def _release_all_mutexes(self):
        self.current_pid_mutex.release()
        self.current_observation_mutex.release()

    def compute_control(self):
        """Update the PIDs from the latest observation and fill cmd_msg.

        If the observation is stale (older than sec_before_giving_up) or
        missing, command all zeros.
        """
        self._acquire_all_mutexes()
        now = rospy.Time.now().to_sec()
        target_active = (self.current_observation is not None and (
            now - self.observation_ts < (self.params_map['sec_before_giving_up'])))
        if target_active:
            ss, yy, pp, rr, hh = 0, 0, 0, 0, 0
            error_forward, error_yaw, error_pitch = self.compute_errors_from_estimate()
            #print (error_forward, error_yaw, error_pitch)
            self.vx_pid.update(error_forward, now)
            self.yaw_pid.update(error_yaw, now)
            self.pitch_pid.update(error_pitch, now)
            if self.vx_pid.is_initialized():
                # forward pseudospeed
                ss = self._clip(self.vx_pid.control, -1, 1)
                #if ss <= self.params_map['deadzone_abs_vel_error']:
                #pass
                #ss = 0.0
                # else:
                #####
                # note: unlike yaw/pitch, no deadzone is applied to forward speed
                ss = self._clip(self.params_map['magnify_speed']*ss, -1, 1)
                #######
            if self.yaw_pid.is_initialized():
                # yaw pseudospeed
                yy = self._clip(self.yaw_pid.control, -1, 1)
                if abs(yy) <= self.params_map['deadzone_abs_yaw_error']:
                    yy = 0.0
            if self.pitch_pid.is_initialized():
                # pitch pseudospeed
                pp = self._clip(self.pitch_pid.control, -1, 1)
                if abs(pp) <= self.params_map['deadzone_abs_pitch_error']:
                    pp = 0.0
            print ('V, yaw, pitch : ', (ss, yy, pp) )
            self.set_vyprh_cmd(ss, yy, pp, rr, hh)
        else:
            print ('Target out of sight.')
            self.set_vyprh_cmd(0, 0, 0, 0, 0)
        self._release_all_mutexes()
        return

    def set_vyprh_cmd(self, ss, yy, pp, rr, hh):
        """Write speed/yaw/pitch into the cached Command message.

        Pitch is negated; roll (rr) and heave (hh) are currently unused.
        """
        self.cmd_msg.throttle = ss+0  # 0.2
        self.cmd_msg.yaw = yy
        self.cmd_msg.pitch = -pp
        #self.cmd_msg.roll = rr
        #self.cmd_msg.heave = hh

    def publish_control(self):
        """Publish the cached command, but only while the controller is active."""
        #print ('publishing ', self.cmd_msg)
        if self.controller_active:
            self.rpy_pub.publish(self.cmd_msg)
def client(queue, port, server_address, args):
    """UCX-Py core-API benchmark client.

    Connects to the benchmark server, performs n_warmup_iter + n_iter
    round-trips of args.n_bytes bytes each (AM or TAG API, blocking or
    delayed-progress non-blocking), then prints a bandwidth/latency report.
    ``queue`` is unused here — presumably kept for signature symmetry with
    server(); confirm against the caller.
    """
    if args.client_cpu_affinity >= 0:
        os.sched_setaffinity(0, [args.client_cpu_affinity])

    import numpy as np

    # Select the array backend: NumPy on host, CuPy (optionally with an RMM
    # pool allocator) on the requested GPU.
    if args.object_type == "numpy":
        import numpy as xp
    elif args.object_type == "cupy":
        import cupy as xp

        xp.cuda.runtime.setDevice(args.client_dev)
    else:
        import cupy as xp

        import rmm

        rmm.reinitialize(
            pool_allocator=True,
            managed_memory=False,
            initial_pool_size=args.rmm_init_pool_size,
            devices=[args.client_dev],
        )
        xp.cuda.runtime.setDevice(args.client_dev)
        xp.cuda.set_allocator(rmm.rmm_cupy_allocator)

    ctx = ucx_api.UCXContext(
        feature_flags=(
            ucx_api.Feature.AM if args.enable_am is True else ucx_api.Feature.TAG,
        )
    )
    worker = ucx_api.UCXWorker(ctx)
    register_am_allocators(args, worker)
    ep = ucx_api.UCXEndpoint.create(
        worker,
        server_address,
        port,
        endpoint_error_handling=True,
    )

    send_msg = xp.arange(args.n_bytes, dtype="u1")
    if args.reuse_alloc:
        # one receive buffer reused across all iterations
        recv_msg = xp.zeros(args.n_bytes, dtype="u1")

    # Wireup round-trip so connection establishment is not timed.
    if args.enable_am:
        blocking_am_send(worker, ep, send_msg)
        blocking_am_recv(worker, ep)
    else:
        wireup_recv = bytearray(len(WireupMessage))
        blocking_send(worker, ep, WireupMessage)
        blocking_recv(worker, ep, wireup_recv)

    op_lock = Lock()
    finished = [0]     # completed ops (list so the closures can mutate it)
    outstanding = [0]  # ops currently in flight

    def maybe_progress():
        # throttle: progress the worker until in-flight ops drop below the cap
        while outstanding[0] >= args.max_outstanding:
            worker.progress()

    def op_started():
        with op_lock:
            outstanding[0] += 1

    def op_completed():
        with op_lock:
            outstanding[0] -= 1
            finished[0] += 1

    if args.cuda_profile:
        xp.cuda.profiler.start()

    times = []
    for i in range(args.n_iter + args.n_warmup_iter):
        start = clock()

        if args.enable_am:
            blocking_am_send(worker, ep, send_msg)
            blocking_am_recv(worker, ep)
        else:
            if not args.reuse_alloc:
                recv_msg = xp.zeros(args.n_bytes, dtype="u1")

            if args.delay_progress:
                maybe_progress()
                non_blocking_send(worker, ep, send_msg, op_started, op_completed)
                maybe_progress()
                non_blocking_recv(worker, ep, recv_msg, op_started, op_completed)
            else:
                blocking_send(worker, ep, send_msg)
                blocking_recv(worker, ep, recv_msg)

        stop = clock()
        if i >= args.n_warmup_iter:
            # only post-warmup iterations count towards the statistics
            times.append(stop - start)

    if args.delay_progress:
        # drain: every iteration posted one send and one recv
        while finished[0] != 2 * (args.n_iter + args.n_warmup_iter):
            worker.progress()

    if args.cuda_profile:
        xp.cuda.profiler.stop()

    assert len(times) == args.n_iter

    # Each round-trip moves 2 * n_bytes (send + receive); latency is half a
    # round-trip, hence the factors of 2 below.
    bw_avg = format_bytes(2 * args.n_iter * args.n_bytes / sum(times))
    bw_med = format_bytes(2 * args.n_bytes / np.median(times))
    lat_avg = int(sum(times) * 1e9 / (2 * args.n_iter))
    lat_med = int(np.median(times) * 1e9 / 2)

    delay_progress_str = (
        f"True ({args.max_outstanding})" if args.delay_progress is True else "False"
    )

    print("Roundtrip benchmark")
    print_separator(separator="=")
    print_key_value(key="Iterations", value=f"{args.n_iter}")
    print_key_value(key="Bytes", value=f"{format_bytes(args.n_bytes)}")
    print_key_value(key="Object type", value=f"{args.object_type}")
    print_key_value(key="Reuse allocation", value=f"{args.reuse_alloc}")
    print_key_value(key="Transfer API", value=f"{'AM' if args.enable_am else 'TAG'}")
    print_key_value(key="Delay progress", value=f"{delay_progress_str}")
    print_key_value(key="UCX_TLS", value=f"{ucp.get_config()['TLS']}")
    print_key_value(key="UCX_NET_DEVICES", value=f"{ucp.get_config()['NET_DEVICES']}")
    print_separator(separator="=")
    if args.object_type == "numpy":
        print_key_value(key="Device(s)", value="CPU-only")
        s_aff = (
            args.server_cpu_affinity
            if args.server_cpu_affinity >= 0
            else "affinity not set"
        )
        c_aff = (
            args.client_cpu_affinity
            if args.client_cpu_affinity >= 0
            else "affinity not set"
        )
        print_key_value(key="Server CPU", value=f"{s_aff}")
        print_key_value(key="Client CPU", value=f"{c_aff}")
    else:
        print_key_value(key="Device(s)", value=f"{args.server_dev}, {args.client_dev}")
    print_separator(separator="=")
    print_key_value("Bandwidth (average)", value=f"{bw_avg}/s")
    print_key_value("Bandwidth (median)", value=f"{bw_med}/s")
    print_key_value("Latency (average)", value=f"{lat_avg} ns")
    print_key_value("Latency (median)", value=f"{lat_med} ns")
    if not args.no_detailed_report:
        print_separator(separator="=")
        print_key_value(key="Iterations", value="Bandwidth, Latency")
        print_separator(separator="-")
        for i, t in enumerate(times):
            ts = format_bytes(2 * args.n_bytes / t)
            lat = int(t * 1e9 / 2)
            print_key_value(key=i, value=f"{ts}/s, {lat}ns")
copy_current_request_context from flask_socketio import SocketIO, emit, join_room, leave_room, \ close_room, rooms, disconnect import sqlite3 from flask import request, jsonify # Set this variable to "threading", "eventlet" or "gevent" to test the # different async modes, or leave it set to None for the application to choose # the best option based on installed packages. async_mode = None app = Flask(__name__) app.config['SECRET_KEY'] = 'secret!' socketio = SocketIO(app, async_mode=async_mode) thread = None thread_lock = Lock() db_name = '/home/pi/flask/Flask-SocketIO/example/com4016.db' def create_db(): # 连接 conn = sqlite3.connect(db_name) c = conn.cursor() # 创建表 c.execute('''DROP TABLE IF EXISTS TB4016''') # 删除旧表,如果存在(因为这是临时数据) c.execute('''CREATE TABLE TB4016 (id INTEGER PRIMARY KEY AUTOINCREMENT, port text,insert_time text,device text, step int, valuec float, absval float)''') # 关闭 conn.close()
def __init__(self, filename, unsafe=False):
    """Remember the managed file and initialise refresh bookkeeping.

    :param filename: path of the file this object wraps
    :param unsafe: opt-in relaxed mode flag (semantics defined by the
        methods of this class — not visible here, confirm before relying
        on it)
    """
    self.filename = filename
    self.unsafe = unsafe
    # no refresh has happened yet, in either local or UTC time
    self.ts_refreshed = None
    self.ts_refreshed_utc = None
    # serialises concurrent access to the refresh state
    self._lock = Lock()
def server(queue, args):
    """UCX-Py core-API benchmark server: echo every message back to the client.

    Reports the listener's chosen port through ``queue`` and serves
    n_warmup_iter + n_iter echo round-trips, using the AM or TAG API and
    the array backend selected by ``args``.
    """
    if args.server_cpu_affinity >= 0:
        os.sched_setaffinity(0, [args.server_cpu_affinity])

    # Select the array backend: NumPy on host, CuPy (optionally with an RMM
    # pool allocator) on the requested GPU.
    if args.object_type == "numpy":
        import numpy as xp
    elif args.object_type == "cupy":
        import cupy as xp

        xp.cuda.runtime.setDevice(args.server_dev)
    else:
        import cupy as xp

        import rmm

        rmm.reinitialize(
            pool_allocator=True,
            managed_memory=False,
            initial_pool_size=args.rmm_init_pool_size,
            devices=[args.server_dev],
        )
        xp.cuda.runtime.setDevice(args.server_dev)
        xp.cuda.set_allocator(rmm.rmm_cupy_allocator)

    ctx = ucx_api.UCXContext(
        feature_flags=(
            ucx_api.Feature.AM if args.enable_am is True else ucx_api.Feature.TAG,
        )
    )
    worker = ucx_api.UCXWorker(ctx)
    register_am_allocators(args, worker)

    # A reference to listener's endpoint is stored to prevent it from going
    # out of scope too early.
    ep = None

    op_lock = Lock()
    finished = [0]     # completed ops (list so the closures can mutate it)
    outstanding = [0]  # ops currently in flight

    def op_started():
        with op_lock:
            outstanding[0] += 1

    def op_completed():
        with op_lock:
            outstanding[0] -= 1
            finished[0] += 1

    def _send_handle(request, exception, msg):
        # Notice, we pass `msg` to the handler in order to make sure
        # it doesn't go out of scope prematurely.
        assert exception is None
        op_completed()

    def _tag_recv_handle(request, exception, ep, msg):
        # Echo the received buffer straight back over the TAG API.
        assert exception is None
        req = ucx_api.tag_send_nb(
            ep, msg, msg.nbytes, tag=0, cb_func=_send_handle, cb_args=(msg,)
        )
        if req is None:
            # send completed inline — no callback will fire for it
            op_completed()

    def _am_recv_handle(recv_obj, exception, ep):
        # Echo the received buffer straight back over the AM API.
        assert exception is None
        msg = Array(recv_obj)
        ucx_api.am_send_nbx(ep, msg, msg.nbytes, cb_func=_send_handle, cb_args=(msg,))

    def _listener_handler(conn_request, msg):
        # BUG FIX: this was `global ep`, which wrote a *module-level* `ep`
        # and left the enclosing function's `ep` (declared above precisely
        # to keep the endpoint alive) permanently None.  `nonlocal` binds
        # the intended variable.
        nonlocal ep
        ep = ucx_api.UCXEndpoint.create_from_conn_request(
            worker,
            conn_request,
            endpoint_error_handling=True,
        )

        # Wireup before starting to transfer data
        if args.enable_am is True:
            ucx_api.am_recv_nb(ep, cb_func=_am_recv_handle, cb_args=(ep,))
        else:
            wireup = Array(bytearray(len(WireupMessage)))
            op_started()
            ucx_api.tag_recv_nb(
                worker,
                wireup,
                wireup.nbytes,
                tag=0,
                cb_func=_tag_recv_handle,
                cb_args=(ep, wireup),
            )

        # Pre-post one echo (recv + send-back) per benchmark iteration.
        for i in range(args.n_iter + args.n_warmup_iter):
            if args.enable_am is True:
                ucx_api.am_recv_nb(ep, cb_func=_am_recv_handle, cb_args=(ep,))
            else:
                if not args.reuse_alloc:
                    msg = Array(xp.zeros(args.n_bytes, dtype="u1"))

                op_started()
                ucx_api.tag_recv_nb(
                    worker,
                    msg,
                    msg.nbytes,
                    tag=0,
                    cb_func=_tag_recv_handle,
                    cb_args=(ep, msg),
                )

    # With reuse_alloc a single receive buffer is shared by all iterations.
    if not args.enable_am and args.reuse_alloc:
        msg = Array(xp.zeros(args.n_bytes, dtype="u1"))
    else:
        msg = None

    listener = ucx_api.UCXListener(
        worker=worker, port=args.port or 0, cb_func=_listener_handler, cb_args=(msg,)
    )
    # Tell the client process which port was actually bound.
    queue.put(listener.port)

    # Wait for the first operation to be posted before entering the main loop.
    while outstanding[0] == 0:
        worker.progress()

    # +1 to account for wireup message
    if args.delay_progress:
        while finished[0] < args.n_iter + args.n_warmup_iter + 1 and (
            outstanding[0] >= args.max_outstanding
            or finished[0] + args.max_outstanding
            >= args.n_iter + args.n_warmup_iter + 1
        ):
            worker.progress()
    else:
        while finished[0] != args.n_iter + args.n_warmup_iter + 1:
            worker.progress()
class GstBasePipeline(object):
    """Base class for a GStreamer capture pipeline.

    Owns the Gst.Pipeline, a video source bin, a video encoder bin and a
    photo capture bin, plus a bus listener that dispatches ERROR / EOS /
    STATE_CHANGED / REQUEST_STATE messages to the handlers below.

    NOTE(review): ``self._logger`` is used throughout but never assigned in
    this class — presumably set by a subclass or mixin; verify before use.
    Subclasses must implement _getVideoSrcBin and _getVideoEncBin.
    """

    def __init__(self, device, size, rotation, onFatalError, mainLoop, debugLevel):
        # Gst must be initialized before any other Gst call.
        if not Gst.init_check(None):
            raise ImportError

        # debugLevel > 0 turns on GStreamer's own debug output.
        if debugLevel > 0:
            Gst.debug_set_active(True)
            Gst.debug_set_default_threshold(debugLevel)

        self._onFatalError = onFatalError
        # NOTE(review): attribute name looks like a typo for "_mainLoop";
        # kept as-is since tearDown reads the same name.
        self._mainLop = mainLoop
        self._toreDownAlready = False

        # pipeline control
        self._currentPipelineState = None
        # NOTE(review): this Condition appears unused within this class —
        # possibly consumed by subclasses; confirm before removing.
        self._pipelineStateCondition = Condition()
        self._photoBinAttachDetachLock = Lock()  # Make sure attach and detach operation wait for each other to complete

        self._pipeline = Gst.Pipeline()
        self._videoSrcBin = self._getVideoSrcBin(self._pipeline, device, size, rotation)
        self._videoEncBin = self._getVideoEncBin(size, rotation)
        self._photoCaptureBin = PhotoCaptureBin(self._onNoMorePhotos)

        self._pipeline.add(self._videoEncBin.bin)
        self._pipeline.add(self._photoCaptureBin.bin)

        self._bus = self._pipeline.get_bus()
        self._bus.set_flushing(True)

        # Route bus messages to the handler methods below.
        self._busListener = BusListener(self._bus)
        self._busListener.addListener(Gst.MessageType.ERROR, self._onBusError)
        self._busListener.addListener(Gst.MessageType.EOS, self._onBusEos)
        self._busListener.addListener(Gst.MessageType.STATE_CHANGED, self._onBusStateChanged)
        self._busListener.addListener(Gst.MessageType.REQUEST_STATE, self._onRequestState)
        self._busListener.start()

    def __del__(self):
        # NOTE(review): relies on self._logger existing at interpreter
        # shutdown; __del__ ordering is not guaranteed.
        self._logger.info('Pipeline destroyed')

    def __fatalErrorManager(self, details):
        # Forward fatal errors to the callback supplied at construction.
        self._onFatalError(details)

    def _attachBin(self, bin):
        # Attach a bin to a freshly requested pad of the source tee.
        # Returns whatever bin.attach returns (truthy on success).
        return bin.attach(self._videoSrcBin.requestSrcTeePad())

    def _detachBin(self, bin, doneCallback= None):
        # Asynchronous: completion is signalled through doneCallback.
        bin.detach(doneCallback)

    def _stopPipeline(self, doneCallback= None):
        # Force the pipeline to NULL, notify the caller, then tear down.
        def onChangeDone():
            if doneCallback:
                doneCallback(True)
            self.tearDown()

        self._pipeline.set_state(Gst.State.NULL)
        onChangeDone()

    def setToPlayAndWait(self):
        """Set the pipeline to PLAYING and block until it gets there.

        Returns True on success (or if already PLAYING), False otherwise.
        """
        if self._currentPipelineState != Gst.State.PLAYING:
            self._pipeline.set_state(Gst.State.PLAYING)
            # waitToReachState blocks with a timeout (10s, 3 retries here).
            if waitToReachState(self._pipeline, Gst.State.PLAYING, 10.0, 3):
                self._logger.debug( "Succesfully changed pipeline [%s] state to \033[93mPLAYING\033[0m" % self._pipeline.__class__.__name__)
                self._currentPipelineState = Gst.State.PLAYING
                result = True
            else:
                stateReturn, state, pending = self._pipeline.get_state(1)
                self._logger.error( "Error [%s] to change pipeline state to \033[93mPLAYING\033[0m, stayed on \033[93m%s\033[0m" % (stateReturn.value_name.replace('GST_STATE_CHANGE_',''), state.value_name.replace('GST_STATE_','')) )
                result = False
        else:
            result = True

        return result

    def _onNoMorePhotos(self):
        # Called by PhotoCaptureBin when its photo queue drains; detaches
        # the photo bin, waiting up to 2s for the detach to complete.
        self._logger.debug('No more photos in Photo Queue')
        waitForDetach = Event()

        def onDetached(success):
            # Guard against late/duplicate invocations after the timeout.
            if not waitForDetach.is_set():
                if not success:
                    self._logger.warn('There was an error detaching Photos Bin')
                waitForDetach.set()

        self._photoBinAttachDetachLock.acquire()
        self._detachBin(self._photoCaptureBin, onDetached)
        if not waitForDetach.wait(2.0):
            self._logger.warn('Timeout detaching Photos Bin')
        self._photoBinAttachDetachLock.release()

    def tearDown(self):
        """Stop the bus listener, kill the pipeline and quit the main loop.

        Idempotent via the _toreDownAlready flag.
        """
        if not self._toreDownAlready:
            self._logger.debug("Tearing down...")
            self._busListener.stop()
            stateChange, state, pending = self._pipeline.get_state(1)
            # if it's still trying to change to another state, the following two calls will
            # block so just kill all of it
            if stateChange != Gst.StateChangeReturn.ASYNC:
                self._pipeline.set_state(Gst.State.NULL)
                Gst.deinit()
            self._videoSrcBin = None
            self._videoEncBin = None
            self._photoCaptureBin = None
            self._bus = None
            self._toreDownAlready = True
            if self._mainLop.is_running():
                self._mainLop.quit()
            self._busListener.join()
            self._logger.debug("Tearing down completed")

    def takePhoto(self, doneCallback, text=None):
        """Queue a photo request, attaching the photo bin on first use.

        doneCallback(False) is invoked immediately if attaching fails.
        """
        if not self._photoCaptureBin.isLinked:
            if self._attachBin(self._photoCaptureBin):
                self._photoCaptureBin.addPhotoReq(text, doneCallback )
            else:
                doneCallback(False)
        else:
            self._photoCaptureBin.addPhotoReq(text, doneCallback)

    def playVideo(self, doneCallback= None):
        """Attach the video encoder bin and start streaming.

        No-op (reports True) when already streaming; otherwise reports
        through doneCallback whether the encoder bin ended up playing.
        """
        if self.isVideoStreaming():
            if doneCallback:
                doneCallback(True)
            return

        result = False
        if self._attachBin(self._videoEncBin):
            if self._videoEncBin.isPlaying:
                result = True
            else:
                self._logger.error('Video Encoding Bin is not playing.')

        if doneCallback:
            doneCallback(result)

    def stopVideo(self, doneCallback= None):
        """Detach the video encoder bin; reports completion via callback."""
        if not self.isVideoStreaming():
            if doneCallback:
                doneCallback(True)
            return

        if self._videoEncBin.isLinked:
            self._detachBin(self._videoEncBin, doneCallback)
        elif doneCallback:
            doneCallback(True)

    def isVideoStreaming(self):
        # Streaming state is delegated to the encoder bin.
        return self._videoEncBin.isPlaying

    ### Signal Handlers and Callbacks

    def _onBusError(self, msg):
        # Any bus ERROR is treated as fatal.
        busError, detail = msg.parse_error()
        self._logger.error("gstreamer error: %s\n--- More Info: ---\n%s\n------------------" % (busError, detail))
        self.__fatalErrorManager(busError.message)
        # KEEP THIS. It might be useful to debug hard to find errors
        '''
        if self._logger.isEnabledFor(logging.DEBUG):
            try:
                Gst.debug_bin_to_dot_file (self._pipeline, Gst.DebugGraphDetails.ALL, "fatal-error")
                self._logger.info( "Gstreamer's pipeline dot file created: %s/fatal-error.dot" % os.getenv("GST_DEBUG_DUMP_DOT_DIR") )
            except:
                self._logger.error("Graphic diagram can not created")
        '''

    def _onBusEos(self, msg):
        # End-of-stream is unexpected for a live pipeline -> fatal.
        self._logger.warn("gstreamer EOS (End of Stream) message received.")
        self.__fatalErrorManager('EOS Received')

    def _onBusStateChanged(self, msg):
        # Debug-log every element state transition seen on the bus.
        old, new, pending = msg.parse_state_changed()
        self._logger.debug( "\033[90m%20.20s\033[0m: \033[93m%7.7s\033[0m --> \033[93m%s\033[0m --| \033[93m%s\033[0m" % (msg.src.__class__.__name__.replace('__main__.',''), old.value_name.replace('GST_STATE_',''), new.value_name.replace('GST_STATE_',''), pending.value_name.replace('GST_STATE_','')) )

    def _onRequestState(self, msg):
        # Honor state requests coming from elements on the bus.
        state = msg.parse_request_state()
        self._logger.debug('%s requested state change to \033[93m%s\033[0m' % (msg.src.__class__.__name__.replace('__main__.',''), state.value_name.replace('GST_STATE_','')))
        msg.src.set_state(state)

    ### Implement these in child clases

    def _getVideoSrcBin(self, pipeline, device, size, rotation):
        # Subclass hook: build and return the video source bin.
        pass

    def _getVideoEncBin(self, size, rotation):
        # Subclass hook: build and return the video encoder bin.
        pass
class BlockChain:
    """SQLite-backed block store with a two-deep tip ("tip"/"tip2") fork buffer
    and an on-disk trie of identity IDs (file "./trie", fixed 11-byte records).

    NOTE(review): blocks are (de)serialized with pickle — unsafe on untrusted
    data. NOTE(review): self.lock is acquired/released manually throughout; an
    exception between acquire and release leaves it held (no try/finally).
    """

    # SQL statements used by the methods below.
    CREATE_BLOCK_TABLE = "create table if not EXISTS Blocks (Id_hash text primary key, height int not null, isTip boolean, file Binary not NULL, previous_block_hash text not NULL );"
    UPDATE_TIP_TO_0 = "update Blocks set isTip=0 where isTip=1"
    UPDATE = "update Blocks set isTip=? where Id_hash=?"
    CREATE_INDEX = "create index if not EXISTS Block_hash on Blocks(Id_hash);"
    INSERT_BLOCK = "insert INTO Blocks VALUES (?,?,?,?,?)"
    GET_BLOCK = "select file FROM blocks where Id_hash=(?)"

    def __init__(self, path=None):
        # path: database file; defaults to ./BlockChain.sqlite3
        # (p is presumably os.path or similar — imported elsewhere in the file).
        self.path = path
        self.lock = Lock()
        if path is None:
            self.db = sqlite3.connect(p.curdir + "/BlockChain.sqlite3")
            self.path = p.curdir + "/BlockChain.sqlite3"
        else:
            self.db = sqlite3.connect(path)
        if os.path.exists("./trie"):
            # Rebuild the in-memory trie from fixed-width 11-byte IDs.
            file = io.open("./trie", 'rb')
            self.trie = Trie.Trie('$')
            id = file.read(11)
            while len(id) == 11:
                self.trie.insert(id.decode())
                id = file.read(11)
            file.close()
        else:
            # No trie file: start from scratch (drops any stale Blocks table).
            self.db.cursor().execute("drop table if exists Blocks")
            io.open("./trie", 'xb')
            self.trie = Trie.Trie('$')
        self.tip2 = []
        self.init_db()
        self.db.close()

    def init_db(self):
        """Create schema if needed and restore tip/height from the DB."""
        cursor = self.db.cursor()
        cursor.execute(self.CREATE_BLOCK_TABLE)
        cursor.execute(self.CREATE_INDEX)
        row = cursor.execute("select file from Blocks where isTip=1")
        file = row.fetchone()
        if file != None:
            self.tip = pickle.loads(file[0])
            # dict=serpent.loads(file[0])
            # self.tip=BlockChain.load_block(dict)
            count = cursor.execute("select count(*) from Blocks")
            # NOTE(review): get_block() rebinds self.db to a new connection
            # and closes it; the original connection (and this cursor) stay
            # usable but the old connection object is never closed — leak.
            self.tip2.append(
                self.get_block(self.tip.header.previous_block_hash))
            # Written via __dict__ to bypass the guard in __setattr__.
            self.__dict__["height"] = count.fetchone()[0]
        else:
            self.__dict__["height"] = 0

    def __setattr__(self, key, value):
        # "height" is read-only through attribute syntax; internal code
        # mutates it via self.__dict__["height"] instead.
        if key == "height":
            raise Exception("height can not be modifided")
        else:
            # NOTE(review): works because super.__setattr__ resolves to
            # object.__setattr__, but super().__setattr__(key, value) is
            # the conventional spelling.
            return super.__setattr__(self, key, value)

    @classmethod
    def load_block(self, blockdict):
        """Rebuild a Block (and its BlockHeader) from a plain dict."""
        block = Block(0, 0, 0, 0)
        header = BlockHeader(0, 0, 0, 0)
        header.__dict__ = blockdict["header"]
        block.__dict__ = blockdict
        block.header = header
        return block

    def contains(self, block):
        """Return True if a block with the same hash is stored."""
        # NOTE(review): opens a new connection but never closes it, and
        # does not take self.lock like the other DB methods — verify.
        self.open_db()
        cursor = self.db.cursor()
        row = cursor.execute("select Id_hash from Blocks where Id_hash=?",
                             [str(block.get_hash())])
        if row.fetchone() != None:
            return True
        else:
            return False

    def add_block(self, block):
        """Append a block, handling a one-block fork.

        If the block does not extend the current tip but extends the
        secondary tip (tip2), it is buffered; once the side chain reaches
        length >= 3 it is persisted and becomes the main tip.
        """
        self.lock.acquire()
        self.db = sqlite3.connect(self.path)
        if hasattr(
                self,
                "tip") and block.header.previous_block_hash != self.tip.get_hash():
            if self.tip2[-1].get_hash() == block.header.previous_block_hash:
                self.tip2.append(block)
                if len(self.tip2) >= 3:
                    # Side chain won: persist its blocks and promote it.
                    cursor = self.db.cursor()
                    self.tip2 = [self.tip2[1], self.tip2[2]]
                    for block in self.tip2:
                        if hasattr(block, "identities"):
                            # Append the block's identity IDs to the trie file.
                            file = io.open("./trie", 'ab')
                            for identity in block.identities:
                                self.trie.insert(identity.ID)
                                file.write(identity.ID.encode())
                            file.close()
                        file = pickle.dumps(block)
                        cursor.execute(self.INSERT_BLOCK, [
                            block.get_hash(), self.__dict__["height"], 0, file,
                            block.header.previous_block_hash
                        ])
                    cursor.execute(self.UPDATE_TIP_TO_0)
                    cursor.execute(self.UPDATE, [1, self.tip2[-1].get_hash()])
                    tip2 = self.tip2[-2]
                    self.tip = self.tip2[-1]
                    self.tip2.clear()
                    self.tip2.append(tip2)
            # NOTE(review): this early-return path neither commits nor
            # closes self.db — confirm whether that is intentional.
            self.lock.release()
            return
        if hasattr(self, "tip"):
            # The displaced tip becomes the secondary tip.
            self.tip2.clear()
            tip = self.tip
            self.tip2.append(tip)
        self.tip = block
        if hasattr(block, "identities"):
            # Append the block's identity IDs to the trie file.
            file = io.open("./trie", 'ab')
            for identity in block.identities:
                self.trie.insert(identity.ID)
                file.write(identity.ID.encode())
            file.close()
        file = pickle.dumps(block)
        cursor = self.db.cursor()
        id = block.get_hash()
        cursor.execute(self.UPDATE_TIP_TO_0)
        cursor.execute(self.INSERT_BLOCK, [
            id, self.__dict__["height"], 1, file,
            block.header.previous_block_hash
        ])
        self.__dict__["height"] += 1
        self.db.commit()
        self.db.close()
        self.lock.release()

    def add_blocks(self, blocks):
        """Bulk-insert blocks (e.g. during sync); each becomes the new tip."""
        self.lock.acquire()
        self.open_db()
        for block in blocks:
            # NOTE(review): fork detection is a no-op here (pass) — blocks
            # are inserted even when they don't extend the current tip.
            if hasattr(
                    self,
                    "tip") and block.header.previous_block_hash != self.tip.get_hash():
                pass
            self.tip = block
            if hasattr(block, "identities"):
                file = io.open("./trie", 'ab')
                for identity in block.identities:
                    self.trie.insert(identity.ID)
                    file.write(identity.ID.encode())
                file.close()
            file = pickle.dumps(block)
            cursor = self.db.cursor()
            id = block.get_hash()
            cursor.execute(self.INSERT_BLOCK, [
                id, self.__dict__["height"], 1, file,
                str(block.header.previous_block_hash)
            ])
            # Previous block loses its tip flag.
            cursor.execute(self.UPDATE, [0, block.header.previous_block_hash])
            self.__dict__["height"] += 1
        self.db.commit()
        self.db.close()
        self.lock.release()

    def open_db(self):
        # Rebinds self.db to a fresh connection (sqlite3 connections are
        # thread-bound, hence the reconnect-per-operation pattern).
        self.db = sqlite3.connect(self.path)

    def get_block(self, hash):
        """Load and unpickle the block with the given hash, or None."""
        self.lock.acquire()
        self.open_db()
        cursor = self.db.cursor()
        row = cursor.execute(self.GET_BLOCK, [str(hash)])
        file = row.fetchone()
        if file is None:
            print("no existe el bloque " + hash)  # "block does not exist"
            self.db.close()
            self.lock.release()
            return
        block = pickle.loads(file[0])
        self.db.close()
        self.lock.release()
        return block

    def get_hash_from(self, start):
        """"
        :return every block hash from start hash to tip
        """
        self.lock.acquire()
        self.open_db()
        cursor = self.db.cursor()
        # NOTE(review): SQL built by string concatenation; `start` looks
        # numeric (a height) but parameterizing it would be safer.
        rows = cursor.execute("select Id_hash from Blocks where height>=" +
                              str(start) + " limit 500")
        hashs = [row[0] for row in rows.fetchall()]
        self.db.close()
        self.lock.release()
        return hashs
def __init__(self): self.__list = [] self.__lock = Lock()
class MapManager:
    """Maintains the robot's multilayered occupancy map from ROS topics.

    Subscribes to the static map, a simulated robot pose and a simulated
    field-of-view point cloud; publishes the merged occupancy grid and
    candidate push poses. Two class-level locks serialize access:
    map_lock guards multilayered_map, robot_pose_lock guards
    current_robot_pose.
    """

    map_lock = Lock()
    robot_pose_lock = Lock()

    def __init__(self, robot_radius, robot_fov_radius, static_map_topic, robot_occ_grid_topic, push_poses_topic, simulated_pose_topic, simulated_fov_pointcloud_topic):
        # Get parameters
        self.static_map_topic = static_map_topic
        self.robot_occ_grid_topic = robot_occ_grid_topic
        self.push_poses_topic = push_poses_topic
        self.simulated_pose_topic = simulated_pose_topic
        self.simulated_fov_pointcloud_topic = simulated_fov_pointcloud_topic

        # Declare common parameters
        self.static_map = None
        self.init_map = None
        self.multilayered_map = None
        self.has_free_space_been_created = False
        self.current_robot_pose = None

        # Create subscribers
        rospy.Subscriber(self.static_map_topic, OccupancyGrid, self._static_map_callback)
        rospy.Subscriber(self.simulated_fov_pointcloud_topic, PointCloud, self._simulated_fov_pointcloud_callback)
        rospy.Subscriber(self.simulated_pose_topic, PoseStamped, self._simulated_pose_callback)

        # Create publishers
        self.robot_occ_grid_pub = rospy.Publisher(self.robot_occ_grid_topic, OccupancyGrid, queue_size=1)
        self.push_poses_pub = rospy.Publisher(self.push_poses_topic, PoseArray, queue_size=1)

        # Initialize map: block until the first static map arrives.
        while self.static_map is None:
            rospy.sleep(0.2)

        self.robot_metadata = RobotMetaData(robot_radius, robot_fov_radius, self.static_map.info.resolution)
        self.init_map = MultilayeredMap(self.static_map, self.robot_metadata)
        self.multilayered_map = copy.deepcopy(self.init_map)

    def _static_map_callback(self, new_map):
        # For the moment, we don't want to manage new static maps for the
        # node's life duration
        if self.static_map is None:
            self.static_map = new_map

    def _simulated_pose_callback(self, pose):
        # Store the latest pose under the pose lock.
        MapManager.robot_pose_lock.acquire()
        self.current_robot_pose = pose
        MapManager.robot_pose_lock.release()

    def _simulated_fov_pointcloud_callback(self, pointcloud):
        # Snapshot the pose under the lock so the rest of the callback
        # works on a consistent value.
        MapManager.robot_pose_lock.acquire()
        robot_pose_copy = copy.deepcopy(self.current_robot_pose)
        MapManager.robot_pose_lock.release()

        if self.multilayered_map is not None and robot_pose_copy is not None:
            # BUG FIX: compare the snapshot's timestamp, not
            # self.current_robot_pose, which was read again here without
            # holding robot_pose_lock and could have been replaced by
            # _simulated_pose_callback in the meantime.
            if (robot_pose_copy.header.stamp.to_nsec() <= pointcloud.header.stamp.to_nsec()):
                MapManager.map_lock.acquire()
                self.multilayered_map.update_from_point_cloud(pointcloud, robot_pose_copy)
                MapManager.map_lock.release()
                self.publish_ros_merged_occ_grid()

    def get_init_map(self):
        """Publish the inflated static grid and return a copy of the initial map."""
        self.publish_ros_static_inflated_grid()
        return copy.deepcopy(self.init_map)  # We make a copy to be sure its not changed

    def get_map_copy(self):
        """Return a deep copy of the current map, taken under map_lock."""
        MapManager.map_lock.acquire()
        map_copy = copy.deepcopy(self.multilayered_map)
        MapManager.map_lock.release()
        return map_copy

    # FIXME DEPRECATED
    def manually_add_obstacle(self, obstacle):
        self.multilayered_map.manually_add_obstacle(obstacle)

    def publish_ros_merged_occ_grid(self):
        # Convert the merged grid matrix to a ROS OccupancyGrid and publish.
        ros_merged_occ_grid = Utils.convert_matrix_to_ros_occ_grid(self.multilayered_map.merged_occ_grid,
                                                                   self.static_map.header,
                                                                   self.static_map.info)
        Utils.publish_once(self.robot_occ_grid_pub, ros_merged_occ_grid)

    def publish_ros_static_inflated_grid(self):
        # Publish the robot-radius-inflated static occupancy grid.
        ros_merged_occ_grid = Utils.convert_matrix_to_ros_occ_grid(self.multilayered_map.inflated_static_occ_grid,
                                                                   self.static_map.header,
                                                                   self.static_map.info)
        Utils.publish_once(self.robot_occ_grid_pub, ros_merged_occ_grid)

    def publish_all_push_poses(self):
        """Publish every obstacle's push poses as one PoseArray."""
        all_push_poses = PoseArray()
        all_push_poses.header = self.static_map.header
        all_push_poses.header.stamp = rospy.Time.now()

        for obstacle_id, obstacle in self.multilayered_map.obstacles.items():
            for push_pose in obstacle.push_poses:
                all_push_poses.poses = all_push_poses.poses + [push_pose.pose]

        Utils.publish_once(self.push_poses_pub, all_push_poses)
def setUp(self):
    """Reset fixture state: no updater, no received messages, fresh lock."""
    self.lock = Lock()
    self.message_count = 0
    self.received_message = None
    self.updater = None
class Node(object):
    """ Singleton Node class.

    Holds the data a device node needs between requests: per-model queues of
    next-layer IP addresses, the model loaded once into memory, the default
    Tensorflow graph, and timing counters for received frames.

    Attributes:
        ip: Dict mapping model type to a Queue of ip addresses.
        model: Loaded models associated to a node.
        graph: Default graph used by Tensorflow.
        debug: Flag for debugging.
        lock: Threading lock serializing model forwarding; a new request
            waits until the previous forwarding finishes.
        name: Model name.
        total: Total time of getting frames.
        count: Total number of frames gets back.
        input: Buffer (deque) holding input for the last fully connected
            layer; extra data is kicked out, unused data kept.
    """
    instance = None

    def __init__(self):
        self.debug = False
        self.name = 'unknown'
        self.ip = dict()
        self.input = deque()
        self.model = None
        self.graph = tf.get_default_graph()
        self.lock = Lock()
        self.total = 0
        self.count = 1

    def log(self, step, data=''):
        """ Log function for debug. Turn the flag on to show each step result.

        Args:
            step: Each step names.
            data: Data format or size.
        """
        if not self.debug:
            return
        print(
            '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
        )
        # Render step then data, wrapped into centered 68-char segments.
        for text in (step, data):
            for offset in range(0, len(text), 68):
                print('+{:^68.68}+'.format(text[offset:offset + 68]))
        print(
            '++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++'
        )
        print()

    def acquire_lock(self):
        """Block until the forwarding lock is held."""
        self.lock.acquire()

    def release_lock(self):
        """Release the forwarding lock."""
        self.lock.release()

    def timer(self, interval):
        """Accumulate a frame interval and print the running average."""
        self.total += interval
        average = self.total / self.count
        print('{:s}: {:.3f}'.format(self.name, average))
        self.count += 1

    @classmethod
    def create(cls):
        """Return the singleton instance, constructing it on first call."""
        if cls.instance is None:
            cls.instance = cls()
        return cls.instance