Example #1
class ImageLogger(object):

    def __init__(self, dev_id):
        config = ConfigParser.ConfigParser()
        config.read(path.dirname(path.realpath(__file__))+"/condor.ini")
        storage_root = config.get('condor', 'storage')
        self.storage = path.join(storage_root, str(dev_id))
        if not path.isdir(self.storage):
            mkdir(self.storage)
        self.l = Lock()

    def image(self, img, name=''):
        self.l.acquire()
        if not name:
            name = str(uuid.uuid4())
        
        imgpath = path.join(self.storage, "%s.png" % (name))
        if path.isfile(imgpath):
            i = 1
            imgpath = path.join(self.storage, "%s-%s.png" % (name, i))
            while path.isfile(imgpath):
                i += 1
                imgpath = path.join(self.storage, "%s-%s.png" % (name, i))
        cv2.imwrite(imgpath, img)
        self.l.release()
Example #2
class ClientProcess(Process):
    def __init__(self):
        Process.__init__(self)
        self.parent, self.child = Pipe()
        self.state = Lock()
        self.start()

    def run(self):
        while 1:
            active_clients = self.child.recv()
            self.state.acquire()
            client_socket = socket.fromfd(reduction.recv_handle(self.child), socket.AF_INET, socket.SOCK_STREAM)
            self.client_process(client_socket, active_clients)
            self.state.release()
                        
    def client_process(self, sock, active_clients):
        while 1:
            data = protocol.recv(sock)
            if not data:
                return sock.close()

            encryption = TripleDESCipher(data["key"])
            if data["encryption"] == 1:
                protocol.send(sock, (active_clients, encryption.encrypt(data["message"])))
            else:
                protocol.send(sock, (active_clients, encryption.decrypt(data["message"])))

    def add_task(self):
        if self.state.acquire(False):
            self.parent.send(True)
            self.state.release()
            return True
        return False
Example #3
    def get_module(self):
        # NOTE: this Lock is local to the call, so it cannot actually
        # serialize concurrent loads; a shared lock would be needed for that.
        lock = Lock()
        try:
            lock.acquire()
            return load_module("hook_chef", self.hook_path)
        finally:
            lock.release()
Example #4
def main():

    print("Start main()")
    lock = Lock()
    # hoge_lock = Lock()
    # piyo_lock = Lock()

    hoge_pipe1, hoge_pipe2 = Pipe()
    hoge_subprocess = Process(target=hoge, args=(hoge_pipe2, lock))
    # hoge_subprocess = Process(target=hoge, args=(hoge_pipe2, hoge_lock))

    piyo_pipe1, piyo_pipe2 = Pipe()
    piyo_subprocess = Process(target=piyo, args=(piyo_pipe2, lock))
    # piyo_subprocess = Process(target=piyo, args=(piyo_pipe2, piyo_lock))

    hoge_subprocess.start()
    piyo_subprocess.start()

    while True:
        lock.acquire()  ## If this statement is enabled, the program doesn't work.
        hoge_pipe1.send("send from main() to hoge()")
        string = hoge_pipe1.recv()
        print("Time:{}, Execute main():{}".format(time.time(), string))

        # time.sleep(1)

        piyo_pipe1.send("send from main() to piyo()")
        string = piyo_pipe1.recv()
        print("Time:{}, Execute main():{}".format(time.time(), string))

        # time.sleep(1)
        lock.release()  ## If this statement is enabled, the program doesn't work.
Example #5
class DbTools:
    def __init__(self, db_name='db.sqlite'):
        self.DB_Name = db_name
        self.db = sqlite3.connect(self.DB_Name, check_same_thread=False)
        self.cur = self.db.cursor()
        self.lock = Lock()

    def write(self, q, val, do_commit=True, many=False):
        self.lock.acquire(True)
        if many:
            self.cur.executemany(q, val)
            last_row_id = None
        else:
            last_row_id = self.cur.execute(q, val).lastrowid
        if do_commit:
            self.db.commit()
        self.lock.release()
        return last_row_id

    def read(self, q, val=(), f_all=True):
        self.lock.acquire(True)
        if f_all:
            res = self.cur.execute(q).fetchall() if val == () else self.cur.execute(q, val).fetchall()
        else:
            res = self.cur.execute(q).fetchone() if val == () else self.cur.execute(q, val).fetchone()
        self.lock.release()
        return res
Example #6
class SMS_split(object):
    '''Split the sms_list for sending.'''
    def __init__(self, package_size, phone_list):
        self.phone_list = phone_list
        self.package_size = package_size
        self.package = [elem for elem in range(package_size)]
        self._lock = Lock()
           
    def __iter__(self):
        # number of SMS entries that have already been split off
        self.current_spot = 0
        return self
    
    def next(self):
        self._lock.acquire()
        try:
            if (self.current_spot >= len(self.phone_list)):
                self.current_spot = len(self.phone_list)
                raise StopIteration
            self.package = self.phone_list[self.current_spot:
                                           self.current_spot + self.package_size]
            self.current_spot += self.package_size
        finally: 
            self._lock.release()
        return self.package

    def set_package_size(self, package_size):
        self.package_size = package_size

    def get_package_size(self):
        return self.package_size

    def get_already_send_num(self):
        return self.current_spot
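
A minimal usage sketch of the iterator above (Python 2 style, matching the next() method; send_sms and the phone numbers are hypothetical):

splitter = SMS_split(2, ['111', '222', '333', '444', '555'])
for package in splitter:  # __iter__ resets current_spot; next() slices under the lock
    send_sms(package)     # hypothetical sender, receives at most package_size numbers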
Example #7
def determine_violence_make_request(violence_judgement_array):

    lock = Lock()
    determinant = 0

    try:
        while True:
            lock.acquire()
            if len(violence_judgement_array) == 5:
                for each_judgement in violence_judgement_array:
                    if each_judgement == 'violence':
                        determinant = determinant + 1
                if determinant >= 3:
                    print("post request Occur")
                    with requests.Session() as session:
                        with session.get(
                                url, params={'location':
                                             'police hospital'}) as response:
                            print("response status_code - " +
                                  str(response.status_code))
                            print("response text - " + str(response.text))
                determinant = 0
                del violence_judgement_array[:]
            lock.release()
    except KeyboardInterrupt:
        pass
Example #8
class MLock:
    def __init__(self):
        self.lock = Lock()
        self.rlock = RLock()

    def _acquire(self):    # TODO: add a check on the number of lock acquisitions
        self.lock.acquire()

    def _release(self):
        self.lock.release()

    def _rlacquire(self):
        try:
            self.rlock.acquire()
        except Exception as e:
            printlog.err(e)
        finally:
            pass

    def _rlrelease(self):
        try:
            self.rlock.release()
        except Exception as e:
            printlog.err(e)
        finally:
            pass
Example #9
class XmlBackend(Backend):
    """ XML file backend for configg """
    def __init__(self, path: str):
        super().__init__(path)
        self.lock = Lock()

    def read(self) -> Dict[str, Any]:
        root = ET.parse(self.path).getroot()
        data = {}
        for child in root:
            data[child.tag] = {}
            for child2 in child:
                data[child.tag][child2.tag] = child2.text
        return data

    def write(self, data: Dict[str, Any]) -> None:
        sections = []
        for section, section_data in data.items():
            xml = "<{}>".format(section)
            for key, value in section_data.items():
                xml += "<{key}>{value}</{key}>".format(key=key, value=value)
            xml += "</{}>".format(section)
            sections.append(xml)
        xml_data = "<config>"
        for xml in sections:
            xml_data += xml
        xml_data += "</config>"
        # block until the lock is free; a non-blocking acquire(False) could
        # fail silently and let two writers interleave
        self.lock.acquire()
        with open(self.path, "w+") as fp:
            fp.truncate()
            fp.seek(0)
            fp.write(xml_data)
        self.lock.release()
Example #10
class CacheLock:
    def __init__(self):
        self._cache_lock = Lock()

    def acquire_lock(self):
        g.logger.info("Acquiring Lock... {}".format(g.web_protocol))
        self._cache_lock.acquire()
        g.logger.info("Opening Cache")
        if 'AWS_ACCESS_KEY_ID' not in session:
            g.logger.info("Can't open cache files, no AWS key available")
            self._cache_lock.release()  # don't keep holding the lock on the error path
            return 404

        log_entries_file = "logEntriesCache.{}.data".format(
            session['AWS_ACCESS_KEY_ID'])
        log_streams_file = "logStreamsCache.{}.data".format(
            session['AWS_ACCESS_KEY_ID'])

        g.logEntriesCache = shelve.open(log_entries_file, writeback=True)
        g.logStreamsCache = shelve.open(log_streams_file, writeback=True)
        g.logger.info("{}".format(g.logEntriesCache))
        g.logger.info("{}".format(g.logStreamsCache))
        g.logger.info("Acquired {}".format(g.web_protocol))

    def release_lock(self):
        g.logger.info("Release Lock...{}".format(g.web_protocol))
        g.logEntriesCache.close()
        g.logStreamsCache.close()
        self._cache_lock.release()
        g.logger.debug("Released {}".format(g.web_protocol))
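
A hedged usage sketch (a hypothetical Flask view; the CacheLock instance, flask's g, and the 404 convention come from the example above):

cache_lock = CacheLock()

def list_log_entries():
    if cache_lock.acquire_lock() == 404:
        return "no AWS credentials in session", 404
    try:
        entries = dict(g.logEntriesCache)  # the shelve caches are open while the lock is held
    finally:
        cache_lock.release_lock()
    return entries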
Example #11
def launch_workers(outfile, start_index, end_index, score_flag, force, verbose):
	BASE_URL = "http://www.ign.com/games/all-ajax?startIndex="
	
	
	# Synchronization Tools
	num_workers = Semaphore(MAX_NUM_PROCESSES)
	outfile_lock = Lock()
	urlopen_lock = Lock()
	stderr_lock = Lock()
	print_lock = Lock()
	
	# Write the categories
	if outfile is not None:
		outfile.write("title,link,platform,publisher,score,date\n")

	# Launch the workers
	processes = []
	curr_index = start_index
	while curr_index <= end_index:
		curr_url = BASE_URL + str(curr_index)
		worker = Process(target=open_url_and_parse,
			args=(outfile, curr_url, score_flag, force, verbose,
				outfile_lock, urlopen_lock, stderr_lock, print_lock,
				num_workers))
		processes.append(worker)
		if verbose:
			print_lock.acquire()
			print "Launching worker for url: %s" % curr_url
			print_lock.release()
		num_workers.acquire()
		worker.start()
		curr_index += INDEX_INCREMENT
	for p in processes:
		p.join()
Example #12
class MyQueue():
    def __init__(self, name, maxsize=30):
        self.queue = Queue()
        self.name = name
        self.maxsize = maxsize
        self.lock = Lock()
        self.put_lock = Lock()

    def qput(self, item):
        # print("queue", self.name, self.queue.qsize())
        self.put_lock.acquire()
        try:
            # drop the oldest entries to make room, then enqueue the new item
            while self.queue.qsize() >= self.maxsize:
                self.queue.get()
            self.queue.put(item)
        except Exception:
            pass
        self.put_lock.release()

    def qget(self, timeout=None):
        # print("queue", self.name, self.queue.qsize())
        self.lock.acquire()
        try:
            item = self.queue.get(timeout=timeout)
            while self.queue.qsize() > self.maxsize:
                item = self.queue.get()
        except Exception:
            item = None
        try:
            self.lock.release()
        except Exception:
            pass
        return item
Example #13
def worker(q: JoinableQueue, i: int, output, print_lock: Lock,
           FLAGS: Tuple[Any]) -> None:
    """Retrieves files from the queue and annotates them."""
    if FLAGS.in_memory:
        with open(FLAGS.alias_db, 'rb') as f:
            alias_db = pickle.load(f)
        with open(FLAGS.relation_db, 'rb') as f:
            relation_db = pickle.load(f)
        with open(FLAGS.wiki_db, 'rb') as f:
            wiki_db = pickle.load(f)
    else:
        alias_db = SqliteDict(FLAGS.alias_db, flag='r')
        relation_db = SqliteDict(FLAGS.relation_db, flag='r')
        wiki_db = SqliteDict(FLAGS.wiki_db, flag='r')

    annotator = Annotator(alias_db,
                          relation_db,
                          wiki_db,
                          distance_cutoff=FLAGS.cutoff,
                          match_aliases=FLAGS.match_aliases,
                          unmatch=FLAGS.unmatch,
                          prune_clusters=FLAGS.prune_clusters)
    while True:
        logger.debug('Worker %i taking a task from the queue', i)
        json_data = q.get()
        if json_data is None:
            break
        annotation = annotator.annotate(json_data)
        print_lock.acquire()
        output.write(json.dumps(annotation) + '\n')
        print_lock.release()
        q.task_done()
        logger.debug('Worker %i finished a task', i)
Example #14
class Logger():
    def __init__(self, filename, filemode='w', *arg, **kwargs):
        from multiprocessing import Lock
        from datetime import datetime
        self.filename = filename
        self.datetime = datetime
        self.on = True
        self.screenon = False
        self.lock = Lock()
        self.lock.acquire(True)
        f = open(self.filename, filemode)
        f.close()
        self.lock.release()
        self.write('Start logging.')

    def write(self, content):
        self.lock.acquire(True)
        if self.on:
            f = open(self.filename, 'a')
            if isinstance(content, str):
                f.write(self.datetime.now().strftime('%Y-%m-%d %H:%M:%S  ') +
                        content.strip() + '\n')
                if self.screenon:
                    print(self.datetime.now().strftime('%Y-%m-%d %H:%M:%S  ') +
                          content.strip()).decode('utf-8')
            else:
                for i in content:
                    f.write(
                        self.datetime.now().strftime('%Y-%m-%d %H:%M:%S  ') +
                        i.strip() + '\n')
                    if self.screenon:
                        print(self.datetime.now().strftime(
                            '%Y-%m-%d %H:%M:%S  ') + i.strip()).decode('utf-8')
            f.close()
        self.lock.release()
Example #15
class WSRunner(Process):
	def __init__(self, url, d):
		Process.__init__(self)
		self.url = url
		self.d = d
		self.lock = Lock()
		self.lock.acquire()
	@asyncio.coroutine
	def ws(self):
		websocket = yield from websockets.connect(self.url)
		update_count = 0
		self.lock.release()
		while update_count < num_loc_updates * num_threads:
			update = yield from websocket.recv()
			update_count += 1
			j = json.loads(update)
			lat = float(j['lat'])
			lng = float(j['lng'])
			player_id = j['player_id']
			self.d[player_id] = (lat, lng)
		websocket.close()

	def run(self):
		loop = asyncio.new_event_loop()
		asyncio.set_event_loop(loop)		
		loop.run_until_complete(self.ws())
Example #16
def fetch_to_db(process_count=1, from_date=datetime(2011, 1, 1)):
    setup_tables()

    unit_handler = UnitHandler()
    s = get_virk_search(from_date)
    try:
        tmp_file = tempfile.NamedTemporaryFile(delete=False)
        m = IOQueueManager()
        m.start()
        queue = m.IOQueue(tmp_file.name)
        queue_lock = Lock()
        consumer_partial = functools.partial(consumer_insert,
                                             queue_lock=queue_lock,
                                             unit_handler=unit_handler)

        processes = [Process(target=consumer_partial,
                             args=(queue,),
                             daemon=True) for _ in range(process_count)]
        for p in processes:
            p.start()
        engine.dispose()  # for multiprocessing.
        producer_scan(s, queue, queue_lock=queue_lock)

        queue_lock.acquire()
        for end in range(process_count):
            queue.put('DONE')
        queue_lock.release()

        for p in processes:
            p.join()

    finally:
        os.remove(tmp_file.name)
    return
Example #17
def say_hello(name='world', **kwargs):
    """@todo: Docstring for say_hello
    """
    l = Lock()
    l.acquire()
    print "Hello, %s" % name, kwargs
    l.release()
Example #18
class ReoderBuffer(object):
    def __init__(self, out_buffer, size=10):
        self.out_buffer = out_buffer
        self.size = size
        self._array = Manager().list(range(self.size))
        self._lock = Lock()
        self._min = 0
        self._max = self.size
        self._count = 0
        self.update_control()

    def update_control(self):
        self._control = dict()
        for e, i in enumerate(xrange(self._min, self._max)):
            self._control.update({i: e})

    def put(self, block_num, data):
        self._lock.acquire()
        if block_num not in self._control.keys():
            self._lock.release()
            raise IndexError
        self._array[self._control[block_num]] = data
        self._count += 1
        if self._count == self.size:
            self.sync()
        self._lock.release()

    def sync(self):
        for i in xrange(self._count):
            data = self._array[i]
            self.out_buffer.put(data)
        self._min += self._count
        self._max += self._count
        self._count = 0
        self.update_control()
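
A minimal usage sketch (Python 2, given the xrange above): blocks may arrive out of order, but once size of them have been put, sync() flushes them to out_buffer in index order. The Queue used as out_buffer is illustrative; anything with a put() method works:

from multiprocessing import Queue

out = Queue()
rb = ReoderBuffer(out, size=3)
for num in (2, 0, 1):
    rb.put(num, "block-%d" % num)  # the third put() triggers sync()
print(out.get())  # "block-0", then "block-1" and "block-2" on later gets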
Example #19
class Database:
    def __init__(self, filename):
        self.connection = sqlite3.connect(filename)
        self.connection.text_factory = lambda x: unicode(
            x, UTF8, 'ignore')  # ignore malformed unicode
        self.cursor = self.connection.cursor()
        self.cursor.executescript(SCHEMA_SQL)
        self.now = time.strftime('%Y%m%d%H%M%S', time.gmtime())
        self.lock = Lock()

    def commit(self):
        self.connection.commit()

    def close(self):
        self.commit()
        self.connection.close()

    def summary(self, url, limit=DETECTOR_HISTORY):
        params = {'url': url, 'limit': limit}
        rows = self.cursor.execute(SUMMARY_SQL, params)
        return rows.fetchall()

    def insert(self, rowhash):
        rowhash['run'] = self.now
        self.lock.acquire()  # BEGIN PRIVILEGED CODE
        self.cursor.execute(INSERT_SQL, rowhash)
        self.commit()
        self.lock.release()  # END PRIVILEGED CODE
Example #21
def setup():
    """Performs basic setup for the daemon and ADC"""
    global exitLock
    logger.debug("Setting up ADC")
    try:
        ADC.setup()
    except RuntimeError as e:
        logger.critical(
            "Attempting to start the BBB GPIO library failed.  This can be "
            "due to a number of things, including:"
        )
        logger.critical(
            "- Too new a kernel.  Adafruit BBIO runs on 3.8.13.  Downgrades "
            "to the version this is tested with can be done easily via:")
        logger.critical(
            "  apt-get install linux-{image,headers}-3.8.13-bone79")
        logger.critical("- Not running on a BBB")
        logger.critical("- Conflicting capes")
        logger.critical("Raw exception: %s", str(e))
        return
    tstat = connect()  # TODO: retries
    logger.debug("Attaching signal handlers")
    signal.signal(signal.SIGINT, handle_exit)
    signal.signal(signal.SIGTERM, handle_exit)
    logger.debug("Building Lock for singal interrupts")
    exitLock = Lock()
    exitLock.acquire()
    logger.debug("Running main loop")
    return tstat
Example #22
    def add_mention_db(tweet):

        print(tweet)

        lock = Lock()
        db = Collector.mention_tweet_db()
        c = db[0]
        conn = db[1]

        lock.acquire()

        c.execute(
            '''INSERT INTO mentioned
                             (tweet,
                              username,
                              tweet_date,
                              tweet_id,
                              tweet_source,
                              user_id,
                              reply_id)
                     VALUES(?,?,?,?,?,?,?)''', [
                tweet[0], tweet[1], tweet[2], tweet[3], tweet[4], tweet[5],
                tweet[6]
            ])

        conn.commit()
        lock.release()
Example #23
class Atomic(object):
    def __init__(self):
        self._lock = Lock()
        self._in_transaction = False
        self._transaction_thread = None

    def __enter__(self):
        self.begin_transaction()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.end_transaction()
        return False  # returning a truthy value here would suppress exceptions

    def in_transaction(self):
        return self._in_transaction

    def begin_transaction(self):
        self._in_transaction = True
        self._transaction_thread = threading.current_thread()
        self._lock.acquire()

    def end_transaction(self):
        self._in_transaction = False
        self._transaction_thread = None
        self._lock.release()
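
A brief usage sketch of the context-manager protocol above (the mutated state is hypothetical):

atomic = Atomic()

with atomic:                       # __enter__ calls begin_transaction()
    assert atomic.in_transaction()
    # ... mutate shared state here ...
# __exit__ has called end_transaction() and released the lock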
Example #24
    def infinite_gen(self, desired_num=np.iinfo(np.int64).max):
        manager = Manager()
        my_workers = []
        gen_buffer = manager.list()
        worker_lock = Lock()
        work_assigned = 0
        for i in range(self.num_worker - 1):
            job_num = desired_num // self.num_worker
            work_assigned += job_num
            my_workers.append(
                general_worker(self.callable_constructor,
                               self.constructor_params, job_num))
        # last worker
        my_workers.append(
            general_worker(self.callable_constructor, self.constructor_params,
                           desired_num - work_assigned))
        processes = []
        for i in range(self.num_worker):
            p = Process(target=my_workers[i].start,
                        args=(worker_lock, gen_buffer))
            processes.append(p)
            p.start()
        returned_sample_cnt = 0
        while returned_sample_cnt < desired_num:
            while not gen_buffer:
                time.sleep(0.1)
            worker_lock.acquire()
            yield gen_buffer.pop(0)
            worker_lock.release()
            returned_sample_cnt += 1
        # for i in range(desired_num):
        #     yield self.callable_constructor(*self.constructor_params)
        for p in processes:
            p.join()
Example #25
class _DiscoveryWorkbook:
    def __init__(self, name):
        self.l = Lock()
        self.fname = name
        self.init = False

    def new_sheet(self, sname, rows):
        self.l.acquire()
        if self.init:
            w = load_workbook(self.fname)
        else:
            w = Workbook()
            w.remove_sheet(w.worksheets[0])
        ws = w.create_sheet(sname)
        for row in rows:
            row[0] = '=+"' + str(row[0]).replace('"', '""') + '"'
            ws.append(row)

        #simple formatting
        if self.init:
            ws.column_dimensions['A'].width = 89
            for i in range(1, ws.max_row + 1):
                ws.cell(row=i, column=1).font = Font(name='Lucida Console')
        else:
            ws.column_dimensions['A'].width = 12
            ws.column_dimensions['B'].width = 30
            for i in range(1, ws.max_row + 1):
                for j in range(1, ws.max_column + 1):
                    ws.cell(row=i, column=j).font = Font(name='Lucida Console')

        w.save(self.fname)
        self.init = True
        self.l.release()
Example #26
def batch_copy(file_list):
    count = 0
    count_lock = Lock()
    # alive indicates if any process is alive
    alive = False
    proc_list = [None] * config["MAX_PROCESSES"]
    while count < len(file_list) or alive:
        alive = False
        for i in range(config["MAX_PROCESSES"]):
            try:
                # proc_list[i] may still be None here; join() below then raises
                # AttributeError, and the except branch starts the process instead
                if proc_list[i] and proc_list[i].is_alive():
                    alive = True
                    continue
                else:
                    proc_list[i].join(0)
                    count_lock.acquire()
                    if count < len(file_list):
                        proc_list[i] = Process(target=copy,
                                               args=(file_list[count],))
                        proc_list[i].start()
                        alive = True
                        count += 1
                    count_lock.release()
            except Exception:
                count_lock.acquire()
                if count < len(file_list):
                    proc_list[i] = Process(target=copy,
                                           args=(file_list[count],))
                    alive = True
                    count += 1
                    proc_list[i].start()
                count_lock.release()
    print("Batch copy complete")
Example #27
def sensor_read_process(raw_shm: shared_memory.SharedMemory,
                        proc_shm: shared_memory.SharedMemory, r_lock: Lock,
                        p_lock: Lock):
    script_dir = Path(__file__).parent
    device_file = script_dir.parent.joinpath("um7_A500CNP8.json")
    assert device_file.exists(
    ), f"Device file with connection info: {device_file} does not exist!"
    um7 = UM7Serial(device=str(device_file))

    for packet in um7.recv_broadcast(flush_buffer_on_start=False):
        packet_bytes = bytes(json.dumps(packet.__dict__), encoding='utf-8')
        assert len(
            packet_bytes
        ) <= BUFFER_SIZE, f"Packet cannot be serialized, increase `BUFFER` size at least up to {len(packet_bytes)}"
        if isinstance(packet, UM7AllRawPacket):
            r_lock.acquire()
            raw_shm.buf[:] = b' ' * BUFFER_SIZE
            raw_shm.buf[:len(packet_bytes)] = packet_bytes
            r_lock.release()
            # logging.warning(f"[SR][RAW] -> {packet}")
        elif isinstance(packet, UM7AllProcPacket):
            p_lock.acquire()
            proc_shm.buf[:] = b' ' * BUFFER_SIZE
            proc_shm.buf[:len(packet_bytes)] = packet_bytes
            p_lock.release()
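
A hedged reader-side sketch for the shared-memory layout above (BUFFER_SIZE and the paired lock are the ones used by the writer; the function name is illustrative):

import json
from multiprocessing import shared_memory

def read_packet(shm: shared_memory.SharedMemory, lock) -> dict:
    lock.acquire()
    try:
        payload = bytes(shm.buf[:BUFFER_SIZE]).rstrip(b' ')  # drop the space padding
    finally:
        lock.release()
    return json.loads(payload)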
Example #28
class IPClusterPool(object):
    def __init__(self, workers):
        self.queue_lock = Lock()
        self.queue = []
        self.workers = {w: None for w in workers}

        self.terminated = False
        self.closed = False
        threading.Thread(target=self._manager).start()


    def terminate(self):
        self.terminated = True

    def join(self):
        while self.terminated != True:
            pass

    def close(self):
        self.closed = True

    def apply_async(self, job, *job_args):
        assert isinstance(job, str), "Job has to be string"
        try:
            self.queue_lock.acquire()
            self.queue.append(IPClusterTask(job, job_args))
            return self.queue[-1]
        finally:
            self.queue_lock.release()
Example #29
    def producer(self, pres: list, q_facts: mp.Queue, pre_to_offset, lock: mp.Lock):
        print("Producer started.")

        num_pres = len(pres)
        max_threads = 100
        num_finished = 0
        q_threads = queue.Queue()
        for j in range(min(max_threads, num_pres)):
            p = pres[j]
            lock.acquire()
            offset = pre_to_offset[p]
            lock.release()
            self.add_thread(
                q_threads, q_facts, ("?x", p, "?z"), 1, offset
            )
            num_finished += 1

        while not q_threads.empty():
            q_threads.get().join()
            if num_finished < num_pres:
                p = pres[num_finished]
                lock.acquire()
                offset = pre_to_offset[p]
                lock.release()
                self.add_thread(
                    q_threads, q_facts, ("?x", p, "?z"), 1, offset
                )
                num_finished += 1

        # all requests have been posted and returned.
        q_facts.put(None)
        print("Producer stopped.")
Example #30
class FeExtractor(nn.Module):
    def __init__(self, backbone_cfg, distance, delta_dist, device=None):
        """ Be aware that the underlying embedding space that this feature extractor assumes imploys the cosine distance"""
        super(FeExtractor, self).__init__()
        self.embed_model = UNet2D(**backbone_cfg)

        self.device = device
        self.distance = distance
        self.delta_dist = delta_dist
        self.fwd_mtx = Lock()

    def forward(self, raw):
        self.fwd_mtx.acquire()
        try:
            ret = self.embed_model(raw.unsqueeze(2)).squeeze(2)
        finally:
            self.fwd_mtx.release()
        if isinstance(self.distance, CosineDistance):
            ret = ret / torch.clamp(torch.norm(ret, dim=1, keepdim=True), min=1e-10)
        return ret

    def get_mean_sp_embedding_chunked(self, embeddings, supix, chunks=1):
        """
        This average reduction scheme implements a weighted averaging where the weight distribution is given by the
        normed similarity of each sample to the mean in each superpixel
        """
        dev = embeddings.device
        mask = torch.zeros((int(supix.max()) + 1,) + supix.size(), device=dev).scatter_(0, supix[None], 1)
        slc_sz = mask.shape[0] // chunks
        slices = [slice(slc_sz*step, slc_sz*(step+1), 1) for step in range(chunks)]
        if mask.shape[0] != chunks * slc_sz:
            slices.append(slice(slc_sz*chunks, mask.shape[0], 1))
        sp_embeddings = [self.get_mean_sp_embedding(embeddings, mask[slc]) for slc in slices]

        return torch.cat(sp_embeddings, dim=0)

    def get_mean_sp_embedding(self, embeddings, mask):
        masses = mask.flatten(1).sum(-1)
        masked_embeddings = embeddings[:, None] * mask[None]
        means = masked_embeddings.flatten(2).sum(-1) / masses[None]
        # if isinstance(self.distance, CosineDistance):
        #     means = means / torch.clamp(torch.norm(means, dim=0, keepdim=True), min=1e-10)  # normalize since we use cosine distance
        #     probs = self.distance.similarity(masked_embeddings, means[..., None, None], dim=0, kd=False)
        #     probs = probs * mask
        #     probs = probs / probs.flatten(1).sum(-1)[..., None, None]  # get the probabilities for the embeddings distribution
        #     sp_embeddings = (masked_embeddings * probs[None]).flatten(2).sum(-1)
        #     return (sp_embeddings / torch.clamp(torch.norm(sp_embeddings, dim=0, keepdim=True), min=1e-10)).T
        return means.T

    def get_mean_sp_embedding_sparse(self, embeddings, supix):
        """
        :param embeddings: should have shape = NCDHW
        :param supix: should be consecutive integers and of shape = NDHW
        :return: the mean embedding vector over each label region in supix
        """
        feat = embeddings.permute(1, 0, 2, 3, 4).flatten(1)
        lbl = supix.flatten()
        n_lbl = torch.unique(supix).size(0)
        sp_embeddings = scatter_mean(feat, lbl, dim=1, dim_size=n_lbl)
        return sp_embeddings
Example #31
def simulate_session_ch(channel: tuple, nodes, edges, lock: mp.Lock) -> None:
    '''This function simulates session for current channel.'''
    lock.acquire()

    try:
        # clean auxiliary information
        for actor in channel:
            params = nodes[actor]
            params['result_list'] = list()
            nodes[actor] = params

        # simulate dialogue
        if not Bernoulli_trial(edges[channel]['a']):
            # channel is not active
            return

        # channel is active
        # determine actors participating as Alice and Bob in the dialogue
        alice, bob = channel

        # simulated dialog
        wA, wB = simulate_dialog(alice, bob, nodes, edges)

        # update result for each actor
        alice_node = nodes[alice]
        alice_node['result_list'].append(wA)
        nodes[alice] = alice_node

        bob_node = nodes[bob]
        bob_node['result_list'].append(wB)  # Bob records his own result, wB
        nodes[bob] = bob_node
    finally:
        lock.release()
Example #32
def set_params_actor(n: int, nodes, lock: mp.Lock) -> None:
    '''This function sets parameters for current actor in net.'''

    lock.acquire()

    try:
        params = dict()
        # set parameters of community actors
        params['rho'] = 20
        if n == 0:
            params['choice'] = 0
        else:
            params['choice'] = DISCLAIMER

        # specify initial preference densities of community actors
        if n == 0:
            params['w'] = np.array([1.0, 0.0], float)
        elif n == 1:
            params['w'] = np.array([0.0, 1.0], float)
        else:
            params['w'] = uncertainty(nvars)

        nodes[n] = params
    finally:
        lock.release()
Example #33
def thread(pid: int, q: mp.Queue, loggers: dict, lock: mp.Lock):

    switch_frequency = 100
    epsilon = 0.1
    alpha = 1.0

    while not q.empty():
        try:
            # non-blocking get, so the queue.Empty handler below can actually fire
            seed, num_atrs = q.get(block=False)

            set_seed(seed)

            model = create_model(num_atrs)
            logger = ContextLogger(track_epochs_internally=True)
            callbacks = [logger]

            deltas = {}
            accuracy = wisconsin_card_sort(model,
                                           num_episodes,
                                           switch_frequency,
                                           epsilon,
                                           alpha,
                                           auto_switch=True,
                                           deltas=deltas,
                                           initial_shuffle=True,
                                           shuffle=True,
                                           callbacks=callbacks,
                                           verbose=0)

            lock.acquire()
            loggers[(seed, num_atrs)] = [logger.to_dict(), deltas, accuracy]
            print(f"Runs remaining: {q.qsize()}", end="\r")
            lock.release()
        except queue.Empty:
            return
Example #35
def register_nodes():
    if request is None:
        return "Error: Please supply a valid Node", 400
    data = request.json
    if data is None:
        return "Error: Please supply a valid Node", 400

    if (BootstrapDict['nodeCount'] >= BootstrapDict['N']):
        return "Error: System is full", 405
    lock = Lock()
    lock.acquire()
    BootstrapDict['nodeCount'] += 1
    BootstrapDictInstance = BootstrapDict.copy()
    lock.release()

    node.ring.append({
        'id': BootstrapDictInstance['nodeCount'] - 1,
        'ip': data['ip'],
        'port': data['port'],
        'public_key': data['public_key'],
        'balance': 0
    })
    if (BootstrapDict['nodeCount'] == BootstrapDict['N']):
        start_new_thread(FirstBroadcast, (node.ring, ))

    blockchainjson = jsonpickle.encode(blockchain)
    start_new_thread(MakeFirstTransaction, (
        data['public_key'],
        data['ip'],
        data['port'],
    ))
    return jsonify({
        'id': BootstrapDictInstance['nodeCount'] - 1,
        'bootstrap_public_key': makeRSAjsonSendable(BootstrapDictInstance['bootstrap_public_key']),
        'blockchain': blockchainjson,
        'block_capacity': BLOCK_CAPACITY,
        'start_ring': {'id': 0, 'ip': '192.168.1.5', 'port': '5000',
                       'public_key': makeRSAjsonSendable(BootstrapDictInstance['bootstrap_public_key']),
                       'balance': 0},
        'current_block': jsonpickle.encode(node.current_block),
        'NBCs': node.NBCs,
        'current_NBCs': node.current_NBCs,
    })
Example #36
class TestClass():
    def __init__(self):
        self.count = 0
        self.lock = Lock()
        self.child = None

    def increment(self, l, i):
        self.lock.acquire()
        l[i] = i
        node = self
        node.count = node.count + 1
        while (node.child):
            node = node.child
            node.count = node.count + 1
        node.child = TestClass()
        self.lock.release()

    def get(self):
        return self.count

    def getSelf(self):
        return self

    def getChild(self):
        return self.child.child.child.get()
Example #37
    def next_id(self):
        # NOTE: this Lock is created per call; for real mutual exclusion it
        # would have to be shared between the competing processes.
        mutex = Lock()
        try:
            # after acquiring the mutex, the next process can only enter
            # once the lock has been released
            mutex.acquire()
            timestamp = int(self.gen_time())
            if timestamp < self.last_timestamp:
                tmp = self.last_timestamp - timestamp
                raise Exception(
                    "Clock moved backwards.  Refusing to generate id for %d milliseconds"
                    % tmp)
            if self.last_timestamp == timestamp:
                self.sequence = (self.sequence + 1) & self.sequence_mask
                if not self.sequence:
                    # sequence overflowed within this millisecond; wait for the next one
                    timestamp = self.til_next_millis(self.last_timestamp)
            else:
                self.sequence = 0
            self.last_timestamp = timestamp
            return ((timestamp - self.twepoch) << self.timestamp_left_shift) | \
                   (self.data_center_id << self.data_center_id_shift) | \
                   (self.worker_id << self.worker_id_shift) | self.sequence
        except Exception as e:
            print(e)
        finally:
            # the mutex must always be released
            mutex.release()
Example #38
def traverse():
    """以默认配置为基准,每次只调一个参数,m个参数,每个参数n个选项,总共运行m*(n-1)次"""
    commands = []
    nvmlInit()
    deviceCount = nvmlDeviceGetCount()
    gpu_usage_list = Manager().list([0 for i in range(deviceCount)])
    lock = Lock()
    default_setting = {k: v[0] for k, v in options.items()}
    logger.info(F'default setting: {default_setting}')
    for feature in options:
        for i, option in enumerate(options[feature]):
            if i and default_setting[feature] != option:  # skip the default setting
                setting = default_setting
                setting[feature] = option
                command = construct_command(setting)
                commands.append(command)
    for c in commands:
        logger.info(F'commands: {c}')

    while commands:
        lock.acquire()
        gpu_id = get_free_gpu_id_and_update(gpu_usage_list)
        if gpu_id != -1:
            setting = {}
            command = commands.pop()
            setting_str = re.sub(r"[\'\{\}\s]", '', str(setting))
            setting_str = setting_str.replace(',', '/').replace(':', '.')
            P = Process(target=pao, args=(gpu_id, command, setting_str))
            P.start()
        lock.release()
        sleep(1)
Example #39
def setup_prometheus_multiproc_dir(lock: multiprocessing.Lock = None):
    """
    Set up prometheus_multiproc_dir for prometheus to work in multiprocess mode,
    which is required when working with Gunicorn server

    Warning: for this to work, prometheus_client library must be imported after
    this function is called. It relies on the os.environ['prometheus_multiproc_dir']
    to properly setup for multiprocess mode
    """
    if lock is not None:
        lock.acquire()

    try:
        prometheus_multiproc_dir = config('instrument').get(
            'prometheus_multiproc_dir')
        logger.debug("Setting up prometheus_multiproc_dir: %s",
                     prometheus_multiproc_dir)
        # Wipe prometheus metrics directory between runs
        # https://github.com/prometheus/client_python#multiprocess-mode-gunicorn
        # Ignore errors so it does not fail when directory does not exist
        shutil.rmtree(prometheus_multiproc_dir, ignore_errors=True)
        os.makedirs(prometheus_multiproc_dir, exist_ok=True)

        os.environ['prometheus_multiproc_dir'] = prometheus_multiproc_dir
    finally:
        if lock is not None:
            lock.release()
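
A hedged sketch of the ordering constraint the docstring describes (the shared lock and the import site are illustrative, not part of the original module):

from multiprocessing import Lock

setup_lock = Lock()
setup_prometheus_multiproc_dir(setup_lock)

# Only import the client afterwards: it reads
# os.environ['prometheus_multiproc_dir'] at import time.
import prometheus_client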
Example #40
class ImageLogger(object):

    def __init__(self, root):
        self.storage = root
        if not path.isdir(self.storage):
            mkdir(self.storage)
        self.l = Lock()
        self.fmt = 'jpg'

    def image(self, dev_id, img, ts=None):
        self.l.acquire()
        if ts:
            name = datetime.datetime.fromtimestamp(ts).replace(microsecond=0).strftime('%Y%m%d%H%M%S')
        else:
            name = str(uuid.uuid4())
        image_path = path.join(self.storage, str(dev_id))
        if not path.isdir(image_path):
            mkdir(image_path)

        imgpath = path.join(image_path, "%s.%s" % (name, self.fmt))
        i = 0
        while path.isfile(imgpath):
            imgpath = path.join(image_path, "%s-%s.%s" % (name, i, self.fmt))
            i += 1
        cv2.imwrite(imgpath, img)
        self.l.release()
Example #41
class ObjectManager(object):
    """A thread management object for single object threads"""

    def __init__(self, input_Q=None, output_Q=None, timeout=10, locking=False, lock=None, **kwargs):
        super(ObjectManager, self).__init__(**kwargs)
        self.input = Queue() if input_Q is None else input_Q
        self.output = Queue() if output_Q is None else output_Q
        self._timeout = timeout
        self._locking = locking
        if lock is None:
            self._lock = Lock()
        else:
            self._lock = lock
        self.hdr = {}
        self.hdr["pid"] = current_process().pid
        self.hdr["STATUS"] = self.NORMAL

    def __getattr__(self, attr):
        """Call a method on the underlying threaded object"""

        def method(*args, **kwargs):
            """A threaded method"""
            if self._locking:
                self._lock.acquire()
            self.input.put((self.hdr, attr, args, kwargs), timeout=self._timeout)

        return method

    ERROR = "ERROR"
    NORMAL = "NORMAL"

    @property
    def duplicator(self):
        """Arguments required to duplicate this manager"""
        return {
            "input_Q": self.input,
            "output_Q": self.output,
            "timeout": self._timeout,
            "locking": self._locking,
            "lock": self._lock,
        }

    def retrieve(self, inputs=False, timeout=None):
        """Retrieve a return value off the top of the output queue"""
        timeout = self._timeout if timeout is None else timeout
        try:
            hdr, func, args, kwargs, rvalue = self.output.get(timeout=timeout)
        except (QFull, QEmpty):
            raise ThreadStateError(code=2 ** 3, msg="Subthread Ended")
        if self._locking:
            self._lock.release()
        if hdr["STATUS"] == self.ERROR:
            self.clear()
            raise rvalue
        if inputs:
            return func, args, kwargs, rvalue, hdr
        else:
            return rvalue
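
A short sketch of how the __getattr__ proxy above is meant to be driven; the worker that services the input queue and fills the output queue lives elsewhere, and the method name is illustrative:

mgr = ObjectManager(locking=True)
mgr.compute(21)          # __getattr__ builds a method that enqueues (hdr, "compute", (21,), {})
result = mgr.retrieve()  # blocks (up to the 10 s default timeout) for the worker's return value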
Example #42
    def run(self):
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        l = Lock()
        l.acquire()
        pd = pcapy.open_live(self.iface_name, ethernet.ETHERNET_MTU, 0, 100)
        pcap_filter = "ether proto %s" % hex(frames.WiwoFrame.ethertype) \
                      + " and (ether[14:1] = 0x07 or ether[14:1] = 0x08)"
        pd.setfilter(pcap_filter)
        l.release()
        pd.loop(-1, self.frame_handler)
Example #43
    class __generator:
        def __init__(self):
            self.lock = Lock()
            self.lastId = 0

        def getId(self):
            self.lock.acquire()
            self.lastId += 1
            new_id = self.lastId  # read while still holding the lock
            self.lock.release()
            return new_id
Example #44
    def initialize_proposer(self):
        """
            Initializes a proposer process that acts as a proposer Paxos member
            - creates listening socket for client connections
            - initializes connections to other server connections
            - starts main loop for proposer which reads proposal requests off
               a queue of requests
            - server_list is a list of pairs (host, port)
        """
        # message types that need duplicate handling
        proposer_msg_types = [
            MESSAGE_TYPE.PREPARE_ACK,
            MESSAGE_TYPE.PREPARE_NACK,
            MESSAGE_TYPE.ACCEPT_ACK,
        ]
        msg_history = set()

        def if_dup(msg, msg_history):
            # handle duplication
            if msg.msg_type in proposer_msg_types:
                msg_signature = (
                    msg.msg_type,
                    msg.value,
                    msg.proposal,
                    msg.r_proposal,
                    msg.client_id,
                    msg.instance,
                    msg.origin_id)
                if msg_signature in msg_history:
                    # dup, pass
                    # print "dup msg received by proposer!"
                    return True
                else:
                    msg_history.add(msg_signature)
                    return False

        # counter for proposer number
        proposer_num = self.server_id

        # log file
        write_lock = Lock()
        write_lock.acquire()
        logfile = open("server" + str(self.server_id) + ".txt", "w+")
        write_lock.release()

        def send_to_acceptors(msg, server_connections):
            assert isinstance(msg, message.message)
            # send the proposal to acceptors
            for s_socket in server_connections:
                try:
                    s_socket.sendall(pickle.dumps(msg))
                except Exception, e:
                    server_connections.remove(s_socket)
                    print "{}: ERROR - {}".format(self.DEBUG_TAG, e)
                    pass
Example #45
class DataLoader(object):
    def __init__(self, params, db, nn_db):
        self.lock = Lock()
        self.db = db
        self.cur = db.length
        self.im_shape = params['im_shape']
        self.nn_shape = params['nn_shape']
        self.hist_eq = params['hist_eq']
        self.indexes = np.arange(db.length)
        self.shuffle = params['shuffle']
        self.subtract_mean = params['subtract_mean']
        if self.subtract_mean:
            self.mean_img = self.db.read_mean_img(self.im_shape)

        self.load_nn = params['load_nn']
        self.nn_query_size = params['nn_query_size']
        if self.load_nn:
            self.nn_db = nn_db
            #nn_ignore = 1 if db.db_root == nn_db.db_root else 0
            nn_ignore = 0
            self.nn = NN(nn_db, params['nn_db_size'], nn_ignore)

    def load_next_data(self):
        nid = self.get_next_id()
        jp, imgs, segs = self.db.read_instance(nid, size=self.im_shape)
        item = {'jp': jp}
        for i in xrange(len(imgs)):
            img = imgs[i]
            if self.hist_eq:
                img = correct_hist(img)
            item.update({'img_' + shape_str(self.im_shape[i]): img.transpose((2, 0, 1)),
                         'seg_' + shape_str(self.im_shape[i]): segs[i]})
        if self.load_nn:
            nn_id = self.nn.nn_ids(jp, self.nn_query_size)
            if hasattr(nn_id, '__len__'):
                nn_id = random.choice(nn_id)
            nn_jp, nn_imgs, nn_segs = self.nn_db.read_instance(nn_id, size=self.nn_shape)
            item.update({'nn_jp': nn_jp})
            for i in xrange(len(nn_imgs)):
                nn_img = nn_imgs[i]
                if self.hist_eq:
                    nn_img = correct_hist(nn_img)
                item.update({'nn_img_' + shape_str(self.nn_shape[i]): nn_img.transpose((2, 0, 1)),
                             'nn_seg_' + shape_str(self.nn_shape[i]): nn_segs[i]})
        return item

    def get_next_id(self):
        self.lock.acquire()
        if self.cur >= len(self.indexes) - 1:
            self.cur = 0
            if self.shuffle:
                random.shuffle(self.indexes)
        else:
            self.cur += 1
        self.lock.release()
        return self.indexes[self.cur]
Example #46
class KNN(object):

    def __init__(self):
        (self.train, self.valid, self.test), _ = load_data(data_path='data/mfcc_{}.npy', theano_shared=False)
        self.train_x, self.train_y = self.train
        self.test_x, self.test_y = self.test
        self.valid_x, self.valid_y = self.valid

        self.train_y = self.train_y.reshape(self.train_y.shape[0])
        self.test_y = self.test_y.reshape(self.test_y.shape[0])

        self.accurate = Value('i', 0)
        self.lck = Lock()
        # self.unique_artists()
        # self.centers = len(self.artists_map.keys())
        self.neigh = KNeighborsClassifier(weights='distance', n_jobs=-1, p=1)

    def unique_artists(self):
        self.artists_map = {}
        for artist in self.data_y:
            temp = self.artists_map.get(artist, 0)
            self.artists_map[artist] = temp + 1

    def fit_data(self):
        self.neigh.fit(self.train_x, self.train_y)

    def test_accuracy(self, st, en, thread_number, var_x, var_y):
        for indx in range(st, en):
            data_pt = var_x[indx]
            temp = self.neigh.predict(data_pt.reshape(1, -1))[0]

            if temp == var_y[indx]:
                self.lck.acquire()
                # print self.accurate.value,
                self.accurate.value += 1
                # print self.accurate.value, indx,
                # print 'Thread: {}'.format(thread_number)
                self.lck.release()

    def testing(self, data_x, data_y):
        self.accurate.value = 0
        processes = []
        ranges = range(0, data_y.shape[0], data_y.shape[0] // 10)
        for x in range(len(ranges) - 1):
            p = Process(target=self.test_accuracy, args=(ranges[x], ranges[x + 1], x, data_x, data_y))
            p.start()
            processes.append(p)

        _ = map(lambda x: x.join(), processes)

        self.accuracy_percentage = self.accurate.value * 100. / data_y.shape[0]
        print 'Accuracy: {}'.format(self.accuracy_percentage)
        print 'Accurate: {}/{}'.format(self.accurate.value, data_y.shape[0])
Example #47
class MessageThrottle(object):
    """
    Send at most one combined message per throttle_interval seconds to zoom clients
    """
    def __init__(self, configuration, clients):
        self._interval = configuration.throttle_interval
        self._lock = Lock()
        self._clients = clients
        self._message = None
        self._thread = Thread(target=self._run,
                              name='message_throttler')
        self._running = True

    def start(self):
        self._thread.start()

    def add_message(self, message):
        self._lock.acquire()
        try:
            if self._message is None:
                self._message = message
            else:
                self._message.combine(message)
        finally:
            self._lock.release()

    def _run(self):
    
        while self._running:
            self._lock.acquire()
            try:
                if self._message is not None:
                    logging.debug('Sending message: {0}'
                                  .format(self._message.to_json()))
                    for client in self._clients:
                        try:
                            client.write_message(self._message.to_json())
                        except IndexError:
                            logging.debug('Client closed when trying to send '
                                          'update.')
                            continue
                    self._message = None
            except AttributeError as e:
                logging.exception('Exception in MessageThrottle: {0}'.format(e))
            finally:
                self._lock.release()
            
            time.sleep(float(self._interval))

    def stop(self):
        if self._thread.is_alive():
            self._running = False
            self._thread.join()
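
A hedged usage sketch (configuration, clients, and the message objects with combine()/to_json() come from the surrounding application; the names are illustrative):

throttle = MessageThrottle(configuration, clients)
throttle.start()

throttle.add_message(update_a)  # becomes the pending message
throttle.add_message(update_b)  # combined into the pending message
# at most one combined message is written per throttle_interval seconds

throttle.stop()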
Example #48
class DataWriter:
    def __init__ (self, targetDir, fps=10.0):
        
        # Create target directory and pose log
        if not os.path.exists(targetDir) :
            os.mkdir (targetDir)
        self.poseFd = open(targetDir+"/pose.csv", 'w')
        self.path = targetDir
        
        self.currentImage = None
        self.currentPose = None
        self.fps = fps
        self.bridge = CvBridge()
        self.currentTimestamp = 0.0

        # Prepare multiprocessing
        self.dataEx = Lock()
        self.process = mp.Process(target=self.start)
        self.end = False
        self.process.start()
    
    def start (self):
        rate = rospy.Rate(self.fps)
        counter = 0
        
        while (self.end == False):
            if self.currentImage is not None and self.currentPose is not None :
                
                self.dataEx.acquire()
                cpImg = copy (self.currentImage)
                cpPose = copy (self.currentPose)
                self.dataEx.release()
                
                curImageName = "{0:06d}.jpg".format (counter)
                
                # write the copy taken under the lock, not the live buffer
                cv2.imwrite (self.path + "/"+curImageName, cpImg)
                poseStr = "{:.6f} {} {} {} {} {} {} {}".format(
                    rospy.Time.now().to_sec(),
                    cpPose.position.x,
                    cpPose.position.y,
                    cpPose.position.z,
                    cpPose.orientation.x,
                    cpPose.orientation.y,
                    cpPose.orientation.z,
                    cpPose.orientation.w)
                self.poseFd.write(poseStr+"\n") 
                
                counter += 1
            rate.sleep()
    
    def close (self):
        self.end = True
        self.poseFd.close()
Example #49
def multiprocess_index(file_or_urls,shard_size=10000):
    lock = Lock()
    t1 = time.time()
    total_recs = 0
    for file_ref in file_or_urls:
        reader = pymarc.MARCReader(open(file_ref,'rb'),utf8_handling="xmlcharrefreplace")
#        error_recs = open('%s-bad.mrc' % file_ref,'wb')
        print("Starting multiprocess index for %s, sharding by %s" % (file_ref,
                                                                      shard_size))
        count = 1
        marc_recs = []
        while 1:
            try:
                rec = next(reader)
            except ValueError as error:
                lock.acquire()
                error_msg = "%s for record %s in %s" % ("ValueError: {0}".format(error),
                                                        count,
                                                        file_ref)
                print(error_msg)
                logging.error(error_msg)
                lock.release()
                count += 1
                continue
            except StopIteration:
                break
            if not count%shard_size:
                marc_recs.append(rec)
                shard_process = Process(target=index_shard,args=(marc_recs,))
                if shard_process is not None:
                    shard_process.start()
                    shard_process.join()
                else:
                    lock.acquire()
                    error_msg = "Unable to process %s total_recs=%s" % (file_ref,total_recs)
                    print(error_msg)
                    logging.error(error_msg)
                    lock.release()
                    marc_recs = []
            else:
                marc_recs.append(rec)
            if count%1000:
                sys.stderr.write(".")
            else:
                sys.stderr.write(str(count))
            count += 1
            total_recs += count
    t2 = time.time()
    total_time = (t2 - t1) / 60.0
    print("Finished multi-processing %s in %0.3f" % (total_recs,
                                                     total_time))
Example #50
class PassiveTestEndpoint(object):
    def __init__(self):
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.s.bind((HOST, PORT))
        self.s.listen(1)
        self.mutex = Lock()

    def loop(self):
        while True:
            conn, addr = self.s.accept()
            print addr, 'connected'
            self.handle_read(conn)

    def handle_read(self, s):
        data = None
        try:
            resp = s.recv(8192)
            if not resp:
                return
            data = json.loads(resp)
        except (socket.error, ValueError):  # failed read or malformed JSON
            pass
        if data:
            cmd = data.get('cmd')
            if cmd == "start":
                self.mutex.acquire()
                program = data.get('program')
                args = data.get('args')
                env = data.get('env')
                if not child_pid:
                    print "now running", program, args
                    test_start(program, args, env)
                    s.sendall(json.dumps({'status': 'ok'}))
                else:
                    s.sendall(json.dumps({'status': 'error',
                                          'code': 'busy',
                                          'errmsg': 'test (%s) still running' % child_pid}))
                s.close()
                self.mutex.release()
            elif cmd == "terminate":
                self.mutex.acquire()
                print "terminate"
                if not child_pid:
                    s.sendall(json.dumps({'status': 'error',
                                             'code': 'no_child',
                                             'errmsg': 'nothing to terminate'}))
                else:
                    test_terminate()
                    log_content = open('/tmp/test_out', 'r').read()
                    s.sendall(json.dumps({'status': 'ok', 'log': log_content}))
                s.close()
                self.mutex.release()
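
A minimal client sketch for the endpoint above, assuming the same HOST and
PORT constants and that every reply fits in a single 8192-byte recv:

import json
import socket

def send_command(cmd, **fields):
    fields['cmd'] = cmd
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((HOST, PORT))
    s.sendall(json.dumps(fields))
    reply = json.loads(s.recv(8192))
    s.close()
    return reply

# e.g. send_command('start', program='/bin/sleep', args=['10'], env={})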
Ejemplo n.º 51
0
class sync_queue(object):
    # wraps a multiprocessing Queue, which is already process-safe; the
    # lock merely serialises add() calls made within one process

    def __init__(self):
        self.q = Queue()
        self.lock = Lock()

    def add(self, item):
        self.lock.acquire()
        self.q.put(item)
        self.lock.release()

    def remove(self):
        return self.q.get()
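
A usage sketch: the wrapper's Queue and Lock can be handed to a child process
at fork time (the worker function is illustrative):

from multiprocessing import Process

def worker(sq):
    sq.add('hello from the child')

if __name__ == '__main__':
    sq = sync_queue()
    p = Process(target=worker, args=(sq,))
    p.start()
    print(sq.remove())  # blocks until the child has put an item
    p.join()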
Ejemplo n.º 52
0
    def run(self):
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        # a Lock created locally like this is per-process, so the
        # acquire/release pair guards no shared state
        l = Lock()
        l.acquire()
        pd = pcapy.open_live(self.iface_name, ethernet.ETHERNET_MTU, 0, 100)
        iface_mac_addr = interface.get_string_mac_address(self.iface_name)
        pcap_filter = "ether dst " \
                      + iface_mac_addr \
                      + " and ether proto %s " % hex(frames.WiwoFrame.ethertype) \
                      + "and not (ether[14:1] = 0x07 or ether[14:1] = 0x08)"
        pd.setfilter(pcap_filter)
        l.release()
        pd.loop(-1, self.frame_handler)
Ejemplo n.º 53
0
class clientExecutor(Pyro.core.SynchronizedObjBase):
    def __init__(self):
        Pyro.core.SynchronizedObjBase.__init__(self)
        self.continueRunning =  True
        self.mutex = Lock() 
    def continueLoop(self):
        self.mutex.acquire()
        keepRunning = self.continueRunning
        self.mutex.release()  # release before returning, or the next call deadlocks
        return keepRunning
    def serverShutdownCall(self):
        # receive call from server when all stages are processed
        self.mutex.acquire()
        self.continueRunning = False
        self.mutex.release()
Ejemplo n.º 54
0
class ProcessManager():
    def __init__(self):
        self.lock = Lock()
        self.processes = dict()
        self.logger = logging.getLogger('{0}.{1}'.format(__name__, self.__class__.__name__))
    def has(self, name):
        self.lock.acquire()
        isExists = name in self.processes
        self.lock.release()
        return isExists
    def add(self, name, process):
        self.lock.acquire()
        self.processes[name] = process
        self.logger.debug('Add process ' + name)
        self.lock.release()
    def remove(self, name):
        self.lock.acquire()
        if name in self.processes:
            del self.processes[name]
            self.logger.debug('Remove process ' + name)
        self.lock.release()
    def kill(self, name):
        self.lock.acquire()
        if name in self.processes:
            self.processes[name].signalProcess('KILL')
            del self.processes[name]
            self.logger.debug('Killed and removed process ' + name)
        self.lock.release()
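
The signalProcess('KILL') call suggests the stored values are Twisted process
transports; a usage sketch under that assumption (transport would come from
reactor.spawnProcess):

manager = ProcessManager()
manager.add('worker-1', transport)
if manager.has('worker-1'):
    manager.kill('worker-1')   # SIGKILL and forget
manager.remove('worker-1')     # safe even if kill() already removed it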
Ejemplo n.º 55
0
class KnowledgeBase(Daemon):

    def __init__(self, config):
        set_logging(config)
        self.config = config
        self.pidfile = os.path.abspath(config['pidfile'])
        self.time_lock = Lock()
        self.teller_queue = JoinableQueue()
        self.session_factory = get_sasession(self.config)
        session = self.session_factory()

    def run(self):
        if int(self.config['instant_duration']):
            self.clock = Ticker(self.config, self.session_factory(),
                                self.time_lock, self.teller_queue)
            self.clock.start()

        host = self.config['kb_host']
        port = int(self.config['kb_port'])
        nproc = int(self.config['teller_processes'])
        for n in range(nproc):
            teller = Teller(self.config, self.session_factory, self.teller_queue)
            teller.daemon = True
            teller.start()
        self.socket = Listener((host, port))
        while True:
            try:
                client = self.socket.accept()
            except InterruptedError:
                return
            self.time_lock.acquire()
            self.teller_queue.put(client)
            self.time_lock.release()

    def cleanup(self, signum, frame):
        """cleanup tasks"""
        nproc = int(self.config['teller_processes'])
        for n in range(nproc):
            self.teller_queue.put(None)
        self.teller_queue.close()
        try:
            self.clock.ticking = False
        except AttributeError:
            pass
        self.teller_queue.join()
        try:
            self.clock.join()
        except AttributeError:
            pass
        logger.warning('bye from {n}, received signal {p}'.format(n=mp.current_process().name, p=str(signum)))
Ejemplo n.º 56
0
def main():
    global keyAlphabet, configParam, myOutputFile
    queueLock = Lock()
    outputLock = Lock()
    configParam = []
    workQueue = Queue()
    myProcesses= []
    orderArray = Array('i',[0,0])
    
    if len(sys.argv) == 4:
        for e in range(1,4):
            if sys.argv[e].isdigit():
                configParam.append(int(sys.argv[e]))
            else:
                print "all three parameters have to be integers"
                sys.exit()
    else:
        print "usage : <shift> <numThread> <length>"
        sys.exit()
    
    myOutputFile = 'crypted_' + str(configParam[0]) + '_' + str(configParam[1]) + '_' + str(configParam[2]) + ".txt"
    
    try:
        global myInputFile
        myInputFile = open('metin.txt','r')
        open(myOutputFile, 'w').close()  # create/truncate the output file

    except IOError:
        print "Opening failed"
        sys.exit()
    
    keyAlphabet = createKey()
    
    queueLock.acquire()
    myText = myInputFile.read(configParam[2])
    while myText != "":
        workQueue.put(myText)
        myText = myInputFile.read(configParam[2])
    queueLock.release()

    for i in range(0,configParam[1]):
        process = Process(target=readFile,args=(orderArray,workQueue,queueLock,outputLock,myOutputFile))
        process.start()
        myProcesses.append(process)
        
    
    for p in myProcesses:
        p.join()

    myInputFile.close()
Ejemplo n.º 57
0
class Database():

    databaseName = 'data.sqlite3'

    def __init__(self):
        
        self.lock = Lock()
        self.connection = sqlite3.connect(self.databaseName)
        self.cursor = self.connection.cursor()

    def insertItem(self, item):
        # parameterised query: no hand-built quoting, no SQL injection
        sql = 'INSERT INTO UNIT VALUES(?, ?, ?, ?, ?)'
        values = (item.fields[4][1].replace('.', ''),
                  item.fields[0][1],
                  item.fields[1][1],
                  item.fields[2][1],
                  item.fields[3][1])

        self.lock.acquire()
        try:
            logging.debug('%s %s\n' % (sql, values))
            self.cursor.execute(sql, values)
        except sqlite3.Error as e:
            logging.warning('%s %s %s' % (sql, values, e.args[0]))

        try:
            self.connection.commit()
        except sqlite3.Error:
            logging.error('SQLITE COMMIT ISSUE')

        self.lock.release()

    def doesNotContainItem(self, serial):
        self.cursor.execute('SELECT * FROM UNIT WHERE ID = ?', (serial,))
        return self.cursor.fetchone() is None
Ejemplo n.º 58
0
class Director(object):
    def __init__(self, producer, consumer):
        self.producer = producer
        self.consumer = consumer
        self.queue = Queue()
        self.prod_proc = Process(target = self.produce)
        self.prod_proc.daemon = True
        self.lock = Lock()
        self.done = Value('b')
        self.done.value = 0

    def start(self):
        self.prod_proc.start()

    def step(self):
        self.lock.acquire()
        done = (self.done.value != 0)
        self.lock.release()
        if done:
            raise Done
        try:
            data = self.queue.get(block = True, timeout = 1.0)
            self.consumer.consume(data)
        except Empty:
            pass

    def stop(self):
        self.prod_proc.join()

    def run(self):
        self.start()
        while True:
            try:
                self.step()
            except Done:
                break
        self.stop()
        
    def produce(self):
        try:
            while True:
                data = self.producer.produce()
                self.queue.put(data)
        except Exception:  # the producer signals completion by raising
            self.lock.acquire()
            self.done.value = 1
            self.lock.release()
            self.queue.close()
            self.queue.join_thread()
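
A usage sketch with toy producer and consumer objects (both hypothetical, and
assuming a fork start method since the Process is built in __init__); the
producer raising StopIteration is what sets done and lets run() finish:

class CountProducer(object):
    def __init__(self, n):
        self.items = iter(range(n))

    def produce(self):
        return next(self.items)  # StopIteration once exhausted

class PrintConsumer(object):
    def consume(self, data):
        print(data)

if __name__ == '__main__':
    Director(CountProducer(5), PrintConsumer()).run()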
Ejemplo n.º 59
0
class MPlot(ct.StoppableProcess):
    def __init__(self, xlim, ylim, channel=1, post_process=None, x_range=None,
                 *args, **kwargs):
        super(MPlot, self).__init__(*args, **kwargs)
        self.in_queue = multiprocessing.Queue()
        self.xlim = xlim
        self.ylim = ylim
        self.x_range = x_range
        if self.x_range is None:
            self.x_range = np.arange(self.xlim[0], self.xlim[1])

        self.lock = Lock()
        self.channel = channel

        self.post_process = post_process
        if self.post_process is None:
            self.post_process = lambda data: data

    def init_plot(self):
        self.lock.acquire()

        self.fig, self.axarr = plt.subplots(self.channel,
                                            sharex=True,
                                            squeeze=False)
        for i in range(self.channel):
            self.axarr[i, 0].hold(False)  # Axes.hold only exists in matplotlib < 3.0
        self.fig.show()

        self.lock.release()

    def run(self):
        self.init_plot()
        while not self.is_stopped():
            try:
                data = self.in_queue.get_nowait()
                split_data = ast.split_channel(data, self.channel)
                p_split_data = [self.post_process(sd) for sd in split_data]

                self.lock.acquire()
                for i in range(self.channel):
                    self.axarr[i, 0].plot(self.x_range, p_split_data[i])
                    self.axarr[i, 0].set_xlim(self.xlim)
                    self.axarr[i, 0].set_ylim(self.ylim)
                self.fig.canvas.draw()
                self.lock.release()

            except queue.Empty:
                pass
Ejemplo n.º 60
0
def insert_to_db(table, data, visit_info):
    """Insert event data to database."""
    # a Lock() constructed inside each call is visible only to that call, so
    # it cannot serialise concurrent writers; rely on SQLite's own locking
    # and retry on OperationalError instead
    for _ in xrange(0, cm.MAX_WAIT_FOR_DB_CONN):
        try:
            with sq.connect(visit_info.out_db,
                            timeout=cm.DB_CONN_TIMOEUT) as conn:
                cursor = conn.cursor()
                return insert_to_opened_db(cursor, table, data, visit_info)
        except sq.OperationalError:  # @UndefinedVariable
            sleep(1)
    cm.print_log(visit_info, "Cannot insert to DB (%s) URL: %s Table: (%s)" %
                 (visit_info.out_db, visit_info.url, table))
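
If writers in different processes really do need serialising, the lock has to
be created once in the parent and passed down; a sketch (worker body and table
name are illustrative):

from multiprocessing import Lock, Process

db_lock = Lock()  # created before the children fork, so all of them share it

def locked_insert(lock, table, data, visit_info):
    lock.acquire()
    try:
        insert_to_db(table, data, visit_info)
    finally:
        lock.release()

# workers = [Process(target=locked_insert,
#                    args=(db_lock, 'http_requests', row, vi)) for row, vi in jobs]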