Example #1
    def __init__(self):
        self.last_svg_generated = None
        self.last_svg_generated_ts = 0
        self.rwlock = rwlock.RWLock()
        self.svg_nodes_topics = dict()
        self.svg_list = deque(maxlen=2)  # Keep the last 2 svg
        self.master = rosgraph.Master('/rostopic')
Example #2
    def __init__(self,
                 filename: str,
                 tree_conf: TreeConf,
                 cache_size: int = 512):
        self._filename = filename
        self._tree_conf = tree_conf
        self._lock = rwlock.RWLock()

        if cache_size == 0:
            self._cache = FakeCache()
        else:
            self._cache = cachetools.LRUCache(maxsize=cache_size)

        self._fd, self._dir_fd = open_file_in_dir(filename)

        self._wal = WAL(filename, tree_conf.page_size)
        if self._wal.needs_recovery:
            self.perform_checkpoint(reopen_wal=True)

        # Get the next available page
        self._fd.seek(0, io.SEEK_END)
        last_byte = self._fd.tell()
        self.last_page = int(last_byte / self._tree_conf.page_size)
        self._freelist_start_page = 0

        # Todo: Remove this, it should only be in Tree
        self._root_node_page = 0
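
Because last_page above is derived by dividing the file length by page_size, the file is laid out as an array of fixed-size pages. A minimal sketch of a read helper built on that layout (the method name is illustrative, and the cache/WAL lookup that real access would go through first is omitted; this is not necessarily the project's own code):

    def _read_page_data(self, page: int) -> bytes:
        # Page N occupies the byte range [N * page_size, (N + 1) * page_size).
        # A real lookup would consult self._cache and the WAL shown above first.
        start = page * self._tree_conf.page_size
        self._fd.seek(start)
        data = self._fd.read(self._tree_conf.page_size)
        assert len(data) == self._tree_conf.page_size, "short read: page past EOF"
        return data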
Example #3
    def __init__(self, size, path=None):
        """
        Initialise the ring buffer with size of the buffer and the path to the file to use.
        If path is None then a temporary file will be created.
        """
        self.readers = []
        self.rwlock = rwlock.RWLock()
        self.write_event = threading.Condition()

        self.size = size
        self.reset()

        if path is None:
            # os.tmpfile() no longer exists in Python 3; use the tempfile module instead.
            self.buffer_fd = tempfile.TemporaryFile()
        else:
            # Try read + write first and fall back to write + read, since 'w+' truncates an existing file.
            try:
                self.buffer_fd = open(path, 'r+b')
            except IOError:
                self.buffer_fd = open(path, 'w+b')

        # Check to see if we need to increase the size of the file.
        stat = os.fstat(self.buffer_fd.fileno())
        if stat.st_size < size:
            self.buffer_fd.truncate(size)

        # Map the file into memory
        self.buffer = mmap.mmap(self.buffer_fd.fileno(), self.size)
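
The mmap above backs a ring buffer, so writes wrap around at self.size and readers are woken through write_event. A minimal sketch of what a write path could look like (write_ptr is a hypothetical counter assumed to be set up by reset(), and data is assumed to be bytes no longer than the buffer; this is not the project's actual method):

    def write(self, data):
        # A real writer would also hold the write side of self.rwlock here.
        pos = self.write_ptr % self.size
        end = min(pos + len(data), self.size)
        first = end - pos
        self.buffer[pos:end] = data[:first]
        if first < len(data):
            # Wrap around and continue at the start of the buffer.
            self.buffer[0:len(data) - first] = data[first:]
        self.write_ptr += len(data)
        # Wake any readers blocked on the condition variable.
        with self.write_event:
            self.write_event.notify_all()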
Example #4
    def __init__(self, event_lock, events):
        super().__init__()
        # TODO: Swap this out for picam
        self.cap = cv2.VideoCapture(0)
        # self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, CAMERA_RESOLUTION[1])
        # self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, CAMERA_RESOLUTION[0])

        # Preallocate two buffers, so that I don't have to allocate a new array for every frame.
        # last_frame holds the frame that was most recently read from the camera.
        self.last_frame = np.zeros(CAMERA_RESOLUTION, dtype=np.uint8)
        # next_frame holds the frame which I am currently waiting to get from the camera.
        self.next_frame = np.zeros(CAMERA_RESOLUTION, dtype=np.uint8)
        # Because of multithreading, we need some locks. A RWLock (Read Write Lock) allows any number of threads to
        # acquire a read lock simultaneously, but only one thread can have a write lock at a time.
        self.frame_lock = rwlock.RWLock()
        # To stop the thread, set self.running to False
        self.running = True
        self.event_lock = event_lock
        self.events = events

        # Timing for debugging
        self.__start_time = time.time()
        self.__num_iterations = 0
        # This event is set once the first frame has been read. That way, processes can't try to read from the
        # camera before it is initialized.
        self.ready_event = threading.Event()
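
The preallocated buffers, the RWLock, and ready_event combine in the capture loop: the thread fills the spare buffer, swaps it in under the write lock, and signals that a first frame exists. An illustrative sketch of that loop (the writer_lock context-manager usage and the grayscale conversion are assumptions, not this project's actual run() method):

    def run(self):
        while self.running:
            ret, frame = self.cap.read()
            if not ret:
                continue
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            with self.frame_lock.writer_lock:  # assumed RWLock API
                # Assumes the camera delivers CAMERA_RESOLUTION-sized frames.
                np.copyto(self.next_frame, gray)  # reuse the preallocated buffer
                # Swap so readers holding the read lock always see a complete frame.
                self.last_frame, self.next_frame = self.next_frame, self.last_frame
            self.ready_event.set()  # at least one frame has been read
            self.__num_iterations += 1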
Example #5
    def __init__(self, endpoint, accessKeyId, accessKey, project,
                 logstore, consumer_group, consumer, securityToken=None):
        self.mclient = LogConsumerClient(endpoint, accessKeyId, accessKey, securityToken)
        self.mproject = project
        self.mlogstore = logstore
        self.mconsumer_group = consumer_group
        self.mconsumer = consumer
        self.rw_lock = rwlock.RWLock()
        self.logger = logging.getLogger(self.__class__.__name__)
Example #6
    def __init__(self, file_name, tree_conf: TreeConf, cache_size=1024):
        self._filename = file_name
        self._tree_conf = tree_conf

        if cache_size < 0:
            self._cache = LRUCache()  # cache without size limitation
        elif cache_size == 0:
            self._cache = FakeCache()
        else:
            self._cache = LRUCache(capacity=cache_size)
        self._fd = open_database_file(self._filename)
        self._lock = rwlock.RWLock()
        self._wal = WAL(file_name, tree_conf.page_size)

        # Get the last available page
        self._fd.seek(0, io.SEEK_END)
        last_byte = self._fd.tell()
        self.last_page = int(last_byte / self._tree_conf.page_size)
        self._page_GC = list(self._load_page_gc())
        self._auto_commit = True
Example #7
    def start(self):
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.socket.bind((self._host, self._port))
        self.socket.listen(16)
        self.logger.debug("Start to listen")

        lock = rwlock.RWLock()
        process = multiprocessing.Process(target=manager,
                                          args=(self._timeout, lock))
        process.daemon = True
        process.start()

        while True:
            conn, address = self.socket.accept()
            self.logger.debug("Accepted {}".format(address))
            process = multiprocessing.Process(
                target=handle,
                args=(conn, address, self.browsers, self.environ,
                      self._timeout, lock))
            process.daemon = True
            process.start()
            self.logger.debug("Process {} started".format(process))
Example #8
            self.cout_locker_.acquire()
            print(self.getName(), val_copy_)
            self.cout_locker_.release()
            time.sleep(0.05)

    def join(self):
        self.exit_flag_ = True
        threading.Thread.join(self)


if __name__ == "__main__":
    read_thread_pool_ = []
    write_thread_pool_ = []
    read_count_ = 10
    write_count_ = 10
    rwlocker_ = rwlock.RWLock()
    cout_locker_ = threading.Lock()
    for i in range(read_count_ // 2):
        read_thread_pool_.append(ReadThread(rwlocker_, cout_locker_))
    for i in range(write_count_):
        write_thread_pool_.append(WriteThread(rwlocker_))
    for i in range(read_count_ // 2):
        read_thread_pool_.append(ReadThread(rwlocker_, cout_locker_))
    for i in range(write_count_):
        write_thread_pool_[i].start()
    for i in range(read_count_):
        read_thread_pool_[i].start()
    time.sleep(10)
    for i in range(write_count_):
        write_thread_pool_[i].join()
    for i in range(read_count_):
        read_thread_pool_[i].join()
Example #9
import json
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import tornado.escape
from tornado.options import define, options
import threading
import tools
import rwlock

define("port", default=10022, help="run on the given port", type=int)

dicfile = ''
medi_dic = {}
file_lock = rwlock.RWLock()

enttype_dic = {
    u'疾病': 'disease',
    u'症状': 'symptom',
    u'治疗项目': 'treatment',
    u'辅助检查': 'diagnosis_name',
    u'其他诊疗项目': 'other_diagnosis',
    u'器材': 'instrument',
    u'医疗器材': 'instrument',
    u'药品-通用名': 'medicine_cn',
    u'药品-产品名': 'medicine_pn',
    u'药品-商品名': 'medicine_mn',
    u'药品名': 'medicine',
    u'剂型': 'dosage_form',
    u'规格': 'specifications',
Example #10
    def executeUpdateSql(self, sql, params=None):
        try:
            self.cursor.execute(sql, params)
            self.db.commit()
        except Exception as e:
            self.db.rollback()
            print(repr(e))

    def executeSelectSql(self, sql, params):
        try:
            self.cursor.execute(sql, params)
            res = self.cursor.fetchall()
            return res
        except Exception as e:
            print(repr(e))
            return None


# Database parameters for storing the crawler's data
DbParams = {
    "ip": "xxx",
    "user": "******",
    "password": "******",
    "db_name": "ruanjian",
    "charset": "utf8"
}
start_url = "http://www.cst.zju.edu.cn"
db_lock = rwlock.RWLock().writer_lock
db_rwlock = rwlock.RWLock()
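
The two module-level locks above suggest a read/write split around the database helpers: selects can share the lock while updates need exclusive access. A minimal sketch of such wrappers, assuming the RWLock exposes reader_lock/writer_lock objects usable as context managers (writer_lock is taken from the assignment above, but the exact rwlock API varies between the projects in these examples):

def guarded_select(db, sql, params):
    # Any number of readers may run selects concurrently.
    with db_rwlock.reader_lock:  # assumed context-manager API
        return db.executeSelectSql(sql, params)


def guarded_update(db, sql, params=None):
    # Writers are exclusive: no selects or other updates while committing.
    with db_rwlock.writer_lock:  # assumed context-manager API
        db.executeUpdateSql(sql, params)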