Example #1
File: db.py Project: fparrel/regepe
def DbBuildInvert(dbtype,ele,invfunc):
    if dbtype not in DBTYPES:
        raise Exception('Invalid database type')
    # Check ele
    if ele not in ELELIST[dbtype]:
        raise Exception('Invalid element')
    #print '<!-- DbBuildInvert -->\n'
    # Target inv db
    dbfileinv = ele.upper()+'_INV.db'
    # Lock and open inv db
    lock = FileLock(dbfileinv,5)
    lock.acquire()
    #Log('DbBuildInvert open db c %s\n'%dbfileinv)
    dbinv = anydbm.open(dbfileinv,'c')
    # Clear inv db
    dbinv.clear()
    # List dir
    for dbfile in os.listdir(dbtype):
        id = dbfile[:-3]
        #Log('DbBuildInvert open db r %s/%s\n'%(dbtype,dbfile))
        db = anydbm.open('%s/%s' % (dbtype,dbfile),'r')
        if db.has_key(ele):
            value = db[ele]
            for word in invfunc(value):
                if dbinv.has_key(word):
                    dbinv[word] = dbinv[word] + (',%s' % id)
                else:
                    dbinv[word] = '%s' % id
        db.close()
        #Log('DbBuildInvert close db r %s/%s\n'%(dbtype,dbfile))
    dbinv.close()
    #Log('DbBuildInvert close db c %s\n'%dbfileinv)
    lock.release()
    # Rebuild is no longer needed
    RearmRebuild(ele)
Example #2
File: db.py Project: fparrel/regepe
def DbBuildInvertOld(ele,invfunc):
    raise Exception('Deprecated')
    # Check ele
    if ele not in ELELIST['maps']:
        raise Exception('Invalid element')
    # Target inv db
    dbfileinv = ele.upper()+'_INV.db'
    # Lock and open inv db
    lock = FileLock(dbfileinv,5)
    lock.acquire()
    dbinv = anydbm.open(dbfileinv,'c')
    # Clear inv db
    dbinv.clear()
    # List dir
    for mapdbfile in os.listdir('maps'):
        mapid = mapdbfile[:-3]
        dbmap = anydbm.open('maps/%s' % mapdbfile,'r')
        if dbmap.has_key(ele):
            value = dbmap[ele]
            for word in invfunc(value):
                if dbinv.has_key(word):
                    dbinv[word] = dbinv[word] + (',%s' % mapid)
                else:
                    dbinv[word] = '%s' % mapid
        dbmap.close()
    dbinv.close()
    lock.release()
    # Rebuild is no longer needed
    RearmRebuild(ele)
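Examples 1 and 2 follow the same acquire / clear / rebuild / release pattern, but the work is never guarded by try/finally, so an exception while rebuilding would leave the inverted database locked. Below is a minimal Python 3 sketch of the same pattern, assuming the filelock package and the standard dbm module; the directory layout and the invfunc callback mirror the originals, and the sidecar .lock file is a choice made here rather than something the project does.

import os
import dbm
from filelock import FileLock

def build_inverted_db(dbtype, ele, invfunc):
    dbfileinv = ele.upper() + '_INV.db'
    lock = FileLock(dbfileinv + '.lock', timeout=5)
    with lock:                                   # released even if rebuilding fails
        with dbm.open(dbfileinv, 'n') as dbinv:  # 'n' starts from an empty database
            for dbfile in os.listdir(dbtype):
                record_id = dbfile[:-3]          # strip the '.db' suffix
                with dbm.open(os.path.join(dbtype, dbfile), 'r') as db:
                    if ele.encode() in db:
                        for word in invfunc(db[ele.encode()].decode()):
                            key = word.encode()
                            if key in dbinv:
                                dbinv[key] = dbinv[key] + (',%s' % record_id).encode()
                            else:
                                dbinv[key] = record_id.encode()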
Example #3
 def runjobs(self, verbose = None):
     """ Run the jobs """
     # The verbose flag
     if verbose is not None:
         self.verbose = verbose
     # Find all 'job_' methods
     for (exc_name, exc_value) in inspect.getmembers(self, lambda x: inspect.ismethod(x)):
         # The method's name must start with uppercase
         if exc_name[0].isupper():
             lock = FileLock(os.path.join('/', 'tmp', 'TranPy-%s-%s.lock' % (self.job_class, exc_name)))
             # Try to get the lock
             if lock.acquire():
                 if self.verbose:
                     print >>sys.stderr, 'Running %s %s' % (self.job_class, exc_name)
                 # Run the job
                 try:
                     exc_value()
                 except:
                     if self.verbose:
                         traceback.print_exc(file = sys.stderr)
                 finally:
                     # Release the lock
                     lock.release()
             else:
                 if self.verbose:
                     print >>sys.stderr, 'Locked %s %s' % (self.job_class, exc_name)
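Here lock.acquire() is treated as a boolean test, which implies a FileLock implementation whose acquire returns True or False. With the filelock package, acquire() instead blocks and raises Timeout when it gives up, so the non-blocking "skip the job if another runner holds it" check would look roughly like the sketch below; run_job and the lock path are illustrative.

from filelock import FileLock, Timeout

def run_job_once(job_name, run_job):
    lock = FileLock('/tmp/TranPy-%s.lock' % job_name)
    try:
        lock.acquire(timeout=0)   # a single attempt: raises Timeout if already held
    except Timeout:
        print('Locked, skipping %s' % job_name)
        return
    try:
        run_job()
    finally:
        lock.release()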
Example #4
File: db.py Project: fparrel/regepe
def DbAddComment(mapid,user,comment):
    mapfile = 'maps/%s.db' % mapid
    if not os.access(mapfile,os.F_OK):
        raise Exception('Invalid map id %s' % mapid)
    d = getCurrentDate()
    lock = FileLock(mapfile,5)
    lock.acquire()
    #Log('DbAddComment open db r %s\n' % mapfile)
    db = anydbm.open(mapfile,'r')
    if db.has_key('last_comment_id'):
        last_comment_id = int(db['last_comment_id'])
    else:
        last_comment_id = 0
    db.close()
    #Log('DbAddComment close db r %s\n' % mapfile)
    last_comment_id += 1
    if last_comment_id>99999:
        lock.release()
        raise Exception('Max comments reached')
    #Log('DbAddComment open db c %s\n' % mapfile)
    db = anydbm.open(mapfile,'c')
    db['last_comment_id'] = str(last_comment_id)
    db['comment%.5d'%last_comment_id] = '%s,%s,%s' % (d,user,comment)
    db.close()
    #Log('DbAddComment close db c %s\n' % mapfile)
    lock.release()
Example #5
        def run(self):
                while True:
                        lock = FileLock("/var/lock/baseDaemon.lock")

                        #Just to be safe; pulling data shouldn't take more than 2h
                        lock.acquire()

                        wikiDatesToMongo(False)
                        time.sleep(2*60*60)

                        lock.release()
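If wikiDatesToMongo raises, the loop above never reaches lock.release() and the next iteration blocks on the lock forever. A context-manager variant guarantees the release and holds the lock only while data is being pulled; this is a sketch assuming the filelock package, with the data-pulling function passed in as a callable.

import time
from filelock import FileLock

def run_forever(pull_data, lock_path="/var/lock/baseDaemon.lock", period=2 * 60 * 60):
    lock = FileLock(lock_path)
    while True:
        # The lock is released when the with-block exits, even if pull_data raises.
        with lock:
            pull_data()
        time.sleep(period)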
Example #6
 def update(self, lock=True):
     if lock:
         flock = FileLock(self.inifile)
         flock.acquire()
     
     try:
         inifp = open(self.inifile, 'w')
         self.cfg.write(inifp)
         inifp.close()
         if lock: flock.release()
         return True
     except:
         if lock: flock.release()
         return False
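The duplicated flock.release() calls (and the fact that flock only exists when lock=True) can be avoided with the context-manager form. A minimal sketch assuming the filelock package and the same self.inifile / self.cfg attributes; a sidecar .lock file is used here because the locking back end may create or truncate the lock file, which must not happen to the ini file itself.

import contextlib
from filelock import FileLock

def update(self, lock=True):
    # Use the real lock when requested, otherwise a do-nothing context manager.
    guard = FileLock(self.inifile + '.lock') if lock else contextlib.nullcontext()
    try:
        with guard:
            with open(self.inifile, 'w') as inifp:
                self.cfg.write(inifp)
        return True
    except Exception:
        return False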
Example #7
def test_wf_in_actor(workflow_start_regular, tmp_path):
    fail_flag = tmp_path / "fail"
    cnt = tmp_path / "count"
    cnt.write_text(str(0))
    lock_file = tmp_path / "lock"

    @workflow.step
    def start_session():
        if fail_flag.exists():
            raise Exception()
        v = int(cnt.read_text()) + 1
        cnt.write_text(str(v))
        with FileLock(str(lock_file)):
            return "UP"

    @workflow.virtual_actor
    class Session:
        def __init__(self):
            self._session_status = "DOWN"

        @workflow.virtual_actor.readonly
        def get_status(self):
            return self._session_status

        def update_session(self, up):
            (ret, err) = up
            if err is None:
                self._session_status = ret
            else:
                self._session_status = err
            return self._session_status

        def session_start(self):
            step = start_session.step()
            return step

        def session_start_with_status(self):
            self._session_status = "STARTING"
            return self.update_session.step(
                start_session.options(catch_exceptions=True).step())

        def __getstate__(self):
            return self._session_status

        def __setstate__(self, state):
            self._session_status = state

    actor = Session.get_or_create("session_id")
    fail_flag.touch()
    with pytest.raises(Exception):
        actor.session_start.run()
    fail_flag.unlink()
    ray.get(workflow.resume("session_id"))
    # After resume, it'll rerun start_session which will
    # generate 1
    assert cnt.read_text() == "1"
    assert actor.session_start.run() == "UP"
    assert cnt.read_text() == "2"
    assert actor.session_start_with_status.run() == "UP"
    assert cnt.read_text() == "3"

    # Now test a new session.
    actor = Session.get_or_create("session_id")
    fail_flag.touch()
    assert isinstance(actor.session_start_with_status.run(), Exception)
    assert cnt.read_text() == "3"
    lock = FileLock(str(lock_file))
    lock.acquire()
    fail_flag.unlink()
    ret = actor.session_start_with_status.run_async()
    for i in range(0, 60):
        if cnt.read_text() == "4":
            break
        time.sleep(1)
    assert cnt.read_text() == "4"
    # This means that by the time session_start_with_status returns,
    # the session status has already been updated
    assert actor.get_status.run() == "STARTING"
    lock.release()
    assert ray.get(ret) == "UP"
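The test above uses the lock file as a gate: the test holds it while the workflow step blocks on it, and releasing it lets the step finish. The same gating idea, reduced to plain processes and stripped of the Ray machinery, could look like this sketch (paths and timeouts are illustrative).

import multiprocessing
import os
import tempfile
from filelock import FileLock

def worker(lock_path, done_path):
    with FileLock(lock_path):            # blocks until the parent releases the gate
        open(done_path, "w").close()     # signal completion

if __name__ == "__main__":
    tmp = tempfile.mkdtemp()
    lock_path = os.path.join(tmp, "gate.lock")
    done_path = os.path.join(tmp, "done")

    gate = FileLock(lock_path)
    gate.acquire()                        # hold the gate before starting the child
    p = multiprocessing.Process(target=worker, args=(lock_path, done_path))
    p.start()
    p.join(timeout=1)
    assert not os.path.exists(done_path)  # child is still blocked on the lock
    gate.release()                        # open the gate
    p.join(timeout=10)
    assert os.path.exists(done_path)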
Example #8
    class FilelockUtil(object):
        """
        文件锁工具类
        """
        def __init__(self):
            self.flock = FileLock(file_path)

        def acquire(self):
            """
            文件加锁
            """
            self.flock.acquire()

        def release(self):
            """
            文件锁释放
            """
            self.flock.release()

        def _get_child_pid(self, ppid):
            """
            获取ppid的子进程ID号
            :param ppid: 父进程PID号
            :return    : 子进程PID号
            """
            pids = psutil.pids()
            for pid in pids:
                try:
                    parent_id = psutil.Process(pid).ppid()
                    if parent_id == ppid:
                        return pid
                except Exception:
                    continue
            return None

        def _get_hwnds_for_pid(self, pid):
            """
            获取对应pid进程的所有句柄
            :param pid: 进程号
            :return   : 句柄列表
            """
            def callback(hwnd, hwnds):
                if win32gui.IsWindowVisible(hwnd) and win32gui.IsWindowEnabled(
                        hwnd):
                    _, found_pid = GetWindowThreadProcessId(hwnd)
                    if found_pid == pid:
                        hwnds.append(hwnd)
                    return True

            hwnds = []
            win32gui.EnumWindows(callback, hwnds)
            return hwnds

        def get_hwnds(self, ppid):
            """
            获取ppid的子进程句柄
            :param ppid: 父进程ID
            :return    : 句柄列表
            """
            cpid = self._get_child_pid(ppid)
            hwnds = self._get_hwnds_for_pid(cpid)
            return hwnds

        def hwnd_top_most(self, hwnds):
            """
            根据句柄置顶窗口
            :param hwnds: 句柄列表
            :return     : None
            """
            for hwnd in hwnds:
                win32gui.SetWindowPos(
                    hwnd, win32con.HWND_TOPMOST, 0, 0, 0, 0,
                    win32con.SWP_NOSIZE | win32con.SWP_NOMOVE
                    | win32con.SWP_NOACTIVATE | win32con.SWP_NOOWNERZORDER
                    | win32con.SWP_SHOWWINDOW)

        def hwnd_not_top_most(self, hwnds):
            """
            根据句柄取消窗口的置顶
            :param hwnds: 句柄列表
            :return     : None
            """
            for hwnd in hwnds:
                win32gui.SetWindowPos(
                    hwnd, win32con.HWND_NOTOPMOST, 0, 0, 0, 0,
                    win32con.SWP_NOSIZE | win32con.SWP_SHOWWINDOW)
Example #9
class DiskBufferWriter(object):
    def __init__(self, name, filename, path):
        self._data = None
        self.name: str = name
        self._filename: str = filename
        self._path: str = path
        if not os.path.exists(path):
            os.makedirs(path)

        self._cache_size: int = -1
        self._cache_counter: int = 0

        self._file_lock: bool = False
        pass

    def cleanup_cache(self):
        prefix, suffix = self._get_file_components()
        if os.path.exists(self._path):
            files = [
                f for f in os.listdir(self._path)
                if re.match(prefix + '(_[0-9]+)?' + suffix + '(.lock)?', f)
            ]
            for f in files:
                os.remove(self._path + f)

    def allow_cache(self, cache_size: int = 0):
        # 0 is unlimited cache
        self._cache_size = cache_size
        if self._file_lock:
            # To force clearing locks. Should this be optional?
            self.use_file_lock()

    def use_file_lock(self, use: bool = True, clear_locks: bool = True):
        self._file_lock = use
        try:
            if self._cache_size == -1:
                os.remove(self._path + self._filename)
            else:
                files = [
                    f for f in os.listdir(self._path)
                    if re.match(r'.*_[0-9]+.*\.lock', f)
                ]
                for f in files:
                    os.remove(self._path + f)
        except:
            pass

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, data):
        self._data = data

        # TODO implement file lock

        outname = self._make_next_filename()

        if self._file_lock:
            self._lock = FileLock(self._path + outname + ".lock", timeout=1)
            if self._lock.is_locked:
                print("Locked " + outname)
            self._lock.acquire()
        with open(self._path + outname, 'w') as outfile:
            json.dump(data, outfile)

        for l in self.pserver.listeners:
            address = "/__DiskBuffer/" + self.name

            #             print('sending ' + address)
            #             print(l.__dict__)
            l.send_message(address, outname)

        if self._file_lock:
            self._lock.release()

    def get_filename_for_writing(self):
        outname = ''
        if self._file_lock:
            if self._lock:
                print("Error, file is locked")
                return ''

            outname = self._make_next_filename()
            self._lock = FileLock(self._path + outname + ".lock", timeout=1)
            if self._lock.is_locked:
                print("Locked " + outname)
            self._lock.acquire()
        else:
            outname = self._make_next_filename()
        self.outname = outname
        return self._path + outname

    def done_writing_file(self, filename: str = ''):
        if filename.find(self._path) == 0:
            filename = filename[len(self._path):]
        else:
            # TODO more robust checking that we are managing that file.
            raise ValueError('Invalid filename')
        for l in self.pserver.listeners:
            address = "/__DiskBuffer/" + self.name

            #             print('sending ' + address)
            #             print(l.__dict__)
            # TODO check that we are actually managing this file, as it is a vector for attack in insecure networks.
            l.send_message(address, filename)
        if self._file_lock:
            self._lock.release()

    def expose_to_network(self, pserver: ParameterServer):
        self.pserver = pserver

    def _make_next_filename(self):
        outname = self._filename

        if self._cache_size >= 0:
            if self._cache_size > 0 and self._cache_counter == self._cache_size:
                self._cache_counter = 0
            outname = self._make_filename(self._cache_counter)
            self._cache_counter += 1
        return outname

    def _make_filename(self, index):
        prefix, suffix = self._get_file_components()
        outname = prefix + '_' + str(index) + suffix
        return outname

    def _get_file_components(self):
        outname = self._filename
        try:
            index_dot = outname.index('.')
            prefix = outname[0:index_dot]
            suffix = outname[index_dot:]
        except:
            prefix = outname
            suffix = ''
        return [prefix, suffix]
Example #10
    def read(
        self, file_path: Union[Path, str]
    ) -> Union[AllennlpDataset, AllennlpLazyDataset]:
        """
        Returns a dataset containing all the instances that can be read from the file path.

        If `self.lazy` is `False`, this eagerly reads all instances from `self._read()`
        and returns an `AllennlpDataset`.

        If `self.lazy` is `True`, this returns an `AllennlpLazyDataset`, which internally
        relies on the generator created from `self._read()` to lazily produce `Instance`s.
        In this case your implementation of `_read()` must also be lazy
        (that is, not load all instances into memory at once), otherwise
        you will get a `ConfigurationError`.

        In either case, the returned `Iterable` can be iterated
        over multiple times. It's unlikely you want to override this function,
        but if you do your result should likewise be repeatedly iterable.
        """
        if not isinstance(file_path, str):
            file_path = str(file_path)

        lazy = getattr(self, "lazy", None)

        if lazy is None:
            warnings.warn(
                "DatasetReader.lazy is not set, "
                "did you forget to call the superclass constructor?",
                UserWarning,
            )

        if lazy:
            return AllennlpLazyDataset(self._instance_iterator, file_path)
        else:
            cache_file: Optional[str] = None
            if self._cache_directory:
                cache_file = self._get_cache_location_for_file_path(file_path)

            if cache_file is not None and os.path.exists(cache_file):
                try:
                    # Try to acquire a lock just to make sure another process isn't in the middle
                    # of writing to the cache.
                    cache_file_lock = FileLock(
                        cache_file + ".lock",
                        timeout=self.CACHE_FILE_LOCK_TIMEOUT)
                    cache_file_lock.acquire()
                    # We make an assumption here that if we can obtain the lock, no one will
                    # be trying to write to the file anymore, so it should be safe to release the lock
                    # before reading so that other processes can also read from it.
                    cache_file_lock.release()
                    logger.info("Reading instances from cache %s", cache_file)
                    instances = self._instances_from_cache_file(cache_file)
                except Timeout:
                    logger.warning(
                        "Failed to acquire lock on dataset cache file within %d seconds. "
                        "Cannot use cache to read instances.",
                        self.CACHE_FILE_LOCK_TIMEOUT,
                    )
                    instances = self._multi_worker_islice(
                        self._read(file_path))
            else:
                instances = self._multi_worker_islice(self._read(file_path))

            # Then some validation.
            if not isinstance(instances, list):
                instances = list(instances)

            if not instances:
                raise ConfigurationError(
                    "No instances were read from the given filepath {}. "
                    "Is the path correct?".format(file_path))

            # And finally we try writing to the cache.
            if cache_file is not None and not os.path.exists(cache_file):
                if self.max_instances is not None:
                    # But we don't write to the cache when max_instances is specified.
                    logger.warning(
                        "Skipping writing to data cache since max_instances was specified."
                    )
                elif util.is_distributed() or (get_worker_info() and
                                               get_worker_info().num_workers):
                    # We also shouldn't write to the cache if there's more than one process loading
                    # instances since each worker only receives a partial share of the instances.
                    logger.warning(
                        "Can't cache data instances when there are multiple processes loading data"
                    )
                else:
                    try:
                        with FileLock(cache_file + ".lock",
                                      timeout=self.CACHE_FILE_LOCK_TIMEOUT):
                            self._instances_to_cache_file(
                                cache_file, instances)
                    except Timeout:
                        logger.warning(
                            "Failed to acquire lock on dataset cache file within %d seconds. "
                            "Cannot write to cache.",
                            self.CACHE_FILE_LOCK_TIMEOUT,
                        )

            return AllennlpDataset(instances)
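The cache-reading branch above uses a probe pattern: briefly acquire the lock just to confirm that no writer is mid-write, release it immediately so readers are not serialized, then read. Stripped of the AllenNLP specifics, the pattern looks like the sketch below; read_cache, fallback and the timeout value are illustrative.

from filelock import FileLock, Timeout

CACHE_LOCK_TIMEOUT = 10  # seconds, illustrative

def read_cached(cache_file, read_cache, fallback):
    lock = FileLock(cache_file + ".lock", timeout=CACHE_LOCK_TIMEOUT)
    try:
        # Obtaining the lock means no writer holds it; release right away so
        # other readers are not blocked behind us.
        lock.acquire()
        lock.release()
        return read_cache(cache_file)
    except Timeout:
        # A writer is still holding the lock; skip the cache this time.
        return fallback()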
Example #11
def cache_adata(session_ID,
                adata=None,
                group=None,
                store_dir=None,
                store_name=None):
    if ((store_dir is None) or (store_name is None)):
        save_dir = save_analysis_path + str(session_ID) + "/"
        filename = save_dir + "adata_cache"
        chunk_factors = [150, 3]  #faster, hot storage
    else:
        save_dir = store_dir
        filename = save_dir + store_name
        chunk_factors = [3, 3]  #slower, cold storage

    if not (os.path.isdir(save_dir)):
        try:
            print("[DEBUG] making directory:" + str(save_dir))
            os.mkdir(save_dir)
        except:
            return None

    lock_filename = (save_analysis_path + str(session_ID) + "/" + "adata.lock")
    lock = FileLock(lock_filename, timeout=lock_timeout)

    compressor = Blosc(cname='blosclz', clevel=3, shuffle=Blosc.SHUFFLE)
    zarr_cache_dir = filename + ".zarr"
    attribute_groups = [
        "obs", "var", "obsm", "varm", "obsp", "varp", "layers", "X", "uns",
        "raw"
    ]
    extra_attribute_groups = ["X_dense", "layers_dense"]

    if (adata is None):  # then -> read it from the store
        if (os.path.exists(zarr_cache_dir) is True):
            store_store = zarr.DirectoryStore(zarr_cache_dir)
            store = zarr.open_group(store=store_store, mode='r')
            if (group in attribute_groups
                ):  # then -> return only that part of the object (fast)
                group_exists = adata_cache_group_exists(session_ID,
                                                        group,
                                                        store=store)
                if (group_exists is True):
                    ret = read_attribute(store[group])
                else:
                    ret = None
                #store_store.close()
                return ret
            elif (group is
                  None):  # then -> return the whole adata object (slow)
                #adata = ad.read_zarr(zarr_cache_dir)
                d = {}
                for g in attribute_groups:
                    if (g in store.keys()):
                        if (adata_cache_group_exists(session_ID,
                                                     g,
                                                     store=store)):
                            if (g in ["obs", "var"]):
                                d[g] = read_dataframe(store[g])
                            else:
                                d[g] = read_attribute(store[g])
                #store_store.close()
                adata = ad.AnnData(**d)
                if not (adata is None):
                    return adata
                else:
                    print("[ERROR] adata object not saved at: " +
                          str(filename))
                    return None
    else:  # then -> update the state dictionary and write adata to the store
        if (group is None):
            cache_state(session_ID,
                        key="# cells/obs",
                        val=len(adata.obs.index))
            cache_state(session_ID,
                        key="# genes/var",
                        val=len(adata.var.index))
            if ("total_counts" in adata.obs):
                cache_state(session_ID,
                            key="# counts",
                            val=int(np.sum(adata.obs["total_counts"])))
            else:
                cache_state(session_ID,
                            key="# counts",
                            val=int(np.sum(adata.X)))

        elif (group == "obs"):
            cache_state(session_ID, key="# cells/obs", val=len(adata.index))
        elif (group == "var"):
            cache_state(session_ID, key="# genes/var", val=len(adata.index))
        with lock:
            store_store = zarr.DirectoryStore(zarr_cache_dir)
            store = zarr.open_group(store=store_store, mode='a')
            if (group in attribute_groups
                ):  # then -> write only that part of the object (fast)
                if (group == "var"):
                    if (np.nan in adata.var.index):
                        adata.var.index = pd.Series(adata.var.index).replace(
                            np.nan, 'nanchung')
                        adata.var["gene_ID"] = pd.Series(
                            adata.var["gene_ID"]).replace(np.nan, 'nanchung')
                        adata.var["gene_ids"] = pd.Series(
                            adata.var["gene_ids"]).replace(np.nan, 'nanchung')
                write_attribute(
                    store, group,
                    adata)  # here "adata" is actually just a subset of adata

                # write dense copies of X or layers if they're what was passed
                if (group == "X"):
                    dense_name = "X_dense"
                    write_dense.delay(zarr_cache_dir, "X", dense_name,
                                      chunk_factors)

                if (group == "layers"):
                    for l in list(adata.keys(
                    )):  #layers was passed with parameter name "adata"
                        dense_name = "layers_dense/" + str(l)
                        write_dense.delay(zarr_cache_dir, "layers/" + l,
                                          dense_name, chunk_factors)
                #store_store.flush()
                #store_store.close()
                lock.release()
            else:
                # check that necessary fields are present in adata object
                if not ("leiden_n" in adata.obs):
                    if ("leiden" in adata.obs):
                        adata.obs["leiden_n"] = pd.to_numeric(
                            adata.obs["leiden"])
                if not ("cell_ID" in adata.obs):
                    adata.obs["cell_ID"] = adata.obs.index
                if not ("cell_numeric_index" in adata.obs):
                    adata.obs["cell_numeric_index"] = pd.to_numeric(
                        list(range(0, len(adata.obs.index))))
                for i in ["user_" + str(j) for j in range(0, 6)]:
                    if not (i in adata.obs.columns):
                        adata.obs[i] = ["0" for k in adata.obs.index.to_list()]
                if not ("gene_ID" in adata.var):
                    adata.var["gene_ID"] = adata.var.index

                # make sure that there are no "nan" genes in the var index
                if (np.nan in adata.var.index):
                    adata.var.index = pd.Series(adata.var.index).replace(
                        np.nan, 'nanchung')
                    adata.var["gene_ID"] = pd.Series(
                        adata.var["gene_ID"]).replace(np.nan, 'nanchung')
                    adata.var["gene_ids"] = pd.Series(
                        adata.var["gene_ids"]).replace(np.nan, 'nanchung')

                # save it all to the cache, but make dense copies of X and layers
                write_attribute(store, "obs", adata.obs)
                write_attribute(store, "var", adata.var)
                write_attribute(store, "obsm", adata.obsm)
                write_attribute(store, "varm", adata.varm)
                write_attribute(store, "obsp", adata.obsp)
                write_attribute(store, "varp", adata.varp)
                write_attribute(store, "uns", adata.uns)
                write_attribute(store, "raw", adata.raw)
                write_attribute(store, "X", adata.X)
                write_attribute(store, "layers", adata.layers)

                # making dense copies of X and layers (compressed to save disk space)

                dense_name = "X_dense"
                write_dense.delay(zarr_cache_dir, "X", dense_name,
                                  chunk_factors)

                for l in list(adata.layers.keys()):
                    dense_name = "layers_dense/" + str(l)
                    write_dense.delay(zarr_cache_dir, "layers/" + l,
                                      dense_name, chunk_factors)

                #store_store.flush()
                #store_store.close()
                lock.release()
            # set the file mod and access times to current time
            # then return adata as usual
            os.utime(zarr_cache_dir)
            return adata
Example #12
class WeblateLock:
    """Wrapper around Redis or file based lock."""
    def __init__(
        self,
        lock_path: str,
        scope: str,
        key: int,
        slug: str,
        cache_template: str = "lock:{scope}:{key}",
        file_template: Optional[str] = "{slug}-{scope}.lock",
        timeout: int = 1,
    ):
        self._timeout = timeout
        self._lock_path = lock_path
        self._scope = scope
        self._key = key
        self._slug = slug
        self._depth = 0
        default_cache = caches["default"]
        self.use_redis = isinstance(default_cache, RedisCache)
        if self.use_redis:
            # Prefer Redis locking as it works distributed
            self._lock = Lock(
                default_cache.client.get_client(),
                name=self._format_template(cache_template),
                expire=60,
                auto_renewal=True,
            )
        else:
            # Fall back to file based locking
            self._lock = FileLock(
                os.path.join(lock_path, self._format_template(file_template)),
                timeout=self._timeout,
            )

    def _format_template(self, template: str):
        return template.format(
            scope=self._scope,
            key=self._key,
            slug=self._slug,
        )

    def __enter__(self):
        self._depth += 1
        if self._depth > 1:
            return
        if self.use_redis:
            try:
                if not self._lock.acquire(timeout=self._timeout):
                    raise WeblateLockTimeout()
            except AlreadyAcquired:
                pass
        else:
            # Fall back to file based locking
            try:
                self._lock.acquire()
            except Timeout:
                raise WeblateLockTimeout()

    def __exit__(self, exc_type, exc_value, traceback):
        self._depth -= 1
        if self._depth > 0:
            return
        try:
            self._lock.release()
        except NotAcquired:
            # This can happen in case of overloaded server fails to renew the
            # lock before expiry
            pass

    @property
    def is_locked(self):
        return bool(self._depth)
Example #13
class MultiprocessRotatingFileHandler(TimedRotatingFileHandler):
    """
    Ref: https://github.com/di/mrfh/blob/master/mrfh/__init__.py
    * 修改doRollover逻辑,避免日志分片时删除已分片日志
    * 释放日志锁时关闭FileStream,解决文件重命名操作被拒绝的问题
    * 频繁reopen FileStream,造成严重的性能损耗
    * 使用FileLock替代threading.Lock,单进程条件下存在性能损失
    """
    def __init__(self, lock_file, *args, **kwargs):
        self.file_lock = FileLock(lock_file)
        super(MultiprocessRotatingFileHandler, self).__init__(*args, **kwargs)

    def _open_file(self):
        self.stream = self._open()

    def acquire(self):
        self.file_lock.acquire()
        if self.stream and self.stream.closed:
            self._open_file()

    def release(self):
        if self.stream and not self.stream.closed:
            self.stream.flush()
            self.stream.close()
        self.file_lock.release()

    def close(self):
        if self.stream and not self.stream.closed:
            self.stream.flush()
            self.stream.close()
        if self.file_lock.is_locked:
            self.file_lock.release()

    def doRollover(self):
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        current_time = int(time.time())
        dst_now = time.localtime(current_time)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            time_tuple = time.gmtime(t)
        else:
            time_tuple = time.localtime(t)
            dst_then = time_tuple[-1]
            if dst_now != dst_then:
                if dst_now:
                    addend = 3600
                else:
                    addend = -3600
                time_tuple = time.localtime(t + addend)
        dfn = self.rotation_filename(self.baseFilename + "." +
                                     time.strftime(self.suffix, time_tuple))

        # # Changed part
        # if os.path.exists(dfn):
        #     os.remove(dfn)
        if not os.path.exists(dfn) and os.path.exists(self.baseFilename):
            self.rotate(self.baseFilename, dfn)
        # # Changed part end

        if self.backupCount > 0:
            for s in self.getFilesToDelete():
                os.remove(s)
        if not self.delay:
            self.stream = self._open()
        new_rollover_at = self.computeRollover(current_time)
        while new_rollover_at <= current_time:
            new_rollover_at = new_rollover_at + self.interval
        # If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT'
                or self.when.startswith('W')) and not self.utc:
            dst_at_rollover = time.localtime(new_rollover_at)[-1]
            if dst_now != dst_at_rollover:
                if not dst_now:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:  # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                new_rollover_at += addend
        self.rolloverAt = new_rollover_at
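A minimal usage sketch for the handler above: every process that writes to the shared log builds the handler with the same lock file. The paths, rotation schedule and format are illustrative.

import logging
import os

handler = MultiprocessRotatingFileHandler(
    "/tmp/app.log.lock",   # lock_file shared by every process writing the log
    "/tmp/app.log",        # filename forwarded to TimedRotatingFileHandler
    when="midnight",
    backupCount=7,
)
handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))

logger = logging.getLogger("app")
logger.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info("hello from pid %s", os.getpid())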
Example #14
def download_image(url, write_path, lock_path):
    """
    download_image: This method is called asynchronously by executing threads. It performs several useful functions:
        * Determines if the supplied url has already been downloaded.
        * Determines if another thread is currently downloading the supplied URL.
        * Handles the locking and unlocking of the shared resource: image-lock files.
        * Prints HTTP errors received while attempting the download of an image.
        * Stores the downloaded data at the provided write_path if successfully obtained.
    :param url: The image URL which is to be downloaded.
    :param write_path: The path indicating where the downloaded image is to be stored.
    :param lock_path: The path indicating where the .lock file for the corresponding image is to be located.
    :return url: The url that this method was tasked with downloading. Upon completion, this method will have performed
        the tasks listed above or returned None: (indicating to the controlling ThreadPoolExecutor that this thread is
        dead and should be re-allocated with a new URL to download).
    """
    time_stamp = time.time()
    # Does the file already exist?
    if not os.path.isfile(write_path):
        # print('Working on URL: %r' % url)
        # Does the lock file already exist?
        if not os.path.isfile(lock_path):
            # Create an empty lockfile:
            open(lock_path, 'a').close()
        # Lock the lockfile (created unconditionally so that file_lock is defined
        # even when the lockfile already existed):
        file_lock = FileLock(lock_path, timeout=0.1)
        # print('Just created lockfile: %s The file is locked: %s' % (lock_path, file_lock.is_locked))
        # Try and acquire the file lock to see if another thread is already working on this url:
        try:
            # print('Attempting to acquire lockfile: %s. The file is now locked: %s' % (lock_path, file_lock.is_locked))
            with file_lock.acquire(timeout=0.1):
                # If this code executes the lockfile has been acquired and is now locked by this process instance.
                # print('Acquired lockfile %s. The file is locked: %s' % (lock_path, file_lock.is_locked))
                # Instantiate http object per urllib3:
                http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED',
                                           ca_certs=certifi.where())
                dl_response = http.request('GET', url)
                if dl_response.status == 200:
                    with open(write_path, 'wb') as fp:
                        fp.write(dl_response.data)
                    # print('Downloaded file: %s' % write_path)
                    return url, time_stamp
                else:
                    print(
                        'Error downloading accessURI %s. Received http response %d'
                        % (url, dl_response.status))
        except Timeout:
            print(
                'Attempt to acquire the file lock timed out. Perhaps another instance of this application '
                'currently holds the lock: %s' % lock_path)
        finally:
            # NOTE: This code is guaranteed to run before any return statements are executed,
            #   see: https://stackoverflow.com/questions/11164144/weird-try-except-else-finally-behavior-with-return-statements
            '''
            NOTE: Exiting from a 'with FileLock.acquire():' block will automatically release the lock. So uncomment the
                following lines of code only if you believe that the file lock should be released by all threads if a 
                Timeout exception occurs as well as a success:
            '''
            file_lock.release()
            # print('Released file lock: %s. The file is now locked: %s.' % (lock_path, file_lock.is_locked))
    else:
        print('The requested url: %r has already been downloaded!' %
              write_path)
Example #15
while 1:
    dblock = FileLock("db.lock", timeout=1)
    dblock.acquire()
    try:
        db = sqlite3.connect("./user_gamedata.db3")
        cursor = db.cursor()

        # DELETE timeout
        count = 0
        cursor.execute("SELECT * FROM lineupPool")
        results = cursor.fetchall()
        for record in results:
            if round(time.time()) - round(record[1]) > 15:
                count += 1
                cursor.execute("DELETE FROM lineupPool WHERE uid='" +
                               str(record[0]) + "'")
                db.commit()
        db.close()
        # Print message
        print(
            str(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())) + ": " +
            str(count) + " user(s) have been deleted")

    except:
        print("ERROR occured, please check")

    finally:
        dblock.release()
        time.sleep(10)
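The cleanup loop above builds its DELETE statement by string concatenation and repeats the lock/connect boilerplate on every pass. Below is a sketch of one pass with the lock as a context manager and a parameterized query; the table name follows the example, while the timestamp column name ts is an assumption (the original reads the column positionally as record[1]).

import sqlite3
import time
from filelock import FileLock, Timeout

def purge_stale_lineups(db_path="./user_gamedata.db3", max_age=15):
    try:
        with FileLock("db.lock", timeout=1):
            db = sqlite3.connect(db_path)
            try:
                cutoff = round(time.time()) - max_age
                cur = db.execute("DELETE FROM lineupPool WHERE ts < ?", (cutoff,))
                db.commit()
                deleted = cur.rowcount
            finally:
                db.close()
        print("%s: %d user(s) have been deleted"
              % (time.strftime("%Y-%m-%d %H:%M:%S"), deleted))
    except Timeout:
        print("db.lock is held by another process, skipping this pass")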
Example #16
class Container(ContainerView):
    """
    Containers are sets of {key: values}.
    Ideally we want to query not separate Containers
    but Unions of Containers instead.

    Each Container corresponds to a single RocksDB database.

    Containers satisfy Dict properties per run.
    """
    def __init__(self,
                 path: str,
                 read_only: bool = False,
                 wait_if_busy: bool = False) -> None:
        self.path = Path(path)
        self.read_only = read_only
        self._db_opts = dict(
            create_if_missing=True,
            paranoid_checks=False,
            keep_log_file_num=10,
            skip_stats_update_on_db_open=True,
            skip_checking_sst_file_sizes_on_db_open=True,
            max_open_files=-1,
            write_buffer_size=64 * 1024 * 1024,  # 64MB
            max_write_buffer_number=3,
            target_file_size_base=64 * 1024 * 1024,  # 64MB
            max_background_compactions=4,
            level0_file_num_compaction_trigger=8,
            level0_slowdown_writes_trigger=17,
            level0_stop_writes_trigger=24,
            num_levels=4,
            max_bytes_for_level_base=512 * 1024 * 1024,  # 512MB
            max_bytes_for_level_multiplier=8,
        )
        # opts.allow_concurrent_memtable_write = False
        # opts.memtable_factory = aimrocks.VectorMemtableFactory()
        # opts.table_factory = aimrocks.PlainTableFactory()
        # opts.table_factory = aimrocks.BlockBasedTableFactory(block_cache=aimrocks.LRUCache(67108864))
        # opts.write_buffer_size = 67108864
        # opts.arena_block_size = 67108864

        self._db = None
        self._lock = None
        self._wait_if_busy = wait_if_busy  # TODO implement
        self._lock_path: Optional[Path] = None
        self._progress_path: Optional[Path] = None
        # TODO check if Containers are reopenable

    @property
    def db(self) -> aimrocks.DB:
        if self._db is not None:
            return self._db

        logger.debug(f'opening {self.path} as aimrocks db')
        self.path.parent.mkdir(parents=True, exist_ok=True)
        locks_dir = self.path.parent.parent / 'locks'
        locks_dir.mkdir(parents=True, exist_ok=True)

        if not self.read_only:
            self._lock_path = locks_dir / self.path.name
            self._lock = FileLock(str(self._lock_path), timeout=10)
            self._lock.acquire()

        self._db = aimrocks.DB(str(self.path),
                               aimrocks.Options(**self._db_opts),
                               read_only=self.read_only)

        return self._db

    @property
    def writable_db(self) -> aimrocks.DB:
        db = self.db
        if self._progress_path is None:
            progress_dir = self.path.parent.parent / 'progress'
            progress_dir.mkdir(parents=True, exist_ok=True)
            self._progress_path = progress_dir / self.path.name
            self._progress_path.touch(exist_ok=True)
        return db

    def finalize(self, *, index: ContainerView):
        if not self._progress_path:
            return

        for k, v in self.items():
            index[k] = v

        self._progress_path.unlink(missing_ok=False)
        self._progress_path = None

    def close(self):
        if self._lock is not None:
            self._lock.release()
            self._lock = None
        if self._db is not None:
            # self._db.close()
            self._db = None

    def __del__(self):
        self.close()

    def preload(self):
        self.db

    def tree(self) -> 'TreeView':
        return TreeView(self)

    def batch_set(self,
                  key: bytes,
                  value: bytes,
                  *,
                  store_batch: aimrocks.WriteBatch = None):
        if store_batch is None:
            store_batch = self.writable_db
        store_batch.put(key=key, value=value)

    def __setitem__(self, key: bytes, value: bytes):
        self.writable_db.put(key=key, value=value)

    def __getitem__(self, key: bytes) -> bytes:
        return self.db.get(key=key)

    def __delitem__(self, key: bytes) -> None:
        return self.writable_db.delete(key)

    def batch_delete(self,
                     prefix: bytes,
                     store_batch: aimrocks.WriteBatch = None):
        if store_batch is None:
            batch = aimrocks.WriteBatch()
        else:
            batch = store_batch

        batch.delete_range((prefix, prefix + b'\xff'))

        if not store_batch:
            self.writable_db.write(batch)
        else:
            return batch

    def items(self, prefix: bytes = b'') -> Iterator[Tuple[bytes, bytes]]:
        # TODO return ValuesView, not iterator
        it: Iterator[Tuple[bytes, bytes]] = self.db.iteritems()
        it.seek(prefix)
        for key, val in it:
            if not key.startswith(prefix):
                break
            yield key, val

    def walk(self, prefix: bytes = b''):
        it: Iterator[Tuple[bytes, bytes]] = self.db.iteritems()
        it.seek(prefix)

        while True:
            try:
                key, val = next(it)
            except StopIteration:
                yield None
                break
            jump = yield key
            it.seek(jump)

    def iterlevel(self, prefix: bytes = b'') -> Iterator[Tuple[bytes, bytes]]:
        # TODO return ValuesView, not iterator
        # TODO broken right now
        it: Iterator[Tuple[bytes, bytes]] = self.db.iteritems()
        it.seek(prefix)

        key, val = next(it)

        while True:
            try:
                key, val = next(it)
            except StopIteration:
                break

            if not key.startswith(prefix):
                break

            next_range = key[:-1] + bytes([key[-1] + 1])
            it.seek(next_range)

            yield key, val

    def keys(self, prefix: bytes = b''):
        # TODO return KeyView, not iterator
        it: Iterator[Tuple[bytes, bytes]] = self.db.iterkeys()
        it.seek(prefix)
        for key in it:
            if not key.startswith(prefix):
                break
            yield key

    def values(self, prefix: bytes):
        # not a trivial thing to implement
        raise NotImplementedError

    def update(self) -> None:
        raise NotImplementedError

    def view(
            self, prefix: Union[bytes, Tuple[Union[int, str],
                                             ...]]) -> 'ContainerView':
        if not isinstance(prefix, bytes):
            prefix = E.encode_path(prefix)
        return PrefixView(prefix=prefix, container=self)

    def commit(self, batch: aimrocks.WriteBatch):
        self.writable_db.write(batch)

    def next_key(self, prefix: bytes = b'') -> bytes:
        it: Iterator[bytes] = self.db.iterkeys()
        it.seek(prefix + b'\x00')
        key = next(it)

        if not key.startswith(prefix):
            raise KeyError

        return key

    def next_value(self, prefix: bytes = b'') -> bytes:
        key, value = self.next_key_value(prefix)
        return value

    def next_key_value(self, prefix: bytes = b'') -> Tuple[bytes, bytes]:
        it: Iterator[Tuple[bytes, bytes]] = self.db.iteritems()
        it.seek(prefix + b'\x00')

        key, value = next(it)

        if not key.startswith(prefix):
            raise KeyError

        return key, value

    def prev_key(self, prefix: bytes = b'') -> bytes:
        key, value = self.prev_key_value(prefix)
        return key

    def prev_value(self, prefix: bytes = b'') -> bytes:
        key, value = self.prev_key_value(prefix)
        return value

    def prev_key_value(self, prefix: bytes) -> Tuple[bytes, bytes]:
        it: Iterator[Tuple[bytes, bytes]] = self.db.iteritems()
        it.seek_for_prev(prefix + b'\xff')

        key, value = it.get()

        return key, value
Example #17
    if (args.show):
        entryPrint(entry)

    if (args.execute is not None):
        if (entryExecute(entry, args.execute) != True):
            ForceExit('Execution failed!')

# 4. Write entries if were changed
# #####################################################33
if (dataIsChanged == True):
    jsonWrite(configFile, entries)

if (recipientsIsChanged == True):
    jsonWrite(recipientsFile, recipients)

# 5. Finish execution
if (args.execute is not None):
    # If report has something
    if (ReportsIsAnyting(reportFile)):
        # Report also assets
        ReportAssets(reportFile)
        # Generate HTML & PDF
        ReportsToPDF(reportFile)
        # Send emails to all recipients
        for i in range(len(recipients)):
            ReportsMail(recipients[i]['address'], reportFile + '.html')

lockRecipents.release()
lockConfig.release()
Example #18
    exn_log_path = ".."+ os.sep + "logs"+ os.sep +"ib_exn_log.log"
    exn_log_lock_path = ".."+ os.sep + "logs"+ os.sep +"ib_exn_log.log.lock"
    exn_log_lock = FileLock(exn_log_lock_path)

    pos = Position()
    #bot = InterfaceBot(interface_pipe_path, lock, log)
    bot = InterfaceBot(interface_pipe_path, lock)

    try:
        while True:
            try:
                instr = input()
            except Exception as e:
                sys.stderr.write('error reading input')
            outstr = parse_command(instr, bot, pos)
            sys.stdout.write(outstr)
            sys.stdout.flush()
    except Exception as e:
        try:
            exn_log_lock.acquire()
            exn_log = open(exn_log_path, "a+")
            exn_log.write(str(e) + "\n")
            exn_log.write(str(traceback.format_exc()))
            exn_log.close()
        finally:
            exn_log_lock.release()
        #log.write(str(e) + "\n")
        #log.write(str(traceback.format_exc()))
        #log.flush()
        #log.close()
            
Example #19
class Pack():
    def __init__(self, config):
        self.config = config

        self.cwd = pathlib.WindowsPath(self.config['cwd'])

        self.status = 0

        # Prepare Tor
        self.password = uuid.uuid4().hex
        self.relay = tor.Tor(self.config['tor'], self.config['data'])

        # torrc
        torrc = pathlib.Path(config['data']) / 'torrc' / 'torrc'
        self.torrc = torrc.resolve()

        # The Onion Box
        self.box = box.TheOnionBox(config)

        # Stop signal, to terminate our run_loop
        self.stop = threading.Event()

        # the Tray icon
        self.tray = pystray.Icon('theonionpack', title='The Onion Pack')

        self.tray.icon = Image.open(str(self.cwd / 'icons' / 'top16.ico'))

        self.tray.menu = pystray.Menu(
            pystray.MenuItem('Monitor...',
                             action=self.on_monitor,
                             default=True), pystray.Menu.SEPARATOR,
            pystray.MenuItem(
                'Relay Control',
                pystray.Menu(
                    pystray.MenuItem('Edit configuration file...',
                                     action=self.on_open_torrc),
                    pystray.MenuItem('Show logfile...',
                                     action=self.on_show_messages),
                    pystray.Menu.SEPARATOR,
                    pystray.MenuItem('Reload relay configuration',
                                     action=self.on_reload_config))),
            pystray.Menu.SEPARATOR,
            pystray.MenuItem('Stop!', action=self.on_quit))

    def run(self):

        self.lock = FileLock(str(self.cwd / 'theonionpack.lock'))
        running = False

        try:
            with self.lock.acquire(timeout=0):

                # run The Onion Box
                tob = self.box.run(password=self.password)

                # run Tor
                self.relay.run(password=self.password)

                running = True

                # run the Tray icon
                # !! This is a blocking call !!
                self.tray.run(self.run_loop)

                # the block may be released by self.on_quit, issued by an operator via the Tray
                # ... or by a system command (like SIGTERM).

        except Timeout:
            MBox(
                "It seems like another instance of The Onion Pack is already running. Aborting launch procedure...",
                style=0x10)

        finally:
            self.lock.release()

        if running:

            # Stop theonionbox
            self.box.stop()

            # Tor has OwningControllerProcess defined ... thus will terminate as soon as we're done.

            if self.status == 1:
                MBox(
                    "Our instance of TheOnionBox terminated.\r\nThus we have to terminate as well! Sorry...",
                    style=0x10)

        sys.exit(0)

    def run_loop(self, icon: pystray.Icon):

        icon.visible = True

        while self.stop.is_set() is False:

            # quit if TheOnionBox died!
            if self.box.poll() is not None:

                # indicate that the Box terminated!
                self.status += 1
                self.do_quit()
                return

            self.relay.collect_messages()
            sleep(1)

    # Tray menu actions
    def on_monitor(self, icon, item):
        webbrowser.open_new_tab('http://127.0.0.1:8080/')

    def on_quit(self, icon, item):
        self.do_quit()

    def do_quit(self):

        # Stop the run_loop
        self.stop.set()

        # Stop the Tray
        self.tray.stop()

        # cleanup is being performed in self.run()

    # def get_tor_messages(self):
    #     while True:
    #         self.relay.collect_messages()
    #         sleep(5)

    def on_show_messages(self, icon, item):
        fd, name = tempfile.mkstemp(prefix="Tor_", suffix='.html', text=True)
        with open(fd, 'w') as tmp:
            tmp.write('<br>'.join(self.relay.messages))
        webbrowser.open_new_tab(name)

    def on_open_torrc(self):
        def get_default_windows_app(suffix):

            class_root = winreg.QueryValue(winreg.HKEY_CLASSES_ROOT, suffix)
            with winreg.OpenKey(
                    winreg.HKEY_CLASSES_ROOT,
                    r'{}\shell\open\command'.format(class_root)) as key:
                command = winreg.QueryValueEx(key, '')[0]
                return command.split(' ')[0]

        if not self.torrc.exists():
            self.torrc.parent.mkdir(parents=True, exist_ok=True)
            self.torrc.touch()

        path = get_default_windows_app('.txt')
        subprocess.Popen([os.path.expandvars(path), str(self.torrc)])

    def on_reload_config(self):

        controller = None
        try:
            controller = SimplePort('127.0.0.1', 9051)
        except Exception:
            MBox('Failed to connect to the local Tor relay.', style=0x10)

        if controller is None:
            return

        ok = ''
        try:
            ok = controller.msg(f'AUTHENTICATE "{self.password}"')
        except:
            if ok != '250 OK':
                MBox('Failed to authenticate against local Tor relay.',
                     style=0x10)
                controller.shutdown()
                return

        ok = ''
        try:
            ok = controller.msg("SIGNAL RELOAD")
        except:
            if ok != '250 OK':
                MBox('Failed to reload the Tor relay configuration.',
                     style=0x10)

        controller.shutdown()
        return
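The with self.lock.acquire(timeout=0) / except Timeout combination in run() is the usual single-instance guard. Reduced to its essentials it looks like the sketch below, with the actual work passed in as a callable and the lock path illustrative.

from filelock import FileLock, Timeout

def run_single_instance(work, lock_path="theonionpack.lock"):
    lock = FileLock(lock_path)
    try:
        with lock.acquire(timeout=0):   # fail immediately if another instance holds the lock
            work()                      # the lock is released when the block exits
    except Timeout:
        print("Another instance is already running; aborting.")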
Example #20
def main(argv):
    final_gen = False
    version = None
    start_version = None
    fltg_root_dir = None
    output_root_dir = None
    dump_all_dir = None
    global dev_included
    global variant_included
    optimize = True

    dev_included = []
    variant_included = []
    try:
        sdk = os.environ['SDK']
    except:
        print('environment variable SDK required')
        sys.exit(-1)

    # Check if there are any input parameters
    try:
        opts, args = getopt.getopt(argv, "hfnv:s:d:o:g:c:t:", ["help"])
    except getopt.GetoptError:
        usage()
        sys.exit(-1)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit(0)
        elif opt == '-f':
            final_gen = True
        elif opt == '-v':
            version = arg
        elif opt == '-s':
            start_version = arg
        elif opt == '-d':
            fltg_root_dir = arg
        elif opt == '-o':
            output_root_dir = arg
        elif opt == '-g':
            dump_all_dir = arg
        elif opt == '-n':
            optimize = False
        elif opt == '-c':
            if arg.lower() not in dev_included:
                dev_included.append(arg.lower())
        elif opt == '-t':
            if arg.lower() not in variant_included:
                variant_included.append(arg.lower())
            else:
                print('Variant param must be preceded by the device param -c')
                usage()
                sys.exit(-1)
        else:
            usage()
            sys.exit(-1)

    if version == None:
        print('Error: version must be defined')
        usage()
        sys.exit(-1)

    if output_root_dir == None:
        output_root_dir = sdk + ISSU_DB_DIR

    clean_python_default_boolean()
    lock_file = output_root_dir + '/issu_db.lock'

    if final_gen:
        if start_version == None:
            print('missing -s parameter')
            usage()
            sys.exit(-1)
        print('Generating final LTID DB')
        try:
            lock = FileLock(lock_file, 1800)  # wait up to 30 min
            lock.acquire()
        except Timeout:
            print('Failed to acquire ISSU DB lock')
            sys.exit(-1)
        rv = prepare_final_db(output_root_dir, start_version)
        if rv != 0:
            lock.release()
            sys.exit(rv)

        if start_version != version:
            print('Resolving ENUM differences')
            rv = prepare_enum_defs(output_root_dir, start_version, version,
                                   optimize)
            if rv != 0:
                lock.release()
                sys.exit(rv)

        # Parse component action YAML
        rv = parse_comp_actions(sdk, sdk + ISSU_DLL_DIR)
        lock.release()
        sys.exit(rv)

    if version == None:
        print('-v required')
        usage()
        sys.exit(-1)

    # Open the output file
    v = version.replace('.', '_')
    out_f = output_root_dir + '/ver_' + version + '/ltids_db_v' + v + '.ltdb'
    output_dir = output_root_dir + '/ver_' + version + '/'
    try:
        lock = FileLock(lock_file, 1800)  # wait up to 30 min
        lock.acquire()
    except Timeout:
        print('Failed to acquire ISSU DB lock')
        sys.exit(-1)
    try:
        fh = open(out_f, "w")
    except:
        print('Failed to create file ', out_f)
        lock.release()
        sys.exit(-1)

    write_out_header(fh)

    if fltg_root_dir == None:
        fltg_root_dir = sdk + FLTG_INPUT_DIR
    rv = parse_ltid_yml_files(fltg_root_dir, output_dir, dump_all_dir, version,
                              optimize, fh)

    fh.close()
    lock.release()
    sys.exit(rv)
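The example above pairs every error path with an explicit lock.release() call before exiting. As a minimal sketch (assuming the filelock package that provides this FileLock/Timeout pair; the helper name and the work callback are illustrative only), the same guard can be written as a context manager so the lock is released on any exit path:

from filelock import FileLock, Timeout

def run_under_issu_lock(lock_file, work, timeout=1800):
    # Wait up to `timeout` seconds for the ISSU DB lock, run `work()` while
    # holding it, and release the lock even if `work()` raises.
    try:
        with FileLock(lock_file, timeout=timeout):
            return work()
    except Timeout:
        print('Failed to acquire ISSU DB lock')
        return -1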
Example n. 21
0
 def handle_client(self, conn, addr):
     """receive request from the client and send response"""
     self.print_debug("Connection established with client " + addr[0] + ":" + str(addr[1]) + " at " +
                      time.strftime("%a, %d %b %Y %I:%M:%S %p %Z", time.gmtime()))
     try:
         while True:
             _receive_data = []
             _tmp = conn.recv(2048)
             if _tmp:
                 _tmp = _tmp.decode("utf-8")  # convert to text string
                 _receive_data.append(_tmp)  # append to receive array
             else:
                 break
             str_received_data = ''
             for data in _receive_data:
                 str_received_data += data  # convert received array to string
             print(str_received_data)
             first_line = str_received_data.split("\r\n", 1)
             req_type = first_line[0].split(" ")[0]
             uri = first_line[0].split(" ")[1]
             headers = {}
             header_body = str_received_data.split("\r\n\r\n")
             count = 0
             for line in header_body[0].split("\r\n"):
                 if count != 0:
                     headers[line.split(":")[0].strip()] = line.split(":")[1].strip()
                 else:
                     count = count + 1
             count = -1
             body = ''
             if len(header_body) > 1:
                 if header_body[0].find("Content-Length:") > -1:
                     last = header_body[0].find("\r\n", header_body[0].find("Content-Length:"))
                     --last
                     count = int(header_body[0][header_body[0].find("Content-Length:") + 16:last])
                     body = ''
                     for line in header_body[1]:
                         if count > 0:
                             body += line
                         --count
             http_object = HTTPObject(req_type, uri, headers, body)
             # Sending code
             message = 'HTTP/1.1 '
             if ".." in uri:
                 self.print_debug("Access Denied " + uri)
                 message += "403 Forbidden" + "\r\n"
                 message += "Content-type: text/plain" + "\r\n"
                 message += "Content-Disposition: inline" + "\r\n\r\n"
             else:
                 if HTTPObject.get_req_type(http_object).lower() == "get":
                     try:
                         if HTTPObject.get_uri(http_object) == "/":
                             message += "200 OK" + "\r\n"
                             message += "Date: " + formatdate(timeval=None, localtime=False, usegmt=True) + "\r\n"
                             message += "Server:" + socket.gethostname() + "\r\n"
                             self.print_debug("GET DIR " + os.getcwd().replace("\\", "/") + "/" +
                                              getattr(self._server_obj, '_path') + HTTPObject.get_uri(http_object))
                             working_dir = os.getcwd().replace("\\", "/") + "/" + getattr(self._server_obj, '_path') \
                                           + HTTPObject.get_uri(http_object)
                             list_files = os.listdir(working_dir)
                             str_files = ''
                             for file in list_files:
                                 str_files += file + "\r\n"
                             message += "Content-Length: " + str(len("{\r\n" + str_files + "}")) + "\r\n"
                             message += "Content-Type: text/directory" + "\r\n"
                             if "Content-Disposition" in HTTPObject.get_headers(http_object):
                                 if HTTPObject.get_headers(http_object)["Content-Disposition"] == "attachment":
                                     message += "Content-Disposition: attachment/output" + "\r\n"
                                 else:
                                     message += "Content-Disposition: " + HTTPObject.get_headers(http_object) \
                                                 ["Content-Disposition"] + "\r\n"
                             elif "inline" in HTTPObject.get_uri(http_object):
                                 message += "Content-Disposition: inline" + "\r\n"
                             else:
                                 message += "Content-Disposition: attachment/output" + "\r\n"
                             message += "\r\n"
                             message += "{\r\n" + str_files + "}"
                         else:
                             working_file = os.getcwd().replace("\\", "/") + "/" + getattr(self._server_obj, '_path') \
                                            + HTTPObject.get_uri(http_object)
                             if "Content-Type" in HTTPObject.get_headers(http_object):
                                 if HTTPObject.get_headers(http_object)["Content-Type"] == "text/plain":
                                     if not working_file.endswith(".txt"):
                                         working_file += ".txt"
                                 elif HTTPObject.get_headers(http_object)["Content-Type"] == "text/html":
                                     if not working_file.endswith(".html"):
                                         working_file += ".html"
                                 elif HTTPObject.get_headers(http_object)["Content-Type"] == "text/html":
                                     if not working_file.endswith(".html"):
                                         working_file += ".html"
                                 elif HTTPObject.get_headers(http_object)["Content-Type"] == "text/xml":
                                     if not working_file.endswith(".xml"):
                                         working_file += ".xml"
                                 elif HTTPObject.get_headers(http_object)["Content-Type"] == "application/json":
                                     if not working_file.endswith(".json"):
                                         working_file += ".json"
                             self.print_debug("GET File " + working_file)
                             if not os.path.isfile(working_file):
                                 message += "404 Not Found" + "\r\n"
                                 message += "Date: " + formatdate(timeval=None, localtime=False,
                                                                  usegmt=True) + "\r\n"
                                 message += "Server: " + socket.gethostname() + "\r\n"
                                 if "Content-Disposition" in HTTPObject.get_headers(http_object):
                                     if HTTPObject.get_headers(http_object)["Content-Disposition"] == "attachment":
                                         message += "Content-Disposition: attachment/output" + "\r\n"
                                     else:
                                         message += "Content-Disposition: " + HTTPObject.get_headers(http_object) \
                                                     ["Content-Disposition"] + "\r\n"
                                 elif "inline" in HTTPObject.get_uri(http_object):
                                     message += "Content-Disposition: inline" + "\r\n"
                                 else:
                                     message += "Content-Disposition: inline" + "\r\n"
                                 message += "\r\n"
                             else:
                                 message += "200 OK" + "\r\n"
                                 message += "Date: " + formatdate(timeval=None, localtime=False,
                                                                  usegmt=True) + "\r\n"
                                 message += "Server: " + socket.gethostname() + "\r\n"
                                  with open(working_file, 'r') as fr:
                                      file_data = fr.read()
                                 if "Content-Type" in HTTPObject.get_headers(http_object):
                                     message += "Content-Type: " + HTTPObject.get_headers(http_object)[
                                                 "Content-Type"] + "\r\n"
                                 else:
                                     if working_file.endswith(".txt"):
                                         message += "Content-Type: text/plain" + "\r\n"
                                     elif working_file.endswith(".html"):
                                         message += "Content-Type: text/html" + "\r\n"
                                     elif working_file.endswith(".xml"):
                                         message += "Content-Type: text/xml" + "\r\n"
                                     elif working_file.endswith(".json"):
                                         message += "Content-Type: application/json" + "\r\n"
                                     else:
                                         message += "Content-Type: text/plain" + "\r\n"
                                 message += "Content-Length: " + str(len(file_data)) + "\r\n"
                                 if "Content-Disposition" in HTTPObject.get_headers(http_object):
                                     if HTTPObject.get_headers(http_object)["Content-Disposition"] == "attachment":
                                         message += "Content-Disposition: attachment/output" + "\r\n"
                                     else:
                                         message += "Content-Disposition: " + HTTPObject.get_headers(http_object) \
                                                     ["Content-Disposition"] + "\r\n"
                                 elif "inline" in HTTPObject.get_uri(http_object):
                                     message += "Content-Disposition: inline" + "\r\n"
                                 else:
                                     message += "Content-Disposition: inline" + "\r\n"
                                 message += "\r\n"
                                 message += file_data
                     except OSError as msg:
                         self.print_debug(msg)
                         message = "HTTP/1.1 400 Bad Request\r\n\r\n"
                         message += msg.strerror
                 elif HTTPObject.get_req_type(http_object).lower() == "post":
                     try:
                         if HTTPObject.get_uri(http_object) != "":
                             working_file = os.getcwd().replace("\\", "/") + "/" + getattr(self._server_obj, '_path') \
                                            + HTTPObject.get_uri(http_object)
                             if "Content-Type" in HTTPObject.get_headers(http_object):
                                 if HTTPObject.get_headers(http_object)["Content-Type"] == "text/plain":
                                     if not working_file.endswith(".txt"):
                                         working_file += ".txt"
                                 elif HTTPObject.get_headers(http_object)["Content-Type"] == "text/html":
                                     if not working_file.endswith(".html"):
                                         working_file += ".html"
                                 elif HTTPObject.get_headers(http_object)["Content-Type"] == "text/html":
                                     if not working_file.endswith(".html"):
                                         working_file += ".html"
                                 elif HTTPObject.get_headers(http_object)["Content-Type"] == "text/xml":
                                     if not working_file.endswith(".xml"):
                                         working_file += ".xml"
                                 elif HTTPObject.get_headers(http_object)["Content-Type"] == "application/json":
                                     if not working_file.endswith(".json"):
                                         working_file += ".json"
                             self.print_debug("POST File " + working_file)
                             path = pathlib.Path(working_file)
                             path.parent.mkdir(parents=True, exist_ok=True)
                             lock_path = working_file + ".lock"
                             my_lock = FileLock(lock_path, timeout=2)
                             my_lock.acquire()
                             try:
                                 open(working_file, "a").write(HTTPObject.get_data(http_object) + "\n")
                             finally:
                                 my_lock.release()
                             message += "200 OK" + "\r\n"
                             message += "Date: " + formatdate(timeval=None, localtime=False, usegmt=True) + "\r\n"
                             message += "Server: " + socket.gethostname() + "\r\n"
                             if "Content-Type" in HTTPObject.get_headers(http_object):
                                 message += "Content-Type: " + HTTPObject.get_headers(http_object)[
                                     "Content-Type"] + "\r\n"
                             else:
                                 if working_file.endswith(".txt"):
                                     message += "Content-Type: text/plain" + "\r\n"
                                 elif working_file.endswith(".html"):
                                     message += "Content-Type: text/html" + "\r\n"
                                 elif working_file.endswith(".json"):
                                     message += "Content-Type: application/json" + "\r\n"
                                 elif working_file.endswith(".xml"):
                                     message += "Content-Type: text/xml" + "\r\n"
                                 else:
                                     message += "Content-Type: text/plain" + "\r\n"
                             message += "Content-Length: " + str(len(HTTPObject.get_data(http_object))) + "\r\n"
                             if "Content-Disposition" in HTTPObject.get_headers(http_object):
                                 if HTTPObject.get_headers(http_object)["Content-Disposition"] == "attachment":
                                     message += "Content-Disposition: attachment/output" + "\r\n"
                                 else:
                                     message += "Content-Disposition: " + HTTPObject.get_headers(http_object) \
                                         ["Content-Disposition"] + "\r\n"
                             elif "inline" in HTTPObject.get_uri(http_object):
                                 message += "Content-Disposition: inline" + "\r\n"
                             else:
                                 message += "Content-Disposition: inline" + "\r\n"
                             message += "\r\n"
                             message += HTTPObject.get_data(http_object)
                     except OSError as msg:
                         self.print_debug(msg)
                         message = "HTTP/1.1 400 Bad Request\r\n\r\n"
                         message += msg.strerror
             print("Sending response message to client " + addr[0])
             print(message)
             conn.sendall(bytes(message, 'utf-8'))
             self.print_debug("Connection closed with client " + addr[0] + ":" + str(addr[1]) + " at " +
                              time.strftime("%a, %d %b %Y %I:%M:%S %p %Z", time.gmtime()))
             break
     finally:
         conn.close()
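The POST branch above serializes writers by creating a per-file ".lock" next to the target file. A minimal sketch of that pattern in isolation (assuming the filelock package; the helper name is illustrative):

from filelock import FileLock

def append_under_lock(path, data, timeout=2):
    # One ".lock" per target file keeps concurrent POSTs from interleaving
    # their appends; the context manager releases the lock on any exit path.
    with FileLock(path + ".lock", timeout=timeout):
        with open(path, "a") as fh:
            fh.write(data + "\n")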
Example n. 22
0
class CandleConnector():
    def __init__(self):
        self.lock = FileLock("config.csv.lock")
        # make dict here that stores the amount for each coin
        self.config = "config.csv"
        self.candles = candles.BinaceConnector()

    def readConfig(self):
        self.lock.acquire()
        df = pd.read_csv(self.config,
                         encoding='utf8',
                         delimiter=',',
                         names=[
                             'coin', 'capital', 'starting', 'limit',
                             'currentPrice', 'autobought', 'takeprofit',
                             'updatetime', 'orderid'
                         ])
        self.lock.release()
        df.set_index('coin', inplace=True)
        return df

    #get the current config
    def getCoinConfigData(self, coin):
        df = self.readConfig()
        return df.loc[coin]

    def getBuyPower(self):
        return float(self.candles.getUSD())

    #save a new copy of the config
    def setCoinConfigData(self, df):
        self.lock.acquire()
        df.to_csv(self.config, mode='w', header=False, index=True)
        self.lock.release()

    def getAutoBoughtAmount(self, coin):
        return float(self.getCoinConfigData(coin)['autobought'])

    # helper for buying a number of coins at current price
    def orderNumber(self, coin, number):
        return (self.candles.buyMarket(coin, number))

    # gives you a quote for a coin
    def getQuote(self, coin):
        return float(self.candles.getCoinPrice(coin))

    # set an order for an number amount
    def orderAmount(self, coin, amount):
        return (self.candles.order_buy_crypto_by_price(coin, amount))

    # write out to a log file
    def logit(self, message, destination):
        with open(f"logdata/{destination}.txt", "a") as f:
            f.write(message)
            f.write("\n")

    def saveCoinBuyData(self, coin, price, amount, setcap=None):
        df = self.readConfig()
        if setcap is not None:
            df.at[coin, 'capital'] = setcap
        df.at[coin, 'starting'] = price
        df.at[coin, 'autobought'] = amount
        df.at[coin, 'limit'] = price * .98
        self.setCoinConfigData(df)

    # check to see how much can be purchased with the current capital
    # then purchase that amount of coins
    def buyNow(self, coin, strat=None):
        coinsCapital = self.getCoinConfigData(coin)['capital']

        avalFunds = self.getBuyPower()
        if coinsCapital > avalFunds:
            return 0

        price = self.getQuote(coin)
        #TODO add logic that allows for multiple strategies that will
        #allow for different allocations of the starting capital
        BOUGHT = float(coinsCapital / price)
        minOrder = None
        minNot = None
        print(BOUGHT)
        #grab the trading rules for the coin
        for filt in (self.candles.getCoinInfo(coin)['filters']):
            if filt['filterType'] == "LOT_SIZE":
                minOrder = float(filt['minQty'])
            if filt['filterType'] == 'MIN_NOTIONAL':
                minNot = float(filt['minNotional'])
        mod = BOUGHT % minOrder

        #make sure the amount we are buying is standardized for Binance
        if mod:
            BOUGHT = BOUGHT - mod

        #this needs to get the precision from the filter
        BOUGHT = round(BOUGHT, 8)
        print(BOUGHT)
        if (BOUGHT * price) > minNot:
            order = self.orderNumber(coin, BOUGHT)
            self.saveCoinBuyData(coin, price, BOUGHT)
            self.logit(f"BUYING {order}", coin)
        else:
            self.logit(
                f"Failed to buy {BOUGHT}, {coin}. Due to minNotional of {minNot}",
                coin)
            BOUGHT = None
        return BOUGHT

    #sell an amount at current price
    def sellNow(self, coin):
        #get the amount the bot bought
        amount = self.getAutoBoughtAmount(coin)
        if amount > 0:
            # self.candles.testOrder(coin, SIDE_SELL, amount)
            sellorder = self.candles.sellMarket(coin, amount)
            orderID = sellorder['clientOrderId']
            status = self.candles.checkStatus(coin, orderID)
            timeout = 5
            time.sleep(2)
            #check a couple of times to make sure we are selling
            while status != 'FILLED':
                if timeout > 5:
                    timeout = 0
                    self.candles.cancelOrder(coin, orderID)
                status = self.candles.checkStatus(coin, orderID)
                timeout += 1
                time.sleep(2)

            # save the data for analysis later and reset the bot coin's config
            self.logit(f"SELLING DUE TO STRAT {sellorder}", coin)
            sellprice = float(sellorder['fills'][0]['price']) * amount
            print(sellprice)
            self.saveCoinBuyData(coin, 0, 0, setcap=sellprice)
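readConfig() and setCoinConfigData() each take and drop the lock separately, so a read-modify-write such as saveCoinBuyData() is not atomic: two processes can interleave between the read and the write and one update can be lost. A minimal sketch (assuming pandas and the filelock package; the helper and callback names are illustrative) that holds a single lock across the whole update:

import pandas as pd
from filelock import FileLock

def update_config_atomically(path, mutate, timeout=10):
    # Hold one lock across read, mutate and write so concurrent processes
    # cannot interleave between the CSV read and the CSV write.
    with FileLock(path + ".lock", timeout=timeout):
        df = pd.read_csv(path, encoding='utf8', delimiter=',',
                         names=['coin', 'capital', 'starting', 'limit',
                                'currentPrice', 'autobought', 'takeprofit',
                                'updatetime', 'orderid'])
        df.set_index('coin', inplace=True)
        mutate(df)  # e.g. a callback that sets df.at[coin, 'capital']
        df.to_csv(path, mode='w', header=False, index=True)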
Example n. 23
0
class Db(object):
    """
        Basic file storage for a JSON object.
    """
    def where_not_in(self, column, *values):
        """
        Selects results which do not match the given column/values expression.

        Args:
            column (str): The named field to test against.

            values (str): Values to search for.  A record will not be returned if the field named
                in *column* is contained in the list of values given.
        """
        return [x for x in self.data if x[column] not in values]

    def all_jobs(self):
        "Retrieve all records."
        return self.data

    def values(self):
        "Synonym for #all_jobs."
        return self.data

    def find_by_remote_job_id(self, job_id):
        """
        Finds a record by the id number for that job on the remote cluster.

        Args:
            job_id(str): the job id for this job as scheduled or running on the remote.

        Returns:
            A :class:`dict` object containing various attributes of the job on the local and the remote::

                {  
                    local_id: '123123',
                    local_state: 3,
                    local_wd: '/var/sge_working_dir',
                    remote_id: '1234324',
                    remote_state: 2,
                    remote_wd: '/var/remote_working_dir',
                    last_checked: '2017-12-27 16:35:30.898984'
                }
        
        """
        return next((x for x in self.values() if x[R_ID] == job_id), None)

    def find_by_local_job_id(self, job_id):

        return next((x for x in self.values() if x[L_ID] == job_id), None)

    def insert(self, local_id, local_state, local_wd, remote_id, remote_state,
               remote_wd):
        self.data.append(
            dict(local_id=local_id,
                 local_state=local_state,
                 local_wd=local_wd,
                 remote_id=remote_id,
                 remote_state=remote_state,
                 remote_wd=remote_wd,
                 last_checked=str(datetime.now())))

    def update(self, job):
        job['last_checked'] = str(datetime.now())

    def delete(self, job):
        return self.data.remove(job)

    def __init__(self):
        self.lock = FileLock(LOCKFILE)
        self.data = None

    def save(self):
        with open(DBFILE, 'w') as file:
            file.write(dumps(self.data))

    def open(self):
        self.lock.acquire(timeout=2)
        with open(DBFILE) as file:
            self.data = loads(file.read())

    def close(self):
        self.save()
        self.data = None
        self.lock.release()

    def __enter__(self):
        self.open()
        return self

    def __exit__(self, x, y, z):
        "Exit method for resource"
        self.close()
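A minimal usage sketch for the class above, assuming the module-level LOCKFILE and DBFILE constants it references and an existing JSON list stored in DBFILE:

with Db() as db:
    job = db.find_by_local_job_id('123123')
    if job is not None:
        db.update(job)  # refresh last_checked
    db.insert('123124', 1, '/var/sge_working_dir', '4321', 2, '/var/remote_working_dir')
# __exit__() saves the data back to DBFILE and releases the file lock.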
Example n. 24
0
def main(argv):
    current_ver = ''
    start_ver = ''
    version_list = []
    rev_version_list = []  # Version list in reverse order
    fltg_root_dir = None

    try:
        opts, args = getopt.getopt(argv, "hv:s:d:", ["help"])
    except getopt.GetoptError:
        usage()
        sys.exit(2)
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            usage()
            sys.exit(0)
        elif opt == '-v':
            current_ver = arg
        elif opt == '-s':
            start_ver = arg
        elif opt == '-d':
            fltg_root_dir = arg
        else:
            usage()
            sys.exit(0)

    if start_ver == '':
        start_ver = current_ver

    try:
        sdk = os.environ['SDK']
    except KeyError:
        print("SDK Environment variable must be defined")
        sys.exit(-1)

    try:
        issu_db_dir = os.environ['ISSU_DB'] + '/'
    except KeyError:
        issu_db_dir = sdk + ISSU_DB_DEF_DIR

    base_dir = issu_db_dir
    ver_list_c = issu_version_list.issu_version_list(base_dir,
                                                     start_ver,
                                                     current_ver)
    rev_version_list = ver_list_c.get_ver_list()
    version_list = ver_list_c.get_ver_list(reverse = False)

    # Build the base dir
    if current_ver == '':
        base_dir += 'current'
    else:
        base_dir += 'ver_' + current_ver
    target_dir = base_dir

    # Lock the DB to enable multiple simultaneous builds. If the lock is taken,
    # another build is generating the DB; let it finish before we move on to
    # the next phase of compiling the DLL.
    lock_file = issu_db_dir + 'issu_db.lock'
    try:
        lock = FileLock(lock_file, 1800)
        lock.acquire()  # wait up to 30 min
    except Timeout:
        print('Failed to acquire ISSU DB lock')
        sys.exit(-1)

    db_class = issu_db_gen.global_struct_db(base_dir, None)
    if not db_class.generate_c_db(current_ver, version_list):
        print('Failed to generate DB for version %s' % current_ver)

    base_dir = issu_db_dir + 'ver_'
    for ver in rev_version_list:
        if ver != current_ver:
            db_class.parse_previous_versions(base_dir, ver)

    # Final generation and shutdown the DB gen class
    db_class.shutdown()

    # Save reg file signatures into the current version DB
    if current_ver != '':
        if fltg_root_dir is None:
            fltg_root_dir = sdk + FLTG_INPUT_DIR
        reg_file_c = issu_reg_files.reg_file_sig(issu_db_dir)
        reg_file_c.save_sig(current_ver, fltg_root_dir)

    # Generate the patch list
    patch_class = issu_patch_insert.issu_patch(issu_db_dir)
    patch_class.read_patch_db(version_list)
    patch_class.create_patch_c_file()
    patch_class.create_patch_makefile()
    lock.release()
Example n. 25
0
     print("A template doesn't exist for group " + groupName +
           ", creating one")
     imageTemplate = files[0]
 print('!!!!!!!!!!!!!!!!!!!!!!!!!')
 template = os.path.join(directory, groupName,
                         'template' + groupName + '.json')
 #tkMessageBox.showinfo("Template", "A template doesn't exist for group "+groupName+", creating one")
 timeStart = timeit.default_timer()
 textsT, fieldsT, pairsT, samePairsT, horzLinksT, groupsT, transcriptionsT, cornersT, actualCornersT, complete, height, width = labelImage(
     os.path.join(directory, groupName, imageTemplate), textsT,
     fieldsT, pairsT, samePairsT, horzLinksT, groupsT,
     transcriptionsT, None, cornersT, cornersActualT)
 timeElapsed = timeit.default_timer() - timeStart + (
     labelTime if labelTime is not None else 0)
 if (len(textsT) == 0 and len(fieldsT) == 0):
     lock.release()
     lock = None
     exit()
 else:
     if not complete:
         template += '.nf'
     with open(template, 'w') as out:
         out.write(
             json.dumps({
                 "textBBs": textsT,
                 "fieldBBs": fieldsT,
                 "pairs": pairsT,
                 "samePairs": samePairsT,
                 "horzLinks": horzLinksT,
                 "groups": groupsT,
                 "page_corners": cornersT,
Example n. 26
0
def mirror_file_with_lock(fname, lockfile="/tmp/.lockfile_hk_vectorgen_mirror"):
    lock = FileLock(lockfile, timeout=60*10)
    lock.lock()
    result = mirror_file(fname)
    lock.release()
    return result
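If mirror_file() raises, the lock above is never released. A hedged variant, keeping the example's lock()/release() API but releasing in a finally block:

def mirror_file_with_lock(fname, lockfile="/tmp/.lockfile_hk_vectorgen_mirror"):
    lock = FileLock(lockfile, timeout=60 * 10)
    lock.lock()
    try:
        return mirror_file(fname)
    finally:
        # Release even when mirror_file() raises so the lock is not leaked.
        lock.release()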
Example n. 27
0
def test_workflow_manager(workflow_start_regular, tmp_path):
    # For sync between jobs
    tmp_file = str(tmp_path / "lock")
    lock = FileLock(tmp_file)
    lock.acquire()

    # For sync between jobs
    flag_file = tmp_path / "flag"
    flag_file.touch()

    @ray.remote
    def long_running(i):
        lock = FileLock(tmp_file)
        with lock.acquire():
            pass

        if i % 2 == 0:
            if flag_file.exists():
                raise ValueError()
        return 100

    outputs = [
        workflow.run_async(long_running.bind(i), workflow_id=str(i))
        for i in range(100)
    ]
    # Test list all, it should list all jobs running
    all_tasks = workflow.list_all()
    assert len(all_tasks) == 100
    all_tasks_running = workflow.list_all(workflow.RUNNING)
    assert dict(all_tasks) == dict(all_tasks_running)
    assert workflow.get_status("0") == "RUNNING"

    # Release lock and make sure all tasks finished
    lock.release()
    for o in outputs:
        try:
            r = ray.get(o)
        except Exception:
            continue
        assert 100 == r
    all_tasks_running = workflow.list_all(workflow.WorkflowStatus.RUNNING)
    assert len(all_tasks_running) == 0
    # Half of them failed and half succeed
    failed_jobs = workflow.list_all("FAILED")
    assert len(failed_jobs) == 50
    finished_jobs = workflow.list_all("SUCCESSFUL")
    assert len(finished_jobs) == 50

    all_tasks_status = workflow.list_all({
        workflow.WorkflowStatus.SUCCESSFUL,
        workflow.WorkflowStatus.FAILED,
        workflow.WorkflowStatus.RUNNING,
    })
    assert len(all_tasks_status) == 100
    assert failed_jobs == [(k, v) for (k, v) in all_tasks_status
                           if v == workflow.WorkflowStatus.FAILED]
    assert finished_jobs == [(k, v) for (k, v) in all_tasks_status
                             if v == workflow.WorkflowStatus.SUCCESSFUL]

    # Test get_status
    assert workflow.get_status("0") == "FAILED"
    assert workflow.get_status("1") == "SUCCESSFUL"
    lock.acquire()
    r = workflow.resume_async("0")
    assert workflow.get_status("0") == workflow.RUNNING
    flag_file.unlink()
    lock.release()
    assert 100 == ray.get(r)
    assert workflow.get_status("0") == workflow.SUCCESSFUL

    # Test cancel
    lock.acquire()
    workflow.resume_async("2")
    assert workflow.get_status("2") == workflow.RUNNING
    workflow.cancel("2")
    assert workflow.get_status("2") == workflow.CANCELED

    # Now resume_all
    resumed = workflow.resume_all(include_failed=True)
    assert len(resumed) == 48
    lock.release()
    assert [ray.get(o) for (_, o) in resumed] == [100] * 48
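The test uses a held FileLock as a gate: every remote task blocks inside `with FileLock(tmp_file)` until the driver releases it. A minimal, self-contained sketch of that gating pattern (assuming the filelock package; the path and function names are illustrative), with a child process standing in for a Ray task:

import multiprocessing as mp
from filelock import FileLock

def wait_for_gate(lock_path, queue):
    # Runs in a child process: blocks until the parent releases the gate lock.
    with FileLock(lock_path):
        pass
    queue.put("done")

if __name__ == "__main__":
    gate_path = "/tmp/demo-gate.lock"
    gate = FileLock(gate_path)
    gate.acquire()                      # close the gate before starting work
    queue = mp.Queue()
    child = mp.Process(target=wait_for_gate, args=(gate_path, queue))
    child.start()
    assert queue.empty()                # the child is still parked on the lock
    gate.release()                      # open the gate
    child.join()
    assert queue.get() == "done"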
Example n. 28
0
	if new_progress != progress:
		progress = new_progress
		print('PROGRESS: %d%%' % progress)
		sys.stdout.flush()

file_in.close()
result = chsum.hexdigest()
print( result)

obj = dict()
if os.path.isfile( Options.output):
	lock = FileLock( Options.output, 100, 1)
	file_out = open( Options.output, 'r')
	obj = json.load( file_out)
	file_out.close()
	lock.release()
	#print( json.dumps( obj))

file_in_name = os.path.basename( Options.input)
if 'files' not in obj: obj['files'] = dict()
if file_in_name not in obj['files']: obj['files'][file_in_name] = dict()
if 'checksum' not in obj['files'][file_in_name]: obj['files'][file_in_name]['checksum'] = dict()
obj['files'][file_in_name]['checksum'][Options.type] = result
obj['files'][file_in_name]['checksum']['time'] = time.time()
result = json.dumps( obj, indent=1)

lock = FileLock( Options.output, 100, 1)
file_out = open( Options.output, 'w')
file_out.write( result)
file_out.close()
lock.release()
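The script above takes the lock once to read the JSON and a second time to write it back, so two concurrent runs can interleave between those sections and drop each other's entries. A minimal sketch that keeps one lock across the whole read-modify-write (assuming the filelock package rather than the custom FileLock(path, 100, 1) signature used above; the helper name is illustrative):

import json
import os
import time

from filelock import FileLock

def record_checksum(output_path, file_name, checksum_type, digest):
    # A single lock span covers read + update + write, so concurrent runs
    # cannot lose each other's updates.
    with FileLock(output_path + ".lock", timeout=100):
        obj = dict()
        if os.path.isfile(output_path):
            with open(output_path, 'r') as fh:
                obj = json.load(fh)
        entry = obj.setdefault('files', dict()).setdefault(file_name, dict())
        entry.setdefault('checksum', dict())[checksum_type] = digest
        entry['checksum']['time'] = time.time()
        with open(output_path, 'w') as fh:
            fh.write(json.dumps(obj, indent=1))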
Example n. 29
0
    def _instance_iterator(self, file_path: str) -> Iterable[Instance]:
        cache_file: Optional[str] = None
        if self._cache_directory:
            cache_file = self._get_cache_location_for_file_path(file_path)

        if cache_file is not None and os.path.exists(cache_file):
            cache_file_lock = FileLock(cache_file + ".lock",
                                       timeout=self.CACHE_FILE_LOCK_TIMEOUT)
            try:
                cache_file_lock.acquire()
                # We make an assumption here that if we can obtain the lock, no one will
                # be trying to write to the file anymore, so it should be safe to release the lock
                # before reading so that other processes can also read from it.
                cache_file_lock.release()
                logger.info("Reading instances from cache %s", cache_file)
                with open(cache_file) as data_file:
                    yield from self._multi_worker_islice(
                        data_file, transform=self.deserialize_instance)
            except Timeout:
                logger.warning(
                    "Failed to acquire lock on dataset cache file within %d seconds. "
                    "Cannot use cache to read instances.",
                    self.CACHE_FILE_LOCK_TIMEOUT,
                )
                yield from self._multi_worker_islice(self._read(file_path),
                                                     ensure_lazy=True)
        elif cache_file is not None and not os.path.exists(cache_file):
            instances = self._multi_worker_islice(self._read(file_path),
                                                  ensure_lazy=True)
            # The cache file doesn't exist so we'll try writing to it.
            if self.max_instances is not None:
                # But we don't write to the cache when max_instances is specified.
                logger.warning(
                    "Skipping writing to data cache since max_instances was specified."
                )
                yield from instances
            elif util.is_distributed() or (get_worker_info()
                                           and get_worker_info().num_workers):
                # We also shouldn't write to the cache if there's more than one process loading
                # instances since each worker only receives a partial share of the instances.
                logger.warning(
                    "Can't cache data instances when there are multiple processes loading data"
                )
                yield from instances
            else:
                try:
                    with FileLock(cache_file + ".lock",
                                  timeout=self.CACHE_FILE_LOCK_TIMEOUT):
                        with CacheFile(cache_file, mode="w+") as cache_handle:
                            logger.info("Caching instances to temp file %s",
                                        cache_handle.name)
                            for instance in instances:
                                cache_handle.write(
                                    self.serialize_instance(instance) + "\n")
                                yield instance
                except Timeout:
                    logger.warning(
                        "Failed to acquire lock on dataset cache file within %d seconds. "
                        "Cannot write to cache.",
                        self.CACHE_FILE_LOCK_TIMEOUT,
                    )
                    yield from instances
        else:
            # No cache.
            yield from self._multi_worker_islice(self._read(file_path),
                                                 ensure_lazy=True)
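The write path above tries to take the cache lock and simply skips caching when the acquisition times out. A minimal sketch of that fallback in isolation (assuming the filelock package; the helper name is illustrative):

from filelock import FileLock, Timeout

def write_cache_if_free(cache_file, lines, lock_timeout=10):
    # Write the cache only if the lock can be taken in time; otherwise skip
    # caching, mirroring the Timeout fallback in the reader above.
    try:
        with FileLock(cache_file + ".lock", timeout=lock_timeout):
            with open(cache_file, "w") as handle:
                handle.writelines(line + "\n" for line in lines)
        return True
    except Timeout:
        return False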
Example n. 30
0
def test_workflow_manager(workflow_start_regular, tmp_path):
    # For sync between jobs
    tmp_file = str(tmp_path / "lock")
    lock = FileLock(tmp_file)
    lock.acquire()

    # For sync between jobs
    flag_file = tmp_path / "flag"
    flag_file.touch()

    @workflow.step
    def long_running(i):
        lock = FileLock(tmp_file)
        with lock.acquire():
            pass

        if i % 2 == 0:
            if flag_file.exists():
                raise ValueError()
        return 100

    outputs = [
        long_running.step(i).run_async(workflow_id=str(i)) for i in range(100)
    ]
    # Test list all, it should list all jobs running
    all_tasks = workflow.list_all()
    assert len(all_tasks) == 100
    all_tasks_running = workflow.list_all(workflow.WorkflowStatus.RUNNING)
    assert dict(all_tasks) == dict(all_tasks_running)
    assert workflow.get_status("0") == workflow.WorkflowStatus.RUNNING

    # Release lock and make sure all tasks finished
    lock.release()
    for o in outputs:
        try:
            r = ray.get(o)
        except Exception:
            continue
        assert 100 == r
    all_tasks_running = workflow.list_all(workflow.WorkflowStatus.RUNNING)
    assert len(all_tasks_running) == 0
    # Half of them failed and half succeed
    failed_jobs = workflow.list_all(workflow.WorkflowStatus.RESUMABLE)
    assert len(failed_jobs) == 50
    finished_jobs = workflow.list_all(workflow.WorkflowStatus.FINISHED)
    assert len(finished_jobs) == 50

    all_tasks_status = workflow.list_all({
        workflow.WorkflowStatus.FINISHED, workflow.WorkflowStatus.RESUMABLE,
        workflow.WorkflowStatus.RUNNING
    })
    assert len(all_tasks_status) == 100
    assert failed_jobs == {
        k: v
        for (k, v) in all_tasks_status.items()
        if v == workflow.WorkflowStatus.RESUMABLE
    }
    assert finished_jobs == {
        k: v
        for (k, v) in all_tasks_status.items()
        if v == workflow.WorkflowStatus.FINISHED
    }

    # Test get_status
    assert workflow.get_status("0") == workflow.WorkflowStatus.RESUMABLE
    assert workflow.get_status("1") == workflow.WorkflowStatus.FINISHED
    assert workflow.get_status("X") is None
    lock.acquire()
    r = workflow.resume("0")
    assert workflow.get_status("0") == workflow.WorkflowStatus.RUNNING
    flag_file.unlink()
    lock.release()
    assert 100 == ray.get(r)
    assert workflow.get_status("0") == workflow.WorkflowStatus.FINISHED

    # Test cancel
    lock.acquire()
    workflow.resume("2")
    assert workflow.get_status("2") == workflow.WorkflowStatus.RUNNING
    workflow.cancel("2")
    assert workflow.get_status("2") == workflow.WorkflowStatus.CANCELED

    # Now resume_all
    resumed = workflow.resume_all()
    assert len(resumed) == 48
    lock.release()
    assert [ray.get(o) for o in resumed.values()] == [100] * 48
Example n. 31
0
def test_actor_writer_2(workflow_start_regular, tmp_path):
    g_lock = str(Path(tmp_path / "g.lock"))
    incr_lock = str(Path(tmp_path / "incr.lock"))
    val_lock = str(Path(tmp_path / "val.lock"))

    val_err = str(Path(tmp_path / "val.err"))
    incr_err = str(Path(tmp_path / "incr.err"))

    @workflow.virtual_actor
    class SyncCounter:
        def __init__(
            self,
            val_lock: str,
            incr_lock: str,
            g_lock: str,
            val_err: str,
            incr_err: str,
        ):
            self.val_lock = val_lock
            self.incr_lock = incr_lock
            self.g_lock = g_lock

            self.val_err = val_err
            self.incr_err = incr_err
            self.v = 0
            if Path(self.val_err).exists():
                raise ValueError()

        @workflow.virtual_actor.readonly
        def val(self):
            with FileLock(self.val_lock), FileLock(self.g_lock):
                if Path(self.val_err).exists():
                    raise ValueError()
                return self.v

        def incr(self, create_incr_err=False):
            with FileLock(self.incr_lock), FileLock(self.g_lock):
                if Path(self.incr_err).exists():
                    raise ValueError()
                if create_incr_err:
                    Path(incr_err).touch()
                self.v += 1
                return self.v

        def __getstate__(self):
            return (
                self.v,
                self.val_lock,
                self.incr_lock,
                self.g_lock,
                self.val_err,
                self.incr_err,
            )

        def __setstate__(self, state):
            (
                self.v,
                self.val_lock,
                self.incr_lock,
                self.g_lock,
                self.val_err,
                self.incr_err,
            ) = state

    # trigger error in init
    Path(val_err).touch()
    actor = SyncCounter.get_or_create(
        "sync_counter", val_lock, incr_lock, g_lock, val_err, incr_err
    )
    with pytest.raises(Exception):
        actor.incr.run()
    Path(val_err).unlink()

    assert ray.get([actor.incr.run_async() for _ in range(9)]) == list(range(2, 11))
    incr_lock = FileLock(incr_lock)
    incr_lock.acquire()

    objs = [actor.incr.run_async() for _ in range(10)]
    assert 10 == actor.val.run()
    Path(val_err).touch()
    with pytest.raises(Exception):
        actor.val.run()
    Path(val_err).unlink()
    incr_lock.release()
    assert ray.get(objs) == list(range(11, 21))

    # test error cases
    actor.incr.run_async()  # 21
    actor.incr.run_async()  # 22
    actor.incr.run_async(create_incr_err=True)  # 23
    actor.incr.run_async()  # 24
    s5 = actor.incr.run_async()  # 25
    with pytest.raises(Exception):
        ray.get(s5)

    assert 23 == actor.val.run()
    Path(incr_err).unlink()
    obj = workflow.resume("sync_counter")
    assert 25 == ray.get(obj)[0]
    assert 25 == actor.val.run()
Example n. 32
0
class ObdIO(object):
    """Create a obd server"""
    def __init__(self, port):
        self.port = port
        self.ser = None
        self.__lock = FileLock(gettempdir() + "/yacm-" +
                               self.port.replace("/", "") + ".lock",
                               timeout=1)
        self.__lock.acquire()

    def __enter__(self):
        self.ser = serial.Serial(self.port,
                                 parity=serial.PARITY_NONE,
                                 stopbits=1,
                                 bytesize=8)
        self.ser.baudrate = 500000
        # self.__write("at", "ws")  # Reset device
        self.__write("at", "d0")  # Set defaults
        self.__write("at", "l0")  # Disable line feed
        self.__write("at", "e0")  # Disable echo
        self.__write("at", "h0")  # Disable headers
        self.__write("at", "sp0")  # Auto set protocol
        return self

    def __exit__(self, exception_type, exception_value, exception_traceback):
        self.__write("at", "ws")  # Reset device
        self.ser.close()
        self.__lock.release()
        os.remove(self.__lock.lock_file)

    def query(self, mode: str, code: str) -> Union[str, Tuple]:
        """Query obd requests"""
        self.__write(mode, code)
        return self.__read()

    def __write(self, mode: str, code: str) -> None:
        self.ser.flushInput()
        self.ser.write(f"{mode}{code}\r".encode())
        logging.info(f"Mode: {mode} Code: {code}")
        self.ser.flush()
        if mode == "at":
            if code == "ws":
                self.ser.readline()
            else:
                self.ser.read_until(b'>')  # Discard the "OK" message

    def __read(self) -> Union[str, Tuple]:
        raw_data = self.ser.read_until(b'\r>')
        while not raw_data:
            raw_data = self.ser.read_until(b'\r>')
        logging.info(f"raw_data: {raw_data}")
        if raw_data not in {b'\r?\r>', b'?\r\r'}:
            if raw_data[0] == 13 and raw_data[-3] != 13:  # Emulator
                raw_data = raw_data[1:-2]
            if raw_data[0] != 13 and raw_data[-3] == 13:  # Car
                raw_data = raw_data[:-3]
            if raw_data == b"NO DATA":
                result = "NO DATA"
            else:
                result = raw_data.decode("ascii").lower().split(' ')[2:]
        else:
            result = "?"
        logging.info(f"result: {result}\n")
        return result

    def supported_pids(self) -> List[str]:
        """Return supported pids"""
        hex2bin_map = {
            "0": "0000",
            "1": "0001",
            "2": "0010",
            "3": "0011",
            "4": "0100",
            "5": "0101",
            "6": "0110",
            "7": "0111",
            "8": "1000",
            "9": "1001",
            "a": "1010",
            "b": "1011",
            "c": "1100",
            "d": "1101",
            "e": "1110",
            "f": "1111",
        }
        supported_pids: List[str] = []
        for pid in ["00", "20", "40", "60", "80"]:
            pids = ''.join(self.query("01", pid))
            if pids not in {"?", "NO DATA"}:
                binary_pids = ''.join(hex2bin_map[nibble] for nibble in pids)
                pid_code = int(pid, 16)
                for bit in binary_pids:
                    pid_code += 1
                    if bit == "1":
                        supported_pids.append(hex(pid_code)[2:])
        return supported_pids
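A minimal usage sketch for the class above; the serial port name is an assumption and logging configuration is left to the caller:

with ObdIO("/dev/ttyUSB0") as obd:
    print(obd.supported_pids())
    print(obd.query("01", "0c"))  # mode 01, PID 0c: raw engine RPM bytes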
Example n. 33
0
class Metric(MetricInfoMixin):
    """A Metrics is the base class and common API for all metrics.

    Args:
        config_name (``str``): This is used to define a hash specific to a metric computation script and prevents the metric's data
            from being overridden when the metric loading script is modified.
        keep_in_memory (``bool``): keep all predictions and references in memory. Not possible in distributed settings.
        cache_dir (``str``): Path to a directory in which temporary prediction/references data will be stored.
            The data directory should be located on a shared file-system in distributed setups.
        num_process (``int``): specify the total number of nodes in a distributed setting.
            This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
        process_id (``int``): specify the id of the current process in a distributed setup (between 0 and num_process-1)
            This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
        seed (Optional ``int``): If specified, this will temporarily set numpy's random seed when :func:`datasets.Metric.compute` is run.
        experiment_id (``str``): A specific experiment id. This is used if several distributed evaluations share the same file system.
            This is useful to compute metrics in distributed setups (in particular non-additive metrics like F1).
        max_concurrent_cache_files (``int``): Max number of concurrent metrics cache files (default 10000).
        timeout (``Union[int, float]``): Timeout in seconds for distributed setting synchronization.
    """

    def __init__(
        self,
        config_name: Optional[str] = None,
        keep_in_memory: bool = False,
        cache_dir: Optional[str] = None,
        num_process: int = 1,
        process_id: int = 0,
        seed: Optional[int] = None,
        experiment_id: Optional[str] = None,
        max_concurrent_cache_files: int = 10000,
        timeout: Union[int, float] = 100,
        **kwargs,
    ):
        # prepare info
        self.config_name = config_name or "default"
        info = self._info()
        info.metric_name = camelcase_to_snakecase(self.__class__.__name__)
        info.config_name = self.config_name
        info.experiment_id = experiment_id or "default_experiment"
        MetricInfoMixin.__init__(self, info)  # For easy access on low level

        # Safety checks on num_process and process_id
        assert isinstance(process_id, int) and process_id >= 0, "'process_id' should be a non-negative integer"
        assert (
            isinstance(num_process, int) and num_process > process_id
        ), "'num_process' should be a number greater than process_id"
        assert (
            num_process == 1 or not keep_in_memory
        ), "Using 'keep_in_memory' is not possible in distributed setting (num_process > 1)."
        self.num_process = num_process
        self.process_id = process_id
        self.max_concurrent_cache_files = max_concurrent_cache_files

        self.keep_in_memory = keep_in_memory
        self._data_dir_root = os.path.expanduser(cache_dir or HF_METRICS_CACHE)
        self.data_dir = self._build_data_dir()
        self.seed: int = seed or np.random.get_state()[1][0]
        self.timeout: Union[int, float] = timeout

        # Update 'compute' and 'add' docstring
        # methods need to be copied otherwise it changes the docstrings of every instance
        self.compute = types.MethodType(copyfunc(self.compute), self)
        self.add_batch = types.MethodType(copyfunc(self.add_batch), self)
        self.add = types.MethodType(copyfunc(self.add), self)
        self.compute.__func__.__doc__ += self.info.inputs_description
        self.add_batch.__func__.__doc__ += self.info.inputs_description
        self.add.__func__.__doc__ += self.info.inputs_description

        # self.arrow_schema = pa.schema(field for field in self.info.features.type)
        self.buf_writer = None
        self.writer = None
        self.writer_batch_size = None
        self.data = None

        # This is the cache file we store our predictions/references in
        # Keep it None for now so we can (cloud)pickle the object
        self.cache_file_name = None
        self.filelock = None
        self.rendez_vous_lock = None

        # This is all the cache files on which we have a lock when we are in a distributed setting
        self.file_paths = None
        self.filelocks = None

    def __len__(self):
        """Return the number of examples (predictions or predictions/references pair)
        currently stored in the metric's cache.
        """
        return 0 if self.writer is None else len(self.writer)

    def __repr__(self):
        return (
            f'Metric(name: "{self.name}", features: {self.features}, '
            f'usage: """{self.inputs_description}""", '
            f"stored examples: {len(self)})"
        )

    def _build_data_dir(self):
        """Path of this metric in cache_dir:
        Will be:
            self._data_dir_root/self.name/self.config_name/self.hash (if not none)/
        If any of these element is missing or if ``with_version=False`` the corresponding subfolders are dropped.
        """
        builder_data_dir = self._data_dir_root
        builder_data_dir = os.path.join(builder_data_dir, self.name, self.config_name)
        os.makedirs(builder_data_dir, exist_ok=True)
        return builder_data_dir

    def _create_cache_file(self, timeout=1) -> Tuple[str, FileLock]:
        """ Create a new cache file. If the default cache file is used, we generated a new hash. """
        file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{self.process_id}.arrow")
        filelock = None
        for i in range(self.max_concurrent_cache_files):
            filelock = FileLock(file_path + ".lock")
            try:
                filelock.acquire(timeout=timeout)
            except Timeout:
                # If we have reached the max number of attempts or we are not allowed to find a free name (distributed setup)
                # We raise an error
                if self.num_process != 1:
                    raise ValueError(
                        f"Error in _create_cache_file: another metric instance is already using the local cache file at {file_path}. "
                        f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid colision "
                        f"between distributed metric instances."
                    )
                if i == self.max_concurrent_cache_files - 1:
                    raise ValueError(
                        f"Cannot acquire lock, too many metric instance are operating concurrently on this file system."
                        f"You should set a larger value of max_concurrent_cache_files when creating the metric "
                        f"(current value is {self.max_concurrent_cache_files})."
                    )
                # In other cases (allowed to find a new file name + not yet at max num of attempts) we can try to sample a new hashing name.
                file_uuid = str(uuid.uuid4())
                file_path = os.path.join(
                    self.data_dir, f"{self.experiment_id}-{file_uuid}-{self.num_process}-{self.process_id}.arrow"
                )
            else:
                break

        return file_path, filelock

    def _get_all_cache_files(self) -> Tuple[List[str], List[FileLock]]:
        """Get a lock on all the cache files in a distributed setup.
        We wait for timeout second to let all the distributed node finish their tasks (default is 100 seconds).
        """
        if self.num_process == 1:
            if self.cache_file_name is None:
                raise ValueError(
                    "Metric cache file doesn't exist. Please make sure that you call `add` or `add_batch` "
                    "at least once before calling `compute`."
                )
            file_paths = [self.cache_file_name]
        else:
            file_paths = [
                os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow")
                for process_id in range(self.num_process)
            ]

        # Let's acquire a lock on each process files to be sure they are finished writing
        filelocks = []
        for process_id, file_path in enumerate(file_paths):
            filelock = FileLock(file_path + ".lock")
            try:
                filelock.acquire(timeout=self.timeout)
            except Timeout:
                raise ValueError(f"Cannot acquire lock on cached file {file_path} for process {process_id}.")
            else:
                filelocks.append(filelock)

        return file_paths, filelocks

    def _check_all_processes_locks(self):
        expected_lock_file_names = [
            os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-{process_id}.arrow.lock")
            for process_id in range(self.num_process)
        ]
        for expected_lock_file_name in expected_lock_file_names:
            nofilelock = FileFreeLock(expected_lock_file_name)
            try:
                nofilelock.acquire(timeout=self.timeout)
            except Timeout:
                raise ValueError(
                    f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
                )
            else:
                nofilelock.release()

    def _check_rendez_vous(self):
        expected_lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-0.arrow.lock")
        nofilelock = FileFreeLock(expected_lock_file_name)
        try:
            nofilelock.acquire(timeout=self.timeout)
        except Timeout:
            raise ValueError(
                f"Expected to find locked file {expected_lock_file_name} from process {self.process_id} but it doesn't exist."
            )
        else:
            nofilelock.release()
        lock_file_name = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
        rendez_vous_lock = FileLock(lock_file_name)
        try:
            rendez_vous_lock.acquire(timeout=self.timeout)
        except Timeout:
            raise ValueError(f"Couldn't acquire lock on {lock_file_name} from process {self.process_id}.")
        else:
            rendez_vous_lock.release()

    def _finalize(self):
        """Close all the writing process and load/gather the data
        from all the nodes if main node or all_process is True.
        """
        if self.writer is not None:
            self.writer.finalize()
        self.writer = None
        if self.filelock is not None:
            self.filelock.release()

        if self.keep_in_memory:
            # Read the predictions and references
            reader = ArrowReader(path=self.data_dir, info=DatasetInfo(features=self.features))
            self.data = Dataset.from_buffer(self.buf_writer.getvalue())

        elif self.process_id == 0:
            # Let's acquire a lock on each node files to be sure they are finished writing
            file_paths, filelocks = self._get_all_cache_files()

            # Read the predictions and references
            try:
                reader = ArrowReader(path="", info=DatasetInfo(features=self.features))
                self.data = Dataset(**reader.read_files([{"filename": f} for f in file_paths]))
            except FileNotFoundError:
                raise ValueError(
                    "Error in finalize: another metric instance is already using the local cache file. "
                    "Please specify an experiment_id to avoid colision between distributed metric instances."
                )

            # Store file paths and locks and we will release/delete them after the computation.
            self.file_paths = file_paths
            self.filelocks = filelocks

    def compute(self, *args, **kwargs) -> Optional[dict]:
        """Compute the metrics.

        Args:
            We disallow the usage of positional arguments to prevent mistakes
            `predictions` (Optional list/array/tensor): predictions
            `references` (Optional list/array/tensor): references
            `**kwargs` (Optional other kwargs): will be forwarded to the metrics :func:`_compute` method (see details in the docstring)

        Return:
            Dictionary with the metrics if this metric is run on the main process (process_id == 0)
            None if the metric is not run on the main process (process_id != 0)
        """
        if args:
            raise ValueError("Please call `compute` using keyword arguments.")

        predictions = kwargs.pop("predictions", None)
        references = kwargs.pop("references", None)

        if predictions is not None:
            self.add_batch(predictions=predictions, references=references)
        self._finalize()

        self.cache_file_name = None
        self.filelock = None

        if self.process_id == 0:
            self.data.set_format(type=self.info.format)

            predictions = self.data["predictions"]
            references = self.data["references"]
            with temp_seed(self.seed):
                output = self._compute(predictions=predictions, references=references, **kwargs)

            if self.buf_writer is not None:
                self.buf_writer = None
                del self.data
                self.data = None
            else:
                # Release the data and writer, then release locks and delete all the cache files
                del self.data
                self.data = None
                del self.writer
                self.writer = None
                for filelock, file_path in zip(self.filelocks, self.file_paths):
                    logger.info(f"Removing {file_path}")
                    os.remove(file_path)
                    filelock.release()

            return output
        else:
            return None

    def add_batch(self, *, predictions=None, references=None):
        """
        Add a batch of predictions and references for the metric's stack.
        """
        batch = {"predictions": predictions, "references": references}
        batch = self.info.features.encode_batch(batch)
        if self.writer is None:
            self._init_writer()
        try:
            self.writer.write_batch(batch)
        except pa.ArrowInvalid:
            raise ValueError(
                f"Predictions and/or references don't match the expected format.\n"
                f"Expected format: {self.features},\n"
                f"Input predictions: {predictions},\n"
                f"Input references: {references}"
            )

    def add(self, *, prediction=None, reference=None):
        """Add one prediction and reference for the metric's stack."""
        example = {"predictions": prediction, "references": reference}
        example = self.info.features.encode_example(example)
        if self.writer is None:
            self._init_writer()
        try:
            self.writer.write(example)
        except pa.ArrowInvalid:
            raise ValueError(
                f"Prediction and/or reference don't match the expected format.\n"
                f"Expected format: {self.features},\n"
                f"Input predictions: {prediction},\n"
                f"Input references: {reference}"
            )

    def _init_writer(self, timeout=1):
        if self.num_process > 1:
            if self.process_id == 0:
                file_path = os.path.join(self.data_dir, f"{self.experiment_id}-{self.num_process}-rdv.lock")
                self.rendez_vous_lock = FileLock(file_path)
                try:
                    self.rendez_vous_lock.acquire(timeout=timeout)
                except TimeoutError:
                    raise ValueError(
                        f"Error in _init_writer: another metric instance is already using the local cache file at {file_path}. "
                        f"Please specify an experiment_id (currently: {self.experiment_id}) to avoid colision "
                        f"between distributed metric instances."
                    )

        if self.keep_in_memory:
            self.buf_writer = pa.BufferOutputStream()
            self.writer = ArrowWriter(
                features=self.info.features, stream=self.buf_writer, writer_batch_size=self.writer_batch_size
            )
        else:
            self.buf_writer = None

            # Get cache file name and lock it
            if self.cache_file_name is None or self.filelock is None:
                cache_file_name, filelock = self._create_cache_file()  # get ready
                self.cache_file_name = cache_file_name
                self.filelock = filelock

            self.writer = ArrowWriter(
                features=self.info.features, path=self.cache_file_name, writer_batch_size=self.writer_batch_size
            )
        # Set up the rendez-vous here if we are in a distributed setting
        if self.num_process > 1:
            if self.process_id == 0:
                self._check_all_processes_locks()  # wait for everyone to be ready
                self.rendez_vous_lock.release()  # let everyone go
            else:
                self._check_rendez_vous()  # wait for master to be ready and to let everyone go

    def _info(self) -> MetricInfo:
        """Construct the MetricInfo object. See `MetricInfo` for details.

        Warning: This function is only called once and the result is cached for all
        following .info() calls.

        Returns:
            info: (MetricInfo) The metrics information
        """
        raise NotImplementedError

    def download_and_prepare(
        self,
        download_config: Optional[DownloadConfig] = None,
        dl_manager: Optional[DownloadManager] = None,
        **download_and_prepare_kwargs,
    ):
        """Downloads and prepares dataset for reading.

        Args:
            download_config (Optional ``datasets.DownloadConfig``): specific download configuration parameters.
            dl_manager (Optional ``datasets.DownloadManager``): specific DownloadManager to use.
        """
        if dl_manager is None:
            if download_config is None:
                download_config = DownloadConfig()
                download_config.cache_dir = os.path.join(self.data_dir, "downloads")
                download_config.force_download = False

            dl_manager = DownloadManager(
                dataset_name=self.name, download_config=download_config, data_dir=self.data_dir
            )

        self._download_and_prepare(dl_manager)

    def _download_and_prepare(self, dl_manager):
        """Downloads and prepares resources for the metric.

        This is the internal implementation to overwrite called when user calls
        `download_and_prepare`. It should download all required resources for the metric.

        Args:
            dl_manager: (DownloadManager) `DownloadManager` used to download and cache
                data.
        """
        return None

    def _compute(self, *, predictions=None, references=None, **kwargs) -> Dict[str, Any]:
        """ This method defines the common API for all the metrics in the library """
        raise NotImplementedError

    def __del__(self):
        if self.filelock is not None:
            self.filelock.release()
        if self.rendez_vous_lock is not None:
            self.rendez_vous_lock.release()
        del self.writer
        del self.data
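
A minimal usage sketch of the distributed API above. The constructor of this Metric variant is not shown in this excerpt, so the keyword arguments below (experiment_id, process_id, num_process, data_dir) are assumed from the attributes its methods rely on, and MyAccuracy stands in for a hypothetical concrete subclass. Each process adds its own shard of predictions; only process 0 receives the aggregated result from compute().

def evaluate_shard(process_id, num_process, predictions, references):
    # Hypothetical subclass and shared cache directory; one call per process.
    metric = MyAccuracy(
        experiment_id="run-42",      # avoids cache collisions between concurrent runs
        process_id=process_id,
        num_process=num_process,
        data_dir="/shared/metrics_cache",
    )
    # Each process writes its shard to its own Arrow cache file under a FileLock
    metric.add_batch(predictions=predictions, references=references)
    result = metric.compute()        # dict on process 0, None on the others
    if process_id == 0:
        print(result)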
Esempio n. 34
0
class Cam():
    """Class to control the camera device
    """
    params_dict = {
        'brightness': cv2.CAP_PROP_BRIGHTNESS,
        'gamma': cv2.CAP_PROP_GAMMA,
        'gain': cv2.CAP_PROP_GAIN,
        'exposure': cv2.CAP_PROP_EXPOSURE,
    }

    def __init__(self):
        self._id = 0
        self._lock = FileLock("/tmp/cam.lock")

    def lock_acquire(self, timeout=10) -> None:
        """Acquire a lock to use the camera device

        :param timeout: Time to wait for the lock; defaults to 10 seconds.
        """
        self._lock.acquire(timeout=timeout)

    def lock_release(self) -> None:
        """Release the lock
        """
        self._lock.release()

    def read(self) -> (int, numpy.ndarray):
        """Capture a single frame from the camera device

        :return: (ret, frame)
        """
        cam = cv2.VideoCapture(self._id)
        cam.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        cam.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1.0)
        ret, frame = cam.read()
        # Release the capture device so other callers can open it
        cam.release()

        return ret, frame

    def get_camera_params(self) -> {}:
        """Returns the parameters read from the camera

        :return: Dictionary with the parameters' values
        """
        cam = cv2.VideoCapture(self._id)
        response = {
            'brightness': cam.get(cv2.CAP_PROP_BRIGHTNESS),
            'gamma': cam.get(cv2.CAP_PROP_GAMMA),
            'gain': cam.get(cv2.CAP_PROP_GAIN),
            'exposure': cam.get(cv2.CAP_PROP_EXPOSURE),
        }
        # Release the capture device so other callers can open it
        cam.release()

        return response

    def set_camera_params(self, params) -> None:
        """Set camera parameters

        :param params: Dictionary with the parameters to set
        """
        cam = cv2.VideoCapture(self._id)
        cam.set(cv2.CAP_PROP_BUFFERSIZE, 1)
        cam.set(cv2.CAP_PROP_AUTO_EXPOSURE, 1.0)
        for param, value in params.items():
            if value is not None:
                cam.set(self.params_dict[param], value)
        # Release the capture device so other callers can open it
        cam.release()
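
A short usage sketch for the Cam class above, assuming the FileLock here is the filelock package's lock (its acquire raises Timeout when the lock cannot be taken within the timeout). The /tmp/cam.lock file serializes camera access across processes, and try/finally guarantees the lock is released even if the capture fails; the timeout value is arbitrary.

cam = Cam()
cam.lock_acquire(timeout=5)          # blocks until the camera lock is free
try:
    ret, frame = cam.read()          # one frame per call; the device is reopened each time
    if ret:
        print("captured frame with shape", frame.shape)
    print(cam.get_camera_params())
finally:
    cam.lock_release()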
Esempio n. 35
0
class Metric(object):
    def __init__(
        self,
        name: str = None,
        experiment_id: Optional[str] = None,
        process_id: int = 0,
        num_process: int = 1,
        data_dir: Optional[str] = None,
        in_memory: bool = False,
        **kwargs,
    ):
        """ A Metrics is the base class and common API for all metrics.
            Args:
                process_id (int): specify the id of the node in a distributed settings between 0 and num_nodes-1
                    This can be used, to compute metrics on distributed setups
                    (in particular non-additive metrics like F1).
                data_dir (str): path to a directory in which temporary data will be stored.
                    This should be a shared file-system for distributed setups.
                experiment_id (str): should be used if you perform several concurrent experiments using
                    the same caching directory (it will be indicated in the raised error).
                in_memory (bool): keep all predictions and references in memory. Not possible in a distributed setting.
        """
        # Safety checks
        assert isinstance(
            process_id, int
        ) and process_id >= 0, "'process_id' should be a number greater than or equal to 0"
        assert (isinstance(num_process, int) and num_process > process_id
                ), "'num_process' should be a number greater than process_id"
        assert (
            process_id == 0 or not in_memory
        ), "Using 'in_memory' is not possible in distributed setting (process_id > 0)."

        # Metric name
        self.name = camelcase_to_snakecase(self.__class__.__name__)
        # Configuration name
        self.config_name: str = name or "default"

        self.process_id = process_id
        self.num_process = num_process
        self.in_memory = in_memory
        self.experiment_id = experiment_id if experiment_id is not None else "cache"
        self._version = "1.0.0"
        self._data_dir_root = os.path.expanduser(data_dir or HF_METRICS_CACHE)
        self.data_dir = self._build_data_dir()

        # prepare info
        info = self._info()
        info.metric_name = self.name
        info.config_name = self.config_name
        info.version = self._version
        self.info = info

        # Update 'compute' and 'add' docstring
        self.compute.__func__.__doc__ += self.info.inputs_description
        self.add_batch.__func__.__doc__ += self.info.inputs_description
        self.add.__func__.__doc__ += self.info.inputs_description

        self.arrow_schema = pa.schema(field
                                      for field in self.info.features.type)
        self.buf_writer = None
        self.writer = None
        self.writer_batch_size = None
        self.data = None

        # Check we can write on the cache file without competitors
        self.cache_file_name = self._get_cache_path(self.process_id)
        self.filelock = FileLock(self.cache_file_name + ".lock")
        try:
            self.filelock.acquire(timeout=1)
        except Timeout:
            raise ValueError(
                "Cannot acquire lock, caching file might be used by another process, "
                "you should setup a unique 'experiment_id' for this run.")

    def _relative_data_dir(self, with_version=True):
        """Relative path of this dataset in data_dir."""
        builder_data_dir = os.path.join(self.name, self.config_name)
        if not with_version:
            return builder_data_dir

        version = self._version
        version_data_dir = os.path.join(builder_data_dir, str(version))
        return version_data_dir

    def _build_data_dir(self):
        """ Return the directory for the current version.
        """
        builder_data_dir = os.path.join(
            self._data_dir_root, self._relative_data_dir(with_version=False))
        version_data_dir = os.path.join(
            self._data_dir_root, self._relative_data_dir(with_version=True))

        def _other_versions_on_disk():
            """Returns previous versions on disk."""
            if not os.path.exists(builder_data_dir):
                return []

            version_dirnames = []
            for dir_name in os.listdir(builder_data_dir):
                try:
                    version_dirnames.append((Version(dir_name), dir_name))
                except ValueError:  # Invalid version (ex: incomplete data dir)
                    pass
            version_dirnames.sort(reverse=True)
            return version_dirnames

        # Check and warn if other versions exist on disk
        version_dirs = _other_versions_on_disk()
        if version_dirs:
            other_version = version_dirs[0][0]
            if other_version != self._version:
                warn_msg = (
                    "Found a different version {other_version} of metric {name} in "
                    "data_dir {data_dir}. Using currently defined version "
                    "{cur_version}.".format(
                        other_version=str(other_version),
                        name=self.name,
                        data_dir=self._data_dir_root,
                        cur_version=str(self._version),
                    ))
                logger.warning(warn_msg)

        os.makedirs(version_data_dir, exist_ok=True)
        return version_data_dir

    def _get_cache_path(self, node_id):
        return os.path.join(
            self.data_dir, f"{self.experiment_id}-{self.name}-{node_id}.arrow")

    def finalize(self, timeout=120):
        """ Close all the writing process and load/gather the data
            from all the nodes if main node or all_process is True.
        """
        self.writer.finalize()
        self.writer = None
        self.buf_writer = None
        self.filelock.release()

        if self.process_id == 0:
            # Let's acquire a lock on each node files to be sure they are finished writing
            node_files = []
            locks = []
            for node_id in range(self.num_process):
                node_file = self._get_cache_path(node_id)
                filelock = FileLock(node_file + ".lock")
                filelock.acquire(timeout=timeout)
                node_files.append({"filename": node_file})
                locks.append(filelock)

            # Read the predictions and references
            reader = ArrowReader(path=self.data_dir, info=None)
            self.data = reader.read_files(node_files)

            # Release all of our locks
            for lock in locks:
                lock.release()

    def compute(self,
                predictions=None,
                references=None,
                timeout=120,
                **metrics_kwargs):
        """ Compute the metrics.
        """
        if predictions is not None:
            self.add_batch(predictions=predictions, references=references)
        self.finalize(timeout=timeout)

        self.data.set_format(type=self.info.format)

        predictions = self.data["predictions"]
        references = self.data["references"]
        output = self._compute(predictions=predictions,
                               references=references,
                               **metrics_kwargs)
        return output

    def add_batch(self, predictions=None, references=None, **kwargs):
        """ Add a batch of predictions and references for the metric's stack.
        """
        batch = {"predictions": predictions, "references": references}
        if self.writer is None:
            self._init_writer()
        self.writer.write_batch(batch)

    def add(self, prediction=None, reference=None, **kwargs):
        """ Add one prediction and reference for the metric's stack.
        """
        example = {"predictions": prediction, "references": reference}
        example = self.info.features.encode_example(example)
        if self.writer is None:
            self._init_writer()
        self.writer.write(example)

    def _init_writer(self):
        if self.in_memory:
            self.buf_writer = pa.BufferOutputStream()
            self.writer = ArrowWriter(schema=self.arrow_schema,
                                      stream=self.buf_writer,
                                      writer_batch_size=self.writer_batch_size)
        else:
            self.buf_writer = None
            self.writer = ArrowWriter(schema=self.arrow_schema,
                                      path=self.cache_file_name,
                                      writer_batch_size=self.writer_batch_size)

    def _info(self) -> MetricInfo:
        """Construct the MetricInfo object. See `MetricInfo` for details.

        Warning: This function is only called once and the result is cached for all
        following .info() calls.

        Returns:
            info: (MetricInfo) The metrics information
        """
        raise NotImplementedError

    def _compute(self,
                 predictions=None,
                 references=None,
                 **kwargs) -> Dict[str, Any]:
        """ This method defines the common API for all the metrics in the library """
        raise NotImplementedError
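
The finalize() method above illustrates a more general pattern: each writer process holds a FileLock on its own output file while writing, and the gathering process acquires each of those locks (blocking up to a timeout) to be sure every writer has finished before reading. Below is a standalone sketch of that pattern with the filelock package, independent of the Metric class; the file layout and helper names are made up for illustration.

from filelock import FileLock, Timeout

def write_shard(path, rows):
    # Writer side: hold <path>.lock for the whole duration of the write.
    with FileLock(path + ".lock"):
        with open(path, "w") as f:
            f.writelines(line + "\n" for line in rows)

def gather_shards(paths, timeout=120):
    # Gatherer side: acquiring each lock guarantees the corresponding writer
    # has already released it, i.e. finished writing, before the file is read.
    data = []
    for path in paths:
        lock = FileLock(path + ".lock")
        try:
            lock.acquire(timeout=timeout)
        except Timeout:
            raise RuntimeError(f"shard {path} is still locked by its writer")
        try:
            with open(path) as f:
                data.extend(f.read().splitlines())
        finally:
            lock.release()
    return data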
Esempio n. 36
0
class CandleConnector():
    def __init__(self):
        self.lock = FileLock("config.csv.lock")
        # make dict here that stores the amount for each coin
        self.config = "config.csv"
        self.candles = candles.BinaceConnector()
        self.masterTicker = -60

    def readConfig(self):
        self.lock.acquire()
        df = pd.read_csv(self.config,encoding='utf8', delimiter=',' , 
            names= ['coin', 'capital', 'starting', 'limit', 'currentPrice', 'autobought', 'takeprofit', 'updatetime', 'orderid', 'takeProfitAmount', 'takeProfitOrder', 'delta'])
        self.lock.release()
        df.set_index('coin', inplace=True)
        return df

    #get the current config
    def getCoinConfigData(self, coin):
        df = self.readConfig()
        return df.loc[coin]

    #save a new copy of the config
    def setCoinConfigData(self, df):
        self.lock.acquire()
        df.to_csv(self.config, mode='w', header=False, index=True)
        self.lock.release()

    def getAutoBoughtAmount(self, coin):
        return float(self.getCoinConfigData(coin)['autobought'])

    # helper for buying a number of coins at current price
    def orderNumber(self, coin, number):
        return (self.candles.buyMarket(coin, number))

    # gives you a quote for a coin
    def getQuote(self, coin):
        return float(self.candles.getCoinPrice(coin))

    # write out to a log file
    def logit(self, message, destination):
        with open(f"testData/{destination}.txt", "a") as f:
            f.write(message)
            f.write("\n")

    def saveCoinBuyData(self, coin, price, amount, setcap=0.0, setupdatetime=180, order="none"):
        df = self.readConfig()
        if setcap > 0:
            df.at[coin, 'capital'] = setcap
        df.at[coin, 'starting'] = price
        df.at[coin, 'autobought'] = amount
        df.at[coin, 'limit'] = price * df.at[coin, 'takeprofit']
        df.at[coin, 'updatetime'] = setupdatetime
        df.at[coin, 'orderid'] = order
        self.setCoinConfigData(df)


    def saveCoinLimitData(self, coin, price, limit, setupdatetime=180):
        df = self.readConfig()
        df.at[coin, 'currentPrice'] = price
        df.at[coin, 'limit'] = limit
        self.setCoinConfigData(df)

    def updateDelta(self, coin, delta, price):
        df = self.readConfig()
        df.at[coin, 'currentPrice'] = price
        df.at[coin, 'delta'] = delta -180
        self.setCoinConfigData(df)

    def updateOrder(self, coin, order):
        df = self.readConfig()
        df.at[coin, 'orderid'] = order
        self.setCoinConfigData(df)


    #sell an amount at current price
    def sellNow(self, coin):
        #get the amount the bot bought
        amount = self.getAutoBoughtAmount(coin)
        print(f"found {amount}")
        if amount > 0:
            print(f"selling")
            sellorder = self.candles.sellMarket(coin, amount)
            time.sleep(1)
            # save the data for analysis later and reset the bot coin's config
            self.logit(f"SELLING DUE TO TAKEPROFIT {sellorder}", "logger")
            #need to check to make sure we did sell before we save this
            sellprice = float(sellorder['fills'][0]['price']) * amount
            print(sellprice)
            self.saveCoinBuyData(coin, 0, 0, setcap=sellprice)

    def echoCurrentTick(self):
        with open('tick', 'w') as f:
            f.write(f"{self.masterTicker}")

    def runForever(self):
        while 1:
            self.masterTicker += 60
            df = self.readConfig()
            # loop over the contents of our config file
            tickers = self.candles.getBook()
            for coin, row in df.iterrows():
                # check to see if the bot has made a purchase
                position = float(row['autobought'])
                if position > 0:
                    for x in tickers:
                        if coin == x['symbol']:
                            currentPrice = float(x['bidPrice'])
                    # if the bot has bought, check the update time
                    currentOrder = row['orderid']
                    updatetime = int(row['updatetime'])
                    delta = self.masterTicker % updatetime
                    if delta == 0:
                        #get the current price and check if it's above our current limit
                        currentLimit = float(row['limit'])
                        if currentPrice < currentLimit:
                            print("checking order")
                            if "none" not in currentOrder:
                                print("cancelOrder")
                                status = self.candles.checkStatus(coin, currentOrder)
                                if status != 'FILLED':
                                    self.candles.cancelOrder(coin, currentOrder)
                                    time.sleep(.3)
                                    print(f"selling because price {currentPrice} < limit {currentLimit}")
                                    self.sellNow(coin)
                            else:
                                self.sellNow(coin)
                        else:
                            # calculate a new limit based on our coin's config profile
                            newlimit = currentPrice*float(row['takeprofit'])
                            print(f"new limit {newlimit}")
                            if newlimit > currentLimit:
                                print(f"new limit > {currentLimit}")
                                self.saveCoinLimitData(coin, currentPrice, newlimit)

                    starting = float(row['starting'])
                    # check to see if a stop loss order has been placed
                    if "none" not in currentOrder:
                        print(f"order {currentOrder} is", end = " ")
                        status = self.candles.checkStatus(coin, currentOrder)
                        if status == 'FILLED':
                            print("FILLED so close")
                            sellprice = float(status['fills'][0]['price']) * row['autobought']
                            self.saveCoinBuyData(coin, 0, 0, setcap=sellprice)
                        else:
                            print("open")
                    # if a stop loss has not been placed and we are in profit, attempt to at least cover our fees
                    elif currentPrice > starting + (starting * 0.005):
                        print("made our money back placing limit order")
                        # save this order id to the config
                        order = self.candles.stopLoss(coin,
                            stop=(starting + (starting * (2 * .0008))),
                            limit=(starting + (starting * (2 * .00076))),
                            position=position)['clientOrderId']
                        self.updateOrder(coin, order)

                    self.updateDelta(coin, delta, currentPrice)
                    self.logit(f"{self.masterTicker}, {row.starting}, {currentPrice}, {row.limit}", coin)
            self.echoCurrentTick()
            time.sleep(60)
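
One thing worth noting about CandleConnector above: readConfig() and setCoinConfigData() each take config.csv.lock separately, so a read-modify-write such as updateOrder() can interleave with another process between the read and the write. Below is a sketch of the same kind of update done under a single lock acquisition; the column names are copied from readConfig() and the function name is illustrative.

import pandas as pd
from filelock import FileLock

def update_order_atomic(coin, order, config="config.csv"):
    # Hold the lock across the whole read-modify-write so no other process
    # can slip in between reading and rewriting config.csv.
    with FileLock(config + ".lock"):
        df = pd.read_csv(config, encoding='utf8', delimiter=',',
            names=['coin', 'capital', 'starting', 'limit', 'currentPrice',
                   'autobought', 'takeprofit', 'updatetime', 'orderid',
                   'takeProfitAmount', 'takeProfitOrder', 'delta'])
        df.set_index('coin', inplace=True)
        df.at[coin, 'orderid'] = order
        df.to_csv(config, mode='w', header=False, index=True)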
Esempio n. 37
0
class Store(object):
    auto_sync = True
    auto_mem_resync_counter = 0

    @staticmethod
    def is_valid_schema(schema):
        return all(len(x) == 3 and x[1] in DUMPERS for x in schema)

    def __init__(self, db_name, schema, log_level=1):
        """
        schema example = (
            (name, type, len),
            ...
        )

        Supported field types: int, str
        """
        self._log_lvl = log_level
        db_name = os.path.join('db', db_name)

        self.lock = FileLock(db_name)

        if not self.is_valid_schema(schema):
            self.error("Invalid DB schema")
            raise Exception('Schema is bad!')

        rows = self.init_mem(db_name, schema)

        self._memory = rows
        self._db_name = db_name
        self._schema = schema

    def info(self, msg):
        if self._log_lvl > 3:
            print(msg)

    def error(self, msg):
        if self._log_lvl > 0:
            print(msg)

    def _insert(self, object_dict_):
        assert type(object_dict_) == dict, 'Wow! Not dict!'
        self._memory.append(object_dict_)

    def sync(self):
        self.lock.acquire()
        f = open(self._db_name, 'wb')
        for d in self._memory:
            for name_, type_, len_ in self._schema:
                bytes_bytes = DUMPERS[type_](d[name_], len_)
                f.write(bytes_bytes)
            f.write('\n')
        f.close()
        self.lock.release()
        self._memory = self.init_mem(self._db_name, self._schema)

    def execute(self, row):
        """
        Execute SQL query. Support: select, delete, insert.
        """
        # row = "select where value[1] == 'd' and id >= 0 limit 2"
        # row = "insert into ... values (2, 'awdwa')"
        # row = "delete where ... limit k"
        try:
            method, tail = row.split(' ', 1)
            method = method.lower()
            tail = tail.strip(' ')
            rez = None

            self.info(u'-- SQL {0} {1} --'.format(method, tail))

            if method == 'insert':
                r = re.compile(r'^.*?values?[ ]*(\(.*?\))$', re.M)
                z = r.match(tail)
                if z:
                    rez = [self.insert(*z.groups())]
                    if self.auto_sync:
                        self.sync()

            elif method in ['select', 'delete']:
                r = re.compile(r'^.*?(?:(?:where)[ ]*(.*?))?[ ]*(?:limit[ ]*(\d+))?[ ]*([dD][Ee][ScCs][ckCK])?[ ]*$')
                z = r.match(tail)
                if z:
                    rez = self.__getattribute__('go_go')(method, z.groups())
                else:
                    rez = self.__getattribute__(method)()
            elif method == 'last':
                rez = [self.last()]

            if hasattr(rez, '__len__') and rez.__len__() == 1:
                return rez[0]

            return rez
        except Exception as e:
            self.error("Invalid SQL syntax detected: {0!r} by {1}".format(row, e))
            raise Exception('Invalid SQL syntax!!')

    def go_go(self, method, args):
        return self.__getattribute__(method)(*args)

    def delete(self, where=None, limit=None, desk=None):
        limit = int(limit.strip()) if limit else 0
        where = 'True' if not where else where
        where = self.fix_where(where)

        rez = 0
        del_indexes = []
        l = locals()
        i = 0
        mem = self._memory if not desk else reversed(self._memory)
        for d in mem:
            for name_, type_, len_ in self._schema:
                l[name_] = d[name_]

            st = parser.expr(where)
            is_ok = eval(st.compile())

            if is_ok:
                rez += 1
                del_indexes.append(i)

            i += 1

            if limit and rez >= limit:
                break

        z = 0
        for x in sorted(del_indexes):
            self._delete_dy_index(x - z)
            z += 1

        return rez

    def _delete_dy_index(self, index):
        if 0 <= index < len(self._memory):
            del self._memory[index]
            return 1
        return 0

    def _delete_dy_indexes(self, *indexes):
        del_counter = 0
        for index in sorted(indexes):
            deleted = self._delete_dy_index(index - del_counter)
            del_counter += deleted
        return del_counter

    def _memory_dump(self):
        print('\n-- dump --')
        for d in self._memory:
            print(d.values())
        print('-- |--| --\n')

    def select(self, where=None, limit=None, desk=None):
        limit = int(limit.strip()) if limit else 0
        where = 'True' if not where else where
        where = self.fix_where(where)

        rez = []
        l = locals()
        mem = self._memory if not desk else reversed(self._memory)
        for d in mem:
            for name_, type_, len_ in self._schema:
                l[name_] = d[name_]

            st = parser.expr(where)
            is_ok = eval(st.compile())

            if limit and len(rez) >= limit:
                return rez

            if is_ok:
                rez.append(d)
        return rez

    def insert(self, insert_obj_row):
        if not insert_obj_row.startswith('(') or not insert_obj_row.endswith(')'):
            return
        insert_obj_row = insert_obj_row.replace("'", "'''")
        insert_obj_row = insert_obj_row.replace("\"", "'''")
        try:
            st = parser.expr(insert_obj_row)
            obj = eval(st.compile())
            if type(obj) != tuple or len(obj) != len(self._schema):
                return

            d = {}
            i = 0
            for name_, type_, len_ in self._schema:
                d[name_] = obj[i]
                i += 1

                _ck = CHECKERS[type_]
                if _ck(d[name_]):
                    return

            self._insert(d)
            return d
        except Exception as e:
            self.error('Insertion error! {0}'.format(e))
            return

    def fix_where(self, where):
        z = where.replace(' = ', ' == ')
        z = z.replace('__', '')
        return z.replace('import', '')

    def last(self):
        pass

    def init_mem(self, db_name, schema):
        i = 0
        all = []
        data = {}

        self.lock.acquire()

        def read_tail(f):
            while True:
                c = f.read(1)
                if not c or c == '\n':
                    return

        try:
            _r = open(db_name, 'rb')
        except:
            self.info("Create New DB")
            try:
                t = open(db_name, 'a')
                t.close()
            except Exception as e:
                self.error("Can not create NEW DB", e)
                raise Exception('Can not create new DB')

        try:
            _r = open(db_name, 'rb')
        except:
            self.error("I/0 Error #1! Can not open!")
            raise Exception("I/0 Error #1! Can not open!")

        while 1:
            if i == len(schema):
                all.append(data)
                read_tail(_r)
                i = 0
                data = {}

            name_, type_, len_ = schema[i]

            d = _r.read(len_).replace('\0', '')
            if not d: break

            zero = _r.read(1)
            if not zero or ord(zero) != 0:
                read_tail(_r)
                i = 0
                data = {}
                continue

            try:
                data[name_] = type_(d)
            except:
                read_tail(_r)
                i = 0
                data = {}
            else:
                i += 1

        _r.close()

        self.lock.release()

        return all
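
A hypothetical usage sketch for the Store class above. DUMPERS and CHECKERS are module-level tables that are not shown in this excerpt, so this assumes they provide fixed-width serializers and validators for the int and str field types mentioned in the docstring. Note that Store joins db_name under a db/ directory, which must already exist.

import os

if not os.path.isdir('db'):
    os.makedirs('db')                     # Store opens its file as db/<db_name>
schema = (
    ('id', int, 8),                       # (field name, type, byte length)
    ('name', str, 16),
)
store = Store('users.db', schema, log_level=4)
store.execute("insert into users values (1, 'alice')")
store.execute("insert into users values (2, 'bob')")
print(store.execute("select where id >= 1 limit 2"))
print(store.execute("delete where name == 'bob'"))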
Esempio n. 38
0
class EngineConnector:
    def __init__(self, otherbot_path, is_first):
        import uuid
        import subprocess
        from filelock import FileLock
        self.str_uuid = str(uuid.uuid1())
        self.interface_pipe_path = os.path.abspath("pipes" + os.sep +
                                                   self.str_uuid + ".pipe")
        self.interface_lock_path = self.interface_pipe_path + ".lock"
        self.lock = FileLock(self.interface_lock_path)
        self.is_first = is_first

        #create pipe file
        #os.mknod(os.path.abspath(self.interface_pipe_path))
        pipe = open(self.interface_pipe_path, "w+")
        pipe.close()

        #run engine
        ibot_launch_str = "python3 .." + os.sep + "EngineInterface" + os.sep + "main.py " + self.interface_pipe_path

        if is_first:
            engine_launch_str = "java -cp bin com.theaigames.tictactoe.Tictactoe \"" + ibot_launch_str + "\" \"" + otherbot_path + "\""
        else:
            engine_launch_str = "java -cp bin com.theaigames.tictactoe.Tictactoe \"" + otherbot_path + "\" \"" + ibot_launch_str + "\""
        self.engine_process = subprocess.Popen(engine_launch_str,
                                               shell=True,
                                               stdout=subprocess.PIPE,
                                               stderr=subprocess.PIPE,
                                               cwd=".." + os.sep +
                                               "ultimatetictactoe-engine")
        #avoid blocking by setting flags
        flags = fcntl(self.engine_process.stdout, F_GETFL)
        fcntl(self.engine_process.stdout, F_SETFL, flags | os.O_NONBLOCK)
        flags = fcntl(self.engine_process.stderr, F_GETFL)
        fcntl(self.engine_process.stderr, F_SETFL, flags | os.O_NONBLOCK)

    def read_state(self):
        #print("reading state")
        received = ""
        received_selection = False
        while (not received_selection):
            #try to read selection
            self.lock.acquire()
            try:
                pipe = open(self.interface_pipe_path, "r")
                #check that file has been updated
                first_line = pipe.readline()
                expected = "state"
                if (expected in first_line):
                    received_selection = True
                    received = pipe.read()
                pipe.close()
            finally:
                self.lock.release()
            #sleep for a duration to give time for selection to be made
            time.sleep(0.001)
        #process received into a tuple of board, macroboard, moves
        lines = received.splitlines(keepends=False)
        #print("state: " + str((lines[1], lines[3], lines[5])))
        return (self.process_board(lines[1]),
                self.process_macroboard(lines[3]), lines[5])

    #attempts a single state read
    def try_read_state(self):
        received = ""
        received_selection = False
        self.lock.acquire()
        try:
            pipe = open(self.interface_pipe_path, "r")
            #check that file has been updated
            first_line = pipe.readline()
            expected = "state"
            if (expected in first_line):
                received_selection = True
                received = pipe.read()
            pipe.close()
        finally:
            self.lock.release()
        if received_selection:
            lines = received.splitlines(keepends=False)
            #print("state: " + str((lines[1], lines[3], lines[5])))
            return (self.process_board(lines[1]),
                    self.process_macroboard(lines[3]), lines[5])
        else:
            return None

    def send_move(self, selection):
        self.lock.acquire()
        try:
            pipe = open(self.interface_pipe_path, "w+")
            pipe.write("move\n")
            pipe.write(selection)
            pipe.close()
        finally:
            self.lock.release()

    def process_board(self, pre_board):
        if self.is_first:
            return pre_board.replace("-1", "a").replace("1", "m").replace(
                "2", "y").replace("a", "-1")
        else:
            return pre_board.replace("-1", "a").replace("2", "m").replace(
                "1", "y").replace("a", "-1")

    def process_macroboard(self, pre_macroboard):
        if self.is_first:
            return pre_macroboard.replace("-1", "a").replace("1", "m").replace(
                "2", "y").replace("a", "-1")
        else:
            return pre_macroboard.replace("-1", "a").replace("2", "m").replace(
                "1", "y").replace("a", "-1")

    # return 0 if draw, 1 if player 1 wins, 2 if player 2 wins, -1 if active game
    def win_status(self):
        l = self.engine_process.stdout.readline().decode(
            sys.getdefaultencoding())
        while l != "":
            #print("l:" + l)
            if "Draw" in l:
                return 0
            if "winner: player1" in l:
                return 1
            if "winner: player2" in l:
                return 2
            l = self.engine_process.stdout.readline().decode(
                sys.getdefaultencoding())
        return -1

    def close(self):
        if os.path.exists(self.interface_lock_path):
            os.remove(self.interface_lock_path)
        os.remove(self.interface_pipe_path)
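
Finally, a hedged sketch of how EngineConnector might be driven from a bot script: poll for a state, choose a move, send it, and stop once win_status() reports something other than -1. choose_move and the opponent bot path are placeholders, and the engine directory layout is assumed to match the paths hard-coded in the class above.

import time

def play_one_game(otherbot_path, choose_move):
    # choose_move(board, macroboard, moves) -> move string; supplied by the caller.
    conn = EngineConnector(otherbot_path, is_first=True)
    try:
        result = conn.win_status()
        while result == -1:
            state = conn.try_read_state()
            if state is not None:
                board, macroboard, moves = state
                conn.send_move(choose_move(board, macroboard, moves))
            time.sleep(0.01)
            result = conn.win_status()
        return result                     # 0 = draw, 1 = player 1 wins, 2 = player 2 wins
    finally:
        conn.close()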