Example #1
    def test_double_reader_writer(self):
        lock = fasteners.ReaderWriterLock()
        activated = collections.deque()
        active = threading.Event()

        def double_reader():
            with lock.read_lock():
                active.set()
                while not lock.has_pending_writers:
                    time.sleep(0.001)
                with lock.read_lock():
                    activated.append(lock.owner)

        def happy_writer():
            with lock.write_lock():
                activated.append(lock.owner)

        reader = _daemon_thread(double_reader)
        reader.start()
        active.wait(WAIT_TIMEOUT)
        self.assertTrue(active.is_set())

        writer = _daemon_thread(happy_writer)
        writer.start()

        reader.join()
        writer.join()
        self.assertEqual(2, len(activated))
        self.assertEqual(['r', 'w'], list(activated))
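
A standalone sketch of the handoff the test above exercises: a reader holding the lock watches has_pending_writers until a writer queues up behind it, then releases (the timing sleep is illustrative, not load-bearing):

import threading
import time

import fasteners

lock = fasteners.ReaderWriterLock()

def reader():
    with lock.read_lock():
        # Spin until a writer is waiting behind us, then release.
        while not lock.has_pending_writers:
            time.sleep(0.001)

def writer():
    with lock.write_lock():
        print('writer ran once the reader released')

r = threading.Thread(target=reader)
r.start()
time.sleep(0.01)  # illustrative: give the reader time to acquire first
w = threading.Thread(target=writer)
w.start()
r.join()
w.join()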
Example #2
    def test_no_double_writers(self):
        lock = fasteners.ReaderWriterLock()
        watch = _utils.StopWatch(duration=5)
        watch.start()
        dups = collections.deque()
        active = collections.deque()

        def acquire_check(me):
            with lock.write_lock():
                if len(active) >= 1:
                    dups.append(me)
                    dups.extend(active)
                active.append(me)
                try:
                    time.sleep(random.random() / 100)
                finally:
                    active.remove(me)

        def run():
            me = threading.current_thread()
            while not watch.expired():
                acquire_check(me)

        threads = []
        for i in range(0, self.THREAD_COUNT):
            t = _daemon_thread(run)
            threads.append(t)
            t.start()
        while threads:
            t = threads.pop()
            t.join()

        self.assertEqual([], list(dups))
        self.assertEqual([], list(active))
Example #3
    def __init__(self, tickerid='MARKET:SYMBOL'):
        super().__init__()
        self.tickerid = tickerid
        self.candles = {}
        self.lock = fasteners.ReaderWriterLock()
        self.queues = {}
        self.start_threads()
Example #4
    def test_writer_chaotic(self):
        lock = fasteners.ReaderWriterLock()
        activated = collections.deque()

        def chaotic_writer(blow_up):
            with lock.write_lock():
                if blow_up:
                    raise RuntimeError("Broken")
                else:
                    activated.append(lock.owner)

        def happy_reader():
            with lock.read_lock():
                activated.append(lock.owner)

        with futures.ThreadPoolExecutor(max_workers=20) as e:
            for i in range(0, 20):
                if i % 2 == 0:
                    e.submit(chaotic_writer, blow_up=bool(i % 4 == 0))
                else:
                    e.submit(happy_reader)

        writers = [a for a in activated if a == 'w']
        readers = [a for a in activated if a == 'r']
        self.assertEqual(5, len(writers))
        self.assertEqual(10, len(readers))
Example #5
    def test_writer_reader_writer(self):
        lock = fasteners.ReaderWriterLock()
        with lock.write_lock():
            self.assertTrue(lock.is_writer())
            with lock.read_lock():
                self.assertTrue(lock.is_reader())
                with lock.write_lock():
                    self.assertTrue(lock.is_writer())
Example #6
def test_writer_reader_writer():
    lock = fasteners.ReaderWriterLock()
    with lock.write_lock():
        assert lock.is_writer()
        with lock.read_lock():
            assert lock.is_reader()
            with lock.write_lock():
                assert lock.is_writer()
Example #7
    def save(self):
        lock = fasteners.ReaderWriterLock()
        while True:
            try:
                with lock.write_lock():
                    torch.save(deepcopy(self).cpu().state_dict(), self.path)
                return
            except Exception:
                sleep(np.random.random() + 1)
Example #8
    def load(self):
        lock = fasteners.ReaderWriterLock()
        while True:
            try:
                with lock.read_lock():
                    state_dict = torch.load(self.path)
                self.load_state_dict(state_dict)
                return
            except Exception:
                sleep(np.random.random() + 1)
Example #9
def test_reader_abort():
    lock = fasteners.ReaderWriterLock()
    assert lock.owner is None

    with pytest.raises(RuntimeError):
        with lock.read_lock():
            assert lock.owner == lock.READER
            raise RuntimeError("Broken")

    assert lock.owner is None
Example #10
    def test_double_reader(self):
        lock = fasteners.ReaderWriterLock()
        with lock.read_lock():
            self.assertTrue(lock.is_reader())
            self.assertFalse(lock.is_writer())
            with lock.read_lock():
                self.assertTrue(lock.is_reader())
            self.assertTrue(lock.is_reader())

        self.assertFalse(lock.is_reader())
        self.assertFalse(lock.is_writer())
Example #11
    def test_reader_abort(self):
        lock = fasteners.ReaderWriterLock()
        self.assertFalse(lock.owner)

        def blow_up():
            with lock.read_lock():
                self.assertEqual(lock.READER, lock.owner)
                raise RuntimeError("Broken")

        self.assertRaises(RuntimeError, blow_up)
        self.assertFalse(lock.owner)
Example #12
def test_reader_to_writer():
    lock = fasteners.ReaderWriterLock()

    with lock.read_lock():
        with pytest.raises(RuntimeError):
            with lock.write_lock():
                pass
        assert lock.is_reader()
        assert not lock.is_writer()

    assert not lock.is_reader()
    assert not lock.is_writer()
Example #13
    def test_reader_to_writer(self):
        lock = fasteners.ReaderWriterLock()

        def writer_func():
            with lock.write_lock():
                pass

        with lock.read_lock():
            self.assertRaises(RuntimeError, writer_func)
            self.assertFalse(lock.is_writer())

        self.assertFalse(lock.is_reader())
        self.assertFalse(lock.is_writer())
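
Examples #12 and #13 show that upgrading a held read lock to a write lock raises RuntimeError. The usual workaround is to release the read lock, reacquire as a writer, and re-check the condition; a minimal sketch (cache and compute are illustrative names, not part of the library):

import fasteners

lock = fasteners.ReaderWriterLock()
cache = {}

def compute(key):
    return key * 2  # illustrative stand-in for real work

def get_or_compute(key):
    with lock.read_lock():
        if key in cache:
            return cache[key]
    # Upgrading in place would raise RuntimeError, so reacquire as a
    # writer and re-check: another writer may have filled the entry
    # between the two acquisitions.
    with lock.write_lock():
        if key not in cache:
            cache[key] = compute(key)
        return cache[key]

print(get_or_compute(21))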
Example #14
def test_single_reader_writer():
    results = []
    lock = fasteners.ReaderWriterLock()
    with lock.read_lock():
        assert lock.is_reader()
        assert not results
    with lock.write_lock():
        results.append(1)
        assert lock.is_writer()
    with lock.read_lock():
        assert lock.is_reader()
        assert len(results) == 1
    assert not lock.is_reader()
    assert not lock.is_writer()
Example #15
    def test_single_reader_writer(self):
        results = []
        lock = fasteners.ReaderWriterLock()
        with lock.read_lock():
            self.assertTrue(lock.is_reader())
            self.assertEqual(0, len(results))
        with lock.write_lock():
            results.append(1)
            self.assertTrue(lock.is_writer())
        with lock.read_lock():
            self.assertTrue(lock.is_reader())
            self.assertEqual(1, len(results))
        self.assertFalse(lock.is_reader())
        self.assertFalse(lock.is_writer())
Example #16
    def test_writer_to_reader(self):
        lock = fasteners.ReaderWriterLock()

        def reader_func():
            with lock.read_lock():
                self.assertTrue(lock.is_writer())
                self.assertTrue(lock.is_reader())

        with lock.write_lock():
            self.assertIsNone(reader_func())
            self.assertFalse(lock.is_reader())

        self.assertFalse(lock.is_reader())
        self.assertFalse(lock.is_writer())
Example #17
def test_double_reader():
    lock = fasteners.ReaderWriterLock()

    with lock.read_lock():
        assert lock.is_reader()
        assert not lock.is_writer()

        with lock.read_lock():
            assert lock.is_reader()

        assert lock.is_reader()

    assert not lock.is_reader()
    assert not lock.is_writer()
Example #18
    def __init__(self, conf):
        super(DirBackend, self).__init__(conf)
        max_cache_size = self._conf.get('max_cache_size')
        if max_cache_size is not None:
            max_cache_size = int(max_cache_size)
            if max_cache_size < 1:
                raise ValueError("Maximum cache size must be greater than"
                                 " or equal to one")
            self.file_cache = cachetools.LRUCache(max_cache_size)
        else:
            self.file_cache = {}
        self.encoding = self._conf.get('encoding', self.DEFAULT_FILE_ENCODING)
        if not self._path:
            raise ValueError("Empty path is disallowed")
        self._path = os.path.abspath(self._path)
        self.lock = fasteners.ReaderWriterLock()
Example #19
    def __init__(self, flow_detail, backend=None, scope_fetcher=None):
        self._result_mappings = {}
        self._reverse_mapping = {}
        if backend is None:
            # Err on the likelihood that most people don't make their
            # objects deep-copyable (resources, locks and such can't
            # be deepcopied)...
            backend = impl_memory.MemoryBackend({'deep_copy': False})
            with contextlib.closing(backend.get_connection()) as conn:
                conn.update_flow_details(flow_detail, ignore_missing=True)
        self._backend = backend
        self._flowdetail = flow_detail
        self._transients = {}
        self._injected_args = {}
        self._lock = fasteners.ReaderWriterLock()
        self._ensure_matchers = [
            ((task.BaseTask, ), (models.TaskDetail, 'Task')),
            ((retry.Retry, ), (models.RetryDetail, 'Retry')),
        ]
        if scope_fetcher is None:
            scope_fetcher = lambda atom_name: None
        self._scope_fetcher = scope_fetcher

        # NOTE(imelnikov): failure serialization loses information,
        # so we cache failures here, in atom name -> failure mapping.
        self._failures = {}
        for ad in self._flowdetail:
            fail_cache = {}
            if ad.failure is not None:
                fail_cache[states.EXECUTE] = ad.failure
            if ad.revert_failure is not None:
                fail_cache[states.REVERT] = ad.revert_failure
            self._failures[ad.name] = fail_cache

        self._atom_name_to_uuid = dict(
            (ad.name, ad.uuid) for ad in self._flowdetail)
        try:
            source, _clone = self._atomdetail_by_name(
                self.injector_name, expected_type=models.TaskDetail)
        except exceptions.NotFound:
            pass
        else:
            names_iter = six.iterkeys(source.results)
            self._set_result_mapping(source.name,
                                     dict((name, name) for name in names_iter))
Example #20
    def Store(self, obj, envKey):
        """Stores authorization token passed as an argument.
        param obj instance to be stored.
        """
        DefaultStorageStrategy.cache_path = os.path.join(
            Configuration.TempPath, "cached-data." + envKey + ".py")

        if obj is None:
            return
        lock = fasteners.ReaderWriterLock()
        with lock.write_lock():
            with open(DefaultStorageStrategy.cache_path, 'w') as fp:
                os.chmod(DefaultStorageStrategy.cache_path,
                         stat.S_IRUSR | stat.S_IWUSR)
                # Write the result to the file as json
                serializedObj = "#" + json.dumps(obj.__dict__)
                # the '#' prefix prevents the token file from being
                # served as-is via http when the path is invalid
                fp.write(serializedObj)
Example #21
    def Get(self, envKey):
        """Gets the currently stored objects as dictionary.
        return stored Token dictionary or null.
        """
        DefaultStorageStrategy.cache_path = os.path.join(
            Configuration.TempPath, "cached-data." + envKey + ".py")

        if not os.path.exists(DefaultStorageStrategy.cache_path):
            return None
        lock = fasteners.ReaderWriterLock()
        with lock.read_lock():
            with open(DefaultStorageStrategy.cache_path, 'rb') as fp:
                serializedObj = fp.read().decode('UTF-8')
            try:
                cached = json.loads(serializedObj[1:])
            except ValueError:
                cached = None
        return OAuthToken(cached)
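
Note that Examples #20 and #21 construct a fresh ReaderWriterLock inside each call, so the Store and Get paths never actually synchronize with one another. A minimal sketch of the usual arrangement, with one lock shared at module scope (names here are illustrative, not part of the original API):

import fasteners

# One lock shared by every reader and writer of the cache file.
_cache_lock = fasteners.ReaderWriterLock()

def store_text(path, text):
    with _cache_lock.write_lock():
        with open(path, 'w') as fp:
            fp.write(text)

def get_text(path):
    with _cache_lock.read_lock():
        with open(path) as fp:
            return fp.read()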
Example #22
def _spawn_variation(readers, writers, max_workers=None):
    start_stops = collections.deque()
    lock = fasteners.ReaderWriterLock()

    def read_func(ident):
        with lock.read_lock():
            # TODO(harlowja): sometime in the future use a monotonic clock here
            # to avoid problems that can be caused by ntpd resyncing the clock
            # while we are actively running.
            enter_time = _utils.now()
            time.sleep(WORK_TIMES[ident % len(WORK_TIMES)])
            exit_time = _utils.now()
            start_stops.append((lock.READER, enter_time, exit_time))
            time.sleep(NAPPY_TIME)

    def write_func(ident):
        with lock.write_lock():
            enter_time = _utils.now()
            time.sleep(WORK_TIMES[ident % len(WORK_TIMES)])
            exit_time = _utils.now()
            start_stops.append((lock.WRITER, enter_time, exit_time))
            time.sleep(NAPPY_TIME)

    if max_workers is None:
        max_workers = max(0, readers) + max(0, writers)
    if max_workers > 0:
        with futures.ThreadPoolExecutor(max_workers=max_workers) as e:
            count = 0
            for _i in range(0, readers):
                e.submit(read_func, count)
                count += 1
            for _i in range(0, writers):
                e.submit(write_func, count)
                count += 1

    writer_times = []
    reader_times = []
    for (lock_type, start, stop) in list(start_stops):
        if lock_type == lock.WRITER:
            writer_times.append((start, stop))
        else:
            reader_times.append((start, stop))
    return (writer_times, reader_times)
Example #23
    def test_no_concurrent_readers_writers(self):
        lock = fasteners.ReaderWriterLock()
        watch = _utils.StopWatch(duration=5)
        watch.start()
        dups = collections.deque()
        active = collections.deque()

        def acquire_check(me, reader):
            if reader:
                lock_func = lock.read_lock
            else:
                lock_func = lock.write_lock
            with lock_func():
                if not reader:
                    # There should be no one else currently active; if
                    # there is, capture them so that we can blow up the
                    # test later.
                    if len(active) >= 1:
                        dups.append(me)
                        dups.extend(active)
                active.append(me)
                try:
                    time.sleep(random.random() / 100)
                finally:
                    active.remove(me)

        def run():
            me = threading.current_thread()
            while not watch.expired():
                acquire_check(me, random.choice([True, False]))

        threads = []
        for i in range(0, self.THREAD_COUNT):
            t = _daemon_thread(run)
            threads.append(t)
            t.start()
        while threads:
            t = threads.pop()
            t.join()

        self.assertEqual([], list(dups))
        self.assertEqual([], list(active))
Example #24
    def test_double_reader_abort(self):
        lock = fasteners.ReaderWriterLock()
        activated = collections.deque()

        def double_bad_reader():
            with lock.read_lock():
                with lock.read_lock():
                    raise RuntimeError("Broken")

        def happy_writer():
            with lock.write_lock():
                activated.append(lock.owner)

        with futures.ThreadPoolExecutor(max_workers=20) as e:
            for i in range(0, 20):
                if i % 2 == 0:
                    e.submit(double_bad_reader)
                else:
                    e.submit(happy_writer)

        self.assertEqual(10, len([a for a in activated if a == 'w']))
Example #25
def _spawn_variation(readers, writers, max_workers=None):
    start_stops = collections.deque()
    lock = fasteners.ReaderWriterLock()

    def read_func(ident):
        with lock.read_lock():
            enter_time = time.monotonic()
            time.sleep(WORK_TIMES[ident % len(WORK_TIMES)])
            exit_time = time.monotonic()
            start_stops.append((lock.READER, enter_time, exit_time))
            time.sleep(NAPPY_TIME)

    def write_func(ident):
        with lock.write_lock():
            enter_time = time.monotonic()
            time.sleep(WORK_TIMES[ident % len(WORK_TIMES)])
            exit_time = time.monotonic()
            start_stops.append((lock.WRITER, enter_time, exit_time))
            time.sleep(NAPPY_TIME)

    if max_workers is None:
        max_workers = max(0, readers) + max(0, writers)
    if max_workers > 0:
        with futures.ThreadPoolExecutor(max_workers=max_workers) as e:
            count = 0
            for _i in range(0, readers):
                e.submit(read_func, count)
                count += 1
            for _i in range(0, writers):
                e.submit(write_func, count)
                count += 1

    writer_times = []
    reader_times = []
    for (lock_type, start, stop) in list(start_stops):
        if lock_type == lock.WRITER:
            writer_times.append((start, stop))
        else:
            reader_times.append((start, stop))
    return writer_times, reader_times
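
The (writer_times, reader_times) intervals returned by _spawn_variation are presumably asserted on by the surrounding tests; a self-contained sketch of one such check, verifying that no writer interval overlaps any other interval (the sample data is illustrative):

def intervals_overlap(a, b):
    # Half-open intervals (start, stop) overlap when each starts
    # before the other stops.
    return a[0] < b[1] and b[0] < a[1]

writer_times = [(0.0, 1.0), (2.0, 3.0)]  # illustrative sample data
reader_times = [(1.0, 2.0), (1.2, 1.8)]

for w in writer_times:
    others = [t for t in writer_times if t is not w] + reader_times
    assert not any(intervals_overlap(w, o) for o in others)
print('no writer interval overlapped any other interval')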
Example #26
def test_reader_chaotic():
    lock = fasteners.ReaderWriterLock()
    activated = collections.deque()

    def chaotic_reader(blow_up):
        with lock.read_lock():
            if blow_up:
                raise RuntimeError("Broken")
            else:
                activated.append(lock.owner)

    def happy_writer():
        with lock.write_lock():
            activated.append(lock.owner)

    with futures.ThreadPoolExecutor(max_workers=20) as e:
        for i in range(0, 20):
            if i % 2 == 0:
                e.submit(chaotic_reader, blow_up=bool(i % 4 == 0))
            else:
                e.submit(happy_writer)

    assert sum(a == 'w' for a in activated) == 10
    assert sum(a == 'r' for a in activated) == 5
Example #27
    def __init__(self, conf=None):
        super(MemoryBackend, self).__init__(conf)
        self.memory = FakeFilesystem(
            deep_copy=self._conf.get('deep_copy', True))
        self.lock = fasteners.ReaderWriterLock()
Example #28
import random
import threading
import time

import fasteners


def read_something(ident, rw_lock):
    with rw_lock.read_lock():
        print("Thread %s is reading something" % ident)
        time.sleep(1)


def write_something(ident, rw_lock):
    with rw_lock.write_lock():
        print("Thread %s is writing something" % ident)
        time.sleep(2)


rw_lock = fasteners.ReaderWriterLock()
threads = []
for i in range(0, 10):
    is_writer = random.choice([True, False])
    if is_writer:
        threads.append(
            threading.Thread(target=write_something, args=(i, rw_lock)))
    else:
        threads.append(
            threading.Thread(target=read_something, args=(i, rw_lock)))

try:
    for t in threads:
        t.start()
finally:
    while threads:
        t = threads.pop()
        t.join()
Example #29
    def __init__(self):
        self._lock = fasteners.ReaderWriterLock()
Example #30
class Project(Versioned):
    """
    Represent the current Meltano project from a file-system
    perspective.
    """

    __version__ = 1
    _activate_lock = threading.Lock()
    _find_lock = threading.Lock()
    _meltano_rw_lock = fasteners.ReaderWriterLock()
    _default = None

    def __init__(self, root: Union[Path, str] = None):
        self.root = Path(root or os.getcwd()).resolve()
        self._meltano_ip_lock = fasteners.InterProcessLock(
            self.run_dir("meltano.yml.lock"))

    @classmethod
    @fasteners.locked(lock="_activate_lock")
    def activate(cls, project: "Project"):
        project.ensure_compatible()

        # helpful to refer to the current absolute project path
        os.environ["MELTANO_PROJECT_ROOT"] = str(project.root)

        # create a symlink to our current binary
        try:
            executable = Path(os.path.dirname(sys.executable), "meltano")
            if executable.is_file():
                project.run_dir().joinpath("bin").symlink_to(executable)
        except FileExistsError:
            pass

        load_dotenv(dotenv_path=project.root.joinpath(".env"))
        logging.debug(f"Activated project at {project.root}")

        # set the default project
        cls._default = project

    @property
    def backend_version(self):
        return int(self.meltano.get("version", 1))

    @classmethod
    @fasteners.locked(lock="_find_lock")
    def find(cls, from_dir: Union[Path, str] = None, activate=True):
        if cls._default:
            return cls._default

        project = Project(from_dir)

        if not project.meltanofile.exists():
            raise ProjectNotFound()

        # if we activate a project using `find()`, it should
        # be set as the default project for future `find()`
        if activate:
            cls.activate(project)

        return project

    @property
    def meltano(self) -> Dict:
        """Return a copy of the current meltano config"""
        # fmt: off
        with self._meltano_rw_lock.read_lock(), \
             self.meltanofile.open() as meltanofile:
            return yaml.safe_load(meltanofile)
        # fmt: on

    @contextmanager
    def meltano_update(self):
        """
        Yield the current meltano configuration and update the meltanofile
        if the context ends gracefully.
        """
        # fmt: off
        with self._meltano_rw_lock.write_lock(), \
            self._meltano_ip_lock:

            with self.meltanofile.open() as meltanofile:
                # read the latest version
                meltano_update = yaml.safe_load(meltanofile)

            yield meltano_update

            try:
                with atomic_write(self.meltanofile,
                                  overwrite=True) as meltanofile:
                    # update if everything is fine
                    yaml.dump(meltano_update,
                              meltanofile,
                              default_flow_style=False)
            except Exception as err:
                logging.critical(f"Could not update meltano.yml: {err}")
                raise
        # fmt: on

    def root_dir(self, *joinpaths):
        return self.root.joinpath(*joinpaths)

    @property
    def meltanofile(self):
        return self.root.joinpath("meltano.yml")

    @makedirs
    def meltano_dir(self, *joinpaths):
        return self.root.joinpath(".meltano", *joinpaths)

    @makedirs
    def analyze_dir(self, *joinpaths):
        return self.root_dir("analyze", *joinpaths)

    @makedirs
    def venvs_dir(self, *prefixes):
        return self.meltano_dir(*prefixes, "venv")

    @makedirs
    def run_dir(self, *joinpaths):
        return self.meltano_dir("run", *joinpaths)

    @makedirs
    def job_dir(self, job_id, *joinpaths):
        return self.run_dir("elt", slugify(job_id), *joinpaths)

    @makedirs
    def model_dir(self, *joinpaths):
        return self.meltano_dir("models", *joinpaths)

    @makedirs
    def plugin_dir(self, plugin: "PluginRef", *joinpaths):
        return self.meltano_dir(plugin.type, plugin.name, *joinpaths)

    def __eq__(self, other):
        return self.root == other.root

    def __hash__(self):
        return self.root.__hash__()
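
A condensed sketch of the locking pattern Project uses around its configuration file: a thread-level ReaderWriterLock for in-process readers and writers, plus a fasteners.InterProcessLock so separate processes serialize their updates (json stands in for yaml here; all names are illustrative):

import json

import fasteners

_rw_lock = fasteners.ReaderWriterLock()
_ip_lock = fasteners.InterProcessLock('config.json.lock')

def read_config(path='config.json'):
    with _rw_lock.read_lock():
        with open(path) as fp:
            return json.load(fp)

def update_config(change, path='config.json'):
    # A writer excludes in-process readers; the inter-process lock
    # excludes other processes touching the same file.
    with _rw_lock.write_lock(), _ip_lock:
        with open(path) as fp:
            config = json.load(fp)
        config.update(change)
        with open(path, 'w') as fp:
            json.dump(config, fp)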