Example 1
    def read(self, path):
        """Return an in-memory binary stream holding the file at *path*.

        Raises:
            FileNotFoundError: when *path* does not match any file on the
                remote drive.
        """
        file = self.search(path)
        if not file:
            raise FileNotFoundError(path)

        LOGGER.info(f"Downloading {file} at {path}")

        # Pull the raw media bytes into an in-memory buffer.
        stream = io.BytesIO()
        self._download(self._drive.files().get_media(fileId=file._id), stream)

        # TODO: native Google Docs cannot be retrieved with get_media; when
        # the request fails with HTTP 403 they should instead be exported
        # via self._drive.files().export_media(fileId=..., mimeType=...).

        # Flush and rewind so callers read from the start of the buffer.
        stream.flush()
        stream.seek(0)

        return stream
Example 2
    def _start_collectors(self, master_state, slave_state):
        """Spawn watcher threads for both file systems and apply their deltas.

        Each watcher streams incremental changes from its source FS into the
        shared queue; this method then consumes the queue forever, applying
        every delta to the opposite FS.  This method never returns: it runs
        until the process is terminated externally.

        Args:
            master_state: last persisted state of the master FS.
            slave_state: last persisted state of the slave FS.
        """

        def collect_deltas(source, dest):
            # source/dest are (fs, state) pairs; forward every non-empty
            # delta to the consumer loop below via the shared queue.
            for delta in source[0].get_changes():
                if delta:
                    LOGGER.debug(
                        f"Incremental delta received from {source[0]}:\n{delta}"
                    )
                    self._queue.put((delta, source, dest))
            LOGGER.trace("Collector thread is terminating.")

        watches = [
            threading.Thread(
                name="Watch-MS",
                target=collect_deltas,
                args=((self.master_fs, master_state), (self.slave_fs, slave_state)),
            ),
            threading.Thread(
                name="Watch-SM",
                target=collect_deltas,
                args=((self.slave_fs, slave_state), (self.master_fs, master_state)),
            ),
        ]

        for watch in watches:
            watch.daemon = True  # Kill with main thread
            watch.start()

        # Consumer loop.  NOTE: a `watch.join()` loop used to follow this
        # `while True`; it was unreachable dead code and has been removed.
        while True:
            LOGGER.info("Watching for FS state changes")
            delta, source, dest = self._queue.get()
            delta.apply(source, dest)
            LOGGER.debug(f"Incremental delta applied to {dest[0]}")
Example 3
    def _save_states(self, signum=None, frame=None):
        """Persist both FS states to disk; also usable as a signal handler.

        When invoked as a signal handler, *signum* is the signal number and
        the process exits with that code after saving; *frame* is accepted
        for the signal-handler signature but unused.
        """
        if signum:
            print("")  # move past any ^C echoed on the terminal
            LOGGER.warn(
                f"Received termination signal ({signum}). Shutting down...")

        if self._master_state:
            self._master_state.save(self._master_state_file)
            LOGGER.info("Master FS state saved")

        # Fixed: the slave state used to be saved only when the master state
        # existed; each state is now guarded independently so neither save
        # is silently skipped.
        if self._slave_state:
            self._slave_state.save(self._slave_state_file)
            LOGGER.info("Slave FS state saved")

        if signum:
            exit(signum)
Example 4
 def func_wrapper(*args, **kwargs):
     # Keep calling *f* with exponential back-off (capped at *cap*) for as
     # long as a file system reports that it is not ready.  Any other
     # exception propagates immediately; a successful call returns at once.
     backoff = delay
     while True:
         try:
             return f(*args, **kwargs)
         except FSNotReady as e:
             LOGGER.error(f"A file system is not ready yet: {e}")
             LOGGER.info(
                 f"A new start attempt will be made in {int(backoff)} seconds"
             )
             sleep(backoff)
             backoff = min(cap, backoff * ratio)
Example 5
    def start(self):
        """Bring both file systems online, reconcile the changes that
        happened while erwin was offline, then watch for live changes.

        Master changes win during the offline reconciliation: master deltas
        are applied to the slave first, then the remaining slave deltas are
        pushed back to master.  Never returns (hands over to the collector
        loop).
        """
        with ErwinConfiguration() as config:
            LOGGER.info("Erwin configuration loaded successfully.")

            # Create master and slave FSs
            self.master_fs = GoogleDriveFS(**config.get_master_fs_params())
            LOGGER.info("Master FS is online.")
            LOGGER.debug(f"Created Master FS of type {type(self.master_fs)}")

            self.slave_fs = LocalFS(**config.get_slave_fs_params())
            LOGGER.info("Slave FS is online.")
            LOGGER.debug(f"Created Slave FS of type {type(self.slave_fs)}")

            # Load the previous state
            prev_master_state, prev_slave_state = config.load_fs_states()

            # Register signal handlers so states are persisted on shutdown
            config.register_state_handler(prev_master_state, prev_slave_state)

            LOGGER.info("Previous FS states loaded successfully.")

            # Compute deltas since last launch
            master_deltas = self.master_fs.state - prev_master_state
            LOGGER.debug(f"Master deltas since last state save:\n{master_deltas}")

            slave_deltas = self.slave_fs.state - prev_slave_state
            LOGGER.debug(f"Slave deltas since last state save:\n{slave_deltas}")

            self.resolve_conflicts(master_deltas, slave_deltas)

            # Push master-side changes onto the slave first.
            master_deltas.apply(
                (self.master_fs, prev_master_state), (self.slave_fs, prev_slave_state)
            )
            # Sanity check: master and its tracked state must now agree.
            if self.master_fs.state - prev_master_state:
                raise RuntimeError("Not all deltas applied correctly to master!")

            # At this point we do not expect to have any conflicts left as we
            # have resolved them at master before.
            new_slave_deltas = self.slave_fs.state - prev_slave_state
            LOGGER.debug(f"New deltas:\n{new_slave_deltas}")

            new_slave_deltas.apply(
                (self.slave_fs, prev_slave_state), (self.master_fs, prev_master_state)
            )

            # Start the collectors to watch for changes on both FSs.
            self._start_collectors(prev_master_state, prev_slave_state)
Example 6
 def move_conflict(path):
     # Preserve the slave's conflicting copy under a conflict name so it
     # is not lost when the master version is applied over *path*.
     backup_path = self.slave_fs.conflict(path)
     self.slave_fs.copy(path, backup_path)
     LOGGER.info(
         f"Conflicting file on slave backed up: {path} -> {backup_path}"
     )