Example #1
    def _create_empty_file_or_folder(self):
        self._fullname = FilePath(self._get_full_name())
        if not self._fullname:
            return

        data_dir = self._cfg.sync_directory if self._cfg else get_data_dir()
        self._in_data_dir = self._fullname in FilePath(data_dir)
        logger.debug("Adding special file %s", self._fullname)
        if self._in_data_dir:
            self._sync.add_special_file(self._fullname,
                                        self._on_special_file_event)

        special_file_created = False
        try:
            if self._is_folder:
                make_dirs(self._fullname, is_folder=True)
            else:
                create_empty_file(self._fullname)
            special_file_created = True
            self._update_spec_files(self._fullname, self._is_folder)
        except Exception as e:
            logger.warning("Can't create file or folder %s. Reason %s",
                           self._fullname, e)

        if not self._in_data_dir and special_file_created:
            self._sync.add_special_file(self._fullname,
                                        self._on_special_file_event)
        elif self._in_data_dir and not special_file_created:
            self._sync.remove_special_file(self._fullname)
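
All the examples on this page call a make_dirs helper that is not shown. Judging by the calls, it creates the missing parent directories for a file path, or the path itself when is_folder=True. A minimal sketch of such a helper, under that assumption:

import errno
import os
import os.path as op


def make_dirs(path, is_folder=False):
    # Hypothetical sketch: create the directory part of `path`
    # (or `path` itself when it is a folder); ignore "already exists"
    directory = path if is_folder else op.dirname(path)
    if not directory:
        return
    try:
        os.makedirs(directory)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise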
Example #2
    def __init__(self, path_converter, db_file_created_cb=None):
        self._pc = path_converter

        self.possibly_sync_folder_is_removed = Signal()
        self.db_or_disk_full = Signal()

        self._db_file = self._pc.create_abspath('.pvtbox/storage.db')
        logger.debug("DB file: %s", self._db_file)
        new_db_file = not exists(self._db_file)
        if new_db_file and callable(db_file_created_cb):
            db_file_created_cb()

        make_dirs(self._db_file)

        if not new_db_file:
            # Database migration; it can run before the db is opened
            try:
                upgrade_db("storage_db", db_filename=self._db_file)
            except Exception as e:
                remove_file(self._db_file)
                new_db_file = True
                logger.warning(
                    "Can't upgrade storage db (reason: %s). "
                    "Creating a new one...", e)
                if callable(db_file_created_cb):
                    db_file_created_cb()

        self._engine = create_engine(
            'sqlite:///{}'.format(FilePath(self._db_file)),
            connect_args={
                'timeout': 60 * 1000,
                'check_same_thread': False,
            })
        self._engine.pool_timeout = 60 * 60 * 1000
        self._Session = sessionmaker(bind=self._engine)

        Base.metadata.create_all(self._engine, checkfirst=True)

        if new_db_file:
            try:
                stamp_db("storage_db", db_filename=self._db_file)
            except Exception as e:
                logger.error("Error stamping storage db: %s", e)

        self._lock = threading.RLock()
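
This constructor follows the usual SQLAlchemy setup for an on-disk SQLite database: build an engine, bind a session factory, and create the declarative tables. A self-contained sketch of that pattern, using a hypothetical Item model instead of the project's own models:

import threading

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class Item(Base):
    # Hypothetical model; the project defines its own declarative classes
    __tablename__ = 'items'
    id = Column(Integer, primary_key=True)
    name = Column(String)


engine = create_engine(
    'sqlite:///storage.db',
    connect_args={'check_same_thread': False})
Session = sessionmaker(bind=engine)
Base.metadata.create_all(engine, checkfirst=True)

# The class above guards access with an RLock; the same idea in isolation:
lock = threading.RLock()
with lock:
    session = Session()
    session.add(Item(name='example'))
    session.commit()
    session.close()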
Example #3
    def _move(self):
        data_dir = self._cfg.sync_directory if self._cfg else get_data_dir()
        downloads_dir = get_downloads_dir(data_dir=data_dir, create=True)
        download_name = op.join(downloads_dir, self._current_share_hash)
        if not self._renew_dest_dir():
            return

        dest_dir = self._dest_dirs.get(self._current_share_hash, data_dir)
        dest_name = op.join(dest_dir, self._current_share_name)
        dest_name = FilePath(dest_name).longpath
        dest_name = get_next_name(dest_name)
        logger.debug("Move '%s' to '%s'", download_name, dest_name)
        try:
            if FilePath(dest_dir) not in FilePath(data_dir):
                make_dirs(dest_name)
            shutil.move(download_name, dest_name)
        except IOError as e:
            logger.warning(
                "Can't move downloaded shared file to %s. "
                "Reason: %s", dest_name, e)
            self.cancel_share_download(self._current_share_name,
                                       folder_deleted=True)
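
get_next_name is used here so the download does not overwrite an existing file at the destination. Its implementation is not shown; a minimal sketch, assuming it appends a numeric suffix until the name is free:

import os.path as op


def get_next_name(path):
    # Hypothetical sketch: "name.ext", "name (1).ext", "name (2).ext", ...
    if not op.exists(path):
        return path
    base, ext = op.splitext(path)
    index = 1
    while op.exists('{} ({}){}'.format(base, index, ext)):
        index += 1
    return '{} ({}){}'.format(base, index, ext)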
Example #4
    def _write_opened_port_to_accessible_file(self, port, port_file):
        make_dirs(port_file, False)
        with open(port_file, 'wb') as f:
            f.write('{}'.format(port).encode())
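
A small companion sketch (not part of the original code) showing how another process could read that port back:

def read_port_file(port_file):
    # Read the port number written by the method above
    with open(port_file, 'rb') as f:
        return int(f.read().decode())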
Example #5
    def migrate(self, old_dir, new_dir):
        logger.info("Starting sync dir migration from %s, to %s",
                    old_dir, new_dir)
        old_dir = FilePath(old_dir).longpath
        new_dir = FilePath(new_dir).longpath
        old_files = get_filelist(old_dir)
        old_dirs = get_dir_list(old_dir)
        total_count = len(old_files) + len(old_dirs) + 1
        progress = 0
        sent_progress = 0
        logger.debug("Migration progress: %s/%s (%s%%)", 0, total_count, sent_progress)
        count = 1

        copied_dirs = []
        copied_files = []

        make_dirs(new_dir, is_folder=True)
        copied_dirs.append(new_dir)
        logger.debug("Migration progress: %s/%s (%s%%)", count, total_count, sent_progress)
        self.progress.emit(sent_progress)

        for dir in old_dirs:
            if self._cancelled.isSet():
                self._delete(dirs=copied_dirs)
                logger.debug("Migration done because cancelled")
                self.done.emit()
                return

            new_dir_path = ensure_unicode(op.join(
                new_dir, op.relpath(dir, start=old_dir)))

            try:
                make_dirs(new_dir_path, is_folder=True)
            except Exception as e:
                logger.error("Make dirs error: %s", e)
                self.failed.emit(str(e))
                self._delete(dirs=copied_dirs)
                return

            copied_dirs.append(new_dir_path)
            count += 1
            progress = int(count / total_count * 100)
            if progress > sent_progress:
                sent_progress = progress
                self.progress.emit(sent_progress)
            logger.debug("Migration progress: %s/%s (%s%%)", count, total_count, sent_progress)

        for file in old_files:
            if self._cancelled.isSet():
                self._delete(dirs=copied_dirs, files=copied_files)
                logger.debug("Migration done because cancelled")
                self.done.emit()
                return

            if file in HIDDEN_FILES:
                continue

            new_file_path = ensure_unicode(op.join(
                new_dir, op.relpath(file, start=old_dir)))

            logger.info("Copying file %s, to %s",
                        file, new_file_path)
            try:
                copy_file(file, new_file_path, preserve_file_date=True)
            except Exception as e:
                logger.error("Copy file error: %s", e)
                self.failed.emit(str(e))
                self._delete(dirs=copied_dirs, files=copied_files)
                return

            copied_files.append(new_file_path)
            count += 1
            progress = int(count / total_count * 100)
            if progress > sent_progress:
                sent_progress = progress
                self.progress.emit(sent_progress)
            logger.debug("Migration progress: %s/%s (%s%%)", count, total_count, sent_progress)

        logger.debug("Saving new config")
        self._cfg.set_settings(dict(sync_directory=FilePath(new_dir)))
        self._cfg.sync()
        logger.info("New config saved")

        logger.debug("Updating shortcuts")
        create_shortcuts(new_dir)
        remove_shortcuts(old_dir)
        logger.debug("Resetting custom folder icons")
        reset_all_custom_folder_icons(old_dir)

        logger.debug("Migration done")
        self.done.emit()

        logger.info("Migration thread end")
Example #6
    def update_file_signature(self, file, signature):
        signature_path = self._pc.create_abspath(file.signature_rel_path)
        make_dirs(signature_path)
        with open(signature_path, 'wb') as f:
            dump(signature, f, protocol=2)
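
dump here is presumably pickle.dump (protocol 2). Under that assumption, a matching sketch for loading the signature back:

from pickle import load


def read_file_signature(signature_path):
    # Assumes the signature was written with pickle.dump(..., protocol=2)
    with open(signature_path, 'rb') as f:
        return load(f)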
Example #7
    def _on_upload_task_completed(self, upload_id_str, elapsed, total_str):
        """
        Slot to be called on upload task download completion

        @param upload_id_str ID of upload task [string]
        @param elapsed Time elapsed from download starting (in seconds) [float]
        @param total_str Size of file being downloaded (in bytes) [string]
        """

        upload_id = int(upload_id_str)
        state = self.download_tasks_info[upload_id]['state']

        upload_name = self.download_tasks_info[upload_id]['upload_name']
        if state == 'cancelled':
            logger.debug("Upload task %s cancelled", upload_id)
            self._on_upload_failed(upload_id)
            # Tray notification
            self.upload_cancelled.emit(upload_name)
            return
        elif state == 'paused':
            self.download_tasks_info[upload_id]['elapsed'] += elapsed
            return

        elapsed += self.download_tasks_info[upload_id]['elapsed']
        total = int(total_str)
        bps_avg = int(total / elapsed) if elapsed > 0 else 0
        bps_avg = "{:,}".format(bps_avg)
        logger.info(
            "Upload task ID '%s' complete (downloaded %s bytes in %s seconds "
            "(%s Bps))", upload_id_str, total_str, elapsed, bps_avg)

        # Calculate checksum
        tmp_fn = self.download_tasks_info[upload_id]['tmp_fn']
        checksum = self.download_tasks_info[upload_id]['upload_md5']
        try:
            logger.debug("Calculating checksum for upload task ID '%s'...",
                         upload_id)
            checksum_calculated = hashfile(tmp_fn)
        except Exception as e:
            logger.error("Failed to calculate checksum of '%s' (%s)", tmp_fn,
                         e)
            self._on_upload_failed(upload_id)
            return

        if self._tracker:
            self._tracker.http_download(upload_id, total, elapsed,
                                        checksum_calculated == checksum)

        # Validate checksum
        if checksum_calculated != checksum:
            logger.error("MD5 checkfum of '%s' is '%s' instead of '%s'",
                         tmp_fn, checksum_calculated, checksum)
            self._on_upload_failed(upload_id)
            return

        # Move file to its location
        path = self._check_upload_path(upload_id)
        if path is None:
            return
        path = FilePath(op.join(path, upload_name))
        fullpath = ensure_unicode(op.join(self._cfg.sync_directory, path))
        fullpath = FilePath(fullpath).longpath
        dirname = op.dirname(fullpath)
        if not op.isdir(dirname):
            logger.warning(
                "Destination directory %s "
                "does not exist for upload %s", dirname, fullpath)
            self._on_upload_failed(upload_id)
            return

        try:
            try:
                logger.info("Moving downloaded file '%s' to '%s'...", tmp_fn,
                            fullpath)
                # Create necessary directories
                make_dirs(fullpath)
                # Move file
                shutil.move(src=tmp_fn, dst=fullpath)
            except OSError as e:
                if e.errno != errno.EACCES:
                    raise e
                logger.warning(
                    "Can't move downloaded file '%s' into '%s' (%s)", tmp_fn,
                    dirname, e)
                fullpath = get_next_name(fullpath)
                shutil.move(src=tmp_fn, dst=fullpath)
        except Exception as e:
            logger.error("Failed to move downloaded file '%s' into '%s' (%s)",
                         tmp_fn, dirname, e)
            self._on_upload_failed(upload_id)
            return

        self.download_status.emit(*self._empty_progress,
                                  [{}, {}, [upload_id_str]], {})
        self._on_upload_complete(upload_id)
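
hashfile is used above to verify the downloaded temporary file against the expected MD5. Its implementation is not shown; a minimal chunked-MD5 sketch, assuming it returns the hex digest:

import hashlib


def hashfile(path, chunk_size=1024 * 1024):
    # Hypothetical sketch: MD5 of a file, read in chunks to bound memory use
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()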
Example #8
def logging_setup(loglevel, logfilename=None, copies_logging=True):
    """
    Configures logging module

    @param loglevel Log level to be used [str]
    @param logfilename Name of file to save log into [str]
    """

    if set_verbose and loglevel == 'DEBUG':
        loglevel = VERBOSE

    config = load_config()
    set_root_directory(config.sync_directory)

    copies_file_prefix = 'copies_'
    if not logfilename:
        logfilename = time.strftime('%Y%m%d_%H%M%S.log')
        logfilename = get_bases_filename(root_directory, logfilename)

    copies_logs = sorted(
        glob.glob(get_bases_filename(
            root_directory,
            copies_file_prefix + '[0-9][0-9][0-9][0-9][0-9][0-9][0-9][0-9]'
            '_[0-9][0-9][0-9][0-9][0-9][0-9]*.log')),
        reverse=True)
    if copies_logs:
        copies_filename = copies_logs[0]
    else:
        log_dir, log_file = split(logfilename)
        copies_filename = join(log_dir, copies_file_prefix + log_file)

    cfg = {
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'logfile': {
                'format':
                '[%(asctime)s %(levelname)s %(name)s:%(lineno)d] %(threadName)s(%(thread)d): %(message)s',  # noqa
            },
            'console': {
                'format':
                '[%(asctime)s %(levelname)s %(name)s:%(lineno)d] %(threadName)s(%(thread)d): %(message)s',  # noqa
            },
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',
                'formatter': 'console',
                'stream': sys.stdout,
                'level': loglevel,
                'filters': ['console_filter'],
            },
            'file': {
                'formatter': 'logfile',
                'class': 'common.logging_setup.EconoRotatingFileHandler',
                'filename': logfilename,
                'logsCount': DEFAULT_LOGS_COUNT,
                'level': loglevel,
            },
        },
        'filters': {
            'console_filter': {
                '()': ConsoleFilter
            }
        },
        'loggers': {
            # for any logger
            '': {
                'handlers': [
                    'console',
                    'file',
                ],
                'level': loglevel,
            },
        },
    }

    if copies_logging:
        cfg['handlers']['copies_file'] = {
            'formatter': 'logfile',
            'class': 'common.logging_setup.EconoRotatingFileHandler',
            'filename': copies_filename,
            'logsCount': DEFAULT_COPIES_LOGS_COUNT,
            'file_name_prefix': copies_file_prefix,
            'level': loglevel,
        }
        cfg['loggers']['copies_logger'] = {
            'handlers': [
                'copies_file',
            ],
            'level': loglevel,
            'propagate': False,
        }

    make_dirs(logfilename, is_folder=False)

    logging.raiseExceptions = False

    logging.config.dictConfig(cfg)
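
The dictConfig above attaches a ConsoleFilter to the console handler through the '()' factory key, but the filter class itself is not shown. A minimal sketch of what such a logging filter might look like (which records it drops is an assumption):

import logging


class ConsoleFilter(logging.Filter):
    # Hypothetical sketch: keep console output readable by dropping
    # sub-WARNING records from chatty third-party loggers
    NOISY = ('urllib3', 'requests')

    def filter(self, record):
        if record.name.startswith(self.NOISY):
            return record.levelno >= logging.WARNING
        return True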
Example #9
    def create_patch(cls,
                     modify_file,
                     root,
                     old_blocks_hashes=None,
                     new_blocks_hashes=None,
                     old_file_hash=None,
                     new_file_hash=None,
                     uuid=None,
                     blocksize=SIGNATURE_BLOCK_SIZE):
        def get_patch_filename(suffix):
            return os.path.join(
                get_patches_dir(root), 'patches',
                str(old_file_hash) + str(new_file_hash) + suffix)

        patch_data_file = get_patch_filename('.patch_data')

        # Create directory structure to store patch file
        make_dirs(patch_data_file)

        with open(modify_file, 'rb') as handle_file, \
                open(patch_data_file, 'wb') as data_file:
            blocks = SortedDict()
            patch = dict()
            new_blocks_hashes_search = dict()
            if old_blocks_hashes:
                old_blocks_hashes_search = \
                    dict((value, key) for key, value in
                         old_blocks_hashes.items())
            else:
                old_blocks_hashes_search = dict()
            if new_blocks_hashes is None:
                new_blocks_hashes = cls.block_checksum(filepath=modify_file,
                                                       blocksize=blocksize)
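            # For each block of the new file: reuse a block already written to
            # this patch, or one present in the old file, when its hash
            # matches; otherwise append the block's bytes to the data file.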
            for new_offset, new_hash in new_blocks_hashes.items():
                clone_block_offset = new_blocks_hashes_search.get(
                    new_hash, None)
                from_patch = clone_block_offset is not None
                clone_block_offset = clone_block_offset if from_patch \
                    else old_blocks_hashes_search.get(new_hash, None)
                if clone_block_offset is None:
                    data_file_offset = data_file.tell()
                    data = cls.get_data(handle=handle_file,
                                        size=blocksize,
                                        offset=new_offset)
                    data_file.write(data)
                    data_size = data_file.tell() - data_file_offset
                    blocks[new_offset] = dict(
                        new=True,
                        hash=new_hash,
                        offset=data_file_offset,
                        data_size=data_size,
                    )
                    new_blocks_hashes_search[new_hash] = new_offset
                else:
                    blocks[new_offset] = dict(new=False,
                                              hash=new_hash,
                                              offset=clone_block_offset,
                                              from_patch=from_patch)

        patch['old_hash'] = old_file_hash
        if new_file_hash is None:
            new_file_hash = Rsync.hash_from_block_checksum(new_blocks_hashes)
        patch['new_hash'] = new_file_hash

        info = cls.getfileinfo(modify_file)
        patch['blocks'] = blocks
        patch['time_modify'] = info.st_mtime
        patch['size'] = info.st_size
        patch['blocksize'] = blocksize

        patch_info_file = get_patch_filename('.patch_info')

        with open(patch_info_file, 'w') as info_file:
            json.dump(patch, info_file)

        if uuid is not None:
            patch_archive_file = op.join(get_patches_dir(root, create=True),
                                         uuid)
        else:
            patch_archive_file = get_patch_filename('.patch')

        with tarfile.open(patch_archive_file, 'w') as archive:
            archive.add(patch_info_file, arcname='info')
            archive.add(patch_data_file, arcname='data')
        remove_file(patch_info_file)
        remove_file(patch_data_file)

        patch['archive_file'] = patch_archive_file
        patch['archive_size'] = os.stat(patch_archive_file).st_size
        return patch
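
create_patch falls back to cls.block_checksum when new_blocks_hashes is not supplied. That method is not shown here; a minimal sketch, assuming it maps each block offset to the MD5 of the block at that offset:

import hashlib


def block_checksum(filepath, blocksize):
    # Hypothetical sketch: {offset: md5 hex digest of the block at offset}
    hashes = {}
    offset = 0
    with open(filepath, 'rb') as f:
        while True:
            block = f.read(blocksize)
            if not block:
                break
            hashes[offset] = hashlib.md5(block).hexdigest()
            offset += len(block)
    return hashes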