Example #1
    def _start(self, items):
        """ Start some items if conditions are met.
        """
        # TODO: Filter by a custom date field, for scheduled downloads starting at a certain time, or after a given delay

        # TODO: Don't start anything more if download BW is used >= config threshold in %

        # Check if anything more is ready to start downloading
        startable = [i for i in items if self.config.startable.match(i)]
        if not startable:
            self.LOG.debug("Checked %d item(s), none startable" % (len(items),))
            return

        # TODO: sort by priority, then loaded time

        # Stick to "start_at_once" parameter, unless "downloading_min" is violated
        downloading = [i for i in items if self.config.downloading.match(i)]
        start_now = max(self.config.start_at_once, self.config.downloading_min - len(downloading))
        start_now = min(start_now, len(startable))

        #down_traffic = sum(i.down for i in downloading)
        ##self.LOG.info("%d downloading, down %d" % (len(downloading), down_traffic))
        
        # Start eligible items
        for idx, item in enumerate(startable):
            # Check if we reached 'start_now' in this run
            if idx >= start_now:
                self.LOG.debug("Only starting %d item(s) in this run, %d more could be downloading" % (
                    start_now, len(startable)-idx,))
                break

            # TODO: Prevent start of more torrents that can fit on the drive (taking "off" files into account)
            # (restarts items that were stopped due to the "low_diskspace" schedule, and also avoids triggering it at all)

            # Only check the other conditions when we have `downloading_min` covered
            if len(downloading) < self.config.downloading_min:
                self.LOG.debug("Catching up from %d to a minimum of %d downloading item(s)" % (
                    len(downloading), self.config.downloading_min))
            else:
                # Limit to the given maximum of downloading items
                if len(downloading) >= self.config.downloading_max:
                    self.LOG.debug("Already downloading %d item(s) out of %d max, %d more could be downloading" % (
                        len(downloading), self.config.downloading_max, len(startable)-idx,))
                    break

            # If we made it here, start it!
            downloading.append(item)
            self.LOG.info("%s '%s' [%s, #%s]" % (
                "WOULD start" if self.config.dry_run else "Starting", 
                fmt.to_utf8(item.name), item.alias, item.hash))
            if not self.config.dry_run:
                item.start()
                if not self.config.quiet:
                    self.proxy.log('', "%s: Started '%s' {%s}" % (
                        self.__class__.__name__, fmt.to_utf8(item.name), item.alias,
                    ))
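
The start_now clamp above picks the larger of start_at_once and the shortfall below downloading_min, then caps it at the number of startable items. A minimal sketch of that arithmetic, with hypothetical config values:

    start_at_once, downloading_min = 2, 5   # hypothetical config values
    downloading, startable = 1, 10          # hypothetical queue state
    start_now = max(start_at_once, downloading_min - downloading)  # max(2, 4) -> 4
    start_now = min(start_now, startable)                          # min(4, 10) -> 4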
Example #2
    def mainloop(self):
        """ The main loop.
        """
        # Print usage if not enough args
        if len(self.args) < 2:
            self.parser.print_help()
            self.parser.exit()

        # TODO: Add mode to move tied metafiles, without losing the tie

        # Target handling
        target = self.args[-1]
        if "//" in target.rstrip('/'):
            # Create parts of target path
            existing, _ = target.split("//", 1)
            if not os.path.isdir(existing):
                self.fatal("Path before '//' MUST exist in %s" % (pretty_path(target),))

            # Possibly create the rest
            target = target.replace("//", "/")
            if not os.path.exists(target):
                self.guarded(os.makedirs, target)

        # Preparation
        # TODO: Handle cases where target is the original download path correctly!
        #       i.e.   rtmv foo/ foo   AND   rtmv foo/ .   (in the download dir)
        proxy = config.engine.open()
        download_path = os.path.realpath(os.path.expanduser(proxy.get_directory().rstrip(os.sep)))
        target = self.resolve_slashed(target)
        source_paths = [self.resolve_slashed(i) for i in self.args[:-1]]
        source_realpaths = [os.path.realpath(i) for i in source_paths]
        source_items = defaultdict(list) # map of source path to item
        items = list(config.engine.items(prefetch=self.PREFETCH_FIELDS))

        # Validate source paths and find matching items
        for item in items:
            if not item.path:
                continue

            realpath = None
            try:
                realpath = os.path.realpath(item.path)
            except (EnvironmentError, UnicodeError) as exc:
                self.LOG.warning("Cannot realpath %r (%s)" % (item.path, exc))
            
            # Look if item matches a source path
            # TODO: Handle download items nested into each other!
            try:
                path_idx = source_realpaths.index(realpath or fmt.to_utf8(item.path))
            except ValueError:
                continue

            if realpath:
                self.LOG.debug('Item path %s resolved to %s' % (pretty_path(item.path), pretty_path(realpath)))
            self.LOG.debug('Found "%s" for %s' % (fmt.to_utf8(item.name), pretty_path(source_paths[path_idx])))
            source_items[source_paths[path_idx]].append(item)
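
The '//' marker in the target argument splits it into a prefix that must already exist and a remainder that may be created. A minimal sketch of the convention, using a made-up path:

    import os

    target = "/data/done//tv/show"        # hypothetical target argument
    existing, _ = target.split("//", 1)   # "/data/done" must already exist
    target = target.replace("//", "/")    # -> "/data/done/tv/show"
    if os.path.isdir(existing) and not os.path.exists(target):
        os.makedirs(target)               # create the remaining parts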
Example #3
    def load(self):
        """ Load metafile into client.
        """
        if not self.ns.info_hash and not self.parse():
            return

        self.addinfo()

        # TODO: dry_run
        try:
            # TODO: Scrub metafile if requested

            # Determine target state
            start_it = self.job.config.load_mode.lower() in ("start", "started")
            queue_it = self.job.config.queued

            if "start" in self.ns.flags:
                start_it = True
            elif "load" in self.ns.flags:
                start_it = False

            if "queue" in self.ns.flags:
                queue_it = True

            # Load metafile into client
            load_cmd = self.job.proxy.load.verbose
            if queue_it:
                if not start_it:
                    self.ns.commands.append("d.priority.set=0")
            elif start_it:
                load_cmd = self.job.proxy.load.start_verbose

            self.job.LOG.debug("Templating values are:\n    %s" % "\n    ".join("%s=%s" % (key, repr(val))
                for key, val in sorted(self.ns.items())
            ))

            load_cmd(xmlrpc.NOHASH, self.ns.pathname, *tuple(self.ns.commands))
            time.sleep(.05) # let things settle

            # Announce new item
            if not self.job.config.quiet:
                msg = "%s: Loaded '%s' from '%s/'%s%s" % (
                    self.job.__class__.__name__,
                    fmt.to_utf8(self.job.proxy.d.name(self.ns.info_hash, fail_silently=True)),
                    os.path.dirname(self.ns.pathname).rstrip(os.sep),
                    " [queued]" if queue_it else "",
                    (" [startable]"  if queue_it else " [started]") if start_it else " [normal]",
                )
                self.job.proxy.log(xmlrpc.NOHASH, msg)

            # TODO: Evaluate fields and set client values
            # TODO: Add metadata to tied file if requested

            # TODO: Execute commands AFTER adding the item, with full templating
            # Example: Labeling - add items to a persistent view, i.e. "postcmd = view.set_visible={{label}}"
            #   could also be done automatically from the path, see above under "flags" (autolabel = True)
            #   and add traits to the flags, too, in that case

        except xmlrpc.ERRORS as exc:
            self.job.LOG.error("While loading #%s: %s" % (self.ns.info_hash, exc))
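
The load_mode/flags handling above boils down to a small decision table. A hedged stand-alone sketch (the function name and arguments are stand-ins, not part of the original API):

    def pick_mode(load_mode, queued, flags):
        """Return (start_it, queue_it) as decided by the code above (sketch)."""
        start_it = load_mode.lower() in ("start", "started")
        if "start" in flags:
            start_it = True
        elif "load" in flags:
            start_it = False
        return start_it, queued or "queue" in flags

    assert pick_mode("started", False, set()) == (True, False)
    assert pick_mode("normal", False, {"queue"}) == (False, True)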
Example #4
    def load(self):
        """ Load metafile into client.
        """
        if not self.ns.info_hash and not self.parse():
            return

        self.addinfo()

        # TODO: dry_run
        try:
            # TODO: Scrub metafile if requested

            # Determine target state
            start_it = self.job.config.load_mode.lower() in ("start", "started")
            queue_it = self.job.config.queued

            if "start" in self.ns.flags:
                start_it = True
            elif "load" in self.ns.flags:
                start_it = False

            if "queue" in self.ns.flags:
                queue_it = True

            # Load metafile into client
            load_cmd = self.job.proxy.load_verbose
            if queue_it:
                if not start_it:
                    self.ns.commands.append("d.set_priority=0")
            elif start_it:
                load_cmd = self.job.proxy.load_start_verbose

            self.job.LOG.debug("Templating values are:\n    %s" % "\n    ".join("%s=%s" % (key, repr(val))
                for key, val in sorted(self.ns.items())
            ))

            load_cmd(self.ns.pathname, *tuple(self.ns.commands))
            time.sleep(.05) # let things settle

            # Announce new item
            if not self.job.config.quiet:
                msg = "%s: Loaded '%s' from '%s/'%s%s" % (
                    self.job.__class__.__name__,
                    fmt.to_utf8(self.job.proxy.d.get_name(self.ns.info_hash, fail_silently=True)),
                    os.path.dirname(self.ns.pathname).rstrip(os.sep),
                    " [queued]" if queue_it else "",
                    (" [startable]"  if queue_it else " [started]") if start_it else " [normal]",
                )
                self.job.proxy.log('', msg)

            # TODO: Evaluate fields and set client values
            # TODO: Add metadata to tied file if requested

            # TODO: Execute commands AFTER adding the item, with full templating
            # Example: Labeling - add items to a persistent view, i.e. "postcmd = view.set_visible={{label}}"
            #   could also be done automatically from the path, see above under "flags" (autolabel = True)
            #   and add traits to the flags, too, in that case

        except xmlrpc.ERRORS as exc:
            self.job.LOG.error("While loading #%s: %s" % (self.ns.info_hash, exc))
Example #5
def pretty_path(path):
    """ Prettify path for logging.
    """
    path = fmt.to_utf8(path)
    home_dir = os.path.expanduser("~")
    if path.startswith(home_dir):
        path = "~" + path[len(home_dir):]
    return '"%s"' % (path,)
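
A usage sketch, assuming the current user's home directory is /home/alice (the actual result depends on os.path.expanduser):

    pretty_path("/home/alice/rtorrent/work")   # -> '"~/rtorrent/work"'
    pretty_path("/var/data/torrents")          # -> '"/var/data/torrents"'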
Example #6
def assign_fields(meta, assignments, options_debug=False):
    """ Takes a list of C{key=value} strings and assigns them to the
        given metafile. If you want to set nested keys (e.g. "info.source"),
        you have to use a dot as a separator. For exotic keys *containing*
        a dot, double that dot ("dotted..key").

        Numeric values starting with "+" or "-" are converted to integers.

        If just a key name is given (no '='), the field is removed.
    """
    for assignment in assignments:
        assignment = fmt.to_unicode(assignment)
        try:
            if '=' in assignment:
                field, val = assignment.split('=', 1)
            else:
                field, val = assignment, None

            if val and val[0] in "+-" and val[1:].isdigit():
                val = int(val, 10)

            # TODO: Allow numerical indices, and "+" for append
            namespace = meta
            keypath = [
                i.replace('\0', '.')
                for i in field.replace('..', '\0').split('.')
            ]
            for key in keypath[:-1]:
                # Create missing dicts as we go...
                namespace = namespace.setdefault(fmt.to_utf8(key), {})
        except (KeyError, IndexError, TypeError, ValueError) as exc:
            if options_debug:
                raise
            raise error.UserError("Bad assignment %r (%s)!" %
                                  (assignment, exc))
        else:
            if val is None:
                del namespace[fmt.to_utf8(keypath[-1])]
            else:
                namespace[fmt.to_utf8(keypath[-1])] = fmt.to_utf8(val)

    return meta
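
A usage sketch for assign_fields with a made-up metafile dict, covering nested keys, numeric coercion, and field removal:

    meta = {"info": {}}
    assign_fields(meta, ["info.source=Example"])   # nested key via '.'
    assign_fields(meta, ["weight=+42"])            # "+42" is coerced to int 42
    assign_fields(meta, ["weight"])                # bare key removes the field again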
Example #7
def add_fast_resume(meta, datapath):
    """ Add fast resume data to a metafile dict.
    """
    # Get list of files
    files = meta["info"].get("files", None)
    single = files is None
    if single:
        if os.path.isdir(datapath):
            datapath = os.path.join(datapath, meta["info"]["name"])
        files = [
            Bunch(
                path=[os.path.abspath(datapath)],
                length=meta["info"]["length"],
            )
        ]

    # Prepare resume data
    resume = meta.setdefault("libtorrent_resume", {})
    resume["bitfield"] = len(meta["info"]["pieces"]) // 20
    resume["files"] = []
    piece_length = meta["info"]["piece length"]
    offset = 0

    for fileinfo in files:
        # Get the path into the filesystem
        filepath = os.sep.join(fileinfo["path"])
        if not single:
            filepath = os.path.join(datapath,
                                    fmt.to_utf8(filepath.strip(os.sep)))

        # Check file size
        if os.path.getsize(filepath) != fileinfo["length"]:
            raise OSError(
                errno.EINVAL,
                "File size mismatch for %r [is %d, expected %d]" % (
                    filepath,
                    os.path.getsize(filepath),
                    fileinfo["length"],
                ))

        # Add resume data for this file
        resume["files"].append(
            dict(
                priority=1,
                mtime=int(os.path.getmtime(filepath)),
                completed=(offset + fileinfo["length"] + piece_length - 1) //
                piece_length - offset // piece_length,
            ))
        offset += fileinfo["length"]

    return meta
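
The "completed" expression above counts how many pieces a file overlaps, given its byte offset in the torrent. A worked example with toy numbers:

    piece_length, offset, length = 4, 6, 7
    completed = ((offset + length + piece_length - 1) // piece_length
                 - offset // piece_length)
    # bytes 6..12 touch pieces 1, 2 and 3, so completed == 3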
Example #8
    def parse(self, conditions):
        """ Parse filter conditions.

            @param conditions: multiple conditions.
            @type conditions: list or str
        """
        conditions_text = conditions
        try:
            conditions = shlex.split(fmt.to_utf8(conditions))
        except AttributeError:
            # Not a string, assume parsed tree
            conditions_text = self._tree2str(conditions)

        # Empty list?
        if not conditions:
            raise FilterError("No conditions given at all!")

        # NOT *must* appear at the start of a group
        negate = conditions[:1] == ["NOT"]
        if negate:
            conditions = conditions[1:]
            if not conditions:
                raise FilterError("NOT must be followed by some conditions!")

        # Handle grouping
        if '[' in conditions:
            tree = [[]]
            for term in conditions:
                if term == '[':
                    tree.append([])  # new grouping
                elif term == ']':
                    subtree = tree.pop()
                    if not tree:
                        raise FilterError(
                            "Unbalanced brackets, too many closing ']' in condition %r"
                            % (conditions_text, ))
                    tree[-1].append(
                        subtree)  # append finished group to containing level
                else:
                    tree[-1].append(term)  # append to current level

            if len(tree) > 1:
                raise FilterError(
                    "Unbalanced brackets, too many open '[' in condition %r" %
                    (conditions_text, ))
            conditions = tree[0]

        # Prepare root matcher
        conditions = list(conditions)
        matcher = CompoundFilterAll()
        if "OR" in conditions:
            root = CompoundFilterAny()
            root.append(matcher)
        else:
            root = matcher

        # Go through conditions and parse them
        for condition in conditions:
            if condition == "OR":
                # Leading OR, or OR OR in sequence?
                if not matcher:
                    raise FilterError("Left-hand side of OR missing in %r!" %
                                      (conditions_text, ))

                # Start next run of AND conditions
                matcher = CompoundFilterAll()
                root.append(matcher)
            elif isinstance(condition, list):
                matcher.append(self.parse(condition))
            else:
                matcher.append(self._create_filter(condition))

        # Trailing OR?
        if not matcher:
            raise FilterError("Right-hand side of OR missing in %r!" %
                              (conditions_text, ))

        return NegateFilter(root) if negate else root
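
Some condition strings this grammar accepts, with the tokenization step shown via the standard library; the field terms are placeholders, since their actual syntax is defined by _create_filter:

    import shlex

    # '[' and ']' must be whitespace-separated to become their own tokens:
    shlex.split("NOT [ alias=foo OR alias=bar ]")
    # -> ['NOT', '[', 'alias=foo', 'OR', 'alias=bar', ']']
    shlex.split("alias=foo is_complete=yes")   # implicit AND between terms
    # -> ['alias=foo', 'is_complete=yes']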
Example #9
    def __str__(self):
        return fmt.to_utf8("%s=%s" % (self._name, self._condition))
Example #10
    def mainloop(self):
        """ The main loop.
        """
        if not self.args:
            self.parser.print_help()
            self.parser.exit()

        for idx, filename in enumerate(self.args):
            torrent = metafile.Metafile(filename)
            if idx and not self.options.output:
                print('')
                print("~" * 79)

            try:
                # Read and check metafile
                try:
                    data = metafile.checked_open(
                        filename,
                        log=self.LOG if self.options.skip_validation else None,
                        quiet=(self.options.quiet
                               and (self.options.output or self.options.raw)))
                except EnvironmentError as exc:
                    self.fatal("Can't read '%s' (%s)" % (
                        filename,
                        str(exc).replace(": '%s'" % filename, ""),
                    ))
                    raise

                listing = None

                if self.options.raw or self.options.json:
                    if not self.options.reveal and "info" in data:
                        # Shorten useless binary piece hashes
                        data["info"]["pieces"] = "<%d piece hashes>" % (
                            len(data["info"]["pieces"]) //
                            len(hashlib.sha1().digest())  # bogus pylint: disable=E1101
                        )

                    if self.options.json:
                        listing = json.dumps(data,
                                             default=repr,
                                             indent=4,
                                             sort_keys=True)
                    else:
                        pprinter = (pprint.PrettyPrinter if self.options.reveal
                                    else metafile.MaskingPrettyPrinter)()
                        listing = pprinter.pformat(data)
                elif self.options.output:

                    def splitter(fields):
                        "Yield single names for a list of comma-separated strings."
                        for flist in fields:
                            for field in flist.split(','):
                                yield field.strip()

                    data["__file__"] = filename
                    if 'info' in data:
                        data["__hash__"] = metafile.info_hash(data)
                        data["__size__"] = metafile.data_size(data)
                    values = []
                    for field in splitter(self.options.output):
                        try:
                            val = data
                            for key in field.split('.'):
                                val = val[key]
                        except KeyError as exc:
                            self.LOG.error("%s: Field %r not found (%s)" %
                                           (filename, field, exc))
                            break
                        else:
                            values.append(str(val))
                    else:
                        listing = '\t'.join(values)
                else:
                    listing = '\n'.join(
                        torrent.listing(masked=not self.options.reveal))
            except (ValueError, KeyError, bencode.BencodeError) as exc:
                if self.options.debug:
                    raise
                self.LOG.warning("Bad metafile %r (%s: %s)" %
                                 (filename, type(exc).__name__, exc))
            else:
                if listing is not None:
                    print(fmt.to_utf8(listing))
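
The --output handling above resolves dotted field names by walking nested dict keys. A minimal stand-alone sketch with a made-up metafile dict:

    data = {"info": {"name": "example.iso"}, "__file__": "example.torrent"}
    val = data
    for key in "info.name".split('.'):   # field given as --output info.name
        val = val[key]
    # val == "example.iso"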
Example #11
    def _start(self, items):
        """ Start some items if conditions are met.
        """
        # TODO: Filter by a custom date field, for scheduled downloads starting at a certain time, or after a given delay

        # TODO: Don't start anything more if download BW is used >= config threshold in %

        # Check if anything more is ready to start downloading
        startable = [i for i in items if self.config.startable.match(i)]
        if not startable:
            self.LOG.debug(
                "Checked %d item(s), none startable according to [ %s ]",
                len(items), self.config.startable)
            return

        # Check intermission delay
        now = time.time()
        if now < self.last_start:
            # compensate for summer time and other oddities
            self.last_start = now
        delayed = int(self.last_start + self.config.intermission - now)
        if delayed > 0:
            self.LOG.debug("Delaying start of {:d} item(s),"
                           " due to {:d}s intermission with {:d}s left".format(
                               len(startable), self.config.intermission,
                               delayed))
            return

        # TODO: sort by priority, then loaded time

        # Stick to "start_at_once" parameter, unless "downloading_min" is violated
        downloading = [i for i in items if self.config.downloading.match(i)]
        start_now = max(self.config.start_at_once,
                        self.config.downloading_min - len(downloading))
        start_now = min(start_now, len(startable))

        #down_traffic = sum(i.down for i in downloading)
        ##self.LOG.info("%d downloading, down %d" % (len(downloading), down_traffic))

        # Start eligible items
        for idx, item in enumerate(startable):
            # Check if we reached 'start_now' in this run
            if idx >= start_now:
                self.LOG.debug(
                    "Only starting %d item(s) in this run, %d more could be downloading"
                    % (
                        start_now,
                        len(startable) - idx,
                    ))
                break

            # TODO: Prevent start of more torrents that can fit on the drive (taking "off" files into account)
            # (restarts items that were stopped due to the "low_diskspace" schedule, and also avoids triggering it at all)

            # Only check the other conditions when we have `downloading_min` covered
            if len(downloading) < self.config.downloading_min:
                self.LOG.debug(
                    "Catching up from %d to a minimum of %d downloading item(s)"
                    % (len(downloading), self.config.downloading_min))
            else:
                # Limit to the given maximum of downloading items
                if len(downloading) >= self.config.downloading_max:
                    self.LOG.debug(
                        "Already downloading %d item(s) out of %d max, %d more could be downloading"
                        % (
                            len(downloading),
                            self.config.downloading_max,
                            len(startable) - idx,
                        ))
                    break

            # If we made it here, start it!
            self.last_start = now
            downloading.append(item)
            self.LOG.info(
                "%s '%s' [%s, #%s]" %
                ("WOULD start" if self.config.dry_run else "Starting",
                 fmt.to_utf8(item.name), item.alias, item.hash))
            if not self.config.dry_run:
                item.start()
                if not self.config.quiet:
                    self.proxy.log(
                        xmlrpc.NOHASH, "%s: Started '%s' {%s}" % (
                            self.__class__.__name__,
                            fmt.to_utf8(item.name),
                            item.alias,
                        ))
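
The intermission gate added in this variant, shown in isolation with hypothetical timestamps:

    import time

    now = time.time()
    last_start, intermission = now - 30, 120   # hypothetical: last start 30s ago
    delayed = int(last_start + intermission - now)   # ~90 seconds still to wait
    if delayed > 0:
        pass   # skip this run; start again once the intermission has elapsed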
Example #12
    def parse(self, conditions):
        """ Parse filter conditions.

            @param conditions: multiple conditions.
            @type conditions: list or str
        """
        conditions_text = conditions
        try:
            conditions = shlex.split(fmt.to_utf8(conditions))
        except AttributeError:
            # Not a string, assume parsed tree
            conditions_text = self._tree2str(conditions)

        # Empty list?
        if not conditions:
            raise FilterError("No conditions given at all!")

        # NOT *must* appear at the start of a group
        negate = conditions[:1] == ["NOT"]
        if negate:
            conditions = conditions[1:]
            if not conditions:
                raise FilterError("NOT must be followed by some conditions!")

        # Handle grouping
        if '[' in conditions:
            tree = [[]]
            for term in conditions:
                if term == '[':
                    tree.append([]) # new grouping
                elif term == ']':
                    subtree = tree.pop()
                    if not tree:
                        raise FilterError("Unbalanced brackets, too many closing ']' in condition %r" % (conditions_text,))
                    tree[-1].append(subtree) # append finished group to containing level
                else:
                    tree[-1].append(term) # append to current level

            if len(tree) > 1:
                raise FilterError("Unbalanced brackets, too many open '[' in condition %r" % (conditions_text,))
            conditions = tree[0]

        # Prepare root matcher
        conditions = list(conditions)
        matcher = CompoundFilterAll()
        if "OR" in conditions:
            root = CompoundFilterAny()
            root.append(matcher)
        else:
            root = matcher

        # Go through conditions and parse them
        for condition in conditions:
            if condition == "OR":
                # Leading OR, or OR OR in sequence?
                if not matcher:
                    raise FilterError("Left-hand side of OR missing in %r!" % (conditions_text,))

                # Start next run of AND conditions
                matcher = CompoundFilterAll()
                root.append(matcher)
            elif isinstance(condition, list):
                matcher.append(self.parse(condition))
            else:
                matcher.append(self._create_filter(condition))

        # Trailing OR?
        if not matcher:
            raise FilterError("Right-hand side of OR missing in %r!" % (conditions_text,))

        return NegateFilter(root) if negate else root
Example #13
                        except KeyError as exc:
                            self.LOG.error("%s: Field %r not found (%s)" %
                                           (filename, field, exc))
                            break
                        else:
                            values.append(str(val))
                    else:
                        listing = '\t'.join(values)
                else:
                    listing = '\n'.join(
                        torrent.listing(masked=not self.options.reveal))
            except (ValueError, KeyError, bencode.BencodeError) as exc:
                if self.options.debug:
                    raise
                self.LOG.warning("Bad metafile %r (%s: %s)" %
                                 (filename, type(exc).__name__, exc))
            else:
                if listing is not None:
                    print(fmt.to_utf8(listing))


def run():  #pragma: no cover
    """ The entry point.
    """
    ScriptBase.setup()
    MetafileLister().run()


if __name__ == "__main__":
    run()
Example #14
    def __init__(self, text):
        shlex.shlex.__init__(self, fmt.to_utf8(text), None, True)
        self.whitespace += ','
        self.whitespace_split = True
        self.commenters = ''
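
What this lexer configuration buys, as a hedged sketch; the subclass name is not shown in the snippet, so a stand-in is used:

    import shlex

    class CondLexer(shlex.shlex):   # stand-in name for the class above
        def __init__(self, text):
            shlex.shlex.__init__(self, text, None, True)   # posix mode
            self.whitespace += ','         # commas separate tokens like spaces
            self.whitespace_split = True   # split on whitespace only
            self.commenters = ''           # treat '#' as data, not a comment

    list(CondLexer("alias=foo,is_complete=yes"))
    # -> ['alias=foo', 'is_complete=yes']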
Example #15
    def mainloop(self):
        """ The main loop.
        """
        # Print usage if not enough args
        if len(self.args) < 2:
            self.parser.print_help()
            self.parser.exit()

        # TODO: Add mode to move tied metafiles, without losing the tie

        # Target handling
        target = self.args[-1]
        if "//" in target.rstrip('/'):
            # Create parts of target path
            existing, _ = target.split("//", 1)
            if not os.path.isdir(existing):
                self.fatal("Path before '//' MUST exist in %s" %
                           (pretty_path(target), ))

            # Possibly create the rest
            target = target.replace("//", "/")
            if not os.path.exists(target):
                self.guarded(os.makedirs, target)

        # Preparation
        # TODO: Handle cases where target is the original download path correctly!
        #       i.e.   rtmv foo/ foo   AND   rtmv foo/ .   (in the download dir)
        proxy = config.engine.open()
        download_path = os.path.realpath(
            os.path.expanduser(
                proxy.directory.default(xmlrpc.NOHASH).rstrip(os.sep)))
        target = self.resolve_slashed(target)
        source_paths = [self.resolve_slashed(i) for i in self.args[:-1]]
        source_realpaths = [os.path.realpath(i) for i in source_paths]
        source_items = defaultdict(list)  # map of source path to item
        items = list(config.engine.items(prefetch=self.PREFETCH_FIELDS))

        # Validate source paths and find matching items
        for item in items:
            if not item.path:
                continue

            realpath = None
            try:
                realpath = os.path.realpath(item.path)
            except (EnvironmentError, UnicodeError) as exc:
                self.LOG.warning("Cannot realpath %r (%s)" % (item.path, exc))

            # Look if item matches a source path
            # TODO: Handle download items nested into each other!
            try:
                path_idx = source_realpaths.index(realpath
                                                  or fmt.to_utf8(item.path))
            except ValueError:
                continue

            if realpath:
                self.LOG.debug('Item path %s resolved to %s' %
                               (pretty_path(item.path), pretty_path(realpath)))
            self.LOG.debug(
                'Found "%s" for %s' %
                (fmt.to_utf8(item.name), pretty_path(source_paths[path_idx])))
            source_items[source_paths[path_idx]].append(item)

        ##for path in source_paths: print path, "==>"; print "  " + "\n  ".join(i.path for i in source_items[path])

        if not os.path.isdir(target) and len(source_paths) > 1:
            self.fatal(
                "Can't move multiple files to %s which is no directory!" %
                (pretty_path(target), ))

        # Actually move the data
        moved_count = 0
        for path in source_paths:
            item = None  # Make sure there's no accidental stale reference

            if not source_items[path]:
                self.LOG.warn("No download item found for %s, skipping!" %
                              (pretty_path(path), ))
                continue

            if len(source_items[path]) > 1:
                self.LOG.warn(
                    "Can't handle multi-item moving yet, skipping %s!" %
                    (pretty_path(path), ))
                continue

            if os.path.islink(path):
                self.LOG.warn("Won't move symlinks, skipping %s!" %
                              (pretty_path(path), ))
                continue

            for item in source_items[path]:
                if os.path.islink(item.path) and os.path.realpath(
                        item.path) != os.readlink(item.path):
                    self.LOG.warn(
                        "Can't handle multi-hop symlinks yet, skipping %s!" %
                        (pretty_path(path), ))
                    continue

                if not item.is_complete:
                    if self.options.force_incomplete:
                        self.LOG.warn("Moving incomplete item '%s'!" %
                                      (item.name, ))
                    else:
                        self.LOG.warn("Won't move incomplete item '%s'!" %
                                      (item.name, ))
                        continue

                moved_count += 1
                dst = target
                if os.path.isdir(dst):
                    dst = os.path.join(dst, os.path.basename(path))
                self.LOG.info("Moving to %s..." % (pretty_path(dst), ))

                # Pause torrent?
                # was_active = item.is_active and not self.options.dry_run
                # if was_active: item.pause()

                # TODO: move across devices
                # TODO: move using "d.directory.set" instead of symlinks
                if os.path.islink(item.path):
                    if os.path.abspath(dst) == os.path.abspath(
                            item.path.rstrip(os.sep)):
                        # Moving back to original place
                        self.LOG.debug("Unlinking %s" %
                                       (pretty_path(item.path), ))
                        self.guarded(os.remove, item.path)
                        self.guarded(os.rename, path, dst)
                    else:
                        # Moving to another place
                        self.LOG.debug("Re-linking %s" %
                                       (pretty_path(item.path), ))
                        self.guarded(os.rename, path, dst)
                        self.guarded(os.remove, item.path)
                        self.guarded(os.symlink, os.path.abspath(dst),
                                     item.path)
                else:
                    # Moving download initially
                    self.LOG.debug("Symlinking %s" %
                                   (pretty_path(item.path), ))
                    src1, src2 = os.path.join(download_path,
                                              os.path.basename(
                                                  item.path)), fmt.to_unicode(
                                                      os.path.realpath(path))
                    assert src1 == src2, 'Item path %r should match %r!' % (
                        src1, src2)
                    self.guarded(os.rename, item.path, dst)
                    self.guarded(os.symlink, os.path.abspath(dst), item.path)

                # Resume torrent?
                # if was_active: item.resume()

        # Print stats
        self.LOG.debug("XMLRPC stats: %s" % proxy)
        self.LOG.log(
            logging.DEBUG if self.options.cron else logging.INFO,
            "Moved %d path%s (skipped %d)" %
            (moved_count, "" if moved_count == 1 else "s",
             len(source_paths) - moved_count))
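
The initial move above reduces to "rename the data, then leave a symlink at the old path so the client still finds it". A minimal sketch of just that branch, ignoring the re-link and move-back cases:

    import os

    def move_and_link(item_path, dst):
        """Move the payload to dst, then symlink the old path to it (sketch)."""
        os.rename(item_path, dst)                    # move the data
        os.symlink(os.path.abspath(dst), item_path)  # keep the client's path valid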
Example #16
    def _make_info(self, piece_size, progress, walker, piece_callback=None):
        """ Create info dict.
        """
        # These collect the file descriptions and piece hashes
        file_list = []
        pieces = []

        # Initialize progress state
        hashing_secs = time.time()
        totalsize = -1 if self._fifo else self._calc_size()
        totalhashed = 0

        # Start a new piece
        sha1sum = hashlib.sha1()
        done = 0
        filename = None

        # Hash all files
        for filename in walker:
            # Assemble file info
            filesize = os.path.getsize(filename)
            filepath = filename[len(
                os.path.dirname(self.datapath) if self._fifo else self.datapath
            ):].lstrip(os.sep)
            file_list.append({
                "length":
                filesize,
                "path": [
                    fmt.to_utf8(x) for x in fmt.to_unicode(filepath).replace(
                        os.sep, '/').split('/')
                ],
            })
            self.LOG.debug("Hashing %r, size %d..." % (filename, filesize))

            # Open file and hash it
            fileoffset = 0
            handle = open(filename, "rb")
            try:
                while fileoffset < filesize:
                    # Read rest of piece or file, whatever is smaller
                    chunk = handle.read(
                        min(filesize - fileoffset, piece_size - done))
                    sha1sum.update(chunk)  # bogus pylint: disable=E1101
                    done += len(chunk)
                    fileoffset += len(chunk)
                    totalhashed += len(chunk)

                    # Piece is done
                    if done == piece_size:
                        pieces.append(sha1sum.digest())  # bogus pylint: disable=E1101
                        if piece_callback:
                            piece_callback(filename, pieces[-1])

                        # Start a new piece
                        sha1sum = hashlib.sha1()
                        done = 0

                    # Report progress
                    if progress:
                        progress(totalhashed, totalsize)
            finally:
                handle.close()

        # Add hash of partial last piece
        if done > 0:
            pieces.append(sha1sum.digest())  # bogus pylint: disable=E1103
            if piece_callback:
                piece_callback(filename, pieces[-1])

        # Build the meta dict
        metainfo = {
            "pieces": b"".join(pieces),
            "piece length": piece_size,
            "name": os.path.basename(self.datapath),
        }

        # Handle directory/FIFO vs. single file
        if self._fifo or os.path.isdir(self.datapath):
            metainfo["files"] = file_list
        else:
            metainfo["length"] = totalhashed

        hashing_secs = time.time() - hashing_secs
        self.LOG.info("Hashing of %s took %.1f secs (%s/s)" % (
            fmt.human_size(totalhashed).strip(),
            hashing_secs,
            fmt.human_size(totalhashed / hashing_secs).strip(),
        ))

        # Return validated info dict
        return check_info(metainfo), totalhashed
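
The hashing loop above in miniature: fixed-size pieces are hashed across file boundaries, with a hash of the partial last piece appended at the end. A self-contained toy version:

    import hashlib

    def toy_pieces(chunks, piece_size):
        """SHA-1 each fixed-size piece over a concatenated byte stream."""
        pieces, sha1, done = [], hashlib.sha1(), 0
        for data in chunks:
            while data:
                take = data[:piece_size - done]
                data = data[len(take):]
                sha1.update(take)
                done += len(take)
                if done == piece_size:   # piece boundary reached
                    pieces.append(sha1.digest())
                    sha1, done = hashlib.sha1(), 0
        if done:                         # partial last piece
            pieces.append(sha1.digest())
        return b"".join(pieces)

    # Two "files" of 5 and 3 bytes with 4-byte pieces -> 2 piece hashes (40 bytes).
    assert len(toy_pieces([b"abcde", b"fgh"], 4)) == 40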
Example #17
    def __str__(self):
        return fmt.to_utf8("%s=%s" % (self._name, self._condition))