Example no. 1
0
def list_(options):
    """List every configured magic-folder, as JSON or human-readable text."""
    folders = load_magic_folders(options["node-directory"])
    if options["json"]:
        _list_json(options, folders)
    else:
        _list_human(options, folders)
    return 0
Example no. 2
0
    def init_magic_folder(self):
        """
        Start a MagicFolder service for each folder in this node's
        configuration; each begins processing its upload queue once
        enough storage servers have connected.

        Raises node.OldConfigOptionError if the legacy [drop_upload]
        config section is still present.
        """
        if self.config.get_config("drop_upload", "enabled", False, boolean=True):
            raise node.OldConfigOptionError(
                "The [drop_upload] section must be renamed to [magic_folder].\n"
                "See docs/frontends/magic-folder.rst for more information."
            )

        if self.config.get_config("magic_folder", "enabled", False, boolean=True):
            from allmydata.frontends import magic_folder

            try:
                magic_folders = magic_folder.load_magic_folders(self.config._basedir)
            except Exception as e:
                log.msg("Error loading magic-folder config: {}".format(e))
                raise

            # start processing the upload queue when we've connected to
            # enough servers
            threshold = min(self.encoding_params["k"],
                            self.encoding_params["happy"] + 1)

            for (name, mf_config) in magic_folders.items():
                self.log("Starting magic_folder '{}'".format(name))
                s = magic_folder.MagicFolder.from_config(self, name, mf_config)
                self._magic_folders[name] = s
                s.setServiceParent(self)

                connected_d = self.storage_broker.when_connected_enough(threshold)
                # `s` is passed as an explicit addCallback argument so each
                # iteration's folder is bound correctly (avoids the
                # late-binding closure pitfall).
                def connected_enough(ign, mf):
                    mf.ready()  # returns a Deferred we ignore
                    return None
                connected_d.addCallback(connected_enough, s)
Example no. 3
0
def join(options):
    """
    Join an existing magic-folder using a two-part invite code.

    Returns 0 on success, 1 if a folder of the same name (or its local
    database) already exists.  Raises UsageError for a malformed code.
    """
    pieces = options.invite_code.split(INVITE_SEPARATOR)
    if len(pieces) != 2:
        raise usage.UsageError("Invalid invite code.")
    magic_readonly_cap, dmd_write_cap = pieces

    node_dir = options["node-directory"]
    maybe_upgrade_magic_folders(node_dir)
    existing_folders = load_magic_folders(node_dir)

    folder_name = options["name"]
    if folder_name in existing_folders:
        print("This client already has a magic-folder named '{}'".format(
            folder_name),
              file=options.stderr)
        return 1

    db_fname = os.path.join(
        node_dir,
        u"private",
        u"magicfolder_{}.sqlite".format(folder_name),
    )
    if os.path.exists(db_fname):
        print("Database '{}' already exists; not overwriting".format(db_fname),
              file=options.stderr)
        return 1

    existing_folders[folder_name] = {
        u"directory": options.local_dir.encode('utf-8'),
        u"collective_dircap": magic_readonly_cap,
        u"upload_dircap": dmd_write_cap,
        u"poll_interval": options["poll-interval"],
    }

    save_magic_folders(node_dir, existing_folders)
    return 0
Example no. 4
0
    def init_magic_folder(self):
        """
        Create and start a MagicFolder service for every configured
        folder; uploads begin once the storage-server connection count
        reaches the computed threshold.

        Raises node.OldConfigOptionError when the obsolete [drop_upload]
        section is still in the config.
        """
        if self.config.get_config("drop_upload", "enabled", False, boolean=True):
            raise node.OldConfigOptionError(
                "The [drop_upload] section must be renamed to [magic_folder].\n"
                "See docs/frontends/magic-folder.rst for more information."
            )

        if self.config.get_config("magic_folder", "enabled", False, boolean=True):
            from allmydata.frontends import magic_folder

            try:
                magic_folders = magic_folder.load_magic_folders(self.config._basedir)
            except Exception as e:
                log.msg("Error loading magic-folder config: {}".format(e))
                raise

            # start processing the upload queue when we've connected to
            # enough servers
            threshold = min(self.encoding_params["k"],
                            self.encoding_params["happy"] + 1)

            for (name, mf_config) in magic_folders.items():
                self.log("Starting magic_folder '{}'".format(name))
                s = magic_folder.MagicFolder.from_config(self, name, mf_config)
                self._magic_folders[name] = s
                s.setServiceParent(self)

                connected_d = self.storage_broker.when_connected_enough(threshold)
                # binding `s` via the addCallback argument sidesteps the
                # late-binding closure problem inside this loop
                def connected_enough(ign, mf):
                    mf.ready()  # returns a Deferred we ignore
                    return None
                connected_d.addCallback(connected_enough, s)
Example no. 5
0
def join(options):
    """
    Configure a new magic-folder from an invite code.

    Returns 0 on success; 1 when the folder name or its database file
    already exists.  Raises UsageError on a malformed invite code.
    """
    parts = options.invite_code.split(INVITE_SEPARATOR)
    if len(parts) != 2:
        raise usage.UsageError("Invalid invite code.")
    magic_readonly_cap, dmd_write_cap = parts

    maybe_upgrade_magic_folders(options["node-directory"])
    existing_folders = load_magic_folders(options["node-directory"])

    name = options['name']
    if name in existing_folders:
        print("This client already has a magic-folder named '{}'".format(name), file=options.stderr)
        return 1

    db_fname = os.path.join(
        options["node-directory"],
        u"private",
        u"magicfolder_{}.sqlite".format(name),
    )
    if os.path.exists(db_fname):
        print("Database '{}' already exists; not overwriting".format(db_fname), file=options.stderr)
        return 1

    existing_folders[name] = {
        u"directory": options.local_dir.encode('utf-8'),
        u"collective_dircap": magic_readonly_cap,
        u"upload_dircap": dmd_write_cap,
        u"poll_interval": options["poll-interval"],
    }

    save_magic_folders(options["node-directory"], existing_folders)
    return 0
Example no. 6
0
def list_(options):
    """Show every configured magic-folder in the requested output format."""
    folders = load_magic_folders(options["node-directory"])
    renderer = _list_json if options["json"] else _list_human
    renderer(options, folders)
    return 0
Example no. 7
0
def create(options):
    """
    Create a new magic-folder: register an alias for the collective
    (master) directory and, if a nickname was given, invite ourselves
    and immediately join.

    Returns 0 on success, 1 if a folder of this name already exists, or
    the exit code of whichever sub-command failed.
    """
    precondition(isinstance(options.alias, unicode), alias=options.alias)
    precondition(isinstance(options.nickname, (unicode, NoneType)),
                 nickname=options.nickname)
    precondition(isinstance(options.local_dir, (unicode, NoneType)),
                 local_dir=options.local_dir)

    # make sure we don't already have a magic-folder with this name before we create the alias
    maybe_upgrade_magic_folders(options["node-directory"])
    folders = load_magic_folders(options["node-directory"])
    if options['name'] in folders:
        # NOTE: the Python-2-only `print >> stream, ...` statements in this
        # function were converted to print() calls for Python 3 syntax,
        # matching the rest of the file.
        print("Already have a magic-folder named '{}'".format(options['name']),
              file=options.stderr)
        return 1

    # create an alias; this basically just remembers the cap for the
    # master directory
    from allmydata.scripts import tahoe_add_alias
    create_alias_options = _delegate_options(options, CreateAliasOptions())
    create_alias_options.alias = options.alias

    rc = tahoe_add_alias.create_alias(create_alias_options)
    if rc != 0:
        print(create_alias_options.stderr.getvalue(), file=options.stderr)
        return rc
    print(create_alias_options.stdout.getvalue(), file=options.stdout)

    if options.nickname is not None:
        print(u"Inviting myself as client '{}':".format(options.nickname),
              file=options.stdout)
        invite_options = _delegate_options(options, InviteOptions())
        invite_options.alias = options.alias
        invite_options.nickname = options.nickname
        invite_options['name'] = options['name']
        rc = invite(invite_options)
        if rc != 0:
            print(u"magic-folder: failed to invite after create\n",
                  file=options.stderr)
            print(invite_options.stderr.getvalue(), file=options.stderr)
            return rc
        invite_code = invite_options.stdout.getvalue().strip()
        print(u"  created invite code", file=options.stdout)
        join_options = _delegate_options(options, JoinOptions())
        join_options['poll-interval'] = options['poll-interval']
        join_options.nickname = options.nickname
        join_options.local_dir = options.local_dir
        join_options.invite_code = invite_code
        rc = join(join_options)
        if rc != 0:
            print(u"magic-folder: failed to join after create\n",
                  file=options.stderr)
            print(join_options.stderr.getvalue(), file=options.stderr)
            return rc
        print(u"  joined new magic-folder", file=options.stdout)
        print(
            u"Successfully created magic-folder '{}' with alias '{}:' "
            u"and client '{}'\nYou must re-start your node before the "
            u"magic-folder will be active.".format(options['name'],
                                                   options.alias,
                                                   options.nickname),
            file=options.stdout)
    return 0
Example no. 8
0
    def init_magic_folder(self):
        """
        Start a MagicFolder service for every folder configured for this
        node; each begins processing once enough storage servers have
        connected.

        Raises OldConfigOptionError if the legacy [drop_upload] section
        is still present, and ValueError for a non-integer poll_interval.
        """
        if self.get_config("drop_upload", "enabled", False, boolean=True):
            raise OldConfigOptionError(
                "The [drop_upload] section must be renamed to [magic_folder].\n"
                "See docs/frontends/magic-folder.rst for more information.")

        if self.get_config("magic_folder", "enabled", False, boolean=True):
            from allmydata.frontends import magic_folder

            try:
                magic_folders = magic_folder.load_magic_folders(self.basedir)
            except Exception as e:
                log.msg("Error loading magic-folder config: {}".format(e))
                raise

            # start processing the upload queue when we've connected to
            # enough servers
            threshold = min(self.encoding_params["k"],
                            self.encoding_params["happy"] + 1)

            for (name, mf_config) in magic_folders.items():
                self.log("Starting magic_folder '{}'".format(name))
                db_filename = os.path.join(
                    self.basedir, "private",
                    "magicfolder_{}.sqlite".format(name))
                local_dir_config = mf_config['directory']
                try:
                    poll_interval = int(mf_config["poll_interval"])
                except ValueError:
                    raise ValueError("'poll_interval' option must be an int")

                s = magic_folder.MagicFolder(
                    client=self,
                    upload_dircap=mf_config["upload_dircap"].encode('ascii'),
                    collective_dircap=mf_config["collective_dircap"].encode(
                        'ascii'),
                    local_path_u=abspath_expanduser_unicode(local_dir_config,
                                                            base=self.basedir),
                    dbfile=abspath_expanduser_unicode(db_filename),
                    # was the Python-2-only octal literal 0077, which is a
                    # syntax error on Python 3; same value as 0o077
                    umask=self.get_config("magic_folder", "download.umask",
                                          0o077),
                    name=name,
                    downloader_delay=poll_interval,
                )
                self._magic_folders[name] = s
                s.setServiceParent(self)
                s.startService()

                connected_d = self.storage_broker.when_connected_enough(
                    threshold)

                def connected_enough(ign, mf):
                    mf.ready()  # returns a Deferred we ignore
                    return None

                connected_d.addCallback(connected_enough, s)
Example no. 9
0
def create(options):
    """
    Create a magic-folder: register an alias for its master directory
    and, when a nickname is supplied, invite ourselves and join.

    Returns 0 on success, or the failing sub-command's exit code.
    """
    precondition(isinstance(options.alias, unicode), alias=options.alias)
    precondition(isinstance(options.nickname, (unicode, NoneType)),
                 nickname=options.nickname)
    precondition(isinstance(options.local_dir, (unicode, NoneType)),
                 local_dir=options.local_dir)

    # refuse to proceed if a magic-folder of this name exists already
    maybe_upgrade_magic_folders(options["node-directory"])
    folders = load_magic_folders(options["node-directory"])
    if options['name'] in folders:
        print("Already have a magic-folder named '{}'".format(options['name']),
              file=options.stderr)
        return 1

    # an alias simply records the cap for the master directory
    from allmydata.scripts import tahoe_add_alias
    create_alias_options = _delegate_options(options, CreateAliasOptions())
    create_alias_options.alias = options.alias

    exit_code = tahoe_add_alias.create_alias(create_alias_options)
    if exit_code != 0:
        print(create_alias_options.stderr.getvalue(), file=options.stderr)
        return exit_code
    print(create_alias_options.stdout.getvalue(), file=options.stdout)

    # without a nickname there is nothing more to do
    if options.nickname is None:
        return 0

    print(u"Inviting myself as client '{}':".format(options.nickname),
          file=options.stdout)
    invite_options = _delegate_options(options, InviteOptions())
    invite_options.alias = options.alias
    invite_options.nickname = options.nickname
    invite_options['name'] = options['name']
    exit_code = invite(invite_options)
    if exit_code != 0:
        print(u"magic-folder: failed to invite after create\n",
              file=options.stderr)
        print(invite_options.stderr.getvalue(), file=options.stderr)
        return exit_code

    invite_code = invite_options.stdout.getvalue().strip()
    print(u"  created invite code", file=options.stdout)

    join_options = _delegate_options(options, JoinOptions())
    join_options['poll-interval'] = options['poll-interval']
    join_options.nickname = options.nickname
    join_options.local_dir = options.local_dir
    join_options.invite_code = invite_code
    exit_code = join(join_options)
    if exit_code != 0:
        print(u"magic-folder: failed to join after create\n",
              file=options.stderr)
        print(join_options.stderr.getvalue(), file=options.stderr)
        return exit_code

    print(u"  joined new magic-folder", file=options.stdout)
    print(
        u"Successfully created magic-folder '{}' with alias '{}:' "
        u"and client '{}'\nYou must re-start your node before the "
        u"magic-folder will be active."
    .format(options['name'], options.alias, options.nickname), file=options.stdout)
    return 0
Example no. 10
0
    def init_magic_folder(self):
        """
        Instantiate and start a MagicFolder service per configured
        folder, wiring each to begin once enough servers are connected.

        Raises OldConfigOptionError for the legacy [drop_upload] section
        and ValueError for a non-integer poll_interval.
        """
        if self.get_config("drop_upload", "enabled", False, boolean=True):
            raise OldConfigOptionError("The [drop_upload] section must be renamed to [magic_folder].\n"
                                       "See docs/frontends/magic-folder.rst for more information.")

        if self.get_config("magic_folder", "enabled", False, boolean=True):
            from allmydata.frontends import magic_folder

            try:
                magic_folders = magic_folder.load_magic_folders(self.basedir)
            except Exception as e:
                log.msg("Error loading magic-folder config: {}".format(e))
                raise

            # start processing the upload queue when we've connected to
            # enough servers
            threshold = min(self.encoding_params["k"],
                            self.encoding_params["happy"] + 1)

            for (name, mf_config) in magic_folders.items():
                self.log("Starting magic_folder '{}'".format(name))
                db_filename = os.path.join(self.basedir, "private", "magicfolder_{}.sqlite".format(name))
                local_dir_config = mf_config['directory']
                try:
                    poll_interval = int(mf_config["poll_interval"])
                except ValueError:
                    raise ValueError("'poll_interval' option must be an int")

                s = magic_folder.MagicFolder(
                    client=self,
                    upload_dircap=mf_config["upload_dircap"].encode('ascii'),
                    collective_dircap=mf_config["collective_dircap"].encode('ascii'),
                    local_path_u=abspath_expanduser_unicode(local_dir_config, base=self.basedir),
                    dbfile=abspath_expanduser_unicode(db_filename),
                    # was 0077, a Python-2-only octal literal that is a
                    # syntax error under Python 3 (same value: 0o077)
                    umask=self.get_config("magic_folder", "download.umask", 0o077),
                    name=name,
                    downloader_delay=poll_interval,
                )
                self._magic_folders[name] = s
                s.setServiceParent(self)
                s.startService()

                connected_d = self.storage_broker.when_connected_enough(threshold)
                def connected_enough(ign, mf):
                    mf.ready()  # returns a Deferred we ignore
                    return None
                connected_d.addCallback(connected_enough, s)
Example no. 11
0
def leave(options):
    """
    Remove a magic-folder: delete it from the YAML config, remove its
    local state database and, if it was the last folder, disable the
    [magic_folder] section of tahoe.cfg entirely.

    Returns 0 on success, 1 when no folders (or no folder of this name)
    exist.
    """
    # The module was renamed in Python 3, whose ConfigParser is the
    # equivalent of Python 2's SafeConfigParser; fall back for py2.
    try:
        from configparser import ConfigParser as SafeConfigParser
    except ImportError:  # Python 2
        from ConfigParser import SafeConfigParser

    existing_folders = load_magic_folders(options["node-directory"])

    if not existing_folders:
        print("No magic-folders at all", file=options.stderr)
        return 1

    if options["name"] not in existing_folders:
        print("No such magic-folder '{}'".format(options["name"]),
              file=options.stderr)
        return 1

    privdir = os.path.join(options["node-directory"], u"private")
    db_fname = os.path.join(privdir,
                            u"magicfolder_{}.sqlite".format(options["name"]))

    # delete from YAML file and re-write it
    del existing_folders[options["name"]]
    save_magic_folders(options["node-directory"], existing_folders)

    # delete the database file; best-effort — warn but carry on
    try:
        fileutil.remove(db_fname)
    except Exception as e:
        print(
            "Warning: unable to remove %s due to %s: %s" %
            (quote_local_unicode_path(db_fname), e.__class__.__name__, str(e)),
            file=options.stderr)

    # if this was the last magic-folder, disable them entirely
    if not existing_folders:
        parser = SafeConfigParser()
        parser.read(os.path.join(options["node-directory"], u"tahoe.cfg"))
        parser.remove_section("magic_folder")
        with open(os.path.join(options["node-directory"], u"tahoe.cfg"),
                  "w") as f:
            parser.write(f)

    return 0
Example no. 12
0
def leave(options):
    """
    Delete a magic-folder configuration: drop it from the YAML config,
    remove its local database, and disable the [magic_folder] section
    of tahoe.cfg when no folders remain.

    Returns 0 on success, 1 when there is no folder of that name.
    """
    # Python 3 renamed the module; its ConfigParser matches the old
    # SafeConfigParser behavior, so alias it and fall back for py2.
    try:
        from configparser import ConfigParser as SafeConfigParser
    except ImportError:  # Python 2
        from ConfigParser import SafeConfigParser

    existing_folders = load_magic_folders(options["node-directory"])

    if not existing_folders:
        print("No magic-folders at all", file=options.stderr)
        return 1

    if options["name"] not in existing_folders:
        print("No such magic-folder '{}'".format(options["name"]), file=options.stderr)
        return 1

    privdir = os.path.join(options["node-directory"], u"private")
    db_fname = os.path.join(privdir, u"magicfolder_{}.sqlite".format(options["name"]))

    # delete from YAML file and re-write it
    del existing_folders[options["name"]]
    save_magic_folders(options["node-directory"], existing_folders)

    # delete the database file; best-effort — warn but carry on
    try:
        fileutil.remove(db_fname)
    except Exception as e:
        print("Warning: unable to remove %s due to %s: %s"
            % (quote_local_unicode_path(db_fname), e.__class__.__name__, str(e)), file=options.stderr)

    # if this was the last magic-folder, disable them entirely
    if not existing_folders:
        parser = SafeConfigParser()
        parser.read(os.path.join(options["node-directory"], u"tahoe.cfg"))
        parser.remove_section("magic_folder")
        with open(os.path.join(options["node-directory"], u"tahoe.cfg"), "w") as f:
            parser.write(f)

    return 0
Example no. 13
0
 def get_caps_from_files(self, client_num):
     """Return (collective_dircap, upload_dircap) for the given client's 'default' magic-folder."""
     from allmydata.frontends.magic_folder import load_magic_folders
     default = load_magic_folders(self.get_clientdir(i=client_num))["default"]
     return default['collective_dircap'], default['upload_dircap']
Example no. 14
0
def status(options):
    """
    Print a status report for one magic-folder: the contents of our
    local DMD, every participant's remote files, and any pending or
    failed uploads/downloads reported by the running node.

    Returns 0 on success, 2 when data retrieval fails, 3 on a
    server-reported error.  Raises Exception when the named folder
    does not exist.
    """
    nodedir = options["node-directory"]
    stdout, stderr = options.stdout, options.stderr
    magic_folders = load_magic_folders(os.path.join(options["node-directory"]))

    # the token authenticates our POST to the node's web API
    with open(os.path.join(nodedir, u'private', u'api_auth_token'), 'rb') as f:
        token = f.read()

    print("Magic-folder status for '{}':".format(options["name"]), file=stdout)

    if options["name"] not in magic_folders:
        raise Exception("No such magic-folder '{}'".format(options["name"]))

    dmd_cap = magic_folders[options["name"]]["upload_dircap"]
    collective_readcap = magic_folders[options["name"]]["collective_dircap"]

    # do *all* our data-retrievals first in case there's an error
    try:
        dmd_data = _get_json_for_cap(options, dmd_cap)
        remote_data = _get_json_for_cap(options, collective_readcap)
        magic_data = _get_json_for_fragment(options,
                                            'magic_folder?t=json',
                                            method='POST',
                                            post_args=dict(
                                                t='json',
                                                name=options["name"],
                                                token=token,
                                            ))
    except Exception as e:
        print("failed to retrieve data: %s" % str(e), file=stderr)
        return 2

    for d in [dmd_data, remote_data, magic_data]:
        if isinstance(d, dict) and 'error' in d:
            print("Error from server: %s" % d['error'], file=stderr)
            print("This means we can't retrieve the remote shared directory.",
                  file=stderr)
            return 3

    captype, dmd = dmd_data
    if captype != 'dirnode':
        print("magic_folder_dircap isn't a directory capability", file=stderr)
        return 2

    # naive local time; linkcrtime appears to be a local timestamp too
    now = datetime.now()

    print("Local files:", file=stdout)
    for (name, child) in dmd['children'].items():
        captype, meta = child
        status = 'good'
        size = meta['size']
        created = datetime.fromtimestamp(
            meta['metadata']['tahoe']['linkcrtime'])
        version = meta['metadata']['version']
        nice_size = abbreviate_space(size)
        nice_created = abbreviate_time(now - created)
        if captype != 'filenode':
            print("%20s: error, should be a filecap" % name, file=stdout)
            continue
        print("  %s (%s): %s, version=%s, created %s" %
              (name, nice_size, status, version, nice_created),
              file=stdout)

    print(file=stdout)
    print("Remote files:", file=stdout)

    captype, collective = remote_data
    for (name, data) in collective['children'].items():
        if data[0] != 'dirnode':
            print("Error: '%s': expected a dirnode, not '%s'" %
                  (name, data[0]),
                  file=stdout)
        print("  %s's remote:" % name, file=stdout)
        dmd = _get_json_for_cap(options, data[1]['ro_uri'])
        if isinstance(dmd, dict) and 'error' in dmd:
            print("    Error: could not retrieve directory", file=stdout)
            continue
        if dmd[0] != 'dirnode':
            print("Error: should be a dirnode", file=stdout)
            continue
        for (n, d) in dmd[1]['children'].items():
            if d[0] != 'filenode':
                print("Error: expected '%s' to be a filenode." % (n, ),
                      file=stdout)

            meta = d[1]
            status = 'good'
            size = meta['size']
            created = datetime.fromtimestamp(
                meta['metadata']['tahoe']['linkcrtime'])
            version = meta['metadata']['version']
            nice_size = abbreviate_space(size)
            nice_created = abbreviate_time(now - created)
            print("    %s (%s): %s, version=%s, created %s" %
                  (n, nice_size, status, version, nice_created),
                  file=stdout)

    if len(magic_data):
        uploads = [item for item in magic_data if item['kind'] == 'upload']
        downloads = [item for item in magic_data if item['kind'] == 'download']
        longest = max([len(item['path']) for item in magic_data])

        # maybe gate this with --show-completed option or something?
        uploads = [item for item in uploads if item['status'] != 'success']
        downloads = [item for item in downloads if item['status'] != 'success']

        if len(uploads):
            # fix: blank separator previously went to the process stdout
            # (bare print()) instead of options.stdout like every other line
            print(file=stdout)
            print("Uploads:", file=stdout)
            for item in uploads:
                _print_item_status(item, now, longest)

        if len(downloads):
            print(file=stdout)
            print("Downloads:", file=stdout)
            for item in downloads:
                _print_item_status(item, now, longest)

        for item in magic_data:
            if item['status'] == 'failure':
                print("Failed:", item, file=stdout)

    return 0
Example no. 15
0
 def get_caps_from_files(self, client_num):
     """Read client ``client_num``'s magic-folder config and return the
     (collective_dircap, upload_dircap) pair of its 'default' folder."""
     from allmydata.frontends.magic_folder import load_magic_folders
     clientdir = self.get_clientdir(i=client_num)
     mf = load_magic_folders(clientdir)["default"]
     return (mf['collective_dircap'], mf['upload_dircap'])
Example no. 16
0
def status(options):
    """
    Report the status of one magic-folder: local DMD contents, each
    participant's remote files, plus pending and failed transfers from
    the node's status API.

    Returns 0 on success, 2 when data retrieval fails, 3 on a
    server-reported error.  Raises Exception when the named folder is
    not configured.
    """
    nodedir = options["node-directory"]
    stdout, stderr = options.stdout, options.stderr
    magic_folders = load_magic_folders(os.path.join(options["node-directory"]))

    # the token authenticates our POST to the node's web API
    with open(os.path.join(nodedir, u'private', u'api_auth_token'), 'rb') as f:
        token = f.read()

    print("Magic-folder status for '{}':".format(options["name"]), file=stdout)

    if options["name"] not in magic_folders:
        raise Exception(
            "No such magic-folder '{}'".format(options["name"])
        )

    dmd_cap = magic_folders[options["name"]]["upload_dircap"]
    collective_readcap = magic_folders[options["name"]]["collective_dircap"]

    # do *all* our data-retrievals first in case there's an error
    try:
        dmd_data = _get_json_for_cap(options, dmd_cap)
        remote_data = _get_json_for_cap(options, collective_readcap)
        magic_data = _get_json_for_fragment(
            options,
            'magic_folder?t=json',
            method='POST',
            post_args=dict(
                t='json',
                name=options["name"],
                token=token,
            )
        )
    except Exception as e:
        print("failed to retrieve data: %s" % str(e), file=stderr)
        return 2

    for d in [dmd_data, remote_data, magic_data]:
        if isinstance(d, dict) and 'error' in d:
            print("Error from server: %s" % d['error'], file=stderr)
            print("This means we can't retrieve the remote shared directory.", file=stderr)
            return 3

    captype, dmd = dmd_data
    if captype != 'dirnode':
        print("magic_folder_dircap isn't a directory capability", file=stderr)
        return 2

    # naive local time; linkcrtime appears to be a local timestamp too
    now = datetime.now()

    print("Local files:", file=stdout)
    for (name, child) in dmd['children'].items():
        captype, meta = child
        status = 'good'
        size = meta['size']
        created = datetime.fromtimestamp(meta['metadata']['tahoe']['linkcrtime'])
        version = meta['metadata']['version']
        nice_size = abbreviate_space(size)
        nice_created = abbreviate_time(now - created)
        if captype != 'filenode':
            print("%20s: error, should be a filecap" % name, file=stdout)
            continue
        print("  %s (%s): %s, version=%s, created %s" % (name, nice_size, status, version, nice_created), file=stdout)

    print(file=stdout)
    print("Remote files:", file=stdout)

    captype, collective = remote_data
    for (name, data) in collective['children'].items():
        if data[0] != 'dirnode':
            print("Error: '%s': expected a dirnode, not '%s'" % (name, data[0]), file=stdout)
        print("  %s's remote:" % name, file=stdout)
        dmd = _get_json_for_cap(options, data[1]['ro_uri'])
        if isinstance(dmd, dict) and 'error' in dmd:
            print("    Error: could not retrieve directory", file=stdout)
            continue
        if dmd[0] != 'dirnode':
            print("Error: should be a dirnode", file=stdout)
            continue
        for (n, d) in dmd[1]['children'].items():
            if d[0] != 'filenode':
                print("Error: expected '%s' to be a filenode." % (n,), file=stdout)

            meta = d[1]
            status = 'good'
            size = meta['size']
            created = datetime.fromtimestamp(meta['metadata']['tahoe']['linkcrtime'])
            version = meta['metadata']['version']
            nice_size = abbreviate_space(size)
            nice_created = abbreviate_time(now - created)
            print("    %s (%s): %s, version=%s, created %s" % (n, nice_size, status, version, nice_created), file=stdout)

    if len(magic_data):
        uploads = [item for item in magic_data if item['kind'] == 'upload']
        downloads = [item for item in magic_data if item['kind'] == 'download']
        longest = max([len(item['path']) for item in magic_data])

        # maybe gate this with --show-completed option or something?
        uploads = [item for item in uploads if item['status'] != 'success']
        downloads = [item for item in downloads if item['status'] != 'success']

        if len(uploads):
            # fix: blank separator previously went to the process stdout
            # (bare print()) instead of options.stdout like every other line
            print(file=stdout)
            print("Uploads:", file=stdout)
            for item in uploads:
                _print_item_status(item, now, longest)

        if len(downloads):
            print(file=stdout)
            print("Downloads:", file=stdout)
            for item in downloads:
                _print_item_status(item, now, longest)

        for item in magic_data:
            if item['status'] == 'failure':
                print("Failed:", item, file=stdout)

    return 0