Example #1
    def load_cached_providers(cls, credential_manager):
        """
        Attempts to load all Providers of this type that have user credentials stored
        Returns:
            (providers, failed_identifiers)
            providers: a list of functional Providers
            failed_identifiers: a list of (provider identifier, uuid) tuples for the accounts that failed to load
        """
        providers = []
        failed_ids = []

        def run_load(provider_id):
            try:
                provider = cls.load_from_credential(credential_manager,
                                                    provider_id)
                providers.append(provider)
            except Exception as e:
                logger.debug("Exception loading cached providers from %s: %s",
                             cls, e)
                failed_ids.append((cls.provider_identifier(), provider_id))

        credentials = credential_manager.get_user_credentials(cls)
        run_parallel(
            run_load, map(lambda provider_id: [provider_id],
                          credentials.keys()))

        return providers, failed_ids
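
A minimal usage sketch for the classmethod above. It assumes a concrete provider class such as TestServerProvider (seen in Example #6) exposes this method and that the CredentialManager is used as in Example #6; treat the specifics as illustrative.

# hedged sketch: TestServerProvider and CredentialManager appear in Example #6
cm = CredentialManager()
cm.load()
providers, failed_ids = TestServerProvider.load_cached_providers(cm)
# failed_ids holds (provider identifier, account uuid) pairs for accounts that did not load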
Example #2
    def put(self, filename, data, key=None):
        """
        Args:
            filename: string
            data: bytestring
            key: an optional key used to encrypt
        Returns:
            key: bytestring of the key used for encryption
        Raises:
            FatalOperationFailure if any provider failed
        """
        # encrypt
        if key is None:
            key = encryption.generate_key()
        ciphertext = encryption.encrypt(data, key)

        # compute RS
        shares = erasure_encoding.share(ciphertext,
                                        self.file_reconstruction_threshold,
                                        self.num_providers)

        # upload to each provider
        def upload(provider, share):
            provider.put(filename, share)

        failures = run_parallel(upload, zip(self.providers, shares))

        if len(failures) > 0:
            raise exceptions.FatalOperationFailure(failures)

        return key
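
A usage sketch for put. Here `distributor` stands for an already-constructed instance of the class that defines this method; its constructor is not part of this example, so the setup is assumed rather than shown.

# hedged sketch: `distributor` is an instance of the class shown above
key = distributor.put("report.txt", "quarterly numbers")  # a fresh key is generated
# keep the returned key; it is needed to decrypt the file again with get()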
Example #3
    def provision(providers, bootstrap_reconstruction_threshold,
                  file_reconstruction_threshold):
        """
        Create a new Daruma.
        Warning: deletes all files on all providers! Even if a FatalOperationFailure is raised, files on all providers will be left unstable or deleted.
        Args:
            providers: a list of providers
            bootstrap_reconstruction_threshold: the number of providers that need to be up to recover the key. Between 1 and len(providers)-1, inclusive
            file_reconstruction_threshold: the number of providers that need to be up to read files, given the key. Between 1 and len(providers)-1, inclusive
        Returns a constructed Daruma object
        Raises:
            ValueError if arguments are invalid
            FatalOperationFailure if provisioning failed
        """
        logger.debug("provisioning: brt=%d, frt=%d",
                     bootstrap_reconstruction_threshold,
                     file_reconstruction_threshold)
        Daruma._assert_valid_params(providers,
                                    bootstrap_reconstruction_threshold,
                                    file_reconstruction_threshold)
        # make a copy of providers so that changes to the external list don't affect this one
        providers = providers[:]

        def wipe(provider):
            provider.wipe()

        failures = run_parallel(wipe,
                                map(lambda provider: [provider], providers))
        if len(failures) > 0:
            raise exceptions.FatalOperationFailure(failures)

        master_key = generate_key()
        manifest_name = generate_random_name()
        bootstrap = Bootstrap(master_key, manifest_name,
                              file_reconstruction_threshold)

        # the bootstrap manager uses SSSS
        bootstrap_manager = BootstrapManager(
            providers, bootstrap_reconstruction_threshold)

        try:
            bootstrap_manager.distribute_bootstrap(bootstrap)
        except exceptions.FatalOperationFailure:
            # TODO check for network error
            raise

        file_manager = FileManager(providers,
                                   len(providers),
                                   file_reconstruction_threshold,
                                   master_key,
                                   manifest_name,
                                   setup=True)
        resilience_manager = ResilienceManager(providers, file_manager,
                                               bootstrap_manager)
        return Daruma(bootstrap_manager,
                      file_manager,
                      resilience_manager,
                      load_manifest=False)
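
A sketch of provisioning a new Daruma instance, reusing the provider-loading flow from Examples #6 and #7. It assumes provision is exposed as Daruma.provision and that at least three providers were loaded, so thresholds of 2 fall in the allowed 1..len(providers)-1 range.

# hedged sketch: provider_manager is constructed as in Example #6
providers, errors = provider_manager.load_all_providers_from_credentials()
daruma = Daruma.provision(providers,
                          bootstrap_reconstruction_threshold=2,
                          file_reconstruction_threshold=2)
# WARNING: as documented above, provisioning wipes every file on every provider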
Example #4
    def delete(self, filename):
        def delete_share(provider):
            provider.delete(filename)

        args = map(lambda provider: [provider], self.providers)
        failures = run_parallel(delete_share, args)

        if len(failures) > 0:
            raise exceptions.FatalOperationFailure(failures)
Example #5
    def reprovision(self, providers, bootstrap_reconstruction_threshold,
                    file_reconstruction_threshold):
        """
        Update the thresholds and providers for the system
        Will redistribute every file across the new provider list
        This method is thread-safe.
        Args:
            providers: a list of provider objects across which to distribute
            bootstrap_reconstruction_threshold: the new bootstrap threshold. Between 1 and len(providers)-1, inclusive
            file_reconstruction_threshold: the new file threshold. Between 1 and len(providers)-1, inclusive
        Raises:
            ValueError if arguments are invalid
        """
        logger.debug("reprovisioning: brt=%d, frt=%d",
                     bootstrap_reconstruction_threshold,
                     file_reconstruction_threshold)
        Daruma._assert_valid_params(providers,
                                    bootstrap_reconstruction_threshold,
                                    file_reconstruction_threshold)
        # do nothing if params are the same
        if providers == self.file_manager.providers and \
           bootstrap_reconstruction_threshold == self.bootstrap_manager.bootstrap_reconstruction_threshold and \
           file_reconstruction_threshold == self.file_manager.file_reconstruction_threshold:
            return

        old_providers = self.file_manager.providers

        self._set_all_internal_providers(providers)

        self.bootstrap_manager.bootstrap_reconstruction_threshold = bootstrap_reconstruction_threshold
        self.file_manager.file_reconstruction_threshold = file_reconstruction_threshold

        self._reset()

        # wipe the providers that were used previously but aren't any longer
        def remove(provider):
            provider.remove()

        run_parallel(
            remove,
            map(lambda provider: [provider],
                set(old_providers) - set(providers)))
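
A sketch of reprovisioning onto a different provider set. `daruma` is a previously constructed Daruma (for instance from Example #3) and `new_providers` is any valid provider list with at least four entries, so the thresholds below stay in range.

# hedged sketch: redistribute all files across new_providers with new thresholds
daruma.reprovision(new_providers,
                   bootstrap_reconstruction_threshold=3,
                   file_reconstruction_threshold=2)
# providers dropped from the list have remove() called on them in parallel, as shown above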
Example #6
def main(args):
    provider_manager = ProviderManager()
    cm = CredentialManager()
    cm.load()

    providers, errors = provider_manager.load_all_providers_from_credentials()

    def wipe(provider):
        try:
            provider.wipe()
        except Exception:
            print "failed to wipe provider: " + provider.provider_name()

    run_parallel(wipe, map(lambda provider: [provider], providers))

    behavior = args[0]
    cm.clear_user_credentials(provider_class=None,
                              account_identifier=None)  # clear the file

    if behavior == "setup":
        # copy in default credentials (Dropbox, GoogleDrive, OneDrive)
        default_credentials = os.path.join(cm.config_dir,
                                           "default_credentials.json")
        current_credentials = os.path.join(cm.config_dir,
                                           "user_credentials.json")
        try:
            copyfile(default_credentials, current_credentials)
        except IOError:
            print "credential reset to setup state failed, see default_credentials.json"

        if len(args) == 2:  # write credentials for the demo server
            cm.load()  # reload the credential manager with updated store

            ip_block = args[1]
            url = "http://158.130.108." + ip_block + ":5000"
            demo_server = TestServerProvider(cm)

            try:
                demo_server.connect(url)
            except exceptions.ConnectionFailure:
                print "please start a demo server at: " + url
Example #7
    def load_all_providers_from_credentials(self):
        """
        Get all providers of exposed types that can be loaded from the underlying credential_manager
        Returns (cached_providers, failed_ids)
            cached_providers: a list of loaded provider objects
            failed_ids: the uuids of providers that failed to load
        """
        def flatten(list_of_lists):
            return [item for sublist in list_of_lists for item in sublist]

        providers_and_errors = []

        def load_cached_by_class(provider_class):
            providers_and_errors.append(
                provider_class.load_cached_providers(self.credential_manager))

        provider_classes = self.get_provider_classes()
        run_parallel(
            load_cached_by_class,
            map(lambda provider_class: [provider_class], provider_classes))

        return tuple(map(flatten, zip(*providers_and_errors)))
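
The final line merges the per-class (providers, failed_ids) pairs into two flat lists. A small standalone illustration of that zip/flatten step, with made-up values:

# illustrative only: how zip(*...) plus flatten combines per-class results
def flatten(list_of_lists):
    return [item for sublist in list_of_lists for item in sublist]

providers_and_errors = [(["dropbox_1", "dropbox_2"], []),
                        (["gdrive_1"], ["bad-uuid"])]
merged = tuple(map(flatten, zip(*providers_and_errors)))
# merged == (["dropbox_1", "dropbox_2", "gdrive_1"], ["bad-uuid"])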
Example #8
    def get(self, filename, key):
        """
        Args:
            filename: string
            key: bytestring
        Returns:
            result: bytestring
        Raises:
             FileReconstructionError
             OperationFailure if any provider failed
        """
        # map from share to provider returning that share
        shares_map = {}

        def get_share(provider):
            shares_map[provider.get(filename)] = provider

        args = map(lambda provider: [provider], self.providers)
        failures = run_parallel(get_share, args)

        shares = shares_map.keys()

        if len(shares) < self.file_reconstruction_threshold:
            raise exceptions.FatalOperationFailure(failures)

        data, bad_shares = self._recover(shares, key)

        for bad_share in bad_shares:
            failures.append(
                exceptions.InvalidShareFailure(shares_map[bad_share],
                                               "distributor invalid share"))

        if data is None:
            raise exceptions.FatalOperationFailure(
                failures, "data was none, with %s shares" % len(shares))

        if len(failures) > 0:
            raise exceptions.OperationFailure(failures, data)

        return data
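
A round-trip sketch combining put (Example #2) and get. As before, `distributor` stands for an instance of the containing class, which is assumed rather than shown.

# hedged sketch: store a file and reconstruct it with the key returned by put()
key = distributor.put("notes.txt", "hello world")
data = distributor.get("notes.txt", key)
assert data == "hello world"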
Example #9
    def distribute_bootstrap(self, bootstrap):
        """
        Secret-Share distribute a Bootstrap object to all providers
        """
        self.num_providers = len(self.providers)

        string = str(bootstrap)

        provider_ids = map(str, xrange(len(self.providers)))

        # make a random version
        version = generate_random_name()

        # compute new shares using len(providers) and bootstrap_reconstruction_threshold
        shares_map = shamir_secret_sharing.share(
            string, provider_ids, self.bootstrap_reconstruction_threshold)

        # write shares to providers
        def put_to_provider(provider, filename, data):
            provider.put(filename, data)

        args = []
        for provider, provider_id in zip(self.providers, provider_ids):
            share = shares_map[provider_id]
            bootstrap_plaintext = self._make_bootstrap_plaintext(
                self.bootstrap_reconstruction_threshold, self.num_providers,
                provider_id, version)

            args.append([
                provider, self.BOOTSTRAP_PLAINTEXT_FILE_NAME,
                zlib.compress(bootstrap_plaintext)
            ])
            args.append(
                [provider, self.BOOTSTRAP_FILE_NAME,
                 zlib.compress(share)])

        failures = run_parallel(put_to_provider, args)

        if len(failures) > 0:
            raise exceptions.FatalOperationFailure(failures)
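
Example #3 shows where distribute_bootstrap is called. A condensed sketch of that flow, with the names taken from Example #3 and the providers and thresholds assumed to be defined and valid:

# hedged sketch, mirroring Example #3: split a fresh Bootstrap across the providers
master_key = generate_key()
manifest_name = generate_random_name()
bootstrap = Bootstrap(master_key, manifest_name, file_reconstruction_threshold)
bootstrap_manager = BootstrapManager(providers, bootstrap_reconstruction_threshold)
bootstrap_manager.distribute_bootstrap(bootstrap)  # raises FatalOperationFailure on error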
Example #10
    def _download_and_vote(self):
        """
        Recovers n, bootstrap threshold, shares, and failures from providers if there is a consensus
        A provider may be considered failing when one of the following cases is met:
            a provider experiences some error that prevents a returned share
            a provider gives a share with structural errors (wrong type, cannot decompress or unpack, etc.)
            the provider does not vote for the consensus n, bootstrap threshold
            a provider id is of the wrong type or out of the predetermined range
        Returns:
            threshold, the voted bootstrap threshold
            n, the voted n
            provider_id_map, a map from id to list of providers who claim to have that id
            shares_map, a map from provider to its share
            failures, a list of failures from failing providers
        Raises FatalOperationFailure if unable to reach consensus
        """
        def vote_for_params(vote_map):
            """
            Args:
                vote_map: maps tuple of (threshold, n, version) votes to providers that voted
            Returns:
                (threshold, n), failures
                threshold: the voted bootstrap threshold
                n: the voted n
                failed_providers: a list of providers who did not vote for the winning vote if one was found
            Raises:
                FatalOperationFailure: no parameters were returned or there were too few providers in agreement to support the consensus threshold
            """
            # vote on threshold
            largest_group_size = 0
            winning_vote = None
            for vote, sources in vote_map.items():
                if len(sources) > largest_group_size:
                    winning_vote = vote
                    largest_group_size = len(sources)

            if winning_vote is not None:
                threshold, n, version = winning_vote

            # we protect against (k-1) providers failing
            # so, a group of defectors larger than k are outside threat model
            # just ensure that the largest group is size at least k
            if winning_vote is None or largest_group_size < threshold:
                raise exceptions.FatalOperationFailure(
                    [], "didn't reach bootstrap vote consensus")

            failed_providers = []
            # add all providers who misvoted to failures
            for vote, sources in vote_map.items():
                if vote != winning_vote:
                    failed_providers += sources

            return threshold, n, failed_providers

        provider_id_map = defaultdict(list)
        shares_map = {}  # maps provider to bootstrap share
        vote_map = defaultdict(
            list)  # maps (threshold, n, version) vote to providers that voted

        def get_bootstrap_plaintext(provider):
            try:
                bootstrap_plaintext = zlib.decompress(
                    provider.get(self.BOOTSTRAP_PLAINTEXT_FILE_NAME))
                threshold_vote, n_vote, provider_id, version = self._parse_bootstrap_plaintext(
                    bootstrap_plaintext)

                # track provider votes for bootstrap threshold, n values, and version
                vote_map[(int(threshold_vote), int(n_vote),
                          version)].append(provider)
                provider_id_map[provider_id].append(provider)
            except ValueError:
                raise exceptions.InvalidShareFailure(
                    provider,
                    "Got invalid boostrap share from " + str(provider))

        def get_bootstrap_share(provider):
            try:
                shares_map[provider] = zlib.decompress(
                    provider.get(self.BOOTSTRAP_FILE_NAME))
            except zlib.error:
                raise exceptions.InvalidShareFailure(provider)

        def run_on_provider(func, provider):
            return func(provider)

        args = []
        for provider in self.providers:
            args.append([get_bootstrap_plaintext, provider])
            args.append([get_bootstrap_share, provider])

        failures = run_parallel(run_on_provider, args)

        # we don't want to double penalize if a provider fails in the same way on both files
        failures = list(set(failures))

        try:
            threshold, n, voting_failures = vote_for_params(vote_map)
        except exceptions.FatalOperationFailure as e:
            raise exceptions.FatalOperationFailure(failures + e.failures)

        # add all providers with invalid id to failures
        for provider_id, providers in provider_id_map.items():
            try:
                provider_id = int(provider_id)
                assert provider_id < n and provider_id >= 0
            except (AssertionError, ValueError):
                for provider in providers:
                    failures.append(exceptions.InvalidShareFailure(provider))
                    # remove the provider's share from shares_map
                    shares_map.pop(provider, None)

        # if a provider misvoted, don't use its share - delete it if it hasn't been deleted already
        for provider in voting_failures:
            shares_map.pop(provider, None)

        # ensure that we have at least threshold shares
        if len(shares_map.values()) < threshold:
            raise exceptions.FatalOperationFailure(failures)

        return threshold, n, provider_id_map, shares_map, failures
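
A standalone illustration of the consensus rule inside vote_for_params: the largest voting block wins, but only if it is at least as large as the threshold it voted for. The values below are made up.

# illustrative only, not the real class: five providers vote on (threshold, n, version)
vote_map = {
    (2, 5, "v7"): ["provider_a", "provider_b", "provider_c"],  # majority block
    (3, 5, "v3"): ["provider_d", "provider_e"],                # stale minority
}
winning_vote = max(vote_map, key=lambda vote: len(vote_map[vote]))
threshold, n, version = winning_vote
# consensus holds because the winning block (3 providers) >= threshold (2);
# the minority voters are reported as failures and their shares are discarded
assert len(vote_map[winning_vote]) >= threshold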