def sync_from_block_store(self, root_block_id, src_block_store, **kwargs) :
    """Pull the blocks referenced by root_block_id from a remote block
    store into this key value store.

    The store must be closed before syncing. Any block that is unknown
    locally, or whose remaining duration is below the minimum, is pulled
    from the source store.

    :param str root_block_id: block identifier for the root block
    :param src_block_store: object implementing the block store read
        interface (get_block, get_blocks)
    :param int minimum_duration: optional kwarg; refresh threshold in
        seconds for nearly-expired blocks
    :return int: number of blocks pulled from the source store
    """
    if self.__handle__ is not None :
        raise Exception("key value store must be closed to sync")

    # only consult the shared configuration when the caller did not
    # provide an explicit override
    minimum_duration = kwargs.get('minimum_duration')
    if minimum_duration is None :
        minimum_duration = pconfig.shared_configuration(['Replication', 'MinimumDuration'], 5)

    try :
        root_block = src_block_store.get_block(root_block_id)
    except Exception :
        logger.exception('failed to get root block from remote kv store')
        raise

    # the root block may arrive as bytes; normalize to str
    try :
        root_block = root_block.decode('utf8')
    except AttributeError :
        pass

    root_block = root_block.rstrip('\0')
    root_block_json = json.loads(root_block)
    block_ids = [root_block_id] + root_block_json['BlockIds']

    # check to see which blocks need to be pulled
    blocks_to_extend = []
    blocks_to_pull = []
    for block_id in block_ids :
        block_status = self.__check_block__(block_id)

        # if the size is 0 then the block is unknown to the storage service
        if block_status['size'] == 0 :
            blocks_to_pull.append(block_id)

        # if the expiration is nearing, then add to the list to extend; the
        # policy is to extend any block within minimum_duration of expiring
        elif block_status['duration'] < minimum_duration :
            blocks_to_extend.append(block_id)

    # there is currently no operation to simply extend the expiration of
    # an existing block, so for now just re-pull the blocks to extend
    blocks_to_pull += blocks_to_extend

    if len(blocks_to_pull) == 0 :
        return 0

    block_data_list = src_block_store.get_blocks(blocks_to_pull)
    for block_data in block_data_list :
        self.__store_block__(block_data, input_encoding='raw')

    self.hash_identity = root_block_id
    return len(blocks_to_pull)
def build_file_name(basename, data_dir=None, data_sub=None, extension=''):
    """build a file name from the basename and directory; this is a
    common operation for scripts that process a configuration file

    :param str basename: base name of a file, may be a full path, may have an extension
    :param str data_dir: directory where the file will be placed
    :param str data_sub: subdirectory where the files of this type are stored
    :param str extension: the extension to add to the file if it doesnt have one
    """

    global __default_data_directory__

    # fall back to the configured data directory, loading the
    # configuration lazily the first time it is needed
    if data_dir is None:
        if __default_data_directory__ is None:
            import pdo.common.config as pconfig
            __default_data_directory__ = pconfig.shared_configuration(
                ['Contract', 'DataDirectory'], "./data")

        data_dir = __default_data_directory__

    target_dir = data_dir if data_sub is None else os.path.join(data_dir, data_sub)

    # a basename that carries a directory component (e.g. './abc') is
    # treated as an explicit path and resolved as-is; os.path.abspath
    # only works for full paths, not relative paths
    if os.path.split(basename)[0]:
        return os.path.realpath(basename)

    # append the extension only when the basename does not already end with it
    suffix = '' if basename[-len(extension):] == extension else extension
    return os.path.join(target_dir, basename + suffix)
# Example #3
def local_block_manager():
    """Return the process-wide BlockStoreManager singleton, creating it
    on first use from the configured block store file."""
    global __local_block_manager__

    if __local_block_manager__ is not None:
        return __local_block_manager__

    # lazily create the manager from the configured (or default) store file
    block_store_file = pconfig.shared_configuration(
        ['StorageService', 'BlockStore'], "./blockstore.mdb")
    __local_block_manager__ = BlockStoreManager(block_store_file, True)

    return __local_block_manager__
def KeyValueInitialize(block_store_file = None) :
    """Open the process-wide key value block store.

    :param str block_store_file: optional path to the store file; defaults
        to the configured ['StorageService', 'KeyValueStore'] location
    :raises Exception: if the block store has already been initialized
    """
    global __block_store_initialized__
    if __block_store_initialized__ :
        raise Exception("duplicate block store initialization")

    if block_store_file is None :
        block_store_file = pconfig.shared_configuration(['StorageService', 'KeyValueStore'], "./keyvalue.mdb")

    # attach the logger unconditionally; previously it was only set when
    # the default store file was used, leaving the kvs module unlogged
    # for callers that supplied an explicit file
    kvs.SetLogger(logger)

    kvs.block_store_open(block_store_file)
    __block_store_initialized__ = True
    def set_replication_parameters(self,
                                   num_provable_replicas=None,
                                   availability_duration=None,
                                   **kwargs):
        """Record the replication policy for this object.

        :param int num_provable_replicas: replicas required for a proof;
            defaults to ['Replication', 'NumProvableReplicas'] (1)
        :param int availability_duration: seconds blocks must remain
            available; defaults to ['Replication', 'Duration'] (120)
        """
        # pull the defaults from the configuration if they are not
        # otherwise set by the caller
        if num_provable_replicas is None:
            num_provable_replicas = pconfig.shared_configuration(
                ['Replication', 'NumProvableReplicas'], 1)

        if availability_duration is None:
            availability_duration = pconfig.shared_configuration(
                ['Replication', 'Duration'], 120)

        self.replication_params = dict()
        self.replication_params[
            'num_provable_replicas'] = num_provable_replicas
        self.replication_params[
            'availability_duration'] = availability_duration  #seconds

        # we replicate to storage services associated with all provisioned enclaves;
        # materialize the keys so the stored value is a stable, serializable list
        # rather than a live dict view over enclave_map
        self.replication_params['service_ids'] = list(self.enclave_map.keys())
# Example #6
def sync_block_store(src_block_store,
                     dst_block_store,
                     root_block_id,
                     root_block=None,
                     **kwargs):
    """
    ensure that required blocks are stored in the storage service

    assumes that all of the blocks referenced by root_block_id are in the source
    block manager

    :param src_block_store: object implementing the block_store_manager interface
    :param dst_block_store: object implementing the block_store_manager interface
    :param str root_block_id: block identifier for the root block
    :param str root_block: block data for the root block
    :param int minimum_duration: optional kwarg; refresh threshold in seconds
    :param int duration: optional kwarg; expiration requested for pushed blocks
    :return int: number of blocks pushed to the destination store
    """
    if root_block is None:
        root_block = src_block_store.get_block(root_block_id)

    block_ids = [root_block_id]

    # the root block may arrive as bytes; normalize to str
    try:
        root_block = root_block.decode('utf8')
    except AttributeError:
        pass

    root_block = root_block.rstrip('\0')
    root_block_json = json.loads(root_block)
    block_ids.extend(root_block_json['BlockIds'])

    # only consult the shared configuration when the caller did not
    # provide an explicit override
    minimum_duration = kwargs.get('minimum_duration')
    if minimum_duration is None:
        minimum_duration = pconfig.shared_configuration(
            ['Replication', 'MinimumDuration'], 5)

    duration = kwargs.get('duration')
    if duration is None:
        duration = pconfig.shared_configuration(
            ['Replication', 'Duration'], 60)

    # check to see which blocks need to be pushed
    blocks_to_push = []
    blocks_to_extend = []
    block_status_list = dst_block_store.check_blocks(block_ids)

    for block_status in block_status_list:
        # if the size is 0 then the block is unknown to the storage service
        if block_status['size'] == 0:
            blocks_to_push.append(block_status['block_id'])
        # if the expiration is nearing, then add to the list to extend; the
        # policy is to extend any block within minimum_duration of expiring
        elif block_status['duration'] < minimum_duration:
            blocks_to_extend.append(block_status['block_id'])

    # there is currently no operation to simply extend the expiration of
    # an existing block, so for now just add the blocks to extend onto
    # the end of the blocks to push
    blocks_to_push += blocks_to_extend

    if len(blocks_to_push) == 0:
        return 0

    block_data_list = src_block_store.get_blocks(blocks_to_push)
    block_store_list = dst_block_store.store_blocks(block_data_list,
                                                    duration=duration)
    if block_store_list is None:
        raise Exception('failed to push blocks to block_store')

    return len(blocks_to_push)