def _get_body_ack(self):
    # N2H
    _res_type = self.ack_result_type
    _result = self.ack_result

    if _res_type == 'datasets':
        result = {_res_type: [i.to_json() for i in _result]}

    elif _res_type == 'files':
        result = \
            {_res_type: {dir_name: [f.to_json() for f in dir_files]
                             for dir_name, dir_files in _result.iteritems()}}

    elif _res_type == 'chunks':
        assert consists_of(_result.iterkeys(), UUID), repr(_result)
        assert consists_of(_result.itervalues(), UUID), repr(_result)
        result = {_res_type: {k.hex: v.hex
                                  for k, v in _result.iteritems()}}

    elif _res_type in ('cloud_stats', 'data_stats'):
        result = {_res_type: _result}

    else:
        raise NotImplementedError('Result type {!r} unsupported; '
                                  'value {!r}'
                                      .format(_res_type, _result))

    return result
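# Note: the C{consists_of} helper is used throughout these snippets but is
# not defined in this section. The sketch below is an illustration only,
# assuming (from how the asserts use it) that it checks whether every item
# of an iterable is an instance of the given type or tuple of types; the
# real implementation may differ.
def consists_of(iterable, types):
    """Check whether every item in C{iterable} is an instance of C{types}.

    >>> consists_of([1, 2, 3], int)
    True
    >>> consists_of(['a', 1], basestring)
    False

    @type types: type, tuple
    @rtype: bool
    """
    return all(isinstance(item, types) for item in iterable)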
def __invariant(self):
    """Verify the class invariants."""
    for d in (self.expect_replication, self.expect_restore):
        assert consists_of(d.iterkeys(), PeerUUID), repr(d.keys())
        assert consists_of(d.itervalues(), list), repr(d.values())
        assert all(consists_of(per_inh_list, Chunk)
                       for per_inh_list in d.itervalues()), \
               repr(d.values())

    return True
def _get_body(self):
    assert consists_of(self.chunks_map.iterkeys(), Host), \
           repr(self.chunks_map)
    assert consists_of((ch
                            for ch_list in self.chunks_map.itervalues()
                            for ch in ch_list),
                       Chunk), \
           repr(self.chunks_map)

    chunks_data = {host.uuid.hex: {'urls': host.urls,
                                   'chunks': [c.to_json() for c in chunks]}
                       for host, chunks in self.chunks_map.iteritems()}

    return {'chunks': chunks_data}
def _get_body_ack(self):
    # N2H
    assert isinstance(self.ack_hosts_to_use, dict)
    assert consists_of(self.ack_hosts_to_use.iterkeys(), UUID)
    assert consists_of(self.ack_hosts_to_use.itervalues(),
                       PerHostChunksData)
    assert isinstance(self.ack_result_code, int), self.ack_result_code

    result = {'result': self.ack_result_code}

    if ProvideBackupHostsMessage.ResultCodes.is_good(self.ack_result_code):
        result['hosts'] = {uuid.hex: per_host.to_json()
                               for uuid, per_host
                                   in self.ack_hosts_to_use.iteritems()}

    return result
def _get_body_ack(self):
    # N2H
    result = {}

    # Process the last update time
    if self.ack_lu_time is not None:
        # We have a "last update time" to report back to the host
        assert isinstance(self.ack_lu_time, datetime), \
               repr(self.ack_lu_time)
        result['last update time'] = strftime(self.ack_lu_time)
    else:
        # The settings were not updated on the node
        pass

    # Process the restore progress information
    if self.ack_restore_progress:
        result['restore'] = \
            map(HeartbeatMessage.serialize_restore_progress,
                self.ack_restore_progress)

    if self.inbox_update:
        result['msgs'] = HeartbeatMessage.serialize_inbox_update(
                             self.inbox_update)
        logger.verbose('Node sends inbox_update: %s', self.inbox_update)

    if self.auth_tokens:
        assert consists_of(self.auth_tokens, AuthToken), \
               repr(self.auth_tokens)
        result['auth_tokens'] = [t.to_json() for t in self.auth_tokens]
        logger.verbose('Node sends auth_tokens: %s', self.auth_tokens)

    return result
def __init__(self, dataset=None, host_chunks_map_getter=None, chunks=None,
             *args, **kwargs):
    """Constructor.

    @note: all arguments are optional and may be present or absent
        independently of each other.

    @todo: SERIALIZE OR AVOID C{host_chunks_map_getter}!

    @type dataset: NoneType, AbstractDataset
    @type host_chunks_map_getter: NoneType, col.Callable
    @type chunks: NoneType, col.Iterable

    @precondition: (dataset is not None or
                    host_chunks_map_getter is not None or
                    chunks is not None)
    """
    assert dataset is not None or \
           host_chunks_map_getter is not None or \
           chunks is not None, \
           (dataset, host_chunks_map_getter, chunks)

    super(ProgressTransaction_Host.State, self).__init__(*args, **kwargs)

    self.dataset = dataset
    self.host_chunks_map_getter = host_chunks_map_getter
    self.chunks = list(chunks) if chunks is not None else None

    assert self.chunks is None or consists_of(self.chunks, Chunk), \
           repr(self.chunks)
def _init_from_body_ack(self, body):
    # N2H
    # Process the last update time
    try:
        self.ack_lu_time = strptime(body['last update time'])
    except KeyError:
        # No such key, ignore
        pass

    # Process the restore progress
    if 'restore' in body:
        self.ack_restore_progress = \
            map(HeartbeatMessage.deserialize_restore_progress,
                body['restore'])

    if 'msgs' in body:
        self.inbox_update = HeartbeatMessage.deserialize_inbox_update(
                                body['msgs'])
        logger.verbose('Host received inbox_update: %s', self.inbox_update)

    if 'auth_tokens' in body:
        self.auth_tokens = [AuthToken.from_json(t)()
                                for t in body['auth_tokens']]
        assert consists_of(self.auth_tokens, AuthToken), \
               repr(self.auth_tokens)
        logger.verbose('Host received auth_tokens: %s', self.auth_tokens)
def _get_body(self):
    # H2N
    assert consists_of(self.payload, NotifyNodeMessage.AbstractPayload), \
           repr(self.payload)

    return [{'type': item.TYPE,
             'data': item.to_json()}
                for item in self.payload]
def __connectionMade(self):
    """
    @note: Cloned from t.w.c.HTTPPageGetter.connectionMade()
    """
    _factory = self.factory  # pylint:disable=E1101

    method = getattr(_factory, 'method', 'GET')
    _factory.session.connected(_factory.realm)
    self.sendCommand(method, _factory.path)
    self.sendHeader('Host', _factory.headers.get('host', _factory.host))
    self.sendHeader('User-Agent', _factory.agent)

    # If we are on the not-yet-digested phase of the LOGIN transaction
    # ("x-host-uuid-candidates" header is present, "Authorization" header
    # is absent), we don't evaluate the body.
    # This will make it easier to avoid the "Content-Length" header
    # completely in the future, when switching to HTTP/1.1 and
    # chunked transfer encoding.
    if _factory.headers.has_key('x-host-uuid-candidates') and \
       not _factory.headers.has_key('Authorization'):
        data = None
    else:
        data = getattr(_factory, 'postdata', None)

    if data is not None:
        self.sendHeader('Content-Length', str(len(data)))

    cookieData = []
    for (key, value) in _factory.headers.items():
        logger.debug('Sending headers: %r: %r', key, value)
        if key.lower() not in self._specialHeaders:
            # we calculated it on our own
            if isinstance(value, basestring):
                self.sendHeader(key, value)
            else:
                assert consists_of(value, basestring), repr(value)
                for l in value:
                    self.sendHeader(key, l)
        if key.lower() == 'cookie':
            cookieData.append(value)

    for cookie, cookval in _factory.cookies.items():
        cookieData.append('{}={}'.format(cookie, cookval))

    if cookieData:
        self.sendHeader('Cookie', '; '.join(cookieData))

    self.endHeaders()
    self.headers = {}

    if data is not None:
        # self.transport is t.i.tcp.TLSConnection
        # (see also t.i.tcp.Connection)
        if isinstance(data, basestring):
            # The body is the string with the contents.
            self.transport.write(data)
        elif isinstance(data, col.Iterable):
            # The body is the generator yielding the contents.
            producer = LazyPullProducer(self.transport, iter(data))
        else:
            raise NotImplementedError("{!r}".format(data))
def on_begin(self):
    """
    Send the NOTIFY_HOST request to a host.
    """
    assert self.is_outgoing()

    with self.open_state() as state:
        assert (state.chunk_uuids_to_replicate is None or
                consists_of(state.chunk_uuids_to_replicate, UUID)), \
               repr(state.chunk_uuids_to_replicate)
        assert (state.chunk_uuids_to_restore is None or
                consists_of(state.chunk_uuids_to_restore, UUID)), \
               repr(state.chunk_uuids_to_restore)

        self.message.expect_replication = state.chunk_uuids_to_replicate
        self.message.expect_restore = state.chunk_uuids_to_restore

    self.manager.post_message(self.message)
def _get_body(self):
    # N2H
    assert isinstance(self.dataset, (AbstractDataset, NoneType)), \
           repr(self.dataset)
    assert isinstance(self.ugroup, UserGroup), repr(self.ugroup)
    assert isinstance(self.sync, bool), repr(self.sync)
    assert consists_of(self.files.iterkeys(), File), repr(self.files)
    assert consists_of(self.files.itervalues(), list), repr(self.files)
    assert consists_of((bl for v in self.files.itervalues() for bl in v),
                       Chunk.Block), \
           repr(self.files)
    assert isinstance(self.wr_uuid, (TransactionUUID, NoneType)), \
           repr(self.wr_uuid)

    res = {}

    if self.dataset is not None:
        res['dataset'] = self.dataset.to_json()
    if self.ugroup is not None:
        res['ugroup'] = self.ugroup.to_json()

    res['sync'] = 1 if self.sync else 0

    _file_map = {json.dumps(file.to_json()):
                     [bl.to_json(bl.chunk, with_file=False)
                          for bl in blocks]
                 for file, blocks in self.files.iteritems()}
    res['files'] = _file_map

    if self.wr_uuid is not None:
        res['want_restore'] = self.wr_uuid.hex

    return res
def __init__(self, chunk_uuids_to_delete, *args, **kwargs):
    """Constructor.

    @type chunk_uuids_to_delete: col.Iterable
    """
    super(ExecuteOnHostTransactionState_Node, self) \
        .__init__(*args, **kwargs)

    self.chunk_uuids_to_delete = list(chunk_uuids_to_delete)

    assert consists_of(self.chunk_uuids_to_delete, UUID), \
           repr(self.chunk_uuids_to_delete)
    assert self.is_valid_bsonable(), repr(self)
def __init__(self, uuid, urls=None, *args, **kwargs):
    """Constructor.

    @type uuid: UUID
    @type urls: NoneType, list
    """
    assert not args and not kwargs, (args, kwargs)
    super(AbstractInhabitant, self).__init__(*args, **kwargs)

    self.uuid = PeerUUID.safe_cast_uuid(uuid if uuid is not None
                                             else gen_uuid())
    self.urls = coalesce(urls, [])

    assert consists_of(self.urls, basestring) and \
           all(url.startswith('https://') for url in self.urls), \
           repr(self.urls)
def _on_child_nifn_completed(self, ni_state):
    """
    This method is called after the child NEED_INFO_FROM_NODE transaction
    has succeeded.

    @type ni_state: NeedInfoFromNodeTransaction_Host.State
    """
    _message = self.message

    logger.debug('Received response to NEED_INFO_FROM_NODE')

    # This is a dictionary mapping a local chunk UUID
    # to the cloud chunk UUID which should be used instead.
    uuids_to_fix = ni_state.ack_result
    logger.debug('Need to fix: %r', uuids_to_fix)

    _all_chunks = self.__all_chunks

    if uuids_to_fix:
        assert isinstance(uuids_to_fix, col.Mapping), repr(uuids_to_fix)

        # Which chunks are present under a different name?
        misnamed_chunk_uuids = {k for k, v in uuids_to_fix.iteritems()
                                    if k != v}
        assert consists_of(misnamed_chunk_uuids, UUID), \
               repr(misnamed_chunk_uuids)

        misnamed_chunks = {c for c in self.__all_chunks
                               if c.uuid in misnamed_chunk_uuids}

        # 1. These chunks should be considered already uploaded,..
        self.__uploaded_chunks |= misnamed_chunks
        self.__uploading_chunks -= misnamed_chunks
        # 2. ... renamed in the database,..
        HostQueries.HostChunks.mark_chunks_as_already_existing(uuids_to_fix)
        # 3. ... and renamed in the state (where needed).
        for ch in self.__all_chunks:
            if ch.uuid in uuids_to_fix:
                ch.uuid = uuids_to_fix[ch.uuid]

    # Now we finally know the real set of the chunks which need
    # to be uploaded. Go to the next step.
    self._ask_for_backup_hosts()
def on_begin(self):
    """
    Either send the CHUNKS request to another Host (if
    C{self.is_outgoing()}), or receive the CHUNKS request from another Host.
    """
    with self.open_state() as state:
        chunks = state.chunks

    logger.debug('Initializing CHUNKS transaction with %i chunks',
                 len(coalesce(chunks, [])))

    my_host = self.manager.app.host

    assert ((self.message.src == my_host and
             consists_of(chunks, AbstractChunkWithContents)) or
            (self.message.dst == my_host and
             chunks is None)), \
           repr(self)

    if self.is_outgoing():
        logger.verbose('Going to send %i chunk(s): %r',
                       len(coalesce(chunks, [])),
                       [ch.uuid for ch in chunks])
        self.message.chunks = chunks
        self.manager.post_message(self.message)

    elif self.is_incoming():
        logger.debug('Going to receive the chunks')
        self.message.body_receiver.on_finish.addCallback(
            self._incoming_chunks_body_received)
        self.message.body_receiver.on_finish.addBoth(
            self._incoming_chunks_body_finish)

    else:
        self.neither_incoming_nor_outgoing()
def __assert_invariants(self):
    for _res_type, _res in self.ack_result.iteritems():
        if _res_type == 'chunks::uuid':
            assert all(k in ('fs', 'fs:own', 'db', 'db:own')
                           for k in _res.iterkeys()), \
                   repr(_res)
            assert (consists_of(_res['fs'], UUID) and
                    consists_of(_res['fs:own'], UUID) and
                    consists_of(_res['db'], UUID) and
                    consists_of(_res['db:own'], UUID)), \
                   repr(_res)
            assert all(consists_of(v, UUID) for v in _res.itervalues()), \
                   repr(_res)

        elif _res_type == 'datasets::uuid':
            assert consists_of(_res, UUID), repr(_res)

        else:
            assert False, repr(_res_type)
def _get_body_ack(self):
    # H2N
    assert (isinstance(self.ack_deleted_chunk_uuids, list) and
            consists_of(self.ack_deleted_chunk_uuids, UUID)), \
           repr(self.ack_deleted_chunk_uuids)

    return {'delete_chunks': [u.hex
                                  for u in self.ack_deleted_chunk_uuids]}
def on_begin(self):
    """
    @todo: Add errback too.
    """
    cls = self.__class__

    _message = self.message
    _host = _message.dst

    logger.debug('Starting backup...')

    _dataset = self.dataset \
        = cls.__create_dataset_from_incoming_message(_message)

    if self.manager.app.feature_set.per_group_encryption:
        # Read the group key from the user group
        with db.RDB() as rdbw:
            _ugroup = Queries.Inhabitants.get_ugroup_by_uuid(
                          _dataset.ugroup_uuid, rdbw)
        group_key = _ugroup.enc_key
    else:
        group_key = None

    self.__cryptographer = Cryptographer(group_key=group_key,
                                         key_generator=None)

    logger.debug('Created dataset %r.', _dataset)

    if _dataset is None:
        raise Exception('No dataset!')
    else:
        self.__notify_about_backup_started()
        self.__notify_about_backup_running()

        ds_uuid = _dataset.uuid

        with cls.per_dataset_transactions_lock:
            if ds_uuid in cls.per_dataset_transactions:
                self.ack_result_code = \
                    BackupMessage.ResultCodes.GENERAL_FAILURE
                raise Exception('The dataset {} is already being '
                                'backed up'.format(ds_uuid))
            else:
                cls.per_dataset_transactions[ds_uuid] = self
                # Force copying it to a dict, so as not to cause
                # race conditions during the logger message serialization.
                logger.debug('Added backup %r, per dataset transactions '
                             'are now %r',
                             ds_uuid, dict(cls.per_dataset_transactions))

    if _dataset is None:
        raise Exception('The dataset {} is not found.'.format(ds_uuid))

    # Initialize chunks.
    # Please note that these chunks may include the ones
    # which are actually present in the cloud already,
    # but under a different UUID.
    # This will be fixed later, after NEED_INFO_ACK is received.

    # All chunks, including the already uploaded ones;
    # contains ChunkFromFilesFinal objects.
    # _dataset is MyDatasetOnChunks;
    # _dataset.__chunks is a list of ChunkFromFilesFinal.
    self.__all_chunks = set(_dataset.chunks())
    assert consists_of(self.__all_chunks, ChunkFromFilesFinal), \
           repr(self.__all_chunks)

    # Already uploaded chunks; contains Chunk objects.
    with db.RDB() as rdbw:
        self.__uploaded_chunks = \
            set(HostQueries.HostChunks.get_uploaded_chunks(_dataset.uuid,
                                                           rdbw=rdbw))
    assert consists_of(self.__uploaded_chunks, Chunk), \
           repr(self.__uploaded_chunks)

    # Only the pending chunks.
    self.__uploading_chunks = {ch for ch in self.__all_chunks
                                   if ch not in self.__uploaded_chunks}
    assert consists_of(self.__uploading_chunks, ChunkFromFilesFinal), \
           repr(self.__uploading_chunks)

    #
    # Now create the NEED_INFO transaction,
    # but only if we have chunks to ask about!
    #
    if self.__uploading_chunks:
        _query = {'select': ('chunks.uuid', 'uuid'),
                  'from': 'chunks',
                  'where': {'["hash", "size", "uuid"]':
                                [c for c in self.__uploading_chunks
                                     if c.hash is not None]}}

        nifn_tr = self.manager.create_new_transaction(
                      name='NEED_INFO_FROM_NODE',
                      src=_message.dst,
                      dst=self.manager.app.primary_node,
                      parent=self,
                      # NEED_INFO_FROM_NODE-specific
                      query=_query)
        nifn_tr.completed.addCallbacks(self._on_child_nifn_completed,
                                       partial(logger.error, 'NI issue: %r'))
    else:
        logger.debug('IMHO, no new chunks to upload. '
                     'Proceeding directly.')
        # Go to the next step directly.
        self._ask_for_backup_hosts()
def _get_body(self):
    # N2H
    assert consists_of(self.chunk_uuids_to_delete, UUID)

    return {'delete_chunks': [u.hex for u in self.chunk_uuids_to_delete]}
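# For illustration only (not part of the codebase): a minimal, self-contained
# sketch of the N2H body shape produced by the _get_body above, assuming the
# standard-library UUID.hex behaviour and two hypothetical chunk UUIDs.
from uuid import UUID

chunk_uuids_to_delete = [UUID('5b237ceb-300d-4c88-b4c0-6331cb14b5b4'),
                         UUID('940f0711-52d7-42fb-bf4c-818580f432dc')]
body = {'delete_chunks': [u.hex for u in chunk_uuids_to_delete]}
# body == {'delete_chunks': ['5b237ceb300d4c88b4c06331cb14b5b4',
#                            '940f071152d742fbbf4c818580f432dc']}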
def __init__(self, chunks_map, *args, **kwargs):
    r"""Constructor.

    >>> from datetime import datetime
    >>> from uuid import UUID

    >>> from common.chunks import ChunkInfo
    >>> from common.inhabitants import User
    >>> from common.typed_uuids import ChunkUUID

    >>> tr_start_time = datetime(2012, 9, 26, 14, 29, 48, 877434)
    >>> tr_uuid = UUID('1a82a181-741d-4a64-86e5-77a7dd000ba2')
    >>> tr_src_uuid = UUID('fa87ebfd-d498-4ba6-9f04-a933e4512b24')
    >>> tr_dst_uuid = UUID('e6aa4157-ee8a-449e-a2d5-3340a59e717d')

    >>> u1, u2, u3, u4 = \
    ...     (ChunkUUID('5b237ceb-300d-4c88-b4c0-6331cb14b5b4'),
    ...      ChunkUUID('940f0711-52d7-42fb-bf4c-818580f432dc'),
    ...      ChunkUUID('a5b605f2-6ea5-49f3-8658-d217b7e8e784'),
    ...      ChunkUUID('0a7064b3-bef6-45c0-9e82-e9f9a40dfcf3'))

    >>> host1 = Host(
    ...     name="SecretHost1",
    ...     user=User(name="SecretUser1",
    ...               digest="1a73bf8f3e54a5c5e3cecfe38d25fdf82587b868"),
    ...     uuid=PeerUUID('233ad9c2-268f-4506-ab0f-4c71461c5d88'),
    ...     urls=["https://192.168.1.2:1234", "https://127.0.0.1:1234"]
    ... )
    >>> host2 = Host(
    ...     name="SecretHost2",
    ...     user=User(name="SecretUser2",
    ...               digest="e5e503e5197792ec4d4bac2dd4d3d1c870efbdbb"),
    ...     uuid=PeerUUID('e96a073b-3cd0-49a6-b14a-1fb04c221a9c'),
    ...     urls=["https://192.168.2.3:4242"]
    ... )

    >>> SendChunksTransactionState_Node(
    ...     chunks_map={
    ...         host1: [ChunkInfo(crc32=0x2A5FE875, uuid=u4,
    ...                           maxsize_code=1,
    ...                           hash='abcdabcd' * 8, size=73819)],
    ...         host2: [ChunkInfo(crc32=0x07FD7A5B, uuid=u1,
    ...                           maxsize_code=1,
    ...                           hash='abcdefgh' * 8, size=2097152),
    ...                 ChunkInfo(crc32=0x7E5CE7AD, uuid=u2,
    ...                           maxsize_code=0,
    ...                           hash='01234567' * 8, size=143941),
    ...                 ChunkInfo(crc32=0xDCC847D8, uuid=u3,
    ...                           maxsize_code=1,
    ...                           hash='76543210' * 8, size=2097151)]
    ...     },
    ...     tr_start_time=tr_start_time,
    ...     tr_uuid=tr_uuid,
    ...     tr_src_uuid=tr_src_uuid,
    ...     tr_dst_uuid=tr_dst_uuid
    ... )  # doctest:+ELLIPSIS,+NORMALIZE_WHITESPACE
    SendCh...State_Node(chunks_map={Host(uuid=PeerUUID('e96a073b-...1a9c'),
        urls=['https://192.168.2.3:4242'],
        name='SecretHost2',
        user=User(name='SecretUser2',
                  digest='e5e503e5197792ec4...dd4d3d1c870efbdbb')):
            [ChunkInfo(uuid=ChunkUUID('5b237ceb-...-6331cb14b5b4'),
                       maxsize_code=1,
                       hash=unhexlify('616263646...667686162636465666768'),
                       size=2097152, crc32=0x07FD7A5B),
             ChunkInfo(uuid=ChunkUUID('940f0711-...-818580f432dc'),
                       maxsize_code=0,
                       hash=unhexlify('303132333...536373031323334353637'),
                       size=143941, crc32=0x7E5CE7AD),
             ChunkInfo(uuid=ChunkUUID('a5b605f2-...-d217b7e8e784'),
                       maxsize_code=1,
                       hash=unhexlify('373635343...231303736353433323130'),
                       size=2097151, crc32=0xDCC847D8)],
        Host(uuid=PeerUUID('233ad9c2-268f-4506-ab0f-4c71461c5d88'),
             urls=['https://192.168.1.2:1234', 'https://127.0.0.1:1234'],
             name='SecretHost1',
             user=User(name='SecretUser1',
                       digest='1a73bf8f3e54a5c5e3cecfe38d25fdf82587b868')):
            [ChunkInfo(uuid=ChunkUUID('0a7064b3-...-e9f9a40dfcf3'),
                       maxsize_code=1,
                       hash=unhexlify('616263646...263646162636461626364'),
                       size=73819, crc32=0x2A5FE875)]})

    @type chunks_map: col.Mapping
    """
    super(SendChunksTransactionState_Node, self).__init__(*args, **kwargs)

    self.chunks_map = dict(chunks_map)

    # Validate data
    assert consists_of(self.chunks_map.iterkeys(), Host), repr(chunks_map)
    assert consists_of((ch for ch_list in chunks_map.itervalues()
                            for ch in ch_list),
                       Chunk), \
           repr(chunks_map)

    assert self.is_valid_bsonable(), repr(self)
def _get_body(self):
    # H2N
    assert (isinstance(self.ds_uuids_to_delete, list) and
            consists_of(self.ds_uuids_to_delete, UUID)), \
           repr(self.ds_uuids_to_delete)

    return {'delete_datasets': [u.hex for u in self.ds_uuids_to_delete]}
def __init__(self, chunks_to_replicate=None, chunks_to_restore=None,
             *args, **kwargs):
    r"""Constructor.

    Either of the two fields, C{chunks_to_replicate} or
    C{chunks_to_restore}, is likely present.

    >>> from datetime import datetime
    >>> from uuid import UUID

    >>> from common.chunks import ChunkInfo
    >>> from common.typed_uuids import ChunkUUID

    >>> tr_start_time = datetime(2012, 9, 26, 14, 29, 48, 877434)
    >>> tr_uuid = UUID('1a82a181-741d-4a64-86e5-77a7dd000ba2')
    >>> tr_src_uuid = UUID('fa87ebfd-d498-4ba6-9f04-a933e4512b24')
    >>> tr_dst_uuid = UUID('e6aa4157-ee8a-449e-a2d5-3340a59e717d')

    >>> u1, u2, u3, u4 = \
    ...     (ChunkUUID('5b237ceb-300d-4c88-b4c0-6331cb14b5b4'),
    ...      ChunkUUID('940f0711-52d7-42fb-bf4c-818580f432dc'),
    ...      ChunkUUID('a5b605f2-6ea5-49f3-8658-d217b7e8e784'),
    ...      ChunkUUID('0a7064b3-bef6-45c0-9e82-e9f9a40dfcf3'))

    >>> host_uuid_1, host_uuid_2 = \
    ...     (PeerUUID('233ad9c2-268f-4506-ab0f-4c71461c5d88'),
    ...      PeerUUID('e96a073b-3cd0-49a6-b14a-1fb04c221a9c'))

    >>> # No optional arguments.
    >>> ReceiveChunksTransactionState_Node(
    ...     tr_start_time=tr_start_time,
    ...     tr_uuid=tr_uuid,
    ...     tr_src_uuid=tr_src_uuid,
    ...     tr_dst_uuid=tr_dst_uuid
    ... )  # doctest:+ELLIPSIS,+NORMALIZE_WHITESPACE
    ReceiveCh...State_Node(tr_start_time=datetime.datetime(2012, 9, 26,
                                                           14, 29, 48, 877434),
        tr_uuid=UUID('1a82a181-741d-4a64-86e5-77a7dd000ba2'),
        tr_src_uuid=UUID('fa87ebfd-d498-4ba6-9f04-a933e4512b24'),
        tr_dst_uuid=UUID('e6aa4157-ee8a-449e-a2d5-3340a59e717d'))

    >>> # All optional arguments.
    >>> ReceiveChunksTransactionState_Node(
    ...     chunks_to_replicate={
    ...         host_uuid_1:
    ...             [ChunkInfo(crc32=0x2A5FE875, uuid=u4,
    ...                        maxsize_code=1,
    ...                        hash='abcdabcd' * 8, size=73819)],
    ...         host_uuid_2:
    ...             [ChunkInfo(crc32=0x07FD7A5B, uuid=u1,
    ...                        maxsize_code=1,
    ...                        hash='abcdefgh' * 8, size=2097152),
    ...              ChunkInfo(crc32=0x7E5CE7AD, uuid=u2,
    ...                        maxsize_code=0,
    ...                        hash='01234567' * 8, size=143941),
    ...              ChunkInfo(crc32=0xDCC847D8, uuid=u3,
    ...                        maxsize_code=1,
    ...                        hash='76543210' * 8, size=2097151)]
    ...     },
    ...     chunks_to_restore={
    ...         host_uuid_1:
    ...             [ChunkInfo(crc32=0x07FD7A5B, uuid=u1,
    ...                        maxsize_code=1,
    ...                        hash='abcdefgh' * 8, size=2097152),
    ...              ChunkInfo(crc32=0x7E5CE7AD, uuid=u2,
    ...                        maxsize_code=0,
    ...                        hash='01234567' * 8, size=143941),
    ...              ChunkInfo(crc32=0xDCC847D8, uuid=u3,
    ...                        maxsize_code=1,
    ...                        hash='76543210' * 8, size=2097151)],
    ...         host_uuid_2:
    ...             [ChunkInfo(crc32=0x2A5FE875, uuid=u1,
    ...                        maxsize_code=1,
    ...                        hash='abcdabcd' * 8, size=73819)]
    ...     },
    ...     tr_start_time=tr_start_time,
    ...     tr_uuid=tr_uuid,
    ...     tr_src_uuid=tr_src_uuid,
    ...     tr_dst_uuid=tr_dst_uuid
    ... )  # doctest:+ELLIPSIS,+NORMALIZE_WHITESPACE
    ReceiveCh...State_Node(tr_start_time=datetime.datetime(2012, 9, 26,
                                                           14, 29, 48, 877434),
        tr_uuid=UUID('1a82a181-741d-4a64-86e5-77a7dd000ba2'),
        tr_src_uuid=UUID('fa87ebfd-d498-4ba6-9f04-a933e4512b24'),
        tr_dst_uuid=UUID('e6aa4157-ee8a-449e-a2d5-3340a59e717d'),
        chunks_to_replicate={PeerUUID('e96a073b-...fb04c221a9c'):
            [ChunkInfo(uuid=ChunkUUID('5b237ceb-300d...-6331cb14b5b4'),
                       maxsize_code=1,
                       hash=unhexlify('6162636465...6162636465666768'),
                       size=2097152, crc32=0x07FD7A5B),
             ChunkInfo(uuid=ChunkUUID('940f0711-52d7...-818580f432dc'),
                       maxsize_code=0,
                       hash=unhexlify('3031323334...3031323334353637'),
                       size=143941, crc32=0x7E5CE7AD),
             ChunkInfo(uuid=ChunkUUID('a5b605f2-6ea5...-d217b7e8e784'),
                       maxsize_code=1,
                       hash=unhexlify('3736353433...3736353433323130'),
                       size=2097151, crc32=0xDCC847D8)],
            PeerUUID('233ad9c2-268f-4506-ab0f-4c71461c5d88'):
            [ChunkInfo(uuid=ChunkUUID('0a7064b3-bef6...-e9f9a40dfcf3'),
                       maxsize_code=1,
                       hash=unhexlify('6162636461...6162636461626364'),
                       size=73819, crc32=0x2A5FE875)]},
        chunks_to_restore={PeerUUID('e96a073b-...fb04c221a9c'):
            [ChunkInfo(uuid=ChunkUUID('5b237ceb-300d...-6331cb14b5b4'),
                       maxsize_code=1,
                       hash=unhexlify('6162636461...6162636461626364'),
                       size=73819, crc32=0x2A5FE875)],
            PeerUUID('233ad9c2-268f-4506-ab0f-4c71461c5d88'):
            [ChunkInfo(uuid=ChunkUUID('5b237ceb-300d...-6331cb14b5b4'),
                       maxsize_code=1,
                       hash=unhexlify('6162636465...6162636465666768'),
                       size=2097152, crc32=0x07FD7A5B),
             ChunkInfo(uuid=ChunkUUID('940f0711-52d7...-818580f432dc'),
                       maxsize_code=0,
                       hash=unhexlify('3031323334...3031323334353637'),
                       size=143941, crc32=0x7E5CE7AD),
             ChunkInfo(uuid=ChunkUUID('a5b605f2-6ea5...-d217b7e8e784'),
                       maxsize_code=1,
                       hash=unhexlify('3736353433...3736353433323130'),
                       size=2097151, crc32=0xDCC847D8)]})

    @type chunks_to_replicate: NoneType, col.Mapping
    @type chunks_to_restore: NoneType, col.Mapping
    """
    super(ReceiveChunksTransactionState_Node, self).__init__(*args,
                                                             **kwargs)

    self.chunks_to_replicate = dict(coalesce(chunks_to_replicate, {}))
    self.chunks_to_restore = dict(coalesce(chunks_to_restore, {}))

    if __debug__:
        for d in (self.chunks_to_replicate, self.chunks_to_restore):
            assert consists_of(d.iterkeys(), PeerUUID), repr(d.keys())
            assert consists_of(d.itervalues(), list), repr(d.values())
            assert all(consists_of(per_inh_chunks, Chunk)
                           for per_inh_chunks in d.itervalues()), \
                   repr(d.values())

    assert self.is_valid_bsonable(), repr(self)
def _get_body(self):
    r"""
    >>> host = Host(uuid=UUID('00000000-7606-420c-8a98-a6c8728ac98d'))
    >>> node = Node(uuid=UUID('11111111-79b4-49e0-b72e-8c4ce6a2aed9'))

    >>> u1, u2, u3 = \
    ...     (ChunkUUID('5b237ceb-300d-4c88-b4c0-6331cb14b5b4'),
    ...      ChunkUUID('940f0711-52d7-42fb-bf4c-818580f432dc'),
    ...      ChunkUUID('a5b605f2-6ea5-49f3-8658-d217b7e8e784'))

    >>> msg0 = ProgressMessage(
    ...     src=node,
    ...     dst=host,
    ...     uuid=UUID('4ac2536a-4a1e-4b08-ad4e-11bd675fdf15')
    ... )
    >>> ProgressMessage._get_body._without_bzip2._without_json_dumps(msg0)
    {}

    >>> msg1 = ProgressMessage(
    ...     src=node,
    ...     dst=host,
    ...     uuid=UUID('baebd0f2-fc58-417b-97ee-08a892cd5a8f')
    ... )
    >>> msg1.completion = True
    >>> # msg1.dataset = ???  # TODO: DatasetOnChunks() ?
    >>> msg1.chunks_by_uuid = {
    ...     u1: ChunkInfo(crc32=0x07FD7A5B,
    ...                   uuid=u1,
    ...                   maxsize_code=1,
    ...                   hash='abcdefgh' * 8,
    ...                   size=2097152),
    ...     u2: ChunkInfo(crc32=0x7E5CE7AD,
    ...                   uuid=u2,
    ...                   maxsize_code=0,
    ...                   hash='01234567' * 8,
    ...                   size=143941),
    ...     u3: ChunkInfo(crc32=0xDCC847D8,
    ...                   uuid=u3,
    ...                   maxsize_code=1,
    ...                   hash='76543210' * 8,
    ...                   size=2097152)
    ... }
    >>> # msg1.chunks_map_getter = ???  # TODO
    >>> # msg1.blocks_map = ???  # TODO

    >>> ProgressMessage._get_body._without_bzip2._without_json_dumps(
    ...     msg1)  # doctest:+ELLIPSIS,+NORMALIZE_WHITESPACE
    {'chunks': [{'crc32': 2120017837, 'maxsize_code': 0,
                 'hash': 'MDEyMzQ1NjcwMTIzNDU2NzAxM...EyMzQ1NjcwMTIzNDU2Nw==',
                 'uuid': '940f071152d742fbbf4c818580f432dc',
                 'size': 143941},
                {'crc32': 3704113112, 'maxsize_code': 1,
                 'hash': 'NzY1NDMyMTA3NjU0MzIxMDc2N...Y1NDMyMTA3NjU0MzIxMA==',
                 'uuid': 'a5b605f26ea549f38658d217b7e8e784',
                 'size': 2097152},
                {'crc32': 134052443, 'maxsize_code': 1,
                 'hash': 'YWJjZGVmZ2hhYmNkZWZnaGFiY...JjZGVmZ2hhYmNkZWZnaA==',
                 'uuid': '5b237ceb300d4c88b4c06331cb14b5b4',
                 'size': 2097152}]}

    @precondition: isinstance(self.dataset, (NoneType, AbstractDataset))
    @precondition: isinstance(self.chunks_map_getter,
                              (NoneType, col.Callable))
    @precondition: isinstance(self.chunks_by_uuid, (NoneType, col.Mapping))
    """
    if not any((self.dataset is not None,
                self.chunks_map_getter is not None)):
        logger.warning('For %r, dataset and chunks_map are both empty',
                       self)

    # H2N
    data = {}
    # Will be added at the last moment
    _chunks_by_uuid = {}

    # Encode the dataset
    if self.dataset is not None:
        data['dataset_completed' if self.completion else 'dataset'] = \
            self.dataset.to_json()

    # Encode the mapping from chunk UUID to the chunk info
    if self.chunks_by_uuid is not None:
        assert consists_of(self.chunks_by_uuid.iterkeys(), UUID), \
               repr(self.chunks_by_uuid)
        assert consists_of(self.chunks_by_uuid.itervalues(), Chunk), \
               repr(self.chunks_by_uuid)
        _chunks_by_uuid = dict(self.chunks_by_uuid)

    # Encode host_chunks_map (a mapping defining which host
    # received which chunks).
    if self.chunks_map_getter is not None:
        host_chunks_map = self.chunks_map_getter()
        assert consists_of(host_chunks_map.iterkeys(), Host), \
               repr(host_chunks_map)
        assert all(consists_of(v, ProgressNotificationPerHost)
                       for v in host_chunks_map.itervalues()), \
               repr(host_chunks_map)
        data['host_chunks_map'] = \
            {host.uuid.hex: [notif.to_json()
                                 for notif in per_host_notifs]
                 for host, per_host_notifs in host_chunks_map.iteritems()}

    # Encode blocks_map (a mapping defining which chunks
    # contain which blocks).
    if self.blocks_map is not None:
        data['blocks_map'] = [block.to_json(chunk)
                                  for (block, chunk) in self.blocks_map]
        # Update the 'chunks' field as well
        _chunks_by_uuid.update({chunk.uuid: chunk
                                    for (block, chunk) in self.blocks_map})

    # Finally, let's add the info about the chunks.
    # Currently it is stored as a dict, but in the JSON it is kept
    # as a list.
    if _chunks_by_uuid:
        data['chunks'] = [v.to_json()
                              for v in _chunks_by_uuid.itervalues()]

    return data
class UserMessage(IDocStoreTopDocument, IBSONable, datatypes.UserMessage):
    """
    A message (together with its translations) between the Node and the Host.

    @note: the username may mismatch the original one case-wise.
    """

    __slots__ = ()

    bson_schema = {
        'username_uc': basestring,
        'key': basestring,
        'ts': datetime,
        'body': lambda body: (consists_of(body.iterkeys(), basestring) and
                              consists_of(body.itervalues(), basestring))
    }


    @property
    def username_uc(self):
        return self.username.upper()


    def to_bson(self):
        cls = self.__class__

        doc = super(UserMessage, self).to_bson()
        doc.update({
            'username_uc': self.username_uc,
            'key': self.key,
            'ts': self.ts,
            'body': self.body,
        })

        assert cls.validate_schema(doc), repr(doc)
        return doc


    @classmethod
    def from_bson(cls, doc):
        assert cls.validate_schema(doc), repr(doc)

        return partial(super(UserMessage, cls).from_bson(doc),
                       username=doc['username_uc'],
                       key=doc['key'],
                       ts=doc['ts'],
                       body=doc['body'])


    @classmethod
    @contract_epydoc
    def from_common(cls, common_message):
        """
        Create a UserMessage model from the more common implementation,
        C{datatypes.UserMessage}.

        @type common_message: datatypes.UserMessage
        @rtype: UserMessage
        """
        return cls(username=common_message.username,
                   key=common_message.key,
                   ts=common_message.ts,
                   body=common_message.body)