def send(self, message):
    if self.sendRandKey:
        message = (self.sendRandKey, message)
    data = zlib.compress(pickle.dumps(message), 3)
    if self.encryptor:
        data = self.encryptor.encrypt(data)
    data = struct.pack('i', len(data)) + data
    self.__writeBuffer += data
    self.__trySendBuffer()
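# For context, a receiver has to undo the framing above in reverse order:
# read the 4-byte length prefix, then decrypt, decompress, and unpickle the
# payload. This is a minimal sketch, not the library's actual receive path;
# the recv_exact helper and the recvRandKey attribute are assumptions here.
import pickle
import struct
import zlib

def recv_message(self, recv_exact):
    # recv_exact(n) is assumed to block until exactly n bytes have arrived
    (length,) = struct.unpack('i', recv_exact(4))
    data = recv_exact(length)
    if self.encryptor:
        data = self.encryptor.decrypt(data)
    message = pickle.loads(zlib.decompress(data))
    if self.recvRandKey:
        rand_key, message = message
        assert rand_key == self.recvRandKey  # drop frames from a stale session
    return message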
def __serialize(self, filename, raft_data):
    with self.__lock:
        try:
            self.__logger.info('serializer has started')
            with zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED) as f:
                # store the federation data
                f.writestr('federation.bin', pickle.dumps(self.__data))
                self.__logger.debug(
                    'federation data has been stored in {0}'.format(filename))

                # store the raft data
                f.writestr(RAFT_DATA_FILE, pickle.dumps(raft_data))
                self.__logger.info(
                    '{0} has been stored'.format(RAFT_DATA_FILE))
            self.__logger.info('snapshot has been created')
        except Exception as ex:
            self.__logger.error(
                'failed to create snapshot: {0}'.format(ex))
        finally:
            self.__logger.info('serializer has stopped')
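# The restore path is not shown in this section; the following is a minimal
# sketch of a matching __deserialize, assuming the archive layout used above
# ('federation.bin' plus RAFT_DATA_FILE) and that the caller wants the raft
# data handed back. The method name and return contract are assumptions.
import pickle
import zipfile

def __deserialize(self, filename):
    with self.__lock:
        try:
            self.__logger.info('deserializer has started')
            with zipfile.ZipFile(filename, 'r') as f:
                # restore the federation data
                self.__data = pickle.loads(f.read('federation.bin'))
                # return the raft data to the caller
                raft_data = pickle.loads(f.read(RAFT_DATA_FILE))
            self.__logger.info('snapshot has been restored')
            return raft_data
        except Exception as ex:
            self.__logger.error('failed to restore snapshot: {0}'.format(ex))
            return None
        finally:
            self.__logger.info('deserializer has stopped')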
def __create_index(self, index_name, index_config):
    if self.is_index_exist(index_name):
        # the index already exists, so open it instead
        return self.__open_index(index_name, index_config=index_config)

    start_time = time.time()

    index = None
    with self.__lock:
        try:
            self.__logger.debug('creating {0}'.format(index_name))

            # set the index config
            self.__index_configs[index_name] = index_config
            self.__logger.debug(
                self.__index_configs[index_name].get_storage_type())

            # create the index in the RAM storage or the file storage
            if self.__index_configs[index_name].get_storage_type() == 'ram':
                index = self.__ram_storage.create_index(
                    self.__index_configs[index_name].get_schema(),
                    indexname=index_name)
            else:
                index = self.__file_storage.create_index(
                    self.__index_configs[index_name].get_schema(),
                    indexname=index_name)
            self.__indices[index_name] = index
            self.__logger.info('{0} has been created'.format(index_name))

            # save the index config
            with open(os.path.join(self.__file_storage.folder,
                                   self.get_index_config_file(index_name)),
                      'wb') as f:
                f.write(pickle.dumps(index_config))

            # open the index writer
            self.__open_writer(index_name)
        except Exception as ex:
            self.__logger.error(
                'failed to create {0}: {1}'.format(index_name, ex))
        finally:
            self.__record_metrics(start_time, 'create_index')

    return index
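# Usage sketch for the method above. The storage objects follow Whoosh's
# Storage.create_index(schema, indexname=...) signature, so a Whoosh schema
# is used here. SimpleIndexConfig is a hypothetical stand-in for whatever
# config object provides get_storage_type() and get_schema(), and `manager`
# stands for an instance of the class that exposes a public create_index
# wrapper around __create_index; both names are assumptions.
from whoosh.fields import ID, TEXT, Schema

class SimpleIndexConfig:
    def __init__(self, schema, storage_type='ram'):
        self.__schema = schema
        self.__storage_type = storage_type

    def get_schema(self):
        return self.__schema

    def get_storage_type(self):
        return self.__storage_type

schema = Schema(id=ID(unique=True, stored=True), text=TEXT(stored=True))
index = manager.create_index('example_index', SimpleIndexConfig(schema))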
def __serialize(self, filename, raft_data):
    with self.__lock:
        try:
            self.__logger.debug('serializer has started')

            # store the index files and raft logs in the snapshot file
            with zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED) as f:
                for index_name in self.get_index_names():
                    self.__commit_index(index_name)

                    # with self.__get_writer(index_name).writelock:
                    # with self.__indices[index_name].lock('WRITELOCK'):

                    # index files
                    for index_filename in self.get_index_files(index_name):
                        if self.__index_configs.get(
                                index_name).get_storage_type() == 'ram':
                            with self.__ram_storage.open_file(
                                    index_filename) as r:
                                f.writestr(index_filename, r.read())
                        else:
                            f.write(
                                os.path.join(self.__file_storage.folder,
                                             index_filename),
                                index_filename)
                        self.__logger.debug('{0} has been stored in {1}'.format(
                            index_filename, filename))

                    # index config file
                    f.write(
                        os.path.join(self.__file_storage.folder,
                                     self.get_index_config_file(index_name)),
                        self.get_index_config_file(index_name))
                    self.__logger.debug('{0} has been stored in {1}'.format(
                        self.get_index_config_file(index_name), filename))

                # store the raft data
                f.writestr(RAFT_DATA_FILE, pickle.dumps(raft_data))
                self.__logger.debug(
                    '{0} has been stored'.format(RAFT_DATA_FILE))
            self.__logger.debug('snapshot has been created')
        except Exception as ex:
            self.__logger.error(
                'failed to create snapshot: {0}'.format(ex))
        finally:
            self.__logger.debug('serializer has stopped')
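# A sketch of the inverse operation: extracting the snapshot archive back
# into the file storage and returning the raft data. This elides the
# RAM-storage case and reopening the indices afterwards, and the method name
# is an assumption.
import pickle
import zipfile

def __deserialize(self, filename):
    with self.__lock:
        try:
            self.__logger.debug('deserializer has started')
            with zipfile.ZipFile(filename, 'r') as f:
                for name in f.namelist():
                    if name == RAFT_DATA_FILE:
                        continue
                    # restore index files and config files into file storage
                    f.extract(name, path=self.__file_storage.folder)
                    self.__logger.debug(
                        '{0} has been restored from {1}'.format(name, filename))
                # return the raft data to the caller
                raft_data = pickle.loads(f.read(RAFT_DATA_FILE))
            self.__logger.debug('snapshot has been restored')
            return raft_data
        except Exception as ex:
            self.__logger.error('failed to restore snapshot: {0}'.format(ex))
            return None
        finally:
            self.__logger.debug('deserializer has stopped')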
def test_doChangeClusterUT1():
    removeFiles(['dump1.bin'])

    baseAddr = getNextAddr()
    otherAddr = getNextAddr()

    o1 = TestObj(baseAddr, ['localhost:1235', otherAddr],
                 dumpFile='dump1.bin', dynamicMembershipChange=True)
    __checkParnerNodeExists(o1, 'localhost:1238', False)
    __checkParnerNodeExists(o1, 'localhost:1239', False)
    __checkParnerNodeExists(o1, 'localhost:1235', True)

    noop = _bchr(_COMMAND_TYPE.NO_OP)
    member = _bchr(_COMMAND_TYPE.MEMBERSHIP)

    # Check regular configuration change - adding
    o1._onMessageReceived('localhost:12345', {
        'type': 'append_entries',
        'term': 1,
        'prevLogIdx': 1,
        'prevLogTerm': 0,
        'commit_index': 2,
        'entries': [(noop, 2, 1), (noop, 3, 1),
                    (member + pickle.dumps(['add', 'localhost:1238']), 4, 1)]
    })
    __checkParnerNodeExists(o1, 'localhost:1238', True)
    __checkParnerNodeExists(o1, 'localhost:1239', False)

    # Check rollback adding
    o1._onMessageReceived('localhost:1236', {
        'type': 'append_entries',
        'term': 2,
        'prevLogIdx': 2,
        'prevLogTerm': 1,
        'commit_index': 3,
        'entries': [(noop, 3, 2),
                    (member + pickle.dumps(['add', 'localhost:1239']), 4, 2)]
    })
    __checkParnerNodeExists(o1, 'localhost:1238', False)
    __checkParnerNodeExists(o1, 'localhost:1239', True)
    __checkParnerNodeExists(o1, otherAddr, True)

    # Check regular configuration change - removing
    o1._onMessageReceived('localhost:1236', {
        'type': 'append_entries',
        'term': 2,
        'prevLogIdx': 4,
        'prevLogTerm': 2,
        'commit_index': 4,
        'entries': [(member + pickle.dumps(['rem', 'localhost:1235']), 5, 2)]
    })
    __checkParnerNodeExists(o1, 'localhost:1238', False)
    __checkParnerNodeExists(o1, 'localhost:1239', True)
    __checkParnerNodeExists(o1, 'localhost:1235', False)

    # Check log compaction
    o1._forceLogCompaction()
    doTicks([o1], 0.5)
    o1._destroy()

    o2 = TestObj(otherAddr, [baseAddr, 'localhost:1236'],
                 dumpFile='dump1.bin', dynamicMembershipChange=True)
    doTicks([o2], 0.5)

    __checkParnerNodeExists(o2, otherAddr, False)
    __checkParnerNodeExists(o2, baseAddr, True)
    __checkParnerNodeExists(o2, 'localhost:1238', False)
    __checkParnerNodeExists(o2, 'localhost:1239', True)
    __checkParnerNodeExists(o2, 'localhost:1235', False)
    o2._destroy()
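# __checkParnerNodeExists is defined elsewhere in the test suite (the
# 'Parner' spelling matches the identifier as it is used above). Below is a
# hypothetical reconstruction of what it asserts; the private attribute name
# is an assumption about the SyncObj internals, not the actual helper.
def __checkParnerNodeExists(obj, nodeAddr, shouldExist=True):
    partnerAddrs = set(obj._SyncObj__otherNodesAddrs)
    if shouldExist:
        assert nodeAddr in partnerAddrs
    else:
        assert nodeAddr not in partnerAddrs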