def test_add(self):
    left_locator = ErStoreLocator(store_type="levelDb", namespace="ns", name='mat_a')
    right_locator = ErStoreLocator(store_type="levelDb", namespace="ns", name='mat_b')

    left_mat = rpt(left_locator)
    right_mat = rpt(right_locator)

    result = left_mat.add(right_mat)
def test_gpu_load(self):
    left_locator = ErStoreLocator(store_type="levelDb", namespace="ns", name='mat_a')
    right_locator = ErStoreLocator(store_type="levelDb", namespace="ns", name='mat_b')

    left_mat = rpt(left_locator)
    right_mat = rpt(right_locator)

    print("[test_gpu_load]")
    left_mat.gpu_load(right_mat)
def test_matmul(self):
    left_locator = ErStoreLocator(store_type="levelDb", namespace="ns", name='mat_a')
    right_locator = ErStoreLocator(store_type="levelDb", namespace="ns", name='mat_b')

    left_mat = rpt(left_locator)
    right_mat = rpt(right_locator)

    print("[test_cpu_matmul]")
    left_mat.mat_mul(right_mat)
def save_as(self, name=None, namespace=None, partition=None, options: dict = None):
    if partition is not None and partition <= 0:
        raise ValueError('partition cannot be <= 0')
    if not namespace:
        namespace = self.get_namespace()
    if not name:
        if self.get_namespace() == namespace:
            forked_store_locator = self.get_store()._store_locator.fork()
            name = forked_store_locator._name
        else:
            name = self.get_name()
    if not partition:
        partition = self.get_partitions()
    if options is None:
        options = {}
    store_type = options.get('store_type', self.ctx.default_store_type)
    refresh_nodes = options.get('refresh_nodes')

    saved_as_store = ErStore(store_locator=ErStoreLocator(
        store_type=store_type,
        namespace=namespace,
        name=name,
        total_partitions=partition))
    if partition == self.get_partitions() and not refresh_nodes:
        return self.map_values(lambda v: v, output=saved_as_store, options=options)
    else:
        return self.map(lambda k, v: (k, v), output=saved_as_store, options=options)
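# A hedged usage sketch for save_as (not part of the original file): `rp` is assumed
# to be a RollPair backed by a running eggroll session, and the store names below are
# illustrative. When the requested partition count matches the source store, save_as
# degenerates to an identity map_values (no shuffle); otherwise it runs a full map,
# which re-partitions the data.
def _example_save_as_usage(rp):
    # same partition count: cheap identity copy, no shuffle
    same_layout = rp.save_as(name='mat_a_copy', namespace='ns')
    # different partition count: full map with shuffle
    rescaled = rp.save_as(name='mat_a_4p', namespace='ns', partition=4)
    return same_layout, rescaled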
def test_reduce(self):
    def concat(a, b):
        return a + b

    pickled_function = cloudpickle.dumps(concat)
    store_locator = ErStoreLocator(store_type="levelDb", namespace="ns", name='name')
    job = ErJob(id="1",
                name="reduce",
                inputs=[ErStore(store_locator=store_locator)],
                functors=[ErFunctor(name="reduce", body=pickled_function)])

    channel = grpc.insecure_channel(
        target='localhost:20000',
        options=[('grpc.max_send_message_length', -1),
                 ('grpc.max_receive_message_length', -1)])
    roll_pair_stub = command_pb2_grpc.CommandServiceStub(channel)
    request = ErCommandRequest(
        seq=1,
        uri='com.webank.eggroll.rollpair.component.RollPair.reduce',
        args=[job.to_proto().SerializeToString()])

    result = roll_pair_stub.call(request.to_proto())
    time.sleep(1200)
def test_scalar_mul_raw(self):
    def scalar_mul(v):
        pub_key, private_key = rpt_engine.keygen()
        return rpt_engine.slcmul(rpt_engine.load(v, 1, 1, 1), 2.0, pub_key, private_key)

    pickled_function = cloudpickle.dumps(scalar_mul)
    store_locator = ErStoreLocator(store_type="levelDb", namespace="ns", name='mat_a')
    functor = ErFunctor(name="mapValues", body=pickled_function)
    job = ErJob(id="1",
                name="mapValues",
                inputs=[ErStore(store_locator=store_locator)],
                functors=[functor])

    channel = grpc.insecure_channel(
        target='localhost:20000',
        options=[('grpc.max_send_message_length', -1),
                 ('grpc.max_receive_message_length', -1)])
    roll_pair_stub = command_pb2_grpc.CommandServiceStub(channel)
    request = ErCommandRequest(
        seq=1,
        uri='com.webank.eggroll.rollpair.component.RollPair.mapValues',
        args=[job.to_proto().SerializeToString()])

    # print(f"ready to call")
    result = roll_pair_stub.call(request.to_proto())
    time.sleep(1200)
def __repartition_with(self, other):
    self_partition = self.get_partitions()
    other_partition = other.get_partitions()
    if other_partition != self_partition:
        self_name = self.get_name()
        self_count = self.count()
        other_name = other.get_name()
        other_count = other.count()
        L.info(f"repartition start: partitions of rp: {self_name}: {self_partition}, "
               f"other: {other_name}: {other_partition}, repartitioning")
        # shuffle the side holding fewer elements
        if self_count <= other_count:
            shuffle_rp = self
            shuffle_rp_count = self_count
            shuffle_rp_name = self_name
            shuffle_rp_partition = self_partition
            not_shuffle_rp = other
            not_shuffle_rp_count = other_count
            not_shuffle_rp_name = other_name
            not_shuffle_rp_partition = other_partition
        else:
            not_shuffle_rp = self
            not_shuffle_rp_count = self_count
            not_shuffle_rp_name = self_name
            not_shuffle_rp_partition = self_partition
            shuffle_rp = other
            shuffle_rp_count = other_count
            shuffle_rp_name = other_name
            shuffle_rp_partition = other_partition
        L.debug(f"repartition selection: rp: {shuffle_rp_name} count: {shuffle_rp_count} "
                f"<= rp: {not_shuffle_rp_name} count: {not_shuffle_rp_count}. "
                f"repartitioning {shuffle_rp_name}")
        store = ErStore(store_locator=ErStoreLocator(
            store_type=shuffle_rp.get_store_type(),
            namespace=shuffle_rp.get_namespace(),
            name=str(uuid.uuid1()),
            total_partitions=not_shuffle_rp_partition))
        res_rp = shuffle_rp.map(lambda k, v: (k, v), output=store)
        res_rp.disable_gc()
        L.debug(f"repartition end: rp to shuffle: {shuffle_rp_name}, "
                f"count: {shuffle_rp_count}, partitions: {shuffle_rp_partition}; "
                f"rp NOT shuffled: {not_shuffle_rp_name}, "
                f"count: {not_shuffle_rp_count}, partitions: {not_shuffle_rp_partition}; "
                f"res rp: {res_rp.get_name()}, "
                f"count: {res_rp.count()}, partitions: {res_rp.get_partitions()}")
        store_shuffle = res_rp.get_store()
        return [store_shuffle, other.get_store()] if self_count <= other_count \
            else [self.get_store(), store_shuffle]
    else:
        return [self.__store, other.__store]
def save_as(self, name, namespace, partition, options: dict = None):
    if options is None:
        options = {}
    store_type = options.get('store_type', self.ctx.default_store_type)
    if partition == self.get_partitions():
        store = ErStore(store_locator=ErStoreLocator(
            store_type=store_type,
            namespace=namespace,
            name=name,
            total_partitions=self.get_partitions()))
        return self.map_values(lambda v: v, output=store)
    else:
        store = ErStore(store_locator=ErStoreLocator(
            store_type=store_type,
            namespace=namespace,
            name=name,
            total_partitions=partition))
        return self.map(lambda k, v: (k, v), output=store)
def load(self, namespace=None, name=None, options: dict = None):
    if options is None:
        options = {}
    store_type = options.get('store_type', self.default_store_type)
    total_partitions = options.get('total_partitions', 1)
    partitioner = options.get('partitioner', PartitionerTypes.BYTESTRING_HASH)
    store_serdes = options.get('serdes', self.default_store_serdes)
    create_if_missing = options.get('create_if_missing', True)
    # todo:1: add combine options to pass it through
    store_options = self.__session.get_all_options()
    store_options.update(options)
    final_options = store_options.copy()

    # TODO:1: remove these keys by adding to-string logic in ErStore
    for key in ('create_if_missing', 'include_key', 'total_partitions',
                'name', 'namespace', 'keys_only'):
        final_options.pop(key, None)
    # TODO:0: add 'error_if_exist', persistent / default store type
    L.info("final_options: {}".format(final_options))

    store = ErStore(store_locator=ErStoreLocator(
        store_type=store_type,
        namespace=namespace,
        name=name,
        total_partitions=total_partitions,
        partitioner=partitioner,
        serdes=store_serdes),
        options=final_options)
    if create_if_missing:
        result = self.__session._cluster_manager_client.get_or_create_store(store)
    else:
        result = self.__session._cluster_manager_client.get_store(store)
        if result is None:
            raise EnvironmentError(
                "result is None, please check whether the store: {} has been created before"
                .format(store))
    return RollPair(self.populate_processor(result), self)
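# A hedged usage sketch (not part of the original file; names are illustrative and a
# running ErSession wrapped in this RollPairContext is assumed): load resolves or
# creates the store via the cluster manager and hands back a RollPair for
# key-value access.
def _example_load_usage(rp_ctx):
    rp = rp_ctx.load(namespace='test_ns', name='test_table',
                     options={'total_partitions': 2, 'create_if_missing': True})
    rp.put('k1', 'v1')
    return rp.get('k1')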
def cleanup(self, namespace, name, options: dict = None):
    if options is None:
        options = {}
    total_partitions = options.get('total_partitions', 1)
    partitioner = options.get('partitioner', PartitionerTypes.BYTESTRING_HASH)
    store_serdes = options.get('serdes', self.default_store_serdes)
    # todo:1: add combine options to pass it through
    store_options = self.__session.get_all_options()
    store_options.update(options)
    final_options = store_options.copy()

    # TODO:1: remove these keys by adding to-string logic in ErStore
    for key in ('create_if_missing', 'include_key', 'total_partitions',
                'name', 'namespace', 'keys_only'):
        final_options.pop(key, None)
    # TODO:0: add 'error_if_exist', persistent / default store type
    L.info("final_options: {}".format(final_options))

    store = ErStore(store_locator=ErStoreLocator(
        store_type=StoreTypes.ROLLPAIR_LMDB,
        namespace=namespace,
        name=name,
        total_partitions=total_partitions,
        partitioner=partitioner,
        serdes=store_serdes),
        options=final_options)
    results = self.__session._cluster_manager_client.get_store_from_namespace(store)
    L.debug('res: {}'.format(results._stores))

    if results._stores is not None:
        L.debug("item count: {}".format(len(results._stores)))
        for item in results._stores:
            L.debug("item namespace: {} name: {}".format(
                item._store_locator._namespace, item._store_locator._name))
            rp = RollPair(er_store=item, rp_ctx=self)
            rp.destroy()
def load(self, name=None, namespace=None, options: dict = None):
    if options is None:
        options = {}
    if not namespace:
        namespace = options.get('namespace', self.get_session().get_session_id())
    store_type = options.get('store_type', self.default_store_type)
    total_partitions = options.get('total_partitions', None)
    no_partitions_param = False
    if total_partitions is None:
        no_partitions_param = True
        total_partitions = 1
    partitioner = options.get('partitioner', PartitionerTypes.BYTESTRING_HASH)
    store_serdes = options.get('serdes', self.default_store_serdes)
    create_if_missing = options.get('create_if_missing', False)
    # todo:1: add combine options to pass it through
    store_options = self.__session.get_all_options()
    store_options.update(options)
    final_options = store_options.copy()
    # TODO:0: add 'error_if_exist', persistent / default store type
    store = ErStore(
        store_locator=ErStoreLocator(
            store_type=store_type,
            namespace=namespace,
            name=name,
            total_partitions=total_partitions,
            partitioner=partitioner,
            serdes=store_serdes),
        options=final_options)
    if create_if_missing:
        result = self.__session._cluster_manager_client.get_or_create_store(store)
    else:
        result = self.__session._cluster_manager_client.get_store(store)

    if len(result._partitions) == 0:
        L.info(f"store: namespace={namespace}, name={name} does not exist, "
               f"create_if_missing={create_if_missing}, create it first")
        return None

    # partition-count consistency check, currently disabled by the `False` guard
    if False and not no_partitions_param and result._store_locator._total_partitions != 0 \
            and total_partitions != result._store_locator._total_partitions:
        raise ValueError(f"store: {result._store_locator._name} input total_partitions: {total_partitions}, "
                         f"output total_partitions: {result._store_locator._total_partitions}, must be the same")
    return RollPair(self.populate_processor(result), self)
def map_values(_tagged_key):
    is_standalone = self.ctx.rp_ctx.get_session().get_option(
        SessionConfKeys.CONFKEY_SESSION_DEPLOY_MODE) == DeployModes.STANDALONE
    if is_standalone:
        dst_name = _tagged_key
        store_type = rp.get_store_type()
    else:
        dst_name = DELIM.join([_tagged_key,
                               self.dst_host,
                               str(self.dst_port),
                               obj_type])
        store_type = StoreTypes.ROLLPAIR_ROLLSITE

    if is_standalone:
        status_rp = self.ctx.rp_ctx.load(
            namespace,
            STATUS_TABLE_NAME + DELIM + self.roll_site_session_id,
            options=_options)
        status_rp.disable_gc()
        if isinstance(obj, RollPair):
            status_rp.put(_tagged_key,
                          (obj_type.encode("utf-8"), rp.get_name(), rp.get_namespace()))
        else:
            status_rp.put(_tagged_key,
                          (obj_type.encode("utf-8"), dst_name, namespace))
    else:
        store = rp.get_store()
        store_locator = store._store_locator
        new_store_locator = ErStoreLocator(
            store_type=store_type,
            namespace=namespace,
            name=dst_name,
            total_partitions=store_locator._total_partitions,
            partitioner=store_locator._partitioner,
            serdes=store_locator._serdes)
        # TODO:0: move options from job to store when database modification finished
        options = {"roll_site_header": roll_site_header,
                   "proxy_endpoint": self.ctx.proxy_endpoint,
                   "obj_type": obj_type}
        if isinstance(obj, RollPair):
            roll_site_header._options['total_partitions'] = \
                obj.get_store()._store_locator._total_partitions
            L.debug(f"pushing map_values: {dst_name}, count: {obj.count()}, tag_key: {_tagged_key}")
        rp.map_values(lambda v: v,
                      output=ErStore(store_locator=new_store_locator),
                      options=options)

    L.info(f"pushing map_values done: {type(obj)}, tag_key: {_tagged_key}")
    return _tagged_key
def load_store(self, name=None, namespace=None, options: dict = None):
    if options is None:
        options = {}
    if not namespace:
        namespace = options.get('namespace', self.get_session().get_session_id())
    if not name:
        raise ValueError("name is required, cannot be blank")
    store_type = options.get('store_type', self.default_store_type)
    total_partitions = options.get('total_partitions', 1)
    partitioner = options.get('partitioner', PartitionerTypes.BYTESTRING_HASH)
    store_serdes = options.get('serdes', self.default_store_serdes)
    create_if_missing = options.get('create_if_missing', False)
    # todo:1: add combine options to pass it through
    store_options = self.__session.get_all_options()
    store_options.update(options)
    final_options = store_options.copy()
    # TODO:0: add 'error_if_exist', persistent / default store type
    store = ErStore(store_locator=ErStoreLocator(
        store_type=store_type,
        namespace=namespace,
        name=name,
        total_partitions=total_partitions,
        partitioner=partitioner,
        serdes=store_serdes),
        options=final_options)
    if create_if_missing:
        result = self.__session._cluster_manager_client.get_or_create_store(store)
    else:
        result = self.__session._cluster_manager_client.get_store(store)

    if len(result._partitions) == 0:
        L.info(f"store: namespace={namespace}, name={name} does not exist, "
               f"create_if_missing={create_if_missing}, create it first")
        return None
    return self.populate_processor(result)
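# A hedged usage sketch (not part of the original file; names are illustrative):
# unlike load, load_store returns the resolved ErStore with its processors
# populated, or None when the store does not exist and create_if_missing is False.
def _example_load_store_usage(rp_ctx):
    er_store = rp_ctx.load_store(name='test_table', namespace='test_ns',
                                 options={'create_if_missing': True})
    return None if er_store is None else er_store._store_locator._name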
# See the License for the specific language governing permissions and
# limitations under the License.

from eggroll.core.conf_keys import SessionConfKeys, TransferConfKeys, \
    ClusterManagerConfKeys, NodeManagerConfKeys
from eggroll.core.constants import DeployModes
from eggroll.core.constants import ProcessorTypes, ProcessorStatus
from eggroll.core.constants import StoreTypes
from eggroll.core.meta_model import ErStore, ErStoreLocator, ErEndpoint, \
    ErProcessor
from eggroll.core.session import ErSession
from eggroll.roll_pair.roll_pair import RollPairContext

ER_STORE1 = ErStore(
    store_locator=ErStoreLocator(store_type=StoreTypes.ROLLPAIR_LEVELDB,
                                 namespace="namespace",
                                 name="name"))


def get_debug_test_context(is_standalone=False,
                           manager_port=4670,
                           egg_port=20001,
                           transfer_port=20002,
                           session_id='testing'):
    egg_ports = [egg_port]
    egg_transfer_ports = [transfer_port]
    self_server_node_id = 2

    options = {}
    if is_standalone:
        options[SessionConfKeys.CONFKEY_SESSION_DEPLOY_MODE] = "standalone"
    options[TransferConfKeys.CONFKEY_TRANSFER_SERVICE_HOST] = "127.0.0.1"
    options[TransferConfKeys.CONFKEY_TRANSFER_SERVICE_PORT] = str(transfer_port)
    options[ClusterManagerConfKeys.CONFKEY_CLUSTER_MANAGER_PORT] = str(manager_port)
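# A hedged usage sketch: the helper body above is truncated here, but judging from
# the imports it presumably finishes by building an ErSession from these options
# and wrapping it in a RollPairContext, roughly:
#
#     session = ErSession(session_id=session_id, options=options)
#     return RollPairContext(session)
#
# so a test would obtain its context with something like:
#
#     rp_ctx = get_debug_test_context(is_standalone=True, session_id='testing')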
def __repartition_with(self, other):
    self_partition = self.get_partitions()
    other_partition = other.get_partitions()

    # shuffle when partition counts differ, or when the two stores' partitions
    # are not co-located on the same processors
    should_shuffle = False
    if len(self.__store._partitions) != len(other.__store._partitions):
        should_shuffle = True
    else:
        for i in range(len(self.__store._partitions)):
            if self.__store._partitions[i]._processor._id != other.__store._partitions[i]._processor._id:
                should_shuffle = True

    if other_partition != self_partition or should_shuffle:
        self_name = self.get_name()
        self_count = self.count()
        other_name = other.get_name()
        other_count = other.count()
        L.debug(f"repartition start: self rp={self_name} partitions={self_partition}, "
                f"other={other_name}: partitions={other_partition}, repartitioning")
        # shuffle the side holding fewer elements
        if self_count < other_count:
            shuffle_rp = self
            shuffle_rp_count = self_count
            shuffle_rp_name = self_name
            shuffle_total_partitions = self_partition
            shuffle_rp_partitions = self.__store._partitions
            not_shuffle_rp = other
            not_shuffle_rp_count = other_count
            not_shuffle_rp_name = other_name
            not_shuffle_total_partitions = other_partition
            not_shuffle_rp_partitions = other.__store._partitions
        else:
            not_shuffle_rp = self
            not_shuffle_rp_count = self_count
            not_shuffle_rp_name = self_name
            not_shuffle_total_partitions = self_partition
            not_shuffle_rp_partitions = self.__store._partitions
            shuffle_rp = other
            shuffle_rp_count = other_count
            shuffle_rp_name = other_name
            shuffle_total_partitions = other_partition
            shuffle_rp_partitions = other.__store._partitions
        L.trace(f"repartition selection: rp={shuffle_rp_name} count={shuffle_rp_count}, "
                f"rp={not_shuffle_rp_name} count={not_shuffle_rp_count}. "
                f"repartitioning {shuffle_rp_name}")
        store = ErStore(store_locator=ErStoreLocator(
            store_type=shuffle_rp.get_store_type(),
            namespace=shuffle_rp.get_namespace(),
            name=str(uuid.uuid1()),
            total_partitions=not_shuffle_total_partitions),
            partitions=not_shuffle_rp_partitions)
        res_rp = shuffle_rp.map(lambda k, v: (k, v), output=store)
        res_rp.disable_gc()
        if L.isEnabledFor(logging.DEBUG):
            L.debug(f"repartition end: rp to shuffle={shuffle_rp_name}, "
                    f"count={shuffle_rp_count}, partitions={shuffle_total_partitions}; "
                    f"rp NOT shuffled={not_shuffle_rp_name}, "
                    f"count={not_shuffle_rp_count}, partitions={not_shuffle_total_partitions}; "
                    f"res rp={res_rp.get_name()}, "
                    f"count={res_rp.count()}, partitions={res_rp.get_partitions()}")
        store_shuffle = res_rp.get_store()
        return [store_shuffle, other.get_store()] if self_count < other_count \
            else [self.get_store(), store_shuffle]
    else:
        return [self.__store, other.__store]
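# A minimal, self-contained sketch of the selection rule above (names illustrative,
# not part of the original file): the RollPair holding fewer elements is the one
# that gets shuffled into a new store mirroring the larger side's partition layout,
# since moving fewer records is cheaper.
def _pick_shuffle_side(self_count, other_count):
    """Return which side to shuffle: 'self' if it holds strictly fewer elements."""
    return 'self' if self_count < other_count else 'other'

assert _pick_shuffle_side(100, 10_000) == 'self'    # shuffle the smaller, cheaper side
assert _pick_shuffle_side(10_000, 100) == 'other'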
def test_scalar_mul(self):
    store_locator = ErStoreLocator(store_type="levelDb", namespace="ns", name='mat_a')
    original = rpt(store_locator)
    result = original.scalar_mul(2)
def cleanup(self, name, namespace, options: dict = None):
    if not namespace:
        raise ValueError('namespace cannot be blank')
    L.debug(f'cleaning up namespace={namespace}, name={name}')
    if options is None:
        options = {}
    total_partitions = options.get('total_partitions', 1)
    partitioner = options.get('partitioner', PartitionerTypes.BYTESTRING_HASH)
    store_serdes = options.get('serdes', self.default_store_serdes)

    if name == '*':
        # wildcard: destroy every store of the given type in the namespace
        store_type = options.get('store_type', '*')
        L.debug(f'cleaning up whole store_type={store_type}, namespace={namespace}, name={name}')
        er_store = ErStore(store_locator=ErStoreLocator(namespace=namespace,
                                                        name=name,
                                                        store_type=store_type))
        job_id = generate_job_id(namespace, tag=RollPair.CLEANUP)
        job = ErJob(id=job_id,
                    name=RollPair.DESTROY,
                    inputs=[er_store],
                    options=options)

        args = list()
        cleanup_partitions = [ErPartition(id=-1, store_locator=er_store._store_locator)]
        for server_node, eggs in self.__session._eggs.items():
            egg = eggs[0]
            task = ErTask(id=generate_task_id(job_id, egg._command_endpoint._host),
                          name=job._name,
                          inputs=cleanup_partitions,
                          job=job)
            args.append(([task], egg._command_endpoint))

        futures = self.__command_client.async_call(
            args=args,
            output_types=[ErTask],
            command_uri=CommandURI(f'{RollPair.EGG_PAIR_URI_PREFIX}/{RollPair.RUN_TASK}'))
        for future in futures:
            result = future.result()

        self.get_session()._cluster_manager_client.delete_store(er_store)
    else:
        # todo:1: add combine options to pass it through
        store_options = self.__session.get_all_options()
        store_options.update(options)
        final_options = store_options.copy()
        store = ErStore(
            store_locator=ErStoreLocator(
                store_type=StoreTypes.ROLLPAIR_LMDB,
                namespace=namespace,
                name=name,
                total_partitions=total_partitions,
                partitioner=partitioner,
                serdes=store_serdes),
            options=final_options)
        task_results = self.__session._cluster_manager_client.get_store_from_namespace(store)
        L.trace('res={}'.format(task_results._stores))
        if task_results._stores is not None:
            L.trace("item count={}".format(len(task_results._stores)))
            for item in task_results._stores:
                L.trace("item namespace={} name={}".format(item._store_locator._namespace,
                                                           item._store_locator._name))
                rp = RollPair(er_store=item, rp_ctx=self)
                rp.destroy()
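# A hedged usage sketch (not part of the original file; names are illustrative):
# name='*' takes the wildcard branch above, destroying every matching store in the
# namespace via egg-pair tasks, while a concrete name only destroys the stores the
# cluster manager returns for that name.
def _example_cleanup_usage(rp_ctx):
    rp_ctx.cleanup(name='*', namespace='test_ns', options={'store_type': '*'})
    rp_ctx.cleanup(name='test_table', namespace='test_ns')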
def testLRGuest(self):
    # base objects
    rpc = TestLR_guest.rpc
    rpt_ctx = self.rptc
    rp_ctx = self.rpc
    rs_ctx = self.rsc
    _tag = "Hello2"
    # rs = rs_ctx.load(name="roll_pair_h2g.table", tag="{}".format(_tag))
    rpt_store = ErStore(store_locator=ErStoreLocator(
        store_type=store_type, namespace="ns", name="mat_a"))

    # local RP: generate the key pair and share it with the host parties
    pub, priv = Ciper().genkey()
    # rpt_ctx.start_gen_obfuscator(pub_key=pub)
    rs_ctx.load(name="roll_pair_name.test_key_pair",
                tag="pub_priv_key").push((pub, priv), host_parties)  # [0].result()

    # base data
    G = np.array([[0.254879, -1.046633, 0.209656, 0.074214, -0.441366,
                   -0.377645, -0.485934, 0.347072, -0.28757, -0.733474],
                  [-1.142928, -0.781198, -1.166747, -0.923578, 0.62823,
                   -1.021418, -1.111867, -0.959523, -0.096672, -0.121683],
                  [-1.451067, -1.406518, -1.456564, -1.092337, -0.708765,
                   -1.168557, -1.305831, -1.745063, -0.499499, -0.302893],
                  [-0.879933, 0.420589, -0.877527, -0.780484, -1.037534,
                   -0.48388, -0.555498, -0.768581, 0.43396, -0.200928]])
    Y = np.array([[1], [1], [1], [1]])
    w_G = NumpyTensor(np.ones((10, 1)), pub)

    rp_x_G = rp_ctx.load('namespace', 'G')
    rp_x_Y = rp_ctx.load('namespace', 'Y')
    rp_w_G = rp_ctx.load('namespace', 'w_G')
    rp_x_G.put('1', NumpyTensor(G, pub))
    rp_x_Y.put('1', NumpyTensor(Y, pub))
    rp_w_G.put('1', w_G)

    X_G = self.rptc.from_roll_pair(rp_x_G)
    # fw_G1 = X_G @ w_G
    # enc_fw_square_G = (fw_G1 * fw_G1).encrypt()
    # enc_fw_square_G.out(priv, "aa334")
    # return
    X_Y = self.rptc.from_roll_pair(rp_x_Y)

    learning_rate = 0.15
    itr = 0
    pre_loss_A = None
    while itr < max_iter:
        round = str(itr)
        # X_G = NumpyTensor(G, pub)
        fw_G1 = X_G @ w_G
        fw_G2 = X_G @ w_G
        enc_fw_G = fw_G1.encrypt()
        enc_fw_square_G = (fw_G1 * fw_G2).encrypt()

        # ship the guest-side intermediates to the host parties
        rs = rs_ctx.load(name="roll_pair_name.table", tag="fw_G1" + round)
        futures = rs.push(fw_G1._store, host_parties)
        rs = rs_ctx.load(name="roll_pair_name.table", tag="enc_fw_G" + round)
        futures = rs.push(enc_fw_G._store, host_parties)
        rs = rs_ctx.load(name="roll_pair_name.table", tag="enc_fw_square_G" + round)
        futures = rs.push(enc_fw_square_G._store, host_parties)
        rs = rs_ctx.load(name="roll_pair_name.table", tag="X_Y" + round)
        futures = rs.push(rp_x_Y, host_parties)
        rs = rs_ctx.load(name="roll_pair_name.table", tag="X_G" + round)
        futures = rs.push(rp_x_G, host_parties)
        # rs = rs_ctx.load(name="roll_pair_name.table", tag="W_G" + round)
        # futures = rs.push(rp_w_G, host_parties)
        # time.sleep(5)
        # print("sleep 5 sec")

        # wait for the host to send back the updated guest weights
        rs = rs_ctx.load(name="roll_pair_name.table", tag="W_G_result" + round)
        w_G = self.rptc.from_roll_pair(rs.pull(host_parties)[0].result())

        # (the aggregation, gradient and loss arithmetic runs host-side; the
        # combined single-party version of it is shown in full in test_lr)
        itr += 1
def test_lr(self):
    # base objects
    rpc = TestLR.rpc
    context = TestLR.rptc
    store = ErStore(store_locator=ErStoreLocator(
        store_type=store_type, namespace="ns", name="mat_a"))

    G = np.array([[0.254879, -1.046633, 0.209656, 0.074214, -0.441366,
                   -0.377645, -0.485934, 0.347072, -0.28757, -0.733474],
                  [-1.142928, -0.781198, -1.166747, -0.923578, 0.62823,
                   -1.021418, -1.111867, -0.959523, -0.096672, -0.121683],
                  [-1.451067, -1.406518, -1.456564, -1.092337, -0.708765,
                   -1.168557, -1.305831, -1.745063, -0.499499, -0.302893],
                  [-0.879933, 0.420589, -0.877527, -0.780484, -1.037534,
                   -0.48388, -0.555498, -0.768581, 0.43396, -0.200928]])
    H = np.array([[0.449512, -1.247226, 0.413178, 0.303781, -0.123848,
                   -0.184227, -0.219076, 0.268537, 0.015996, -0.789267,
                   -0.33736, -0.728193, -0.442587, -0.272757, -0.608018,
                   -0.577235, -0.501126, 0.143371, -0.466431, -0.554102],
                  [-1.245485, -0.842317, -1.255026, -1.038066, -0.426301,
                   -1.088781, -0.976392, -0.898898, 0.983496, 0.045702,
                   -0.493639, 0.34862, -0.552483, -0.526877, 2.253098,
                   -0.82762, -0.780739, -0.376997, -0.310239, 0.176301],
                  [-1.549664, -1.126219, -1.546652, -1.216392, -0.354424,
                   -1.167051, -1.114873, -1.26182, -0.327193, 0.629755,
                   -0.666881, -0.779358, -0.708418, -0.637545, 0.710369,
                   -0.976454, -1.057501, -1.913447, 0.795207, -0.149751],
                  [-0.851273, 0.733108, -0.843535, -0.786363, -0.049836,
                   -0.424532, -0.509221, -0.679649, 0.797298, 0.385927,
                   -0.451772, 0.453852, -0.431696, -0.494754, -1.182041,
                   0.281228, 0.084759, -0.25242, 1.038575, 0.351054]])
    Y = np.array([[1], [1], [1], [1]])

    rp_x_G = rpc.load('egr', 'rp_x_G')
    rp_x_H = rpc.load('egr', 'rp_x_H')
    rp_x_Y = rpc.load('egr', 'rp_x_Y')

    pub, priv = Ciper().genkey()
    rp_x_G.put('1', NumpyTensor(G, pub))
    rp_x_H.put('1', NumpyTensor(H, pub))
    rp_x_Y.put('1', NumpyTensor(Y, pub))

    X_G = self.rptc.from_roll_pair(rp_x_G)
    X_H = self.rptc.from_roll_pair(rp_x_H)
    X_Y = self.rptc.from_roll_pair(rp_x_Y)
    w_H = NumpyTensor(np.ones((20, 1)), pub)
    w_G = NumpyTensor(np.ones((10, 1)), pub)
    # X_H._store.map_values(lambda v: print("123", v._ndarry))

    learning_rate = 0.15
    itr = 0
    pre_loss_A = None
    while itr < max_iter:
        fw_H1 = X_H @ w_H
        fw_H2 = X_H @ w_H
        enc_fw_H = fw_H1.encrypt()
        enc_fw_H.out(priv, "123")
        enc_fw_square_H = (fw_H1 * fw_H2).encrypt()

        fw_G1 = X_G @ w_G
        fw_G2 = X_G @ w_G
        enc_fw_G = fw_G1.encrypt()
        enc_fw_square_G = (fw_G1 * fw_G2).encrypt()

        enc_agg_wx_G = enc_fw_G + enc_fw_H
        enc_agg_wx_square_G = enc_fw_square_G + enc_fw_square_H + fw_G1 * enc_fw_H * 2

        enc_fore_grad_G = 0.25 * enc_agg_wx_G - 0.5 * X_Y
        enc_grad_G = (X_G * enc_fore_grad_G).mean()
        enc_grad_H = (X_H * enc_fore_grad_G).mean()
        enc_grad_G.out(priv, '123')

        grad_A = enc_grad_G.hstack(enc_grad_H)
        learning_rate *= 0.999
        optim_grad_A = grad_A * learning_rate
        optim_grad_G, optim_grad_H = optim_grad_A.decrypt(priv).split(10, 1)
        # w_G.out(priv, "111111111111")
        # optim_grad_G.out(priv, "22222222")
        w_G = w_G - optim_grad_G.T()
        w_H = w_H - optim_grad_H.T()

        enc_half_ywx_G = enc_agg_wx_G * 0.5 * X_Y
        # todo: diversion
        enc_loss_G = (((-1 * enc_half_ywx_G)) + enc_agg_wx_square_G / 8
                      + NumpyTensor(np.log(2), pub)).mean()
        loss_AA = enc_loss_G.decrypt(priv)
        loss_A = next(loss_AA._store.get_all())[1]._ndarray[0][0]
        tmp = 99999 if pre_loss_A is None else loss_A - pre_loss_A
        if pre_loss_A is not None and abs(loss_A - pre_loss_A) < 1e-4:
            break
        pre_loss_A = loss_A
        print("pre_loss_A:", pre_loss_A)
        itr += 1
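# A plaintext (no encryption) numpy sketch of the arithmetic in test_lr above, not
# part of the original file; all names are illustrative. It makes the Taylor-
# approximated logistic loss explicit: log(1 + exp(-y*wx)) is expanded around
# wx = 0 as log(2) - (1/2)*y*wx + (1/8)*wx**2 (for labels y in {-1, +1},
# (y*wx)**2 == wx**2), which is exactly the
# `-0.5 * half_ywx + agg_wx_square / 8 + log(2)` expression computed on the
# encrypted tensors, and 0.25*wx - 0.5*y is its derivative w.r.t. wx.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(4, 30))           # concatenated guest + host features
y = np.ones((4, 1))                    # labels in {-1, +1}
w = np.ones((30, 1))

wx = X @ w                             # forward pass, shape (4, 1)
fore_grad = 0.25 * wx - 0.5 * y        # derivative of the Taylor-approximated loss
grad = (X * fore_grad).mean(axis=0)    # per-feature gradient, averaged over samples

half_ywx = 0.5 * wx * y
loss = (-half_ywx + wx ** 2 / 8 + np.log(2)).mean()
print(grad.shape, float(loss))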
def testLRHost(self):
    # multi context
    rpt_ctx = self.rptc
    rp_ctx = self.rpc
    rs_ctx = self.rsc
    _tag = "Hello2"
    # rs = rs_ctx.load(name="roll_pair_h2g.table", tag="{}".format(_tag))
    rpt_store = ErStore(store_locator=ErStoreLocator(
        store_type=store_type, namespace="ns", name="mat_a"))

    # local RP
    H = np.array([[0.449512, -1.247226, 0.413178, 0.303781, -0.123848,
                   -0.184227, -0.219076, 0.268537, 0.015996, -0.789267,
                   -0.33736, -0.728193, -0.442587, -0.272757, -0.608018,
                   -0.577235, -0.501126, 0.143371, -0.466431, -0.554102],
                  [-1.245485, -0.842317, -1.255026, -1.038066, -0.426301,
                   -1.088781, -0.976392, -0.898898, 0.983496, 0.045702,
                   -0.493639, 0.34862, -0.552483, -0.526877, 2.253098,
                   -0.82762, -0.780739, -0.376997, -0.310239, 0.176301],
                  [-1.549664, -1.126219, -1.546652, -1.216392, -0.354424,
                   -1.167051, -1.114873, -1.26182, -0.327193, 0.629755,
                   -0.666881, -0.779358, -0.708418, -0.637545, 0.710369,
                   -0.976454, -1.057501, -1.913447, 0.795207, -0.149751],
                  [-0.851273, 0.733108, -0.843535, -0.786363, -0.049836,
                   -0.424532, -0.509221, -0.679649, 0.797298, 0.385927,
                   -0.451772, 0.453852, -0.431696, -0.494754, -1.182041,
                   0.281228, 0.084759, -0.25242, 1.038575, 0.351054]])

    rp_x_H = rp_ctx.load('namespace', 'H')
    # pub, priv = Ciper().genkey()
    pub, priv = rs_ctx.load(
        name="roll_pair_name.test_key_pair",
        tag="pub_priv_key").pull(guest_parties)[0].result()
    # rpt_ctx.start_gen_obfuscator(pub_key=pub)
    rp_x_H.put('1', NumpyTensor(H, pub))

    X_H = self.rptc.from_roll_pair(rp_x_H)
    w_H = NumpyTensor(np.ones((20, 1)), pub)
    w_G = NumpyTensor(np.ones((10, 1)), pub)

    learning_rate = 0.15
    itr = 0
    pre_loss_A = None
    while itr < max_iter:
        round = str(itr)
        fw_H1 = X_H @ w_H
        fw_H2 = X_H @ w_H
        enc_fw_H = fw_H1.encrypt()
        enc_fw_square_H = (fw_H1 * fw_H2).encrypt()

        # get fw_G1
        rs = rs_ctx.load(name="roll_pair_name.table", tag="fw_G1" + round)
        fw_G1 = rs.pull(guest_parties)[0].result()
        rs = rs_ctx.load(name="roll_pair_name.table", tag="enc_fw_G" + round)
        enc_fw_G = rs.pull(guest_parties)[0].result()
        # get enc_fw_square_G
        rs = rs_ctx.load(name="roll_pair_name.table", tag="enc_fw_square_G" + round)
        enc_fw_square_G = rs.pull(guest_parties)[0].result()

        enc_agg_wx_G = enc_fw_H + self.rptc.from_roll_pair(enc_fw_G)
        enc_agg_wx_square_G = self.rptc.from_roll_pair(enc_fw_square_G) \
            + enc_fw_square_H \
            + self.rptc.from_roll_pair(fw_G1) * enc_fw_H * 2

        # get X_Y and X_G
        rs = rs_ctx.load(name="roll_pair_name.table", tag="X_Y" + round)
        X_Y = rs.pull(guest_parties)[0].result()
        rs = rs_ctx.load(name="roll_pair_name.table", tag="X_G" + round)
        X_G = rs.pull(guest_parties)[0].result()
        # rs = rs_ctx.load(name="roll_pair_name.table", tag="W_G" + round)
        # w_G = rs.pull(guest_parties)[0].result()

        enc_fore_grad_G = 0.25 * enc_agg_wx_G - 0.5 * self.rptc.from_roll_pair(X_Y)
        enc_grad_G = (self.rptc.from_roll_pair(X_G) * enc_fore_grad_G).mean()
        enc_grad_H = (X_H * enc_fore_grad_G).mean()

        grad_A = enc_grad_G.hstack(enc_grad_H)
        learning_rate *= 0.999
        optim_grad_A = grad_A * learning_rate
        optim_grad_G, optim_grad_H = optim_grad_A.decrypt(priv).split(10, 1)
        # w_G = RollPaillierTensor(w_G) - optim_grad_G.T()
        w_G = w_G - optim_grad_G.T()
        w_H = w_H - optim_grad_H.T()

        # send w_G back to the guest
        rs = rs_ctx.load(name="roll_pair_name.table", tag="W_G_result" + round)
        future = rs.push(w_G._store, guest_parties)
        print("W_G_result1 send")

        enc_half_ywx_G = enc_agg_wx_G * 0.5 * self.rptc.from_roll_pair(X_Y)
        enc_loss_G = (((-1 * enc_half_ywx_G)) + enc_agg_wx_square_G / 8
                      + NumpyTensor(np.log(2), pub)).mean()
        loss_AA = enc_loss_G.decrypt(priv)
        loss_A = next(loss_AA._store.get_all())[1]._ndarray[0][0]
        tmp = 99999 if pre_loss_A is None else loss_A - pre_loss_A
        pre_loss_A = loss_A
        print("pre_loss_A:", pre_loss_A)

        # (the combined single-party version of this computation is shown in
        # full in test_lr)
        itr += 1
def map_values(_tagged_key, is_standalone, roll_site_header):
    if is_standalone:
        dst_name = _tagged_key
        store_type = rp.get_store_type()
    else:
        dst_name = DELIM.join([_tagged_key,
                               self.dst_host,
                               str(self.dst_port),
                               obj_type])
        store_type = StoreTypes.ROLLPAIR_ROLLSITE

    if is_standalone:
        status_rp = self.ctx.rp_ctx.load(
            namespace,
            STATUS_TABLE_NAME + DELIM + self.roll_site_session_id,
            options=_options)
        status_rp.disable_gc()
        if isinstance(obj, RollPair):
            status_rp.put(_tagged_key,
                          (obj_type.encode("utf-8"), rp.get_name(), rp.get_namespace()))
        else:
            status_rp.put(_tagged_key,
                          (obj_type.encode("utf-8"), dst_name, namespace))
    else:
        store = rp.get_store()
        store_locator = store._store_locator
        new_store_locator = ErStoreLocator(
            store_type=store_type,
            namespace=namespace,
            name=dst_name,
            total_partitions=store_locator._total_partitions,
            partitioner=store_locator._partitioner,
            serdes=store_locator._serdes)
        # TODO:0: move options from job to store when database modification finished
        options = {"roll_site_header": roll_site_header,
                   "proxy_endpoint": self.ctx.proxy_endpoint,
                   "obj_type": obj_type}
        if isinstance(obj, RollPair):
            roll_site_header._options['total_partitions'] = \
                obj.get_store()._store_locator._total_partitions
            L.info(f"RollSite.push: pushing {roll_site_header}, type: RollPair, count: {obj.count()}")
        else:
            L.info(f"RollSite.push: pushing {roll_site_header}, type: object")
        rp.map_values(lambda v: v,
                      output=ErStore(store_locator=new_store_locator),
                      options=options)

    L.info(f"RollSite.push: push {roll_site_header} done. type: {type(obj)}")
    return _tagged_key