def original_train(model_dir, model_filename):
    """Train the plaintext uci model with Paddle and save it for later MPC update.

    Args:
        model_dir: directory the trained model is written into.
        model_filename: filename used for the saved model.
    """
    # Step 1. Build the uci network; only the inputs and the loss are used here.
    x, y, _, loss = network.uci_network()

    # Step 2. Plaintext training on CPU.
    cpu = fluid.CPUPlace()
    executor = fluid.Executor(cpu)
    executor.run(fluid.default_startup_program())
    data_feeder = fluid.DataFeeder(place=cpu, feed_list=[x, y])
    reader = paddle.batch(paddle.dataset.uci_housing.train(),
                          batch_size=network.BATCH_SIZE,
                          drop_last=True)

    begin = time.time()
    for epoch_id in range(network.PADDLE_UPDATE_EPOCH):
        for step, batch in enumerate(reader()):
            avg_loss = executor.run(feed=data_feeder.feed(batch),
                                    fetch_list=[loss.name])
            if step % 50 == 0:
                print('Epoch={}, Step={}, Loss={}'.format(
                    epoch_id, step, avg_loss[0]))
    duration = time.time() - begin
    print('Paddle Training of Epoch={} Batch_size={}, cost time in seconds:{}'.
          format(network.PADDLE_UPDATE_EPOCH, network.BATCH_SIZE, duration))

    # Step 3. Persist the trained model so it can be encrypted and updated later.
    mpc_du.save_trainable_model(exe=executor,
                                program=fluid.default_main_program(),
                                model_dir=model_dir,
                                model_filename=model_filename)
def train_infer_model(model_dir, model_filename):
    """Train the plaintext uci model and export it as a Paddle inference model.

    Args:
        model_dir: directory the inference model is written into.
        model_filename: filename used for the saved model.
    """
    # Step 1. Build the uci network; the prediction var is needed for export.
    x, y, y_pre, loss = network.uci_network()

    # Step 2. Plaintext training on CPU.
    cpu = fluid.CPUPlace()
    executor = fluid.Executor(cpu)
    executor.run(fluid.default_startup_program())
    data_feeder = fluid.DataFeeder(place=cpu, feed_list=[x, y])
    reader = paddle.batch(paddle.dataset.uci_housing.train(),
                          batch_size=network.BATCH_SIZE,
                          drop_last=True)

    begin = time.time()
    for epoch_id in range(network.TRAIN_EPOCH):
        for step, batch in enumerate(reader()):
            avg_loss = executor.run(feed=data_feeder.feed(batch),
                                    fetch_list=[loss.name])
            if step % 50 == 0:
                print('Epoch={}, Step={}, Loss={}'.format(epoch_id, step,
                                                          avg_loss[0]))
    elapsed = time.time() - begin
    print('For Prediction: Paddle Training of Epoch={} Batch_size={}, cost time in seconds:{}'
          .format(network.TRAIN_EPOCH, network.BATCH_SIZE, elapsed))

    # Step 3. Export the inference model (feeds x, fetches the prediction).
    fluid.io.save_inference_model(executor=executor,
                                  main_program=fluid.default_main_program(),
                                  dirname=model_dir,
                                  model_filename=model_filename,
                                  feeded_var_names=[x.name],
                                  target_vars=[y_pre])
def encrypt_model_and_train(role, ip, server, port, model_save_dir, model_filename):
    """
    Load uci network and train MPC model.

    Args:
        role: index of this MPC party; also used to name the per-party loss file.
        ip: address passed to pfl_mpc.init — presumably this party's IP; confirm
            against the pfl_mpc API.
        server, port: coordination-server endpoint passed to pfl_mpc.init.
        model_save_dir: directory the trained MPC model is written into.
        model_filename: filename used for the saved MPC model.
    """
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    # Step 1. Initialize MPC environment and load paddle model network and
    # initialize parameters.
    pfl_mpc.init("aby3", role, ip, server, port)
    [_, _, _, loss] = network.uci_network()
    exe.run(fluid.default_startup_program())

    # Step 2. TRANSPILE: encrypt default_main_program into MPC program.
    aby3.transpile()

    # Step 3. MPC-TRAINING: model training based on MPC program.
    mpc_data_dir = "../mpc_data/"
    feature_file = mpc_data_dir + "house_feature"
    feature_shape = (13, )
    label_file = mpc_data_dir + "house_label"
    label_shape = (1, )
    if not os.path.exists('./tmp'):
        os.makedirs('./tmp')
    # Start each run with a fresh per-party loss file.
    loss_file = "./tmp/uci_mpc_loss.part{}".format(role)
    if os.path.exists(loss_file):
        os.remove(loss_file)
    batch_size = network.UCI_BATCH_SIZE
    epoch_num = network.TRAIN_EPOCH
    feature_name = 'x'
    label_name = 'y'
    loader = process_data.get_mpc_dataloader(feature_file, label_file,
                                             feature_shape, label_shape,
                                             feature_name, label_name,
                                             role, batch_size)
    start_time = time.time()
    for epoch_id in range(epoch_num):
        step = 0
        for sample in loader():
            mpc_loss = exe.run(feed=sample, fetch_list=[loss.name])
            if step % 50 == 0:
                print('Epoch={}, Step={}, Loss={}'.format(
                    epoch_id, step, mpc_loss))
            # Append this party's raw loss bytes so they can be decoded offline.
            # FIX: ndarray.tostring() is a deprecated alias removed in NumPy 2.0;
            # tobytes() produces byte-identical output.
            with open(loss_file, 'ab') as f:
                f.write(np.array(mpc_loss).tobytes())
            step += 1
    end_time = time.time()
    print('Mpc Training of Epoch={} Batch_size={}, cost time in seconds:{}'.
          format(epoch_num, batch_size, (end_time - start_time)))

    # Step 4. SAVE trained MPC model as a trainable model.
    aby3.save_trainable_model(exe=exe,
                              model_dir=model_save_dir,
                              model_filename=model_filename)
    print('Successfully save mpc trained model into:{}'.format(model_save_dir))