# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle_fl as fl
import paddle.fluid as fluid
from paddle_fl.core.server.fl_server import FLServer
from paddle_fl.core.master.fl_job import FLRunTimeJob

server = FLServer()
server_id = 0
job_path = "fl_job_config"
job = FLRunTimeJob()
job.load_server_job(job_path, server_id)
job._scheduler_ep = "127.0.0.1:9091"  # IP address for scheduler
server.set_server_job(job)
server._current_ep = "127.0.0.1:8181"  # IP address for server
server.start()
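
# None of the files in this section start the scheduler that the server above
# (and the trainers below) register with on 127.0.0.1:9091. A minimal sketch,
# assuming the FLScheduler API used in the PaddleFL examples:
from paddle_fl.core.scheduler.agent_master import FLScheduler

worker_num = 2
server_num = 1
# the example scheduler listens on port 9091 by default
scheduler = FLScheduler(worker_num, server_num)
scheduler.set_sample_worker_num(worker_num)
scheduler.init_env()
print("init env done.")
scheduler.start_fl_training()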
datefmt="%d-%M-%Y %H:%M:%S", level=logging.DEBUG) logger = logging.getLogger("FLTrainer") BATCH_SIZE = 64 train_reader = paddle.batch(paddle.reader.shuffle(paddle.dataset.mnist.train(), buf_size=500), batch_size=BATCH_SIZE) test_reader = paddle.batch(paddle.dataset.mnist.test(), batch_size=BATCH_SIZE) trainer_num = 2 trainer_id = int(sys.argv[1]) # trainer id for each guest job_path = "fl_job_config" job = FLRunTimeJob() job.load_trainer_job(job_path, trainer_id) job._scheduler_ep = "127.0.0.1:9091" # Inform the scheduler IP to trainer trainer = FLTrainerFactory().create_fl_trainer(job) trainer.trainer_id = trainer_id trainer._current_ep = "127.0.0.1:{}".format(9000 + trainer_id) trainer.trainer_num = trainer_num trainer.key_dir = "./keys/" trainer.start() output_folder = "fl_model" epoch_id = 0 step_i = 0 inputs = fluid.layers.data(name='x', shape=[1, 28, 28], dtype='float32') label = fluid.layers.data(name='y', shape=[1], dtype='int64')
        break

os.system("ls")
os.system("gzip -d {}.tar.gz".format(message))
print("gzip finish")
os.system("tar -xf {}.tar".format(message))
os.system("ls")
zmq_socket.close()
print("close socket")

# program start
if 'server' in message:
    server = FLServer()
    server_id = 0
    job_path = "job_config"
    job = FLRunTimeJob()
    job.load_server_job(job_path, server_id)
    job._scheduler_ep = scheduler_conf["ENDPOINT"]
    server.set_server_job(job)
    server._current_ep = endpoint
    server.start()
else:
    def reader():
        # synthetic data: three 5-dim float features plus a binary label
        for i in range(1000):
            data_dict = {}
            for i in range(3):
                data_dict[str(i)] = np.random.rand(1, 5).astype('float32')
            data_dict["label"] = np.random.randint(
                2, size=(1, 1)).astype('int64')
            yield data_dict
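
    # The trainer branch is truncated above. A hedged continuation sketch that
    # mirrors the other trainers in this section; the "TRAINER_ID" config key
    # is a hypothetical stand-in for however the truncated code derives the
    # trainer id from the downloaded package:
    trainer_id = int(scheduler_conf.get("TRAINER_ID", 0))  # hypothetical key
    job = FLRunTimeJob()
    job.load_trainer_job("job_config", trainer_id)
    job._scheduler_ep = scheduler_conf["ENDPOINT"]
    trainer = FLTrainerFactory().create_fl_trainer(job)
    trainer._current_ep = endpoint
    trainer.start()
    while not trainer.stop():
        for data in reader():
            trainer.run(feed=data, fetch=[])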
import paddle_fl as fl
import paddle.fluid as fluid
from paddle_fl.core.server.fl_server import FLServer
from paddle_fl.core.master.fl_job import FLRunTimeJob

server = FLServer()
server_id = 0
job_path = "fl_job_config"
job = FLRunTimeJob()
job.load_server_job(job_path, server_id)
server.set_server_job(job)
server.start()
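
# The "fl_job_config" directory loaded above is produced by a separate
# master/compile step that is not shown in this section. A minimal sketch,
# assuming paddle_fl's JobGenerator / FLStrategyFactory API; the one-layer
# softmax model here is purely illustrative:
import paddle.fluid as fluid
from paddle_fl.core.master.job_generator import JobGenerator
from paddle_fl.core.strategy.fl_strategy_base import FLStrategyFactory

inputs = fluid.layers.data(name='x', shape=[13], dtype='float32')
label = fluid.layers.data(name='y', shape=[1], dtype='int64')
predict = fluid.layers.fc(input=inputs, size=2, act='softmax')
loss = fluid.layers.mean(
    fluid.layers.cross_entropy(input=predict, label=label))

job_generator = JobGenerator()
job_generator.set_optimizer(fluid.optimizer.SGD(learning_rate=0.1))
job_generator.set_losses([loss])
job_generator.set_startup_program(fluid.default_startup_program())
job_generator.set_infer_feed_and_target_names(['x', 'y'], [loss.name])

# FedAvg with 10 local steps between aggregations
build_strategy = FLStrategyFactory()
build_strategy.fed_avg = True
build_strategy.inner_step = 10
strategy = build_strategy.create_fl_strategy()

# one server endpoint, two trainers; writes fl_job_config/ to disk
job_generator.generate_fl_job(
    strategy,
    server_endpoints=["127.0.0.1:8181"],
    worker_num=2,
    output="fl_job_config")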
datefmt="%d-%M-%Y %H:%M:%S", level=logging.DEBUG) def reader(): for i in range(1000): data_dict = {} for i in range(3): data_dict[str(i)] = np.random.rand(1, 5).astype('float32') data_dict["label"] = np.random.randint(2, size=(1, 1)).astype('int64') yield data_dict trainer_id = int(sys.argv[1]) # trainer id for each guest job_path = "fl_job_config" job = FLRunTimeJob() job.load_trainer_job(job_path, trainer_id) #job._scheduler_ep = "127.0.0.1:9091" # Inform the scheduler IP to trainer job._scheduler_ep = os.environ['FL_SCHEDULER_SERVICE_HOST'] + ":" + os.environ[ 'FL_SCHEDULER_SERVICE_PORT_FL_SCHEDULER'] trainer = FLTrainerFactory().create_fl_trainer(job) #trainer._current_ep = "127.0.0.1:{}".format(9000+trainer_id) trainer._current_ep = os.environ['TRAINER0_SERVICE_HOST'] + ":" + os.environ[ 'TRAINER0_SERVICE_PORT_TRAINER0'] trainer.start() print(trainer._scheduler_ep, trainer._current_ep) output_folder = "fl_model" epoch_id = 0 while not trainer.stop(): print("batch %d start train" % (epoch_id)) train_step = 0
import numpy
import sys
import math
import logging

import pandas as pd
import paddle
import paddle.fluid as fluid
from paddle_fl.core.trainer.fl_trainer import FLTrainerFactory
from paddle_fl.core.master.fl_job import FLRunTimeJob

logging.basicConfig(
    filename="test.log",
    filemode="w",
    format="%(asctime)s %(name)s:%(levelname)s:%(message)s",
    datefmt="%d-%m-%Y %H:%M:%S",
    level=logging.DEBUG)

trainer_id = int(sys.argv[1])  # trainer id for each guest
job_path = "fl_job_config"
job = FLRunTimeJob()
job.load_trainer_job(job_path, trainer_id)
trainer = FLTrainerFactory().create_fl_trainer(job)
trainer.start()

test_program = trainer._main_program.clone(for_test=True)

alldata = pd.read_csv('alldata.csv')
print(len(alldata))

# split off the label column and drop non-feature columns
label = alldata['Label'].to_frame()
alldata = alldata.drop('Company', axis=1)
alldata = alldata.drop('Label', axis=1)
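
# The preprocessing above is cut off before the model is fed. A hedged sketch
# of one way to evaluate with the cloned test_program, following the
# evaluation pattern in PaddleFL's examples; trainer.exe is the executor the
# trainer runs on, and the fetched accuracy name is an assumption, since the
# real target comes from the compiled job config:
def evaluate(program, feed_dicts):
    acc_set = []
    for feed in feed_dicts:
        acc_np = trainer.exe.run(program=program,
                                 feed=feed,
                                 fetch_list=["accuracy_0.tmp_0"])
        acc_set.append(float(acc_np[0]))
    return numpy.array(acc_set).mean()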
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time

import paddle_fl as fl
import paddle.fluid as fluid
from paddle_fl.core.server.fl_server import FLServer
from paddle_fl.core.master.fl_job import FLRunTimeJob

server = FLServer()
server_id = 0
job_path = "fl_job_config"
job = FLRunTimeJob()
job.load_server_job(job_path, server_id)
# scheduler endpoint, resolved from the Kubernetes service environment
job._scheduler_ep = os.environ['FL_SCHEDULER_SERVICE_HOST'] + ":" + os.environ[
    'FL_SCHEDULER_SERVICE_PORT_FL_SCHEDULER']
#job._endpoints = os.environ['POD_IP'] + ":" + os.environ['FL_SERVER_SERVICE_PORT_FL_SERVER']  # IP address for server
server.set_server_job(job)
# server endpoint, resolved from the Kubernetes service environment
server._current_ep = os.environ['FL_SERVER_SERVICE_HOST'] + ":" + os.environ[
    'FL_SERVER_SERVICE_PORT_FL_SERVER']
print(job._scheduler_ep, server._current_ep)
server.start()
print("connect")
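
# Note on the lookups above: Kubernetes injects <SERVICE>_SERVICE_HOST and
# <SERVICE>_SERVICE_PORT_<PORTNAME> environment variables for services that
# exist when the pod starts, so this server pod must come up after the
# fl-scheduler and fl-server services are created. A small hedged helper
# (not part of the original file) that fails with a clearer message when a
# variable is missing:
def service_endpoint(host_var, port_var):
    try:
        return os.environ[host_var] + ":" + os.environ[port_var]
    except KeyError as missing:
        raise RuntimeError("missing Kubernetes service env var {}; "
                           "is the service up?".format(missing))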