Example #1
        self.loss = fluid.layers.reduce_mean(self.sum_cost)
        self.startup_program = fluid.default_startup_program()

inputs = [fluid.layers.data(name=str(slot_id), shape=[5], dtype="float32")
          for slot_id in range(3)]
label = fluid.layers.data(name="label", shape=[1], dtype='int64')

model = Model()
model.mlp(inputs, label)

job_generator = JobGenerator()
optimizer = fluid.optimizer.SGD(learning_rate=0.1)
job_generator.set_optimizer(optimizer)
job_generator.set_losses([model.loss])
job_generator.set_startup_program(model.startup_program)
job_generator.set_infer_feed_and_target_names(
    [x.name for x in inputs], [model.predict.name])

build_strategy = FLStrategyFactory()
build_strategy.fed_avg = True
build_strategy.inner_step = 10
strategy = build_strategy.create_fl_strategy()

# endpoints will be collected through the cluster
# in this example, we assume the endpoints have already been collected
endpoints = ["127.0.0.1:8181"]
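
The excerpt stops before the job is actually written out. A minimal completion, mirroring the generate_fl_job call shown in Example #3 (the worker count and output directory below are placeholders), might be:

output = "fl_job_config"
job_generator.generate_fl_job(
    strategy, server_endpoints=endpoints, worker_num=2, output=output)
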
Example #2
                                          low=init_low_bound,
                                          high=init_high_bound),
                                      learning_rate=fc_lr_x))
        cost = fluid.layers.cross_entropy(input=self.fc,
                                          label=self.dst_wordseq)
        # top-20 accuracy of the predictions
        self.acc = fluid.layers.accuracy(input=self.fc,
                                         label=self.dst_wordseq,
                                         k=20)
        # average cross-entropy loss over the batch
        self.loss = fluid.layers.mean(x=cost)
        self.startup_program = fluid.default_startup_program()


model = Model()
model.gru4rec_network()

job_generator = JobGenerator()
optimizer = fluid.optimizer.SGD(learning_rate=2.0)
job_generator.set_optimizer(optimizer)
job_generator.set_losses([model.loss])
job_generator.set_startup_program(model.startup_program)
job_generator.set_infer_feed_and_target_names(
    [model.src_wordseq.name, model.dst_wordseq.name],
    [model.loss.name, model.acc.name])

build_strategy = FLStrategyFactory()
build_strategy.fed_avg = True
build_strategy.inner_step = 1
strategy = build_strategy.create_fl_strategy()

# endpoints will be collected through the cluster
# in this example, we assume the endpoints have already been collected
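
This excerpt is also cut off after the endpoint comment. As in Example #1, the missing tail would define the endpoints and emit the job; a rough sketch with placeholder values:

endpoints = ["127.0.0.1:8181"]
job_generator.generate_fl_job(
    strategy, server_endpoints=endpoints, worker_num=2, output="fl_job_config")
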
Example #3
def job_generate():
    # generate an FL job identical to the one produced by fl_master
    inputs = [fluid.layers.data(name=str(slot_id), shape=[5], dtype="float32")
              for slot_id in range(3)]
    label = fluid.layers.data(name="label", shape=[1], dtype='int64')

    model = Model()
    model.mlp(inputs, label)

    job_generator = JobGenerator()
    optimizer = fluid.optimizer.SGD(learning_rate=0.1)
    job_generator.set_optimizer(optimizer)
    job_generator.set_losses([model.loss])
    job_generator.set_startup_program(model.startup_program)
    job_generator.set_infer_feed_and_target_names(
        [x.name for x in inputs], [model.predict.name])

    build_strategy = FLStrategyFactory()
    build_strategy.fed_avg = True
    build_strategy.inner_step = 10
    strategy = build_strategy.create_fl_strategy()

    # endpoints will be collected through the cluster
    # in this example, we assume the endpoints have already been collected
    # ip_list is defined elsewhere in the script; its first entry is used as the FL server endpoint
    server_ip = ["{}".format(ip_list[0])]
    
    output = "job_config"
    job_generator.generate_fl_job(
        strategy, server_endpoints=server_ip, worker_num=int(default_dict["worker_nodes"]), output=output)
    
    # package each generated job config directory into a tarball for distribution
    file_list = os.listdir(output)
    for file in file_list:
        tar = tarfile.open('{}/{}.tar.gz'.format(output, file), 'w:gz')
        for root, dirs, files in os.walk("{}/{}".format(output, file)):
            for f in files:
                fullpath = os.path.join(root, f)
                tar.add(fullpath)
        tar.close()
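
job_generate() relies on ip_list and default_dict, which are defined elsewhere in the original script, and on the usual imports (os, tarfile, paddle.fluid and the PaddleFL JobGenerator/FLStrategyFactory) plus the Model class from the surrounding file. A minimal, hypothetical driver could look like this; the values are placeholders only:

ip_list = ["127.0.0.1:8181"]          # assumed: first entry is used as the FL server endpoint
default_dict = {"worker_nodes": "2"}  # assumed: number of trainer nodes

job_generate()  # writes job_config/ and packages each generated directory as a .tar.gz

The fragment below appears to come from a separate snippet: the tail of what looks like a logistic-regression Model (lr_network) and a job configuration that uses the differentially private DP-SGD strategy instead of FedAvg.
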
        #self.accuracy = fluid.layers.accuracy(input=self.predict, label=self.label)
        #self.loss = fluid.layers.mean(self.sum_cost)
        self.startup_program = fluid.default_startup_program()


model = Model()
model.lr_network()

STEP_EPSILON = 0.1  # per-step privacy budget (epsilon)
DELTA = 0.00001     # privacy parameter delta
# noise multiplier derived from (epsilon, delta) via the Gaussian mechanism
SIGMA = math.sqrt(2.0 * math.log(1.25 / DELTA)) / STEP_EPSILON
CLIP = 4.0          # gradient clipping bound
batch_size = 64

job_generator = JobGenerator()
optimizer = fluid.optimizer.SGD(learning_rate=0.1)
job_generator.set_optimizer(optimizer)
job_generator.set_losses([model.avg_cost])
job_generator.set_startup_program(model.startup_program)
job_generator.set_infer_feed_and_target_names(
    [model.inputs.name, model.label.name], [model.avg_cost.name])

build_strategy = FLStrategyFactory()
build_strategy.dpsgd = True
build_strategy.inner_step = 1
strategy = build_strategy.create_fl_strategy()
strategy.learning_rate = 0.1
strategy.clip = CLIP
strategy.batch_size = float(batch_size)
strategy.sigma = CLIP * SIGMA
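
strategy.sigma is set to CLIP * SIGMA, where SIGMA is the Gaussian-mechanism noise multiplier sigma = sqrt(2 * ln(1.25 / delta)) / epsilon. A quick sanity check of the numbers used above (plain Python, no PaddleFL required):

import math

DELTA = 0.00001
STEP_EPSILON = 0.1
CLIP = 4.0

sigma = math.sqrt(2.0 * math.log(1.25 / DELTA)) / STEP_EPSILON
print(round(sigma, 2))         # ~48.45
print(round(CLIP * sigma, 2))  # ~193.79, the value assigned to strategy.sigma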