Example 1
    def _get_gradient(self,
                      input_to_check,
                      place,
                      output_names,
                      no_grad_set,
                      parallel=False):
        prog = Program()
        block = prog.global_block()
        self._append_ops(block)
        loss = append_loss_ops(block, output_names)
        param_grad_list = append_backward(loss=loss,
                                          parameter_list=input_to_check,
                                          no_grad_set=no_grad_set)

        inputs = self._get_inputs(block)
        feed_dict = self.feed_var(inputs, place)

        fetch_list = [g for p, g in param_grad_list]
        if parallel:
            use_cuda = isinstance(place, fluid.CUDAPlace)
            executor = fluid.ParallelExecutor(use_cuda=use_cuda,
                                              loss_name=loss.name,
                                              main_program=prog)
            # ParallelExecutor.run takes the fetch list first and no program
            outs = executor.run(fetch_list, feed=feed_dict, return_numpy=False)
        else:
            executor = Executor(place)
            outs = executor.run(prog,
                                feed=feed_dict,
                                fetch_list=fetch_list,
                                return_numpy=False)
        return list(map(np.array, outs))
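The build-program / append_backward / feed-fetch pattern above can be exercised outside the test harness. Below is a minimal, self-contained sketch using the legacy fluid 1.x API; the fc layer, the 'fc_w' parameter name, and the input shape are illustrative stand-ins, not taken from the example above.

import numpy as np
import paddle.fluid as fluid

prog = fluid.Program()
startup = fluid.Program()
with fluid.program_guard(prog, startup):
    x = fluid.data(name='x', shape=[None, 2], dtype='float32')
    y = fluid.layers.fc(input=x, size=1, param_attr='fc_w', bias_attr=False)
    loss = fluid.layers.reduce_mean(y)
    # append_backward returns (param, grad) pairs, as in _get_gradient
    param_grad_list = fluid.backward.append_backward(
        loss, parameter_list=['fc_w'])

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup)
grads = exe.run(prog,
                feed={'x': np.ones((4, 2), dtype='float32')},
                fetch_list=[g for p, g in param_grad_list])
print(grads[0])  # d(loss)/d(fc_w), shape (2, 1)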
Example 2
        def closure(**kwargs):
            role = kwargs['role']

            pfl_mpc.init("aby3", role, "localhost", self.server,
                         int(self.port))
            loss = append_loss_ops(block, output_names)
            param_grad_list = append_backward(loss=loss,
                                              parameter_list=input_to_check,
                                              no_grad_set=no_grad_set)

            inputs = self._get_inputs(block)
            feed_dict = self.feed_var(inputs, place)

            fetch_list = [g for p, g in param_grad_list]

            executor = Executor(place)

            # run the default main program, which carries the mpc_init op
            executor.run()
            outs = executor.run(prog,
                                feed=feed_dict,
                                fetch_list=fetch_list,
                                return_numpy=False)
            # store the lod information in the last extra slot
            lod = []
            for idx in range(len(fetch_list)):
                return_results[idx].append(np.array(outs[idx]))
                lod_i = outs[idx].lod()
                lod_concat = []
                for i in lod_i:
                    lod_concat.append(i)
                lod.append(lod_concat)
            return_results[len(fetch_list)].append(lod)
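This fragment omits the surrounding _get_gradient setup, which is shown in full in Example 4. The closure runs in a separate process per party (via multi_party_run in the other examples), which is why results are pushed into Manager().list() proxies rather than returned. A stand-alone sketch of that pattern, with a trivial worker standing in for the MPC program:

import numpy as np
from multiprocessing import Manager, Process

def worker(results, role):
    # stand-in for one party's executor.run(...) outputs
    results.append(np.full((2,), role))

if __name__ == '__main__':
    results = Manager().list()  # proxy list shared across processes
    procs = [Process(target=worker, args=(results, r)) for r in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(list(results))  # one entry per party, in completion order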
Example 3
    def _get_gradient(self,
                      input_to_check,
                      place,
                      output_names,
                      no_grad_set,
                      parallel=False):
        prog = Program()
        block = prog.global_block()
        self._append_ops(block)
        loss = append_loss_ops(block, output_names)
        param_grad_list = append_backward(loss=loss,
                                          parameter_list=input_to_check,
                                          no_grad_set=no_grad_set)

        inputs = self._get_inputs(block)
        feed_dict = self.feed_var(inputs, place)

        fetch_list = [g for p, g in param_grad_list]

        return_results = [Manager().list() for _ in range(len(fetch_list))]

        def closure(**kwargs):
            role = kwargs['role']

            pfl_mpc.init("privc", role, "localhost", self.server,
                         int(self.port))

            #init_op = fluid.default_main_program().global_block().ops[0]

            #_insert_init_op(program, init_op)

            executor = Executor(place)

            # run the default main program, which carries the mpc_init op
            executor.run()
            outs = executor.run(prog, feed=feed_dict, fetch_list=fetch_list)

            for idx in range(len(fetch_list)):
                return_results[idx].append(outs[idx])

        ret = self.multi_party_run(target=closure)
        self.assertEqual(ret[0], True)

        outs = []

        for idx in range(len(fetch_list)):
            outs.append(self.reconstruct(np.array(return_results[idx])))
        return outs
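self.reconstruct combines each party's secret shares back into plaintext. The exact scheme depends on the protocol (privc is two-party, aby3 three-party, and both share values as fixed-point integers), so the NumPy sketch below only illustrates the idea of additive sharing and reconstruction, not the actual wire format:

import numpy as np

def share(x, n_parties=2, seed=0):
    # split x into n additive shares that sum back to x
    rng = np.random.default_rng(seed)
    shares = [rng.standard_normal(x.shape) for _ in range(n_parties - 1)]
    shares.append(x - np.sum(shares, axis=0))
    return np.stack(shares)

def reconstruct(shares):
    # summing over the party axis recovers the plaintext
    return np.sum(shares, axis=0)

x = np.array([2.0, -1.5, 0.25])
print(reconstruct(share(x)))  # ~[ 2.   -1.5   0.25]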
Example 4
    def _get_gradient(self,
                      input_to_check,
                      place,
                      output_names,
                      no_grad_set,
                      parallel=False):
        prog = Program()
        block = prog.global_block()
        self._append_ops(block)
        loss = append_loss_ops(block, output_names)
        param_grad_list = append_backward(loss=loss,
                                          parameter_list=input_to_check,
                                          no_grad_set=no_grad_set)

        inputs = self._get_inputs(block)
        feed_dict = self.feed_var(inputs, place)

        fetch_list = [g for p, g in param_grad_list]

        # Manager() cannot store LoDTensor objects directly,
        # so use one extra slot at the end to carry the output lod
        return_results = [Manager().list() for _ in range(len(fetch_list) + 1)]

        def closure(**kwargs):
            role = kwargs['role']

            pfl_mpc.init("aby3", role, "localhost", self.server,
                         int(self.port))

            #init_op = fluid.default_main_program().global_block().ops[0]

            #_insert_init_op(program, init_op)

            executor = Executor(place)

            # run the default main program, which carries the mpc_init op
            executor.run()
            outs = executor.run(prog,
                                feed=feed_dict,
                                fetch_list=fetch_list,
                                return_numpy=False)
            # store the lod information in the last extra slot
            lod = []
            for idx in range(len(fetch_list)):
                return_results[idx].append(np.array(outs[idx]))
                lod_i = outs[idx].lod()
                lod_concat = []
                for i in lod_i:
                    lod_concat.append(i)
                lod.append(lod_concat)
            return_results[len(fetch_list)].append(lod)

        ret = self.multi_party_run(target=closure)
        self.assertEqual(ret[0], True)

        outs = []

        lod = np.array(return_results[len(fetch_list)])
        # from numpy array to LoDTensor
        for idx in range(len(fetch_list)):
            t = fluid.LoDTensor()
            reveal_data = aby3.reconstruct(np.array(return_results[idx]))
            t.set(reveal_data, place)
            lod_idx = lod[0][idx]
            # TODO: some gru op outputs carry a broken lod, so set_lod()
            # can throw (even t.set_lod(t.lod()) fails); ignore the
            # failure until the gru op is fixed
            try:
                t.set_lod(lod_idx)
            except Exception:
                pass

            outs.append(t)
        return outs
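The tail of Example 4 converts the reconstructed ndarrays back into LoDTensors via set and set_lod. In isolation, that round trip looks like the sketch below (offset-based lod, legacy fluid 1.x; the data and offsets are illustrative):

import numpy as np
import paddle.fluid as fluid

t = fluid.LoDTensor()
t.set(np.arange(6, dtype='float32').reshape(6, 1), fluid.CPUPlace())
# offset-based lod: rows [0:2) and [2:6) form two sequences
t.set_lod([[0, 2, 6]])
print(t.lod())      # [[0, 2, 6]]
print(np.array(t))  # back to a plain ndarray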