Example #1
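A helper that converts a deserialized Homo model component into a model object of a common ML framework. If no target framework is requested, a default is chosen from the model contents and module name.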
def model_convert(model_contents: dict, module_name: str, framework_name=None):
    """Convert a H**o model component into format of a common ML framework

    :param model_contents: The model dict un-serialized from the model protobuf.
    :param module_name: The module name, typically as HomoXXXX.
    :param framework_name: The desired framework, e.g. "sklearn", "pytorch", etc.
                           If not specified, the target framework will be chosen
                           automatically.
    :return: the converted framework name and an instance of the model object from
             the specified framework.
    """

    if not framework_name:
        framework_name = get_default_target_framework(model_contents,
                                                      module_name)
        if not framework_name:
            return None, None
    target_framework, component_converter = _get_component_converter(
        module_name, framework_name)
    if not component_converter:
        LOGGER.warning(
            f"Module {module_name} cannot be converted to framework {framework_name}"
        )
        return None, None
    LOGGER.info(
        f"Converting {module_name} module to a model of framework {target_framework}"
    )

    return target_framework, component_converter.convert(model_contents)
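For context, a minimal usage sketch; `loaded_contents` and the "HomoLR" module name are placeholders, not values taken from the examples above:

# Hypothetical call: `loaded_contents` stands for the dict deserialized from
# the model protobuf, and "HomoLR" for any HomoXXXX module name.
framework, model = model_convert(loaded_contents, "HomoLR", framework_name="sklearn")
if model is None:
    # conversion is unsupported for this module/framework pair
    ...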
Example #2
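A client-side fit routine: it aligns labels across parties (unless warm-starting), converts the input data, builds or resumes the local NN model, then alternates local training with weight aggregation until convergence or the iteration cap is hit.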
def client_fit(self, data_inst):
    self._header = data_inst.schema["header"]
    if not self.component_properties.is_warm_start:
        client_align_labels(self, data_inst=data_inst)
    data = self.data_converter.convert(
        data_inst,
        batch_size=self.batch_size,
        encode_label=self.encode_label,
        label_mapping=self._label_align_mapping,
    )
    if not self.component_properties.is_warm_start:
        self.nn_model = self.model_builder(
            input_shape=data.get_shape()[0],
            nn_define=self.nn_define,
            optimizer=self.optimizer,
            loss=self.loss,
            metrics=self.metrics,
        )
    else:
        self.callback_warm_start_init_iter(self.aggregate_iteration_num + 1)

    epoch_degree = float(len(data)) * self.aggregate_every_n_epoch

    while self.aggregate_iteration_num + 1 < self.max_aggregate_iteration_num:
        # update iteration num
        self.aggregate_iteration_num += 1

        self.callback_list.on_epoch_begin(self.aggregate_iteration_num)
        LOGGER.info(f"start {self.aggregate_iteration_num}_th aggregation")

        # train
        self.nn_model.train(
            data, aggregate_every_n_epoch=self.aggregate_every_n_epoch)

        # send model for aggregate, then set aggregated model to local
        self.aggregator.send_weighted_model(
            weighted_model=self.nn_model.get_model_weights(),
            weight=epoch_degree * self.aggregate_every_n_epoch,
            suffix=_suffix(self),
        )
        weights = self.aggregator.get_aggregated_model(suffix=_suffix(self))
        self.nn_model.set_model_weights(weights=weights)
        self.callback_list.on_epoch_end(self.aggregate_iteration_num)
        # calc loss and check convergence
        if client_is_converged(self, data, epoch_degree):
            LOGGER.info(f"early stop at iter {self.aggregate_iteration_num}")
            break

        LOGGER.info(
            f"role {self.role} finish {self.aggregate_iteration_num}_th aggregation"
        )
    else:
        LOGGER.warning(
            f"reach max iter: {self.aggregate_iteration_num}, not converged")

    self.set_summary(self._summary)
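Note the while/else idiom here and in the examples below: the else branch runs only when the loop exits without break, i.e. when the iteration cap is reached without the convergence check ever firing.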
Example #3
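The arbiter-side counterpart: after aligning labels, it repeatedly computes the weighted mean of the client models and broadcasts the result back until convergence.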
    def fit(self, data_inst):
        label_mapping = HomoLabelEncoderArbiter().label_alignment()
        LOGGER.info(f'label mapping: {label_mapping}')
        while self.aggregate_iteration_num < self.max_aggregate_iteration_num:
            # aggregate client models into a weighted mean, then send it back
            self.model = self.aggregator.weighted_mean_model(suffix=self._suffix())
            self.aggregator.send_aggregated_model(model=self.model, suffix=self._suffix())

            if self._is_converged():
                LOGGER.info(f"early stop at iter {self.aggregate_iteration_num}")
                break
            self.aggregate_iteration_num += 1
        else:
            LOGGER.warning(f"reach max iter: {self.aggregate_iteration_num}, not converged")
        self.set_summary(self._summary)
Example #4
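A server-side fit loop with warm-start support. Like the arbiter loop above, it aggregates client weights each round and redistributes the result, wrapping each round in epoch callbacks.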
def server_fit(self, data_inst):
    if not self.component_properties.is_warm_start:
        label_mapping = HomoLabelEncoderArbiter().label_alignment()
        LOGGER.info(f"label mapping: {label_mapping}")
    else:
        self.callback_warm_start_init_iter(self.aggregate_iteration_num + 1)
    while self.aggregate_iteration_num + 1 < self.max_aggregate_iteration_num:
        # update iteration num
        self.aggregate_iteration_num += 1

        self.callback_list.on_epoch_begin(self.aggregate_iteration_num)
        # aggregate client weights into a mean model, then send it back to clients
        self.model = self.aggregator.weighted_mean_model(suffix=_suffix(self))
        self.aggregator.send_aggregated_model(model=self.model,
                                              suffix=_suffix(self))
        self.callback_list.on_epoch_end(self.aggregate_iteration_num)
        if server_is_converged(self):
            LOGGER.info(f"early stop at iter {self.aggregate_iteration_num}")
            break
    else:
        LOGGER.warning(
            f"reach max iter: {self.aggregate_iteration_num}, not converged")
    self.set_summary(self._summary)
Example #5
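A variant of the client fit routine from Example #2 without warm-start handling or callbacks: data conversion, model construction, and the train/aggregate loop in a single method.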
    def fit(self, data_inst: CTableABC, *args):
        self._header = data_inst.schema["header"]
        self._align_labels(data_inst)
        data = self.data_converter.convert(data_inst, batch_size=self.batch_size, encode_label=self.encode_label,
                                           label_mapping=self._label_align_mapping)
        self.nn_model = self.model_builder(input_shape=data.get_shape()[0],
                                           nn_define=self.nn_define,
                                           optimizer=self.optimizer,
                                           loss=self.loss,
                                           metrics=self.metrics)

        epoch_degree = float(len(data)) * self.aggregate_every_n_epoch

        while self.aggregate_iteration_num < self.max_aggregate_iteration_num:
            LOGGER.info(f"start {self.aggregate_iteration_num}_th aggregation")

            # train
            self.nn_model.train(data, aggregate_every_n_epoch=self.aggregate_every_n_epoch)

            # send model for aggregate, then set aggregated model to local
            self.aggregator.send_weighted_model(weighted_model=self.nn_model.get_model_weights(),
                                                weight=epoch_degree * self.aggregate_every_n_epoch,
                                                suffix=self._suffix())
            weights = self.aggregator.get_aggregated_model(suffix=self._suffix())
            self.nn_model.set_model_weights(weights=weights)

            # calc loss and check convergence
            if self._is_converged(data, epoch_degree):
                LOGGER.info(f"early stop at iter {self.aggregate_iteration_num}")
                break

            LOGGER.info(f"role {self.role} finish {self.aggregate_iteration_num}_th aggregation")
            self.aggregate_iteration_num += 1
        else:
            LOGGER.warning(f"reach max iter: {self.aggregate_iteration_num}, not converged")

        self.set_summary(self._summary)