Example #1
 def __init__(self,
              X,
              Y,
              model,
              criterion,
              end_trigger,
              batch_size,
              optim_method=None,
              cores=None,
              bigdl_type="float"):
     if not optim_method:
         optim_methods = {model.name(): SGD()}
     elif isinstance(optim_method, OptimMethod):
         optim_methods = {model.name(): optim_method}
     elif isinstance(optim_method, JavaObject):
         optim_methods = {model.name(): OptimMethod(optim_method, bigdl_type)}
     else:
         optim_methods = optim_method
     if cores is None:
         cores = multiprocessing.cpu_count()
     JavaValue.__init__(self, None, bigdl_type,
                        [JTensor.from_ndarray(X) for X in to_list(X)],
                        JTensor.from_ndarray(Y),
                        model.value,
                        criterion,
                        optim_methods, end_trigger, batch_size, cores)
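This constructor trains a model on in-memory NumPy arrays. A minimal usage sketch, assuming the class above is BigDL's LocalOptimizer (bigdl.optim.optimizer.LocalOptimizer) and that a local SparkContext is acceptable for initializing the BigDL engine:

import numpy as np
from pyspark import SparkContext
from bigdl.util.common import create_spark_conf, init_engine
from bigdl.nn.layer import Sequential, Linear
from bigdl.nn.criterion import MSECriterion
from bigdl.optim.optimizer import LocalOptimizer, MaxEpoch  # assumed class/import path

# Start a local Spark context and the BigDL engine before building models.
sc = SparkContext.getOrCreate(conf=create_spark_conf().setMaster("local[*]"))
init_engine()

X = np.random.rand(128, 4).astype("float32")  # features
Y = np.random.rand(128, 1).astype("float32")  # regression targets

optimizer = LocalOptimizer(X, Y,
                           model=Sequential().add(Linear(4, 1)),
                           criterion=MSECriterion(),
                           end_trigger=MaxEpoch(2),
                           batch_size=32)
trained_model = optimizer.optimize()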
Example #2
    def set_validation(self, batch_size, X_val, Y_val, trigger, val_method=None):
        """
        Configure validation settings.

        :param batch_size: validation batch size
        :param X_val: features of validation dataset
        :param Y_val: label of validation dataset
        :param trigger: validation interval
        :param val_method: the ValidationMethod to use, e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
        """
        if val_method is None:
            val_method = [Top1Accuracy()]
        callBigDlFunc(self.bigdl_type, "setValidation", self.value, batch_size,
                      trigger, [JTensor.from_ndarray(X) for X in to_list(X_val)],
                      JTensor.from_ndarray(Y_val), to_list(val_method))
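A sketch of wiring up validation before calling optimize(), continuing the LocalOptimizer sketch under Example #1; the validation arrays and metrics here are purely illustrative:

import numpy as np
from bigdl.optim.optimizer import EveryEpoch, Top1Accuracy, Loss

X_val = np.random.rand(64, 4).astype("float32")                   # validation features
Y_val = np.random.randint(1, 3, size=(64, 1)).astype("float32")   # validation labels

# `optimizer` is the instance built in the Example #1 sketch.
optimizer.set_validation(batch_size=32,
                         X_val=X_val,
                         Y_val=Y_val,
                         trigger=EveryEpoch(),
                         val_method=[Top1Accuracy(), Loss()])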
Example #3
 def __init__(self,
              learningrate=1e-3,
              learningrate_decay=0.0,
              weightdecay=0.0,
              momentum=0.0,
              dampening=DOUBLEMAX,
              nesterov=False,
              leaningrate_schedule=None,
              learningrates=None,
              weightdecays=None,
              bigdl_type="float"):
     super(SGD, self).__init__(None, bigdl_type, learningrate, learningrate_decay, weightdecay,
                        momentum, dampening, nesterov,
                        leaningrate_schedule if (leaningrate_schedule) else Default(),
                        JTensor.from_ndarray(learningrates), JTensor.from_ndarray(weightdecays))
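A sketch of constructing this SGD optim method with momentum and a polynomial learning-rate decay schedule; note that the schedule argument is spelled leaningrate_schedule in this API:

from bigdl.optim.optimizer import SGD, Poly

optim_method = SGD(learningrate=0.01,
                   momentum=0.9,
                   dampening=0.0,
                   weightdecay=5e-4,
                   nesterov=True,
                   leaningrate_schedule=Poly(0.5, 10000))  # decay over 10000 iterations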
Example #4
 def __init__(self,
              weights=None,
              size_average=True,
              bigdl_type="float"):
     super(MultiLabelSoftMarginCriterion, self).__init__(None, bigdl_type,
                                                         JTensor.from_ndarray(weights),
                                                         size_average)
Example #5
 def __init__(self,
              weights=None,
              size_average=True,
              bigdl_type="float"):
     super(CrossEntropyCriterion, self).__init__(None, bigdl_type,
                                                 JTensor.from_ndarray(
                                                     weights),
                                                 size_average)
Example #6
 def __init__(self,
              learningrate=1e-3,
              learningrate_decay=0.0,
              weightdecay=0.0,
              momentum=0.0,
              dampening=DOUBLEMAX,
              nesterov=False,
              leaningrate_schedule=None,
              learningrates=None,
              weightdecays=None,
              bigdl_type="float"):
     super(SGD, self).__init__(
         None, bigdl_type, learningrate, learningrate_decay, weightdecay,
         momentum, dampening, nesterov, leaningrate_schedule if
         (leaningrate_schedule) else Default(),
         JTensor.from_ndarray(learningrates),
         JTensor.from_ndarray(weightdecays))
Example #7
 def __init__(self,
              weights=None,
              size_average=True,
              logProbAsInput=True,
              bigdl_type="float"):
     super(ClassNLLCriterion, self).__init__(None, bigdl_type,
                                             JTensor.from_ndarray(weights),
                                             size_average, logProbAsInput)
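Because logProbAsInput defaults to True, this criterion expects log-probabilities as input and 1-based class labels as targets. A small sketch evaluating it directly on NumPy arrays (the criterion's forward method accepts ndarrays), assuming the BigDL engine has been initialized:

import numpy as np
from bigdl.nn.criterion import ClassNLLCriterion

criterion = ClassNLLCriterion()
log_probs = np.log(np.array([[0.7, 0.2, 0.1],
                             [0.1, 0.8, 0.1]], dtype="float32"))
labels = np.array([1, 2], dtype="float32")   # 1-based class indices
loss = criterion.forward(log_probs, labels)  # average negative log-likelihood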
Example #8
 def set_running_std(self, running_std):
     """
     Set the running variance of the BatchNormalization layer.
     :param running_std: a Numpy array.
     """
     callBigDlFunc(self.bigdl_type, "setRunningStd", self.value,
                   JTensor.from_ndarray(running_std))
     return self
Example #9
 def set_running_mean(self, running_mean):
     """
     Set the running mean of the BatchNormalization layer.
     :param running_mean: a Numpy array.
     """
     callBigDlFunc(self.bigdl_type, "setRunningMean", self.value,
                   JTensor.from_ndarray(running_mean))
     return self
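set_running_mean and set_running_std (above) overwrite the statistics that BatchNormalization uses at inference time, for instance when porting weights from another framework. A minimal sketch with illustrative values, assuming an initialized BigDL engine:

import numpy as np
from bigdl.nn.layer import BatchNormalization

bn = BatchNormalization(n_output=4)
bn.set_running_mean(np.zeros(4, dtype="float32"))  # per-channel running mean
bn.set_running_std(np.ones(4, dtype="float32"))    # per-channel running variance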
Example #10
 def __init__(self,
              X,
              y,
              model,
              criterion,
              end_trigger,
              batch_size,
              optim_method=None,
              cores=None,
              bigdl_type="float"):
     if cores is None:
         cores = multiprocessing.cpu_count()
     JavaValue.__init__(self, None, bigdl_type,
                        [JTensor.from_ndarray(X) for X in to_list(X)],
                        JTensor.from_ndarray(y), model.value, criterion,
                        optim_method if optim_method else SGD(),
                        end_trigger, batch_size, cores)
Example #11
 def set_running_mean(self, running_mean):
     """
     Set the running mean of the BatchNormalization layer.
     :param running_mean: a Numpy array.
     """
     callBigDlFunc(self.bigdl_type, "setRunningMean",
                   self.value, JTensor.from_ndarray(running_mean))
     return self
Example #12
 def set_running_std(self, running_std):
     """
     Set the running variance of the BatchNormalization layer.
     :param running_std: a Numpy array.
     """
     callBigDlFunc(self.bigdl_type, "setRunningStd",
                   self.value, JTensor.from_ndarray(running_std))
     return self
Example #13
 def __init__(self,
              p=1,
              weights=None,
              margin=1.0,
              size_average=True,
              bigdl_type="float"):
     super(MultiMarginCriterion,
           self).__init__(None, bigdl_type, p,
                          JTensor.from_ndarray(weights), margin,
                          size_average)
Example #14
 def __init__(self,
              p=1,
              weights=None,
              margin=1.0,
              size_average=True,
              bigdl_type="float"):
     super(MultiMarginCriterion, self).__init__(None, bigdl_type,
                                                p,
                                                JTensor.from_ndarray(weights),
                                                margin,
                                                size_average)
Example #15
    def set_validation(self,
                       batch_size,
                       X_val,
                       Y_val,
                       trigger,
                       val_method=None):
        """
        Configure validation settings.

        :param batch_size: validation batch size
        :param X_val: features of validation dataset
        :param Y_val: label of validation dataset
        :param trigger: validation interval
        :param val_method: the ValidationMethod to use, e.g. "Top1Accuracy", "Top5Accuracy", "Loss"
        """
        if val_method is None:
            val_method = [Top1Accuracy()]
        callBigDlFunc(self.bigdl_type, "setValidation", self.value, batch_size,
                      trigger,
                      [JTensor.from_ndarray(X) for X in to_list(X_val)],
                      JTensor.from_ndarray(Y_val), to_list(val_method))
Example #16
 def __init__(self,
              log_prob_as_input=False,
              zero_based_label=True,
              weights=None,
              size_average=True,
              padding_value=-1,
              bigdl_type="float"):
     super(SparseCategoricalCrossEntropy,
           self).__init__(None, bigdl_type,
                          log_prob_as_input, zero_based_label,
                          JTensor.from_ndarray(weights), size_average,
                          padding_value)
Example #17
 def __init__(self, input_dim, output_dim, init="uniform", weights=None, trainable=True,
              input_length=None, W_regularizer=None, input_shape=None, **kwargs):
     if input_length:
         input_shape = (input_length, )
     super(Embedding, self).__init__(None,
                                     input_dim,
                                     output_dim,
                                     init,
                                     JTensor.from_ndarray(weights),
                                     trainable,
                                     W_regularizer,
                                     list(input_shape) if input_shape else None,
                                     **kwargs)
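A sketch of using this Keras-style Embedding layer with pre-trained word vectors, assuming it is the Embedding from zoo.pipeline.api.keras.layers; the weights array must have shape (input_dim, output_dim):

import numpy as np
from zoo.pipeline.api.keras.layers import Embedding   # assumed import path
from zoo.pipeline.api.keras.models import Sequential

vocab_size, embed_dim, seq_len = 10000, 50, 20
pretrained = np.random.rand(vocab_size, embed_dim).astype("float32")  # e.g. GloVe vectors

model = Sequential()
model.add(Embedding(vocab_size, embed_dim,
                    weights=pretrained,
                    trainable=False,       # freeze the pre-trained vectors
                    input_length=seq_len))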
Example #18
def row_to_sample(row, column_info, model_type="wide_n_deep"):
    wide_tensor = get_wide_tensor(row, column_info)
    deep_tensor = JTensor.from_ndarray(get_deep_tensor(row, column_info))
    label = row[column_info.label]
    model_type = model_type.lower()
    if model_type == "wide_n_deep":
        feature = [wide_tensor, deep_tensor]
    elif model_type == "wide":
        feature = wide_tensor
    elif model_type == "deep":
        feature = deep_tensor
    else:
        raise TypeError("Unsupported model_type: %s" % model_type)
    return Sample.from_jtensor(feature, label)
Example #19
def save_variable_bigdl(tensors, target_path, bigdl_type="float"):
    """
    Save a variable dictionary to a Java object file, so it can be read by BigDL

    :param tensors: tensor dictionary
    :param target_path: where the Java object file is stored
    :param bigdl_type: model variable numeric type
    :return: nothing
    """
    jtensors = {}
    for tn in tensors.keys():
        jtensors[tn] = JTensor.from_ndarray(tensors[tn])

    callBigDlFunc(bigdl_type, "saveTensorDictionary", jtensors, target_path)
Example #20
def save_variable_bigdl(tensors, target_path, bigdl_type="float"):
    """
    Save a variable dictionary to a Java object file, so it can be read by BigDL

    :param tensors: tensor dictionary
    :param target_path: where the Java object file is stored
    :param bigdl_type: model variable numeric type
    :return: nothing
    """
    import numpy as np
    jtensors = {}
    for tn in tensors.keys():
        if not isinstance(tensors[tn], np.ndarray):
            value = np.array(tensors[tn])
        else:
            value = tensors[tn]
        jtensors[tn] = JTensor.from_ndarray(value)

    callBigDlFunc(bigdl_type, "saveTensorDictionary", jtensors, target_path)
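A sketch of calling this helper to dump a dictionary of NumPy arrays as a Java object file that BigDL can load later, assuming the BigDL engine is initialized; the tensor names and target path are illustrative:

import numpy as np

variables = {
    "conv1/weights": np.random.rand(3, 3, 1, 16).astype("float32"),
    "conv1/bias": np.zeros(16, dtype="float32"),
}
save_variable_bigdl(variables, "/tmp/tf_variables.bigdl")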
Example #21
 def from_pytorch(model):
     """
      Create a TorchModel directly from a PyTorch model, e.g. a model from torchvision.models.
      :param model: a PyTorch model, or a function that creates a PyTorch model
     """
     weights = []
     import types
      if isinstance(model, types.FunctionType) or isinstance(model, type):
         for param in trainable_param(model()):
             weights.append(param.view(-1))
     else:
         for param in trainable_param(model):
             weights.append(param.view(-1))
     flatten_weight = torch.nn.utils.parameters_to_vector(
         weights).data.numpy()
     bys = io.BytesIO()
     torch.save(model, bys, pickle_module=zoo_pickle_module)
     weights = JTensor.from_ndarray(flatten_weight)
     jvalue = callZooFunc("float", "createTorchModel", bys.getvalue(),
                          weights)
     net = TorchModel(jvalue, bys.getvalue())
     return net
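A sketch of wrapping a torchvision model with this factory method, assuming the Analytics Zoo import path zoo.pipeline.api.torch.TorchModel:

import torchvision.models as models
from zoo.pipeline.api.torch import TorchModel  # assumed import path

# Serialize a pretrained ResNet-18 and register it as a TorchModel for the zoo pipeline.
resnet = models.resnet18(pretrained=True)
zoo_model = TorchModel.from_pytorch(resnet)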
Example #22
def row_to_sample(row, column_info, model_type="wide_n_deep"):
    """
    Convert a row to a Sample given the column feature information of a WideAndDeep model.

    :param row: Row of userId, itemId, features and label
    :param column_info: ColumnFeatureInfo specifying the different feature columns
    :param model_type: one of "wide_n_deep", "wide" or "deep"
    :return: TensorSample as input for the WideAndDeep model
    """

    wide_tensor = get_wide_tensor(row, column_info)
    deep_tensor = get_deep_tensors(row, column_info)
    deep_tensors = [JTensor.from_ndarray(ele) for ele in deep_tensor]
    label = row[column_info.label]
    model_type = model_type.lower()
    if model_type == "wide_n_deep":
        feature = [wide_tensor] + deep_tensors
    elif model_type == "wide":
        feature = wide_tensor
    elif model_type == "deep":
        feature = deep_tensors
    else:
        raise TypeError("Unsupported model_type: %s" % model_type)
    return Sample.from_jtensor(feature, label)
Example #23
 def __init__(self, weights=None, size_average=True, bigdl_type="float"):
     super(MultiLabelSoftMarginCriterion,
           self).__init__(None, bigdl_type, JTensor.from_ndarray(weights),
                          size_average)
Example #24
 def __init__(self, weights=None, size_average=True, bigdl_type="float"):
     super(CrossEntropyCriterion,
           self).__init__(None, bigdl_type, JTensor.from_ndarray(weights),
                          size_average)
Example #25
 def __init__(self, module_bytes, weights, bigdl_type="float"):
     weights = JTensor.from_ndarray(weights)
     self.module_bytes = module_bytes
     self.value = callZooFunc(bigdl_type, self.jvm_class_constructor(),
                              module_bytes, weights)
     self.bigdl_type = bigdl_type
Example #26
 def __init__(self, value, bigdl_type="float"):
     super(Constant, self).__init__(None, bigdl_type,
                                    JTensor.from_ndarray(value))
Example #27
 def __init__(self, data, name=None, bigdl_type="float"):
     self.data = data
     super(Constant, self).__init__(None, bigdl_type, JTensor.from_ndarray(data), name)