def __init__(self, X, Y, model, criterion, end_trigger, batch_size,
             optim_method=None, cores=None, bigdl_type="float"):
    """
    Create an optimizer training on in-memory numpy data.

    :param X: features; a numpy ndarray or a list of ndarrays
    :param Y: labels as a numpy ndarray
    :param model: the model to train
    :param criterion: the loss function
    :param end_trigger: when to stop training
    :param batch_size: mini-batch size
    :param optim_method: optimization method; may be a single OptimMethod,
        a py4j JavaObject wrapping one, or a dict mapping submodule
        name -> OptimMethod. Defaults to SGD() when falsy.
    :param cores: number of CPU cores to use; defaults to all available
    :param bigdl_type: numeric type ("float" or "double")
    """
    # Normalize optim_method into a {module_name: OptimMethod} dict.
    if not optim_method:
        optim_methods = {model.name(): SGD()}
    elif isinstance(optim_method, OptimMethod):
        optim_methods = {model.name(): optim_method}
    elif isinstance(optim_method, JavaObject):
        optim_methods = {model.name(): OptimMethod(optim_method, bigdl_type)}
    else:
        optim_methods = optim_method
    if cores is None:
        cores = multiprocessing.cpu_count()
    # Use a distinct loop variable so the parameter X is not shadowed
    # inside the comprehension (the original reused the name X).
    JavaValue.__init__(self, None, bigdl_type,
                       [JTensor.from_ndarray(x) for x in to_list(X)],
                       JTensor.from_ndarray(Y),
                       model.value, criterion, optim_methods,
                       end_trigger, batch_size, cores)
def set_validation(self, batch_size, X_val, Y_val, trigger, val_method=None):
    """
    Configure validation settings.

    :param batch_size: validation batch size
    :param X_val: features of the validation dataset
    :param Y_val: labels of the validation dataset
    :param trigger: validation interval
    :param val_method: the ValidationMethod to use, e.g. "Top1Accuracy",
           "Top5Accuracy", "Loss"; defaults to [Top1Accuracy()]
    """
    if val_method is None:
        val_method = [Top1Accuracy()]
    features = [JTensor.from_ndarray(f) for f in to_list(X_val)]
    callBigDlFunc(self.bigdl_type, "setValidation", self.value,
                  batch_size, trigger, features,
                  JTensor.from_ndarray(Y_val), to_list(val_method))
def __init__(self, learningrate=1e-3, learningrate_decay=0.0, weightdecay=0.0,
             momentum=0.0, dampening=DOUBLEMAX, nesterov=False,
             leaningrate_schedule=None, learningrates=None, weightdecays=None,
             bigdl_type="float"):
    """
    Stochastic gradient descent optim method.

    :param learningrate: learning rate
    :param learningrate_decay: learning rate decay
    :param weightdecay: weight decay
    :param momentum: momentum factor
    :param dampening: dampening for momentum
    :param nesterov: enables Nesterov-style momentum
    :param leaningrate_schedule: learning-rate schedule; Default() is used
        when falsy. (NOTE(review): the misspelled name is kept for
        backward compatibility with existing callers.)
    :param learningrates: optional per-parameter learning rates (ndarray)
    :param weightdecays: optional per-parameter weight decays (ndarray)
    :param bigdl_type: numeric type ("float" or "double")
    """
    schedule = leaningrate_schedule or Default()
    super(SGD, self).__init__(None, bigdl_type, learningrate,
                              learningrate_decay, weightdecay, momentum,
                              dampening, nesterov, schedule,
                              JTensor.from_ndarray(learningrates),
                              JTensor.from_ndarray(weightdecays))
def __init__(self, weights=None, size_average=True, bigdl_type="float"):
    """
    MultiLabelSoftMarginCriterion.

    :param weights: optional weights as a numpy ndarray, wrapped into a
        JTensor before being passed down
    :param size_average: whether to average the loss over observations
    :param bigdl_type: numeric type ("float" or "double")
    """
    jweights = JTensor.from_ndarray(weights)
    super(MultiLabelSoftMarginCriterion, self).__init__(
        None, bigdl_type, jweights, size_average)
def __init__(self, weights=None, size_average=True, bigdl_type="float"):
    """
    CrossEntropyCriterion.

    :param weights: optional weights as a numpy ndarray; converted to a
        JTensor before being handed to the backend
    :param size_average: whether to average the loss over observations
    :param bigdl_type: numeric type ("float" or "double")
    """
    jweights = JTensor.from_ndarray(weights)
    super(CrossEntropyCriterion, self).__init__(None, bigdl_type,
                                                jweights, size_average)
def __init__(self, learningrate=1e-3, learningrate_decay=0.0, weightdecay=0.0,
             momentum=0.0, dampening=DOUBLEMAX, nesterov=False,
             leaningrate_schedule=None, learningrates=None, weightdecays=None,
             bigdl_type="float"):
    """
    Stochastic gradient descent optim method.

    :param learningrate: learning rate
    :param learningrate_decay: learning rate decay
    :param weightdecay: weight decay
    :param momentum: momentum factor
    :param dampening: dampening for momentum
    :param nesterov: enables Nesterov-style momentum
    :param leaningrate_schedule: learning-rate schedule; Default() is used
        when falsy (misspelled name kept for backward compatibility)
    :param learningrates: optional per-parameter learning rates (ndarray)
    :param weightdecays: optional per-parameter weight decays (ndarray)
    :param bigdl_type: numeric type ("float" or "double")
    """
    lr_schedule = leaningrate_schedule if leaningrate_schedule else Default()
    lrs = JTensor.from_ndarray(learningrates)
    wds = JTensor.from_ndarray(weightdecays)
    super(SGD, self).__init__(None, bigdl_type, learningrate,
                              learningrate_decay, weightdecay, momentum,
                              dampening, nesterov, lr_schedule, lrs, wds)
def __init__(self, weights=None, size_average=True, logProbAsInput=True,
             bigdl_type="float"):
    """
    ClassNLLCriterion.

    :param weights: optional weights as a numpy ndarray, wrapped into a
        JTensor before being passed down
    :param size_average: whether to average the loss over observations
    :param logProbAsInput: whether the input is treated as log-probabilities
    :param bigdl_type: numeric type ("float" or "double")
    """
    jweights = JTensor.from_ndarray(weights)
    super(ClassNLLCriterion, self).__init__(None, bigdl_type, jweights,
                                            size_average, logProbAsInput)
def set_running_std(self, running_std):
    """
    Set the running variance of the BatchNormalization layer.

    :param running_std: a numpy array holding the new running variance
    :return: self, so that calls can be chained
    """
    jstd = JTensor.from_ndarray(running_std)
    callBigDlFunc(self.bigdl_type, "setRunningStd", self.value, jstd)
    return self
def set_running_mean(self, running_mean):
    """
    Set the running mean of the BatchNormalization layer.

    :param running_mean: a numpy array holding the new running mean
    :return: self, so that calls can be chained
    """
    jmean = JTensor.from_ndarray(running_mean)
    callBigDlFunc(self.bigdl_type, "setRunningMean", self.value, jmean)
    return self
def __init__(self, X, y, model, criterion, end_trigger, batch_size,
             optim_method=None, cores=None, bigdl_type="float"):
    """
    Create an optimizer training on in-memory numpy data.

    :param X: features; a numpy ndarray or a list of ndarrays
    :param y: labels as a numpy ndarray
    :param model: the model to train
    :param criterion: the loss function
    :param end_trigger: when to stop training
    :param batch_size: mini-batch size
    :param optim_method: optimization method; defaults to SGD() when falsy
    :param cores: number of CPU cores to use; defaults to all available
    :param bigdl_type: numeric type ("float" or "double")
    """
    if cores is None:
        cores = multiprocessing.cpu_count()
    features = [JTensor.from_ndarray(x) for x in to_list(X)]
    JavaValue.__init__(self, None, bigdl_type, features,
                       JTensor.from_ndarray(y), model.value, criterion,
                       optim_method or SGD(), end_trigger, batch_size, cores)
def __init__(self, p=1, weights=None, margin=1.0, size_average=True,
             bigdl_type="float"):
    """
    MultiMarginCriterion.

    :param p: norm degree, defaults to 1
    :param weights: optional weights as a numpy ndarray, wrapped into a
        JTensor before being passed down
    :param margin: margin value, defaults to 1.0
    :param size_average: whether to average the loss over observations
    :param bigdl_type: numeric type ("float" or "double")
    """
    jweights = JTensor.from_ndarray(weights)
    super(MultiMarginCriterion, self).__init__(None, bigdl_type, p,
                                               jweights, margin, size_average)
def __init__(self, log_prob_as_input=False, zero_based_label=True,
             weights=None, size_average=True, padding_value=-1,
             bigdl_type="float"):
    """
    SparseCategoricalCrossEntropy.

    :param log_prob_as_input: whether the input is treated as
        log-probabilities, defaults to False
    :param zero_based_label: whether labels are zero-based, defaults to True
    :param weights: optional weights as a numpy ndarray, wrapped into a
        JTensor before being passed down
    :param size_average: whether to average the loss over observations
    :param padding_value: padding label value, defaults to -1
    :param bigdl_type: numeric type ("float" or "double")
    """
    jweights = JTensor.from_ndarray(weights)
    super(SparseCategoricalCrossEntropy, self).__init__(
        None, bigdl_type, log_prob_as_input, zero_based_label,
        jweights, size_average, padding_value)
def __init__(self, input_dim, output_dim, init="uniform", weights=None,
             trainable=True, input_length=None, W_regularizer=None,
             input_shape=None, **kwargs):
    """
    Embedding layer.

    :param input_dim: size of the vocabulary
    :param output_dim: dimension of the embedding vectors
    :param init: initialization scheme name, defaults to "uniform"
    :param weights: optional initial weights as a numpy ndarray
    :param trainable: whether the weights are updated during training
    :param input_length: if given, overrides input_shape with (input_length,)
    :param W_regularizer: optional regularizer for the weights
    :param input_shape: shape tuple of the input (excluding batch dim)
    """
    if input_length:
        input_shape = (input_length,)
    shape = list(input_shape) if input_shape else None
    super(Embedding, self).__init__(None, input_dim, output_dim, init,
                                    JTensor.from_ndarray(weights),
                                    trainable, W_regularizer, shape, **kwargs)
def row_to_sample(row, column_info, model_type="wide_n_deep"):
    """
    Convert a Row into a Sample given the column feature information of a
    WideAndDeep model.

    :param row: Row holding the features and label
    :param column_info: ColumnFeatureInfo describing the features
    :param model_type: one of "wide_n_deep", "wide" or "deep"
        (case-insensitive)
    :return: a Sample built from JTensors
    :raise TypeError: if model_type is not a supported value
    """
    wide_tensor = get_wide_tensor(row, column_info)
    deep_tensor = JTensor.from_ndarray(get_deep_tensor(row, column_info))
    label = row[column_info.label]
    model_type = model_type.lower()
    if model_type == "wide":
        feature = wide_tensor
    elif model_type == "deep":
        feature = deep_tensor
    elif model_type == "wide_n_deep":
        feature = [wide_tensor, deep_tensor]
    else:
        raise TypeError("Unsupported model_type: %s" % model_type)
    return Sample.from_jtensor(feature, label)
def save_variable_bigdl(tensors, target_path, bigdl_type="float"):
    """
    Save a variable dictionary to a Java object file, so it can be read by
    BigDL.

    :param tensors: tensor dictionary mapping name -> array; array-like
        values (lists, scalars) are coerced to numpy ndarrays first
    :param target_path: where the Java object file is stored
    :param bigdl_type: model variable numeric type
    :return: nothing
    """
    import numpy as np
    jtensors = {}
    for name, value in tensors.items():
        # Coerce array-likes so JTensor.from_ndarray always receives an
        # ndarray; np.asarray is a no-op for values that already are one.
        jtensors[name] = JTensor.from_ndarray(np.asarray(value))
    callBigDlFunc(bigdl_type, "saveTensorDictionary", jtensors, target_path)
def save_variable_bigdl(tensors, target_path, bigdl_type="float"):
    """
    Save a variable dictionary to a Java object file, so it can be read by
    BigDL.

    :param tensors: tensor dictionary mapping name -> array; non-ndarray
        values are converted with numpy.array first
    :param target_path: where the Java object file is stored
    :param bigdl_type: model variable numeric type
    :return: nothing
    """
    import numpy as np
    jtensors = {}
    for name in tensors:
        value = tensors[name]
        # Coerce array-likes (lists, scalars) into ndarrays before wrapping.
        if not isinstance(value, np.ndarray):
            value = np.array(value)
        jtensors[name] = JTensor.from_ndarray(value)
    callBigDlFunc(bigdl_type, "saveTensorDictionary", jtensors, target_path)
def from_pytorch(model):
    """
    Create a TorchModel directly from a PyTorch model, e.g. a model in
    torchvision.models.

    :param model: a PyTorch model, or a callable factory (function or
        class) that returns one when invoked with no arguments
    """
    import types
    # BUG FIX: types.ClassType only existed in Python 2 (old-style
    # classes); on Python 3 accessing it raises AttributeError, which the
    # original code hit whenever a real model instance was passed. The
    # Python 3 check for "is a class" is isinstance(_, type).
    if isinstance(model, (types.FunctionType, type)):
        params = trainable_param(model())
    else:
        params = trainable_param(model)
    weights = [param.view(-1) for param in params]
    flatten_weight = torch.nn.utils.parameters_to_vector(
        weights).data.numpy()
    bys = io.BytesIO()
    torch.save(model, bys, pickle_module=zoo_pickle_module)
    jweights = JTensor.from_ndarray(flatten_weight)
    jvalue = callZooFunc("float", "createTorchModel",
                         bys.getvalue(), jweights)
    net = TorchModel(jvalue, bys.getvalue())
    return net
def row_to_sample(row, column_info, model_type="wide_n_deep"):
    """
    Convert a row to a sample given column feature information of a
    WideAndDeep model.

    :param row: Row of userId, itemId, features and label
    :param column_info: ColumnFeatureInfo specifying information of
        different features
    :return: TensorSample as input for the WideAndDeep model
    """
    wide_tensor = get_wide_tensor(row, column_info)
    deep_tensors = [JTensor.from_ndarray(t)
                    for t in get_deep_tensors(row, column_info)]
    label = row[column_info.label]
    kind = model_type.lower()
    if kind == "wide":
        feature = wide_tensor
    elif kind == "deep":
        feature = deep_tensors
    elif kind == "wide_n_deep":
        feature = [wide_tensor] + deep_tensors
    else:
        raise TypeError("Unsupported model_type: %s" % kind)
    return Sample.from_jtensor(feature, label)
def __init__(self, weights=None, size_average=True, bigdl_type="float"):
    """
    CrossEntropyCriterion.

    :param weights: optional weights (numpy ndarray), wrapped as a JTensor
    :param size_average: whether to average the loss over observations
    :param bigdl_type: numeric type ("float" or "double")
    """
    super(CrossEntropyCriterion, self).__init__(
        None, bigdl_type, JTensor.from_ndarray(weights), size_average)
def __init__(self, module_bytes, weights, bigdl_type="float"):
    """
    Wrap an already-serialized module together with its flattened weights.

    :param module_bytes: the serialized module as bytes
    :param weights: flattened weights as a numpy ndarray
    :param bigdl_type: numeric type ("float" or "double")
    """
    jweights = JTensor.from_ndarray(weights)
    self.module_bytes = module_bytes
    # Construct the JVM-side object via the generated constructor name.
    self.value = callZooFunc(bigdl_type, self.jvm_class_constructor(),
                             module_bytes, jweights)
    self.bigdl_type = bigdl_type
def __init__(self, value, bigdl_type="float"):
    """
    Constant initializer.

    :param value: a numpy ndarray holding the constant data, wrapped as
        a JTensor
    :param bigdl_type: numeric type ("float" or "double")
    """
    jvalue = JTensor.from_ndarray(value)
    super(Constant, self).__init__(None, bigdl_type, jvalue)
def __init__(self, data, name=None, bigdl_type="float"):
    """
    Constant node holding fixed tensor data.

    :param data: a numpy ndarray holding the constant data; kept on the
        instance and also wrapped as a JTensor for the backend
    :param name: optional name for this node
    :param bigdl_type: numeric type ("float" or "double")
    """
    # Keep the raw ndarray around for later inspection.
    self.data = data
    jdata = JTensor.from_ndarray(data)
    super(Constant, self).__init__(None, bigdl_type, jdata, name)