def main(readcsv=read_csv, method='defaultDense'):
    """Run the cross-entropy-loss / LBFGS optimization example.

    Reads a training CSV, sets up a daal4py cross-entropy loss objective
    with an intercept term, minimizes it with the LBFGS solver, and
    returns the solver result (minimum point and iteration count).
    """
    nFeatures = 6
    nClasses = 5
    nIterations = 1000
    stepLength = 1.0e-4
    infile = "./data/batch/logreg_train.csv"

    # Features and labels live in the same file: first nFeatures columns
    # are the observations, the next column holds the class labels.
    data = readcsv(infile, range(nFeatures))
    dep_data = readcsv(infile, range(nFeatures, nFeatures + 1))
    nVectors = data.shape[0]

    # Objective function: multi-class cross-entropy loss with intercept.
    func = d4p.optimization_solver_cross_entropy_loss(
        nClasses, nVectors, interceptFlag=True)
    func.setup(data, dep_data, None)

    # Solver: LBFGS with a constant step-length sequence.
    stepLengthSequence = np.array([[stepLength]], dtype=np.double)
    alg = d4p.optimization_solver_lbfgs(
        func,
        stepLengthSequence=stepLengthSequence,
        nIterations=nIterations)

    # One coefficient per feature plus an intercept, for each class;
    # start the search from a small constant vector.
    nParameters = nClasses * (nFeatures + 1)
    initialPoint = np.full((nParameters, 1), 0.001, dtype=np.double)
    res = alg.compute(initialPoint)

    # Result exposes the minimizer and the iteration count actually used.
    assert res.minimum.shape == (nParameters, 1)
    assert res.nIterations[0][0] <= nIterations
    return res
def _daal4py_cross_entropy_loss_extra_args(nClasses, beta, X, y, l1=0.0,
                                           l2=0.0, fit_intercept=True,
                                           value=True, gradient=True,
                                           hessian=False):
    """Create and set up a daal4py cross-entropy loss objective function.

    Parameters
    ----------
    nClasses : int
        Number of target classes.
    beta : array-like
        Current coefficient vector (reshaped to 2-D).
    X, y : array-like
        Training features and labels (reshaped to 2-D).
    l1, l2 : float
        L1/L2 regularization strengths; divided by the sample count
        because daal4py expects per-sample penalty values.
    fit_intercept : bool
        Whether the objective includes an intercept term.
    value, gradient, hessian : bool
        Which result components the objective should compute.

    Returns
    -------
    tuple
        ``(objective_instance, X, y, n)`` where ``n`` is the number of
        samples in ``X``.
    """
    X = make2d(X)
    y = make2d(y)
    beta = make2d(beta)
    # Number of samples / terms in the loss sum.  (The original code also
    # unpacked X.shape into unused nSamples/nFeatures locals; removed.)
    n = X.shape[0]
    results_to_compute = _resultsToCompute_string(value=value,
                                                  gradient=gradient,
                                                  hessian=hessian)
    objective_function_algorithm_instance = \
        daal4py.optimization_solver_cross_entropy_loss(
            nClasses=nClasses,
            numberOfTerms=n,
            fptype=getFPType(X),
            method='defaultDense',
            interceptFlag=fit_intercept,
            penaltyL1=l1 / n,
            penaltyL2=l2 / n,
            resultsToCompute=results_to_compute)
    # Bind the data and current coefficients to the objective up front so
    # later compute() calls reuse them.
    objective_function_algorithm_instance.setup(X, y, beta)
    return (objective_function_algorithm_instance, X, y, n)