from suanpan.docker import DockerComponent as dc
from suanpan.docker.arguments import Int, String, Table, Bool, Float, ListOfString
from arguments import SklearnModel
from catboost import CatBoostRegressor


@dc.input(
    Table(
        key="inputData",
        table="inputTable",
        partition="inputPartition",
        required=True,
    )
)
@dc.column(ListOfString(key="featureColumns", default=[]))
@dc.column(String(key="labelColumn", default="MEDV"))
@dc.param(
    Int(
        key="iterations",
        default=1000,
        help="The maximum number of trees that can be built when solving machine learning problems.",
    )
)
@dc.param(Float(key="learningRate", default=0.03, help="The learning rate."))
@dc.param(Int(key="depth", default=6, help="Depth of the tree."))
@dc.param(
    Float(
        key="l2LeafReg",
        default=3.0,
        help="Coefficient at the L2 regularization term of the cost function.",
    )
)
@dc.param(
    Float(
        key="rsm",
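
# Hedged sketch, not the component's actual body: how the parameters above would
# typically be passed to catboost.CatBoostRegressor once features and label have
# been extracted. The synthetic data and variable names are illustrative assumptions.
import numpy as np
from catboost import CatBoostRegressor

features = np.random.rand(100, 4)   # stands in for df[featureColumns].values
label = np.random.rand(100)         # stands in for df[labelColumn].values

model = CatBoostRegressor(
    iterations=1000,       # args.iterations
    learning_rate=0.03,    # args.learningRate
    depth=6,               # args.depth
    l2_leaf_reg=3.0,       # args.l2LeafReg
    verbose=False,
)
model.fit(features, label)
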
String( key="missing", default="none", help="Available options are ‘none’, ‘drop’, and ‘raise’.", ) ) @dc.param( String( key="method", default="lbfgs", help="‘newton’, ‘bfgs’, ‘lbfgs’, ‘powell’, ‘cg’, ‘ncg’, ‘basinhopping’," " ‘minimize’", ) ) @dc.param( Int(key="maxiter", default=35, help="The maximum number of iterations to perform.") ) @dc.param(Int(key="disp", default=1, help="Set to True to print convergence messages.")) @dc.output(SklearnModel(key="outputModel")) def SPLogit(context): # 从 Context 中获取相关数据 args = context.args # 查看上一节点发送的 args.inputData 数据 df = args.inputData featureColumns = args.featureColumns labelColumn = args.labelColumn features = df[featureColumns].values label = df[labelColumn].values
String( key="missing", default="none", help="Available options are ‘none’, ‘drop’, and ‘raise’.", )) @dc.param( String( key="trend", default="c", help= "Whether to include a constant or not. ‘c’ includes constant, ‘nc’ no constant.", )) @dc.param(String(key="method", default="cmle", help="‘cmle’, ‘mle’")) @dc.param( Int(key="maxiter", default=35, help="The maximum number of function evaluations.")) @dc.param( Int(key="disp", default=1, help="If True, convergence information is output.")) @dc.param( Int( key="maxlag", default=None, help="If ic is None, then maxlag is the lag length used in fit.", )) @dc.output(SklearnModel(key="outputModel")) def SPAR(context): # 从 Context 中获取相关数据 args = context.args
@dc.param( String( key="trend", default="c", help="Whether to include a constant or not. ‘c’ includes constant, ‘nc’ no constant.", ) ) @dc.param( String( key="method", default="css-mle", help="This is the loglikelihood to maximize.‘css-mle’,’mle’,’css’", ) ) @dc.param( Int(key="maxiter", default=500, help="The maximum number of function evaluations.") ) @dc.param( Int(key="disp", default=5, help="If True, convergence information is printed.") ) @dc.output(SklearnModel(key="outputModel")) def SPARMA(context): # 从 Context 中获取相关数据 args = context.args # 查看上一节点发送的 args.inputData 数据 inputdata = args.inputData inputdata = ( pd.DataFrame(inputdata[args.labelColumn].values, index=inputdata.index) if args.timestampIndex else pd.DataFrame( inputdata[args.labelColumn].values,
from suanpan.docker import DockerComponent as dc
from suanpan.docker.arguments import Int, String, Bool, Float, ListOfString, Table
import lightgbm as lgb
from arguments import SklearnModel


@dc.input(
    Table(
        key="inputData",
        table="inputTable",
        partition="inputPartition",
        required=True,
    )
)
@dc.column(ListOfString(key="featureColumns", default=["f1", "f2", "f3", "f4"]))
@dc.column(String(key="labelColumn", default="label"))
@dc.param(
    Int(key="maxDepth", default=-1, help="Maximum tree depth for base learners.")
)
@dc.param(
    String(
        key="boostingType",
        default="gbdt",
        help="Specify which booster to use: 'gbdt', 'goss', 'rf' or 'dart'.",
    )
)
@dc.param(
    Int(key="numLeaves", default=31, help="Maximum tree leaves for base learners.")
)
@dc.param(Float(key="learningRate", default=0.1, help="Boosting learning rate."))
@dc.param(
    Int(
        key="nEstimators",
        default=100,
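
# Hedged sketch, not the component's actual body: how the parameters above map
# onto lightgbm's scikit-learn interface. Synthetic data for illustration.
import numpy as np
import lightgbm as lgb

features = np.random.rand(200, 4)
label = np.random.rand(200)

model = lgb.LGBMRegressor(
    boosting_type="gbdt",   # args.boostingType
    max_depth=-1,           # args.maxDepth
    num_leaves=31,          # args.numLeaves
    learning_rate=0.1,      # args.learningRate
    n_estimators=100,       # args.nEstimators
)
model.fit(features, label)
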
@dc.param(
    ListOfFloat(
        key="ar",
        default=[0.75, -0.25],
        help="Coefficients for the autoregressive lag polynomial, including zero lag.",
    )
)
@dc.param(
    ListOfFloat(
        key="ma",
        default=[0.65, 0.35],
        help="Coefficients for the moving-average lag polynomial, including zero lag.",
    )
)
@dc.param(
    Int(key="nsample", default=250, help="Length of the simulated time series.")
)
@dc.param(Float(key="sigma", default=1.0, help="Standard deviation of the noise."))
@dc.param(Int(key="randomSeed", default=12345, help="Random seed."))
@dc.param(
    Bool(key="dateCol", default=True, help="Whether to include a date column in the output dataset.")
)
@dc.param(
    String(
        key="startDate",
        default="19800131",
        help="The first abbreviated date, for instance, '1965q1' or '1965m1'.",
    )
)
@dc.param(String(key="freq", default="M", help="Pandas DateOffset frequency string."))
@dc.output(Csv(key="outputData"))
def SPARMASample(context):
    # Fetch the relevant data from the context
    args = context.args
    # Data sent by the upstream node via args.inputData
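
# Hedged sketch, not the component's actual body: generating an ARMA sample with
# statsmodels from the parameters above. Note that arma_generate_sample expects
# the full lag polynomials (zero lag prepended, AR signs flipped); whether the
# component performs this conversion internally is an assumption. The date index
# shown for dateCol / startDate / freq is likewise illustrative.
import numpy as np
import pandas as pd
from statsmodels.tsa.arima_process import arma_generate_sample

np.random.seed(12345)                       # args.randomSeed
ar = np.r_[1, -np.array([0.75, -0.25])]     # args.ar with zero lag prepended
ma = np.r_[1, np.array([0.65, 0.35])]       # args.ma with zero lag prepended
y = arma_generate_sample(ar, ma, 250)       # args.nsample; noise scale defaults to 1.0

index = pd.date_range(start="1980-01-31", periods=250, freq="M")  # args.startDate / freq
output = pd.DataFrame({"value": y}, index=index)
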
# coding=utf-8
from __future__ import absolute_import, print_function

from suanpan.docker import DockerComponent as dc
from suanpan.docker.arguments import Csv, ListOfString, String, Int
import statsmodels.api as sm
from arguments import SklearnModel


@dc.input(Csv(key="inputData"))
@dc.column(ListOfString(key="featureColumns", default=["a", "b", "c", "d"]))
@dc.column(String(key="labelColumn", default="e"))
@dc.param(Int(key="rho", default=1, help="Order of the autoregressive covariance."))
@dc.param(
    String(
        key="missing",
        default="none",
        help="Available options are ‘none’, ‘drop’, and ‘raise’.",
    )
)
@dc.param(String(key="method", default="pinv", help="Can be “pinv” or “qr”."))
@dc.output(SklearnModel(key="outputModel"))
def SPGLSAR(context):
    # Fetch the relevant data from the context
    args = context.args
    # Data sent by the upstream node via args.inputData
    df = args.inputData
    featureColumns = args.featureColumns
    labelColumn = args.labelColumn
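
# Hedged sketch, not the component's actual body: fitting statsmodels' GLSAR with
# the parameters above. Synthetic regression data for illustration.
import numpy as np
import statsmodels.api as sm

features = np.random.rand(100, 4)
label = features @ np.array([1.0, 2.0, 3.0, 4.0]) + np.random.randn(100)

model = sm.GLSAR(label, sm.add_constant(features), rho=1, missing="none")
result = model.fit(method="pinv")          # args.method; iterative_fit() is an alternative
print(result.params)
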
@dc.column(String(key="labelColumn", default="e")) @dc.param( String( key="family", default="Gaussian", help= "The default is Gaussian. Binomial, Gamma, Gaussian, InverseGaussian" "NegativeBinomial, Poisson, Tweedie", )) @dc.param( String( key="missing", default="none", help="Available options are ‘none’, ‘drop’, and ‘raise’.", )) @dc.param(Int(key="maxiter", default=100, help="Default is 100.")) @dc.output(SklearnModel(key="outputModel")) def SPGLM(context): # 从 Context 中获取相关数据 args = context.args # 查看上一节点发送的 args.inputData 数据 df = args.inputData featureColumns = args.featureColumns labelColumn = args.labelColumn features = df[featureColumns].values label = df[labelColumn].values family = args.family result = getattr(sm.families, family)()
String( key="M", default="HuberT", help= "The default is LeastSquares. HuberT, RamsayE, AndrewWave, TrimmedMean" "Hampel, TukeyBiweight", )) @dc.param( String( key="missing", default="none", help="Available options are ‘none’, ‘drop’, and ‘raise’.", )) @dc.param( Int(key="maxiter", default=50, help="The maximum number of iterations to try.")) @dc.output(SklearnModel(key="outputModel")) def SPRLM(context): # 从 Context 中获取相关数据 args = context.args # 查看上一节点发送的 args.inputData 数据 df = args.inputData featureColumns = args.featureColumns labelColumn = args.labelColumn features = df[featureColumns].values label = df[labelColumn].values M = args.M result = getattr(statsmodels.robust.norms, M)()
" allowed to vary over time.", ) ) @dc.param( Bool( key="mleRegression", default=True, help="Whether or not to use estimate the regression coefficients for the" " exogenous variables as part of maximum likelihood estimation or " "through the Kalman filter", ) ) @dc.param( Int( key="trendOffset", default=1, help="The offset at which to start time trend values.", ) ) @dc.param( Int(key="disp", default=5, help="If True, convergence information is printed.") ) @dc.param( Int(key="maxiter", default=50, help="The maximum number of function evaluations.") ) @dc.param( String( key="method", default="lbfgs", help="The method determines which solver from scipy.optimize is used " "‘newton’, ‘bfgs’, ‘lbfgs’, ‘powell’, ‘cg’, ‘ncg’, ‘basinhopping’",