def __init__(self, fit_mode='ne', learning_rate=0.001, iter_max=1000,
             mini_batch=256, L2_n=0.0, early_stop=True):
    """Validate hyper-parameters and store them on the instance.

    fit_mode: str, 'ne' (presumably normal equation — confirm) or 'sgd'
    learning_rate: float > 0, gradient-descent step size
    iter_max: int > 0, maximum number of iterations
    mini_batch: int >= 0, mini-batch size
    L2_n: float >= 0, L2 regularization strength
    early_stop: bool, whether to stop training early
    """
    # type checks, in the same order the values are declared
    for name, value, sample in (
            ('fit_mode', fit_mode, ''),
            ('learning_rate', learning_rate, 0.0),
            ('iter_max', iter_max, 0),
            ('mini_batch', mini_batch, 0),
            ('L2_n', L2_n, 0.0),
            ('early_stop', early_stop, True)):
        check_type(name, type(value), type(sample))
    # value checks
    fit_mode = fit_mode.lower()
    mode_list = ['ne', 'sgd']
    check_limit('fit_mode', fit_mode in mode_list, str(mode_list))
    check_limit('learning_rate', learning_rate > 0.0, 'value>0.0')
    check_limit('iter_max', iter_max > 0, 'value>0')
    check_limit('mini_batch', mini_batch >= 0, 'value>=0')
    check_limit('L2_n', L2_n >= 0.0, 'value>=0.0')
    # store validated parameters
    self.fit_mode = fit_mode
    self.learning_rate = learning_rate
    self.iter_max = iter_max
    self.mini_batch = mini_batch
    self.L2_n = L2_n
    self.early_stop = early_stop
    self.dp_tool = None
def assess(self, y, p_y, mode=None):
    """Evaluate the model from observed and predicted values.

    Notes: classification prefers str targets and regression prefers
    float64; fitting with other dtypes may cause a dtype mismatch here,
    so convert in advance.

    Parameters
    ----------
    y: observed values, Series
    p_y: predicted values, Series
    mode: str, 'c' -> classification, 'r' -> regression;
          defaults to the mode of the first internal ensemble unit

    Returns
    -------
    0: classification -> accuracy, regression -> R squared, float
    """
    # parameter validation ("is None" instead of type(...)==type(None))
    if mode is None:
        mode = self.units[0].tree.mode
    check_type('mode', type(mode), type(''))
    mode_list = ['c', 'r']
    check_limit('mode', mode in mode_list, str(mode_list))
    y, continuity_y = self.unit_test.check_input_y_(y, name='y')
    p_y, continuity_p_y = self.unit_test.check_input_y_(p_y, name='p_y')
    check_index_match(y, p_y, 'y', 'p_y')
    # accuracy for classification, R2 for regression
    if mode == 'c':
        return stats.accuracy(y, p_y)
    elif mode == 'r':
        return stats.r_sqr(y, p_y)
def selection(self, test_X, test_y, units=None, units_oob_score=None,
              use='oob', return_units=False, show_time=False):
    """Select a subset of ensemble units from a fitted model.

    Works like pruning for a decision tree: candidate subsets are
    generated by a rule and the one performing best on the test set is
    kept, giving a simpler model that generalizes better.

    Parameters
    ----------
    test_X: test-set features, DataFrame
    test_y: test-set target, Series
    units: ensemble units, list(DecisionTree); defaults to self.units
    units_oob_score: per-unit OOB scores, list(float)
    use: selection method, 'rd' -> random, 'oob' -> OOB-based (default)
    return_units: return the subset instead of storing it, bool
    show_time: print elapsed time, bool

    Returns
    -------
    0: selected subset when return_units is True, otherwise stored in
       self.units and nothing is returned
    """
    # BUGFIX: time.clock() was removed in Python 3.8;
    # perf_counter() is the documented replacement
    start = time.perf_counter()
    if units is None:
        units = self.units
    if units_oob_score is None:
        units_oob_score = self.units_oob_score
    # input validation
    check_type('units', type(units), type([]))
    check_type('element in units', type(units[0]), type(dt.DecisionTree()))
    check_type('units_oob_score', type(units_oob_score), type([]))
    check_type('element in units_oob_score', type(units_oob_score[0]),
               [type(0.0), np.float64])
    check_type('use', type(use), type(''))
    check_type('return_units', type(return_units), type(True))
    use_list = ['rd', 'oob']
    check_limit('use', use in use_list, str(use_list))
    test_X, continuity_X = self.unit_test.check_input_X_(test_X, 'test_X')
    test_y, continuity_y = self.unit_test.check_input_y_(test_y, 'test_y')
    check_index_match(test_X, test_y, 'test_X', 'test_y')
    # every feature used by any unit must be present in test_X
    features = []
    for unit in units:
        features += unit.tree.features
    features = list(set(features))
    check_items_match(test_X.columns, features, 'test_X', 'tree',
                      'features', mode='right')
    # run the chosen selection strategy
    if use == 'rd':
        subset = self.random_selection_(test_X, test_y, units)
    elif use == 'oob':
        subset = self.oob_selection_(test_X, test_y, units, units_oob_score)
    end = time.perf_counter()
    if show_time == True:
        print('\ntime used for selection:%f' % (end - start))
    if return_units == False:
        self.units = subset
    else:
        return subset
def bind_func_(self, activation, softmax, optimizer, mode):
    """Resolve activation/cost/optimizer names into bound methods.

    activation: hidden-layer activation name, 'sigm'/'tanh'/'relu'
    softmax: use softmax output + log-likelihood cost ('c' mode), bool
    optimizer: 'sgd'/'magd'/'nagd'/'adam'
    mode: 'c' -> classification, 'r' -> regression
    """
    # type validation
    check_type('activation', type(activation), [type(''), type(())])
    check_type('softmax', type(softmax), type(True))
    check_type('optimizer', type(optimizer), type(''))
    # value validation
    activation_list = ['sigm', 'tanh', 'relu']
    check_limit('activation', activation in activation_list,
                str(activation_list))
    optimizer_list = ['sgd', 'magd', 'nagd', 'adam']
    check_limit('optimizer', optimizer in optimizer_list,
                str(optimizer_list))
    # hidden-layer activation: name -> method table
    activation_map = {'sigm': self.sigmoid_,
                      'tanh': self.tanh_,
                      'relu': self.relu_}
    if activation not in activation_map:
        raise ValueError('Unknown activation function')
    self.activation_ = activation_map[activation]
    self.activation = activation
    # output activation and cost depend on the task mode
    if mode == 'c':
        if softmax:
            self.cost, self.output_activation = 'log', 'soft'
            self.cost_ = self.log_like_
            self.output_activation_ = self.softmax_
        else:
            self.cost, self.output_activation = 'ce', 'sigm'
            self.cost_ = self.cross_ent_
            self.output_activation_ = self.sigmoid_
    elif mode == 'r':
        self.cost, self.output_activation = 'mse', 'none'
        self.cost_ = self.mean_sqr_err_
        self.output_activation_ = self.identity_
    else:
        raise ValueError('Unknown mode')
    self.softmax = softmax
    # optimizer: name -> method table
    optimizer_map = {'sgd': self.sgd_,
                     'magd': self.momentum_,
                     'nagd': self.nesterov_,
                     'adam': self.adam_}
    if optimizer not in optimizer_map:
        raise ValueError('Unknown optimizer')
    self.optimizer_ = optimizer_map[optimizer]
    self.optimizer = optimizer
def __init__(self, mode='c', units_n=10, units_type='cart',
             depth_max=None, split_sample_n=2, leaf_sample_n=1,
             features_use='sqrt', features_reuse=True):
    """Validate ensemble hyper-parameters and build the template unit.

    mode: str, 'c' -> classification, 'r' -> regression
    units_n: int >= 1, number of ensemble units
    units_type: str, base tree type, one of 'id3'/'c4.5'/'cart'
    (remaining parameters are forwarded to dt.DecisionTree unchecked)
    """
    # mode: type check, normalize case, then value check
    check_type('mode', type(mode), type(''))
    mode_list = ['c', 'r']
    mode = mode.lower()
    check_limit('mode', mode in mode_list, str(mode_list))
    # number of units
    check_type('units_n', type(units_n), type(0))
    check_limit('units_n', units_n >= 1, 'value>=1')
    # base unit type
    check_type('units_type', type(units_type), type(''))
    type_list = ['id3', 'c4.5', 'cart']
    units_type = units_type.lower()
    check_limit('units_type', units_type in type_list, str(type_list))
    # template tree, also used elsewhere for input validation
    self.unit_test = dt.DecisionTree(
        mode=mode, model_type=units_type, depth_max=depth_max,
        split_sample_n=split_sample_n, leaf_sample_n=leaf_sample_n,
        features_use=features_use, features_reuse=features_reuse)
    # store validated parameters
    self.mode = mode
    self.units_n = units_n
    self.units_type = units_type
    self.depth_max = depth_max
    self.split_sample_n = split_sample_n
    self.leaf_sample_n = leaf_sample_n
    self.features_use = features_use
    self.features_reuse = features_reuse
def __init__(self, mode='c', units_type='cart', iter_max=10, depth_max=0,
             learning_rate=1.0):
    """Validate boosting hyper-parameters and build the template unit.

    mode: str, 'c' -> classification, 'r' -> regression
    units_type: str, only 'cart' is supported
    iter_max: int >= 1, number of boosting rounds
    depth_max: int, 0 selects the default depth of 3
    learning_rate: float in (0, 1]
    """
    # validate mode
    check_type('mode', type(mode), type(''))
    type_list = ['r', 'c']
    mode = mode.lower()
    check_limit('mode', mode in type_list, str(type_list))
    # validate unit type (only CART trees are supported)
    check_type('units_type', type(units_type), type(''))
    type_list = ['cart']
    units_type = units_type.lower()
    check_limit('units_type', units_type in type_list, str(type_list))
    # validate iteration count and learning rate
    check_type('iter_max', type(iter_max), type(0))
    check_limit('iter_max', iter_max >= 1, 'value>=1')
    check_type('learning_rate', type(learning_rate), type(0.0))
    check_limit('learning_rate', learning_rate > 0.0, 'value>0.0')
    check_limit('learning_rate', learning_rate <= 1.0, 'value<=1.0')
    # default depth is 3 for both modes (the original branched on mode
    # but assigned the same value either way)
    if type(depth_max) == type(0) and depth_max == 0:
        depth_max = 3
    # NOTE: depth default follows sklearn; depth_max=1 was tried for
    # regression and performed badly (per the original author's note)
    self.unit_test = dt.DecisionTree(mode='r', model_type='cart',
                                     depth_max=depth_max)
    # store validated parameters; boosting units always regress ('r')
    self.mode = mode
    self.units_type = 'cart'
    self.units_mode = 'r'
    self.iter_max = iter_max
    self.depth_max = depth_max
    self.learning_rate = learning_rate
def assess(self, y, p_y, mode=None):
    """Evaluate the model from observed and predicted values.

    Notes: classification prefers str targets and regression prefers
    float64; fitting with other dtypes may cause a dtype mismatch here,
    so convert in advance.

    Parameters
    ----------
    y: observed values, Series
    p_y: predicted values, Series
    mode: str, 'c' -> classification, 'r' -> regression;
          defaults to self.mode

    Returns
    -------
    0: classification -> accuracy, regression -> R squared, float
    """
    # parameter validation ("is None" instead of type(...)==type(None))
    if mode is None:
        mode = self.mode
    check_type('mode', type(mode), type(''))
    mode_list = ['c', 'r']
    check_limit('mode', mode in mode_list, str(mode_list))
    check_index_match(y, p_y, 'y', 'p_y')
    # accuracy for classification, R2 for regression
    if mode == 'c':
        return stats.accuracy(y.astype('str'), p_y.astype('str'))
    elif mode == 'r':
        r_sqr = stats.r_sqr(y, p_y)
        # a negative R2 indicates the fit is worse than a constant mean
        if r_sqr < 0:
            print('warning: R2 is less than 0, which means bad fitting,' +
                  '\ntry to reduce the learning rate')
        return r_sqr
def predict(self, X, units=None, mode=None, classes=None, units_result=False,
            return_proba=False, return_paths=False, show_time=False):
    """Predict with all ensemble units (internal cache used by default).

    Parameters
    ----------
    X: feature columns, DataFrame
    units: ensemble units; 'r' mode -> list(DecisionTree),
           'c' mode -> list of per-class lists of DecisionTree
    mode: 'c' -> classification, 'r' -> regression; defaults to self.mode
    classes: class labels, list(str); defaults to self.classes ('c' mode)
    units_result: also return every unit's prediction, bool, default False
    return_proba: return class probabilities ('c' mode), bool, default False
                  (probabilities cannot be used directly for assessment)
    return_paths: also return decision paths, bool, default False
    show_time: print elapsed time, bool, default False

    Returns
    -------
    0: predicted classes/probabilities, Series/DataFrame
    1: per-unit predictions (when units_result is True), list
    2: per-unit decision paths (when return_paths is True), list
    """
    # BUGFIX: time.clock() was removed in Python 3.8
    start = time.perf_counter()
    # parameter validation
    if units is None:
        units = self.units
    if mode is None:
        mode = self.mode
    check_type('mode', type(mode), type(''))
    mode_list = ['c', 'r']
    check_limit('mode', mode in mode_list, str(mode_list))
    if (classes is None) & (mode == 'c'):
        classes = self.classes
    check_type('units', type(units), type([]))
    if len(units) == 0:
        raise ValueError('lack of units')
    # 'c' mode stores one sub-unit per class inside each unit
    if mode == 'r':
        check_type('element in units', type(units[0]),
                   type(dt.DecisionTree()))
    elif mode == 'c':
        check_type('element in units', type(units[0][0]),
                   type(dt.DecisionTree()))
    check_type('return_proba', type(return_proba), type(True))
    check_type('return_paths', type(return_paths), type(True))
    check_type('show_time', type(show_time), type(True))
    X, continuity_X = self.unit_test.check_input_X_(X)
    # every feature used by any unit must be present in X
    features = []
    if mode == 'c':
        for units_ in units:
            for unit in units_:
                features += unit.tree.features
    elif mode == 'r':
        for unit in units:
            features += unit.tree.features
    features = list(set(features))
    check_items_match(X.columns, features, 'X', 'unit', 'features',
                      mode='right')
    # classification accumulates per-class probabilities,
    # regression accumulates the predicted value directly
    n = len(X)
    if mode == 'c':
        p_y = pd.DataFrame(np.zeros((n, len(classes))),
                           index=X.index, columns=classes)
    elif mode == 'r':
        p_y = pd.Series(np.zeros(n), index=X.index)
    # call every unit and accumulate its prediction
    units_p_y, units_paths = [], []
    for i in range(len(units)):
        if show_time == True:
            print('\npredicting with unit %d ---' % i)
        if mode == 'r':
            if return_paths == True:
                p_y_, paths = units[i].predict(
                    X, return_proba=True, return_paths=True,
                    show_time=show_time, check_input=False)
                units_paths.append(paths)
            else:
                p_y_ = units[i].predict(
                    X, return_proba=True, return_paths=False,
                    show_time=show_time, check_input=False)
            p_y += p_y_
            if units_result == True:
                # (the original guarded this append with a mode=='c'
                # check that can never hold inside the 'r' branch)
                units_p_y.append(p_y_)
        elif mode == 'c':
            # one sub-unit per class, each predicting that class's probability
            classes_p_y, classes_paths = [], []
            for j in range(len(classes)):
                if return_paths == True:
                    p_y_, paths = units[i][j].predict(
                        X, return_proba=True, return_paths=True,
                        show_time=show_time, check_input=False)
                    classes_paths.append(paths)
                else:
                    p_y_ = units[i][j].predict(
                        X, return_proba=True, return_paths=False,
                        show_time=show_time, check_input=False)
                p_y.iloc[:, j] += p_y_
                if units_result == True:
                    # BUGFIX: the original called units[i].choose_class_
                    # here, but units[i] is a list in 'c' mode
                    # (AttributeError), and a single class's probability
                    # Series cannot be reduced to a label anyway, so the
                    # per-class probabilities are kept as-is
                    classes_p_y.append(p_y_)
            if return_paths == True:
                units_paths.append(classes_paths)
            if units_result == True:
                units_p_y.append(classes_p_y)
    # convert accumulated probabilities to a single class if requested
    if (mode == 'c') & (return_proba == False):
        p_y = self.unit_test.choose_class_(p_y, classes)
    end = time.perf_counter()
    if show_time == True:
        print('\ntotal time used for predict: %f' % (end - start))
    # BUGFIX: the original returned `paths` (only the last unit's paths,
    # or NameError in 'c' mode) where the docstring promises every
    # unit's paths -> return the collected units_paths
    if units_result == True:
        if return_paths == True:
            return p_y, units_p_y, units_paths
        else:
            return p_y, units_p_y
    else:
        if return_paths == True:
            return p_y, units_paths
        else:
            return p_y
def __init__(self, mode='c', multi_class='ovr', iter_max=10, C=10.0,
             k_type='lin', k_args=None, relax=0.001, eps=1.0):
    """Validate SVM hyper-parameters and store them.

    mode: str, 'c' -> classification, 'r' -> regression
    multi_class: str, multi-class strategy, 'ovr' or 'tree'
    iter_max: int > 0, maximum iterations
    C: float > 0, penalty coefficient
    k_type: str, kernel type, 'lin'/'pol'/'rbf'
    k_args: dict, kernel arguments; 'pol' needs R (float) and d (int>0),
            'rbf' needs sigma (float>0)
    relax: float > 0
    eps: float > 0
    """
    # type checks, one per parameter, in declaration order
    for name, value, sample in (
            ('mode', mode, ''),
            ('multi_class', multi_class, ''),
            ('iter_max', iter_max, 0),
            ('C', C, 0.0),
            ('k_type', k_type, ''),
            ('relax', relax, 0.0),
            ('eps', eps, 0.0)):
        check_type(name, type(value), type(sample))
    # value checks
    mode_list, mode_list2 = ['c', 'r'], ['ovr', 'tree']
    check_limit('mode', mode in mode_list, str(mode_list))
    check_limit('multi_class', multi_class in mode_list2, str(mode_list2))
    check_limit('iter_max', iter_max > 0, 'value>0')
    check_limit('C', C > 0.0, 'value>0.0')
    check_limit('relax', relax > 0.0, 'value>0.0')
    check_limit('eps', eps > 0.0, 'value>0.0')
    type_list = ['lin', 'pol', 'rbf']
    check_limit('k_type', k_type in type_list, str(type_list))
    # non-linear kernels require their arguments
    if k_type != 'lin':
        check_type('k_args', type(k_args), type({}))
        if k_type == 'pol':
            if ('R' in k_args.keys()) & ('d' in k_args.keys()):
                check_type('k_args:R', type(k_args['R']), type(0.0))
                check_type('k_args:d', type(k_args['d']), type(0))
                check_limit('k_args:d', k_args['d'] > 0, 'value>0')
            else:
                raise ValueError(
                    'k_args should provide R and d for k_type: pol')
        if k_type == 'rbf':
            if 'sigma' in k_args.keys():
                check_type('k_args:sigma', type(k_args['sigma']), type(0.0))
                check_limit('k_args:sigma', k_args['sigma'] > 0.0,
                            'value>0.0')
            else:
                raise ValueError('k_args should provide sigma for k_type: rbf')
    # store validated parameters
    self.C = C
    self.k_type = k_type
    self.k_args = k_args
    self.iter_max = iter_max
    self.relax = relax
    self.mode = mode
    self.multi_class = multi_class
    self.eps = eps
def __init__(self, input_shape=(28, 28), output_shape=(10, ),
             hidden_layers=(100, ), mode='c', activation='sigm',
             cost='mse', optimizer='sgd', batch_size=256, iter_max=100,
             learning_rate=0.1, L2_alpha=0.0001, dropout_p=0.0,
             early_stop=10, lr_atten_rate=0.9, lr_atten_max=10,
             momentum_p=0.9, adam_beta1=0.9, adam_beta2=0.999,
             adam_eps=1e-8, relu_a=0.0):
    """Validate hyper-parameters, build the layer sizes and bind functions.

    input_shape/output_shape/hidden_layers: tuples of int
    mode: str, 'c' -> classification, 'r' -> regression
          (regression forces output_shape to (1,))
    early_stop: int patience; bool True -> 20, False -> iter_max
    remaining parameters configure cost/optimizer/regularization and
    are range-checked below
    """
    check_type('input_shape', type(input_shape), type(()))
    check_type('output_shape', type(output_shape), type(()))
    check_type('hidden_layers', type(hidden_layers), type(()))
    check_type('mode', type(mode), type(''))
    check_type('batch_size', type(batch_size), type(0))
    check_type('iter_max', type(iter_max), type(0))
    check_type('learning_rate', type(learning_rate), type(0.0))
    check_type('L2_alpha', type(L2_alpha), type(0.0))
    check_type('dropout_p', type(dropout_p), type(0.0))
    # bool early_stop selects a default patience (True) or disables
    # early stopping by stretching patience to iter_max (False)
    if type(early_stop) == type(True):
        if early_stop == True:
            early_stop = 20
        else:
            early_stop = iter_max
    check_type('early_stop', type(early_stop), type(0))
    check_type('lr_atten_rate', type(lr_atten_rate), type(0.0))
    check_type('lr_atten_max', type(lr_atten_max), type(0))
    check_type('momentum_p', type(momentum_p), type(0.0))
    check_type('adam_beta1', type(adam_beta1), type(0.0))
    check_type('adam_beta2', type(adam_beta2), type(0.0))
    check_type('adam_eps', type(adam_eps), type(0.0))
    check_type('relu_a', type(relu_a), type(0.0))
    # BUGFIX: the original iterated input_shape in all three loops,
    # leaving output_shape and hidden_layers elements unvalidated
    for item in input_shape:
        check_type('item in input_shape', type(item), type(0))
    for item in output_shape:
        check_type('item in output_shape', type(item), type(0))
    for item in hidden_layers:
        check_type('item in hidden_layers', type(item), type(0))
    mode_list = ['c', 'r']
    check_limit('mode', mode in mode_list, str(mode_list))
    check_limit('batch_size', batch_size > 0, 'value>0')
    check_limit('iter_max', iter_max > 0, 'value>0')
    check_limit('learning_rate', learning_rate > 0.0, 'value>0.0')
    check_limit('L2_alpha', L2_alpha >= 0.0, 'value>=0.0')
    check_limit('dropout_p', (dropout_p >= 0.) & (dropout_p < 1.),
                '0.<=value<1.')
    check_limit('early_stop', early_stop > 0, 'value>0')
    check_limit('lr_atten_rate', (lr_atten_rate > 0.) &
                (lr_atten_rate <= 1.), '0.<value<=1.')
    check_limit('lr_atten_max', lr_atten_max >= 0, 'value>=0')
    check_limit('momentum_p', (momentum_p > 0.) & (momentum_p < 1.),
                '0<value<1')
    check_limit('adam_beta1', (adam_beta1 > 0.) & (adam_beta1 < 1.),
                '0<value<1')
    check_limit('adam_beta2', (adam_beta2 > 0.) & (adam_beta2 < 1.),
                '0<value<1')
    check_limit('adam_eps', adam_eps > 0., 'value>0')
    check_limit('relu_a', (relu_a >= 0.) & (relu_a <= 1.), '0<=value<=1')
    self.mode = mode
    # regression always uses a single output neuron
    if mode == 'r':
        print('\nwarning: output_shape has been changed to (1,)')
        output_shape = (1, )
    self.input_shape = input_shape
    self.output_shape = output_shape
    self.hidden_layers = hidden_layers
    # flatten shapes into neuron counts for the first/last layers
    self.input_size = int(np.array(input_shape).prod())
    self.output_size = int(np.array(output_shape).prod())
    self.layers = (self.input_size, ) + hidden_layers + (
        self.output_size, )
    self.bind_func_(activation, cost, optimizer)
    self.random_init_()
    self.batch_size = batch_size
    self.iter_max = iter_max
    self.learning_rate = learning_rate
    self.L2_alpha = L2_alpha
    self.dropout_p = dropout_p
    self.early_stop = early_stop
    self.lr_atten_rate = lr_atten_rate
    self.lr_atten_max = lr_atten_max
    self.momentum_p = momentum_p
    self.adam_beta1 = adam_beta1
    self.adam_beta2 = adam_beta2
    self.adam_eps = adam_eps
    self.relu_a = relu_a
    # per-stage timing accumulator
    self.time_cost = pd.Series(np.zeros(9), index=[
        'Total', 'input check', 'mini batch', 'forward prop', 'back prop',
        '--cost', '--grad', '--delta', 'monitor'
    ], name='time_cost')
    self.classes = []
    self.iter_total = 0
def bind_func_(self, activation, cost, optimizer):
    """Resolve per-layer activation, cost and optimizer names to methods.

    activation: a single name applied to every layer, or a tuple of
                names: length 1 -> all layers, length 2 -> hidden +
                output, length len(layers)-1 -> one per layer
    cost: 'mse'/'ce'/'log'
    optimizer: 'sgd'/'magd'/'nagd'/'adam'
    """
    # type validation
    check_type('activation', type(activation), [type(''), type(())])
    check_type('cost', type(cost), type(''))
    check_type('optimizer', type(optimizer), type(''))
    # value validation and expansion to one name per bound layer
    activation_list = ['sigm', 'tanh', 'relu', 'soft', 'none']
    layers = self.layers
    n_bind = len(layers) - 1
    if type(activation) == type(''):
        check_limit('activation', activation in activation_list,
                    str(activation_list))
        activation_config = [activation] * n_bind
    else:
        for item in activation:
            check_limit('item in activation', item in activation_list,
                        str(activation_list))
        if len(activation) == 1:
            activation_config = [activation[0]] * n_bind
        elif len(activation) == 2:
            activation_config = [activation[0]] * (n_bind - 1)
            activation_config.append(activation[1])
        elif len(activation) == n_bind:
            activation_config = [activation[i] for i in range(n_bind)]
        else:
            raise ValueError('\nconfig error: layers do not match')
    # warn on suspicious output-activation / mode combinations
    if (activation_config[-1] in ('tanh', 'relu', 'none')) & (self.mode
                                                              == 'c'):
        print(
            '\nwarning: you set tanh/relu/none activation of output for classification'
        )
    if (activation_config[-1] in ('sigm', 'soft')) & (self.mode == 'r'):
        print(
            '\nwarning: you set sigmoid/softmax activation of output for regression'
        )
    cost_list = ['mse', 'ce', 'log']
    check_limit('cost', cost in cost_list, str(cost_list))
    if (cost in ['ce', 'log']) & (self.mode == 'r'):
        print(
            '\nwarning: you set cross entropy/log like cost for regression'
        )
    optimizer_list = ['sgd', 'magd', 'nagd', 'adam']
    check_limit('optimizer', optimizer in optimizer_list,
                str(optimizer_list))
    # bind activations layer by layer via a name -> method table
    act_map = {'sigm': self.sigmoid_, 'tanh': self.tanh_,
               'relu': self.relu_, 'soft': self.softmax_,
               'none': self.identity_}
    activations_ = []
    for name in activation_config:
        if name not in act_map:
            raise ValueError('Unknown activation function')
        activations_.append(act_map[name])
    self.activation = activation_config
    self.activations_ = activations_
    # bind cost function
    cost_map = {'mse': self.mean_sqr_err_, 'ce': self.cross_ent_,
                'log': self.log_like_}
    if cost not in cost_map:
        raise ValueError('Unknown cost function')
    self.cost_ = cost_map[cost]
    self.cost = cost
    # bind optimizer
    opt_map = {'sgd': self.sgd_, 'magd': self.momentum_,
               'nagd': self.nesterov_, 'adam': self.adam_}
    if optimizer not in opt_map:
        raise ValueError('Unknown optimizer')
    self.optimizer_ = opt_map[optimizer]
    self.optimizer = optimizer
def __init__(self, mode='c', hidden_layers=(100, ), activation='sigm',
             softmax=False, optimizer='nagd', batch_size=100, iter_max=100,
             learning_rate=0.01, l2_alpha=0.0001, dropout_p=0.0,
             early_stop=10, attenuation=0.9, momentum_p=0.9,
             adam_beta1=0.9, adam_beta2=0.999, adam_eps=1e-8, relu_a=0.0,
             external_monitor=None):
    """Validate network hyper-parameters, bind functions and store state.

    mode: str, 'c' -> classification, 'r' -> regression
    hidden_layers: tuple of hidden-layer sizes
    early_stop: int patience; bool True -> 20, False -> iter_max
    remaining parameters configure the optimizer and regularization
    and are range-checked below
    """
    check_type('hidden_layers', type(hidden_layers), type(()))
    check_type('mode', type(mode), type(''))
    for name, value in (('batch_size', batch_size),
                        ('iter_max', iter_max)):
        check_type(name, type(value), type(0))
    for name, value in (('learning_rate', learning_rate),
                        ('l2_alpha', l2_alpha),
                        ('dropout_p', dropout_p)):
        check_type(name, type(value), type(0.0))
    # bool early_stop selects a default patience (True) or disables
    # early stopping by stretching patience to iter_max (False)
    if type(early_stop) == type(True):
        early_stop = 20 if early_stop else iter_max
    check_type('early_stop', type(early_stop), type(0))
    for name, value in (('attenuation', attenuation),
                        ('momentum_p', momentum_p),
                        ('adam_beta1', adam_beta1),
                        ('adam_beta2', adam_beta2),
                        ('adam_eps', adam_eps),
                        ('relu_a', relu_a)):
        check_type(name, type(value), type(0.0))
    # range checks
    mode_list = ['c', 'r']
    check_limit('mode', mode in mode_list, str(mode_list))
    check_limit('batch_size', batch_size > 0, 'value>0')
    check_limit('iter_max', iter_max > 0, 'value>0')
    check_limit('learning_rate', learning_rate > 0.0, 'value>0.0')
    check_limit('l2_alpha', l2_alpha >= 0.0, 'value>=0.0')
    check_limit('dropout_p', (dropout_p >= 0.) & (dropout_p < 1.),
                '0.<=value<1.')
    check_limit('early_stop', early_stop > 0, 'value>0')
    check_limit('attenuation', (attenuation > 0.) & (attenuation <= 1.),
                '0.<value<=1.')
    check_limit('momentum_p', (momentum_p > 0.) & (momentum_p < 1.),
                '0<value<1')
    check_limit('adam_beta1', (adam_beta1 > 0.) & (adam_beta1 < 1.),
                '0<value<1')
    check_limit('adam_beta2', (adam_beta2 > 0.) & (adam_beta2 < 1.),
                '0<value<1')
    check_limit('adam_eps', adam_eps > 0., 'value>0')
    check_limit('relu_a', (relu_a >= 0.) & (relu_a <= 1.), '0<=value<=1')
    # store and bind
    self.mode = mode
    self.hidden_layers = hidden_layers
    self.bind_func_(activation, softmax, optimizer, mode)
    self.batch_size = batch_size
    self.iter_max = iter_max
    self.learning_rate = learning_rate
    self.l2_alpha = l2_alpha
    self.dropout_p = dropout_p
    self.early_stop = early_stop
    self.attenuation = attenuation
    self.momentum_p = momentum_p
    self.adam_beta1 = adam_beta1
    self.adam_beta2 = adam_beta2
    self.adam_eps = adam_eps
    self.relu_a = relu_a
    self.external_monitor = external_monitor
    self.is_fitted = False
    self.iter_total = 0
    # per-stage timing accumulator
    self.time_cost = pd.Series(np.zeros(9), index=[
        'Total', 'input check', 'mini batch', 'forward prop', 'back prop',
        '--grad', '--delta', 'update', 'monitor'
    ], name='time_cost')