##### imports this function relies on (assumed to sit at the top of the module,
##### next to module-level helpers such as create_Dataset/self_eval/eval_of_results
##### and the globals use_lgb and engine_global)
import time
import numpy as np
import pandas as pd
import lightgbm as lgb
import joblib   # on older scikit-learn: from sklearn.externals import joblib
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import LinearRegression

def train_and_pred(df_train,df_valid,df_test,model_type,model_num,params,use_lgb=use_lgb,num_iterations=10000):
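    """Train one model (LightGBM / sklearn GBDT / LinearRegression), persist it,
    predict on train/valid/test, write errors and feature importances to txt,
    and dump the valid/test predictions to csv and SQL tables.

    Returns (train_set_error, valid_set_error, valid_df, test_df, best_iteration);
    best_iteration is None for models without early stopping.
    """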
    ##### create dataset for gbdt
    if not use_lgb:
        X_train, y_train, X_test, user_test, X_valid,y_valid, user_valid  =create_Dataset(df_train,df_valid,df_test) 
    else:
        lgb_train,lgb_eval,X_train, y_train,X_test,user_test,X_valid, y_valid, user_valid  =create_Dataset(df_train,df_valid,df_test)
    ##### file names specific to this model_num
    result_file='train_valid_result_'+str(model_num)+'.txt' 
    model_file ='model_'+str(model_num)+'.pkl'
    valid_csv  ='tianchi/valid_'+str(model_num)+'.csv'
    test_csv   ='tianchi/test_'+str(model_num)+'.csv'
    with open(result_file,'a') as fa:
        now_time=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
        fa.write('\n\n\n'+str(now_time)+'\n')
        fa.write('learning_rate: '+str(params['learning_rate'])+'\n')
    ##### train
    print('Start training the model_num = '+str(model_num)+'...')
    if model_type=="gbdt":
        if use_lgb:
            #gbm=lgb.LGBMRegressor(**params)
            #gbm.fit(X_train,y_train)
            gbm=lgb.train(params=params,
                        train_set=lgb_train,
                        valid_sets=lgb_eval,
                        feval=self_eval,
                        num_boost_round=num_iterations,
                        early_stopping_rounds=2000)
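            # note: early_stopping_rounds as a train() kwarg only exists on older
            # LightGBM releases; on LightGBM >= 4.0 pass early stopping via
            # callbacks=[lgb.early_stopping(2000)] instead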
        else:
            print "current params:   ",params
            gbm = GradientBoostingRegressor(**params)
            gbm.fit(X_train,y_train)
    elif model_type=="linear":
        #min_max_scaler = preprocessing.MinMaxScaler()
        #X_train= min_max_scaler.fit_transform(X_train)
        #X_test = min_max_scaler.transform(X_test)
        #X_valid= min_max_scaler.transform(X_valid)
        gbm= LinearRegression(normalize=True)
        gbm.fit(X_train,y_train)
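        # note: the normalize= argument was removed from scikit-learn's
        # LinearRegression in 1.2; on newer versions scale the features first
        # (e.g. with the MinMaxScaler commented out above) and drop normalize=True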
    else:
        raise ValueError('unknown model_type: '+str(model_type))
    ##### save model to file
    print ("save model ...")
    joblib.dump(gbm,model_file)
    ##### predict
    print('Start predicting...')
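    # for LightGBM, predict with the best iteration found by early stopping;
    # the sklearn models have no equivalent notion and use the full fitted model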
    if use_lgb:
        y_pred_train=np.mat(gbm.predict(X_train,num_iteration=gbm.best_iteration)).T
        y_pred_valid=np.mat(gbm.predict(X_valid,num_iteration=gbm.best_iteration)).T
        y_pred_test =np.mat(gbm.predict(X_test ,num_iteration=gbm.best_iteration)).T
    else:
        y_pred_train=np.mat(gbm.predict(X_train)).T
        y_pred_valid=np.mat(gbm.predict(X_valid)).T
        y_pred_test =np.mat(gbm.predict(X_test )).T
    ##### combine to valid_df and test_df
    #y_pred_test =abs(y_pred_test)
    #y_pred_valid=abs(y_pred_valid)
    valid_df=pd.DataFrame(np.column_stack((user_valid,y_pred_valid,np.mat(y_valid).T)),columns=['shop_id','pay_day','label_valid','label'])
    test_df= pd.DataFrame(np.column_stack((user_test,y_pred_test)),columns=['shop_id','pay_day','label_test'])
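    # user_valid/user_test are assumed to carry the (shop_id,pay_day) identifier
    # columns returned by create_Dataset, hence the extra column names above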
    ###### add some rule to deal the valid_df and test_df 
    #valid_df=rule_of_deal_results(valid_df,df_train,"label_valid") 
    #test_df =rule_of_deal_results(test_df, df_train,"label_test" )
    ##### eval the valid_df 
    train_set_error=eval_of_results(y_train,y_pred_train)
    valid_set_error=eval_of_results(y_valid,y_pred_valid)
    ##### test_df and  valid_df  to csv and sql
    valid_df.to_csv(valid_csv,index=False)
    valid_df.to_sql("valid_results_"+str(model_num),engine_global,index=False,if_exists="replace",chunksize=10000)
    test_df.to_csv(test_csv,index=False) 
    test_df.to_sql("test_results_"+str(model_num),engine_global,index=False,if_exists="replace",chunksize=10000)
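    # engine_global is assumed to be a module-level SQLAlchemy engine
    # (sqlalchemy.create_engine(...)) shared by the whole script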
    ##### compute feature_importance
    if hasattr(gbm,"feature_importances_"):
        print("compute feature_importance ...")
        feature_importance=gbm.feature_importances_
        feature_names=list(X_train.columns.astype(str))
        sorted_idx = np.argsort(feature_importance)
    elif hasattr(gbm,"feature_importance"):
        print("compute feature_importance ...")
        ### "gain" is better than "split"
        feature_importance=gbm.feature_importance(importance_type="gain")
        feature_names=list(X_train.columns.astype(str))
        sorted_idx = np.argsort(feature_importance)
    else:
        pass
    ##### write feature_importance to txt
    with open('feature_name_importance.txt','a') as fa:
        fa.write('\n\n\n\n\n\n##################  new model ###################\n\n')
        now_time=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
        ##### use .get() so models whose params lack these keys do not raise KeyError
        params_str=[
                    "\nmax_depth    : "+str(params.get("max_depth")),
                    "\nnum_leaves   : "+str(params.get("num_leaves")),
                    "\nlearning_rate: "+str(params.get("learning_rate"))]
        if hasattr(gbm,"feature_importances_") or hasattr(gbm,"feature_importance"):
            print("write feature_importance ...")
            for i in sorted_idx:
                fa.write(str(feature_names[i])+' : '+str(feature_importance[i])+'\n')
        fa.write("\n\n")
        fa.write(str(now_time)+'\n\n')
        fa.write("\n\n####  train_set_error: "+str(train_set_error)+"\n")
        fa.write("####  valid_set_error: "+str(valid_set_error)+"\n")
        for pa in params_str:
            fa.write(pa)
    ##### best_iteration only exists on the LightGBM booster, so fall back to None
    return train_set_error,valid_set_error,valid_df,test_df,getattr(gbm,'best_iteration',None)
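

##### self_eval above is a project-specific feval defined elsewhere in this
##### script; purely as an illustration of the calling convention LightGBM
##### expects, feval(preds,Dataset) -> (name,value,is_higher_better), a minimal
##### RMSE version could look like this (hypothetical, not the project's metric):
def self_eval_rmse_sketch(preds,train_data):
    labels=train_data.get_label()
    rmse=float(np.sqrt(np.mean((preds-labels)**2)))
    return 'rmse',rmse,False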