Example #1
    jstr_insert += '],"infomation_gain":['
    for idx, line in enumerate(IT_score):  # IT
        if idx < num_to_show:
            jstr_insert += '"' + line.rstrip() + '",'
    jstr_insert = jstr_insert[:len(jstr_insert) - 1]  # remove last ','
    jstr_insert += '],"combined":['
    for idx, line in enumerate(combined_score):  # Combined
        if idx < num_to_show:
            jstr_insert += '"' + line.rstrip() + '",'
    jstr_insert = jstr_insert[:len(jstr_insert) - 1]  # remove last ','
    jstr_insert += ']}}'
    jstr_insert = jstr_insert.replace("\t", ",")
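    # A minimal equivalent sketch (not in the original source): json.dumps can
    # assemble the same JSON fragment and handles quoting and separators
    # automatically; it assumes IT_score/combined_score are lists of strings,
    # and keeps the key "infomation_gain" verbatim as spelled above.
    #   value = {"infomation_gain": [l.rstrip().replace("\t", ",") for l in IT_score[:num_to_show]],
    #            "combined": [l.rstrip().replace("\t", ",") for l in combined_score[:num_to_show]]}
    #   jstr_tail = json.dumps(value)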

    #print "jstr_insert=",jstr_insert
    ## write to mongoDB.myml.dataset_info, ignore doc with duplicated key
    ret = query_mongo.upsert_doc_t(mongo_tuples, filter, jstr_insert,
                                   upsert_flag)
    print "INFO: Upsert count for feature importance=", ret

    print 'INFO: Finished!'
    return 0


# combine into one table
def combine_with_coef(row_id_str, coef_arr, FIRM_list, IT_list, Prob_list,
                      out_filename, feat_sample_count_arr):
    #print "INFO: combined fname=",out_filename
    if os.path.exists(out_filename):
        try:
            os.remove(out_filename)
        except OSError as e:
            print("ERROR: %s - %s." % (e.filename, e.strerror))
def train(row_id_str, ds_id, hdfs_feat_dir, local_out_dir, ml_opts_jstr, excluded_feat_cslist
    , sp_master, spark_rdd_compress, spark_driver_maxResultSize, sp_exe_memory, sp_core_max
    , zipout_dir, zipcode_dir, zip_file_name
    , mongo_tuples, labelnameflag, fromweb
    , training_fraction, jobname, model_data_folder ): 
    

    # zip func in other files for Spark workers ================= ================
    zip_file_path = ml_build_zip_file(zipout_dir, zipcode_dir, zip_file_name, prefix='zip_feature_util')
    print "INFO: zip_file_path=",zip_file_path
    

    # ML model filename ====
    model_fname=os.path.join(model_data_folder, row_id_str+'.pkl')
    print "INFO: model_data_folder=",model_data_folder    
    # create out folders and clean up old model files ====
    ml_util.ml_prepare_output_dirs(row_id_str,local_out_dir,model_data_folder,model_fname)   

    # init Spark context ====
    sc=ml_util.ml_get_spark_context(sp_master
        , spark_rdd_compress
        , spark_driver_maxResultSize
        , sp_exe_memory
        , sp_core_max
        , jobname
        , [zip_file_path]) 

    
    t0 = time()
    t00 = t0
    
    # check if ml_opts.has_excluded_feat ==1 ===================================
    has_excluded_feat=0
    ml_opts={}
    if not ml_opts_jstr is None:
        ml_opts=json.loads(ml_opts_jstr)
        if "has_excluded_feat" in ml_opts:
            has_excluded_feat=ml_opts["has_excluded_feat"]

    # get excluded feature list from mongo ========== ===
    if str(has_excluded_feat) == "1" and excluded_feat_cslist is None:
        excluded_feat_cslist=ml_util.ml_get_excluded_feat(row_id_str, mongo_tuples)
    print "INFO: excluded_feat_cslist=",excluded_feat_cslist
            
    # source libsvm filename  
    libsvm_data_file = os.path.join(hdfs_feat_dir , "libsvm_data")
    print "INFO: libsvm_data_file=", libsvm_data_file

    # load feature count file
    feat_count_file=libsvm_data_file+"_feat_count"
    feature_count=zip_feature_util.get_feature_count(sc,feat_count_file)
    print "INFO: feature_count=",feature_count

    
    # load sample RDD from text file   
    #   also exclude selected features in sample ================ =====
    # format (LabeledPoint,hash) from str2LabeledPoint_hash() 
    #samples_rdd = MLUtils.loadLibSVMFile(sc, libsvm_data_file)
    samples_rdd,feature_count = zip_feature_util.get_sample_rdd(sc, libsvm_data_file, feature_count, excluded_feat_cslist)

    all_data = samples_rdd.collect()
    sample_count=len(all_data)
    # 2-D array
    features_list = [x.features.toArray() for x,_ in all_data]
    # label array
    labels_list_all = [x.label for x,_ in all_data]
    # hash array
    hash_list_all = [x for _,x in all_data]

    # convert to np array
    labels_list_all = array(labels_list_all)
    features_array = np.array(features_list)
    hash_list_all=np.array(hash_list_all)
    
    # generate sparse matrix (csr) for all samples
    features_sparse_mtx = csr_matrix(features_array)
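    # Illustrative: csr_matrix stores only the non-zero entries, e.g.
    #   csr_matrix(np.array([[0., 1.], [2., 0.]])).nnz  ->  2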

    ### randomly split the samples into training and testing data ===============
    X_train_sparse, X_test_sparse, labels_train, labels_test, train_hash_list, test_hash_list = \
            cross_validation.train_test_split(features_sparse_mtx, labels_list_all, hash_list_all, test_size=(1-training_fraction) )
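    # Note: passing three arrays to train_test_split applies one shuffled index
    # permutation to all of them, so features, labels and hashes stay aligned.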
    # X_test_sparse is scipy.sparse.csr.csr_matrix
    testing_sample_count = len(labels_test)
    training_sample_count=len(labels_train)
    training_lbl_cnt_list=Counter(labels_train)
    testing_lbl_cnt_list=Counter(labels_test)
    
    print "INFO: training sample count=",training_sample_count,", testing sample count=",testing_sample_count,",sample_count=",sample_count
    print "INFO: training label list=",training_lbl_cnt_list,", testing label list=",testing_lbl_cnt_list
    print "INFO: train_hash_list count=",len(train_hash_list),", test_hash_list count=",len(test_hash_list)
    t1 = time()
    print 'INFO: running time: %f' %(t1-t0)
    
    ###############################################
    ###########build learning model################
    ###############################################
    
    ### parse parameters and generate the model ###
    (clf, model_name, api, cv, param_dic) = parse_param_and_get_model(ml_opts)
    if model_name == "none":
        print "ERROR: model name not found!"
        return -1

    #param_jobj=json.loads(ml_opts_jstr);
    #print "param_jobj=",param_jobj
        
    ########################################################
    ##########Grid Search with cross validation#############
    ########################################################    
    json2save={}
    json2save["rid"]=int(row_id_str)
    json2save["key"]="cv_result"
    #json2save["param_str"]=ml_opts_jstr
    json2save["param_dic"]=param_dic
    cv_grid=[]
    if api == "centralized":
        #########run with Scikit-learn API (for comparison)######
        print "INFO: ******************Grid Search with Scikit-learn API************"

        t0 = time()
        
        # Set the parameters by cross-validation
        #tuned_parameters = [{'C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000]}]
        #tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], \
        #                 'C': [1, 10, 100, 1000]}, \
        #                {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]

        scores = ['accuracy']
        json2save["scores"]=scores
        #print json2save
        
        for score in scores: # for one item only? score=accuracy
            print("INFO: # Tuning hyper-parameters for %s" % score)
            #print()

            grid = grid_search.GridSearchCV(estimator = clf, param_grid = param_dic, cv=cv, scoring= score)
            grid.fit(X_train_sparse, labels_train)
            
            print "INFO: Best parameters set found on development set:"
            print "INFO: grid.best_params_=",grid.best_params_
            print "INFO: Grid scores on development set:" 
            for key in grid.best_params_:
                print "INFO: best_params["+key+"]=", grid.best_params_[key]
                if key.lower()=="regtype":
                    ml_opts['regularization']=str(grid.best_params_[key]) # add best param to 
                else:
                    ml_opts[key.lower()]=str(grid.best_params_[key]) # add best param to 
            # save best param to db as json string
            j_str=json.dumps(ml_opts)
            json2save["param_str"]=j_str
            print "INFO: grid_scores_ with params:"
            for params, mean_score, scores in grid.grid_scores_:
                print "INFO: %0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() * 2, params)
                #outstr='%s,%0.3f,%0.03f,%s' % (params,mean_score, scores.std() * 2,"Selected" if params==grid.best_params_ else "")
                outj={}
                outj["param"]=params
                outj["average_accuracy"]="%0.3f" % (mean_score)
                outj["std_deviation"]="%0.3f" % (scores.std() * 2)
                outj["selected"]="%s" % ("Selected" if params==grid.best_params_ else "")
                
                cv_grid.append(outj)
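            # NOTE: grid_search.GridSearchCV and its grid_scores_ attribute are
            # the pre-0.18 scikit-learn API; newer releases expose the same
            # data as sklearn.model_selection.GridSearchCV.cv_results_.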
        
        clf_best = grid.best_estimator_
        t1 = time()
        ############# END run with SKlearn ######
        print 'INFO: Grid Search with SKlearn running time: %f' %(t1-t0)
        t0 = time()
    else:
    
        #############run with SPARK######
        
        print "INFO: ******************Grid Search with SPARK************"
            
        all_comb_list_of_dic = get_all_combination_list_of_dic(param_dic) 
        print "INFO: Total number of searching combinations=", len(all_comb_list_of_dic) 
        #print "all_comb_list_of_dic: ", all_comb_list_of_dic
        params_rdd = sc.parallelize(all_comb_list_of_dic)
        
        ### broadcast clf, training data, testing data to all workers ###
        X_broadcast = sc.broadcast(X_train_sparse)
        y_broadcast = sc.broadcast(labels_train)
        clf_broadcast = sc.broadcast(clf)
        
        ### Grid Search with CV in multiple workers ###
        models = params_rdd.map(lambda x: learn_with_params(clf_broadcast.value, X_broadcast.value, y_broadcast.value, cv, x)).sortByKey(ascending = False).cache()
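        # learn_with_params (defined elsewhere in this module) is expected to
        # return (average_accuracy, (fitted_clf, param_dict, 2*std)), matching
        # the destructuring of models.first() below; sortByKey(ascending=False)
        # puts the best-scoring parameter combination first.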
        
        (ave_accuracy, (clf_best, p_dic_best, std2))  = models.first()
        # output results #

        print "INFO: Best parameters set found for ", model_name, " is: "
        print "INFO: ",
        for key in p_dic_best:
            print key, " = ", p_dic_best[key],
            if key.lower()=="regtype":
                ml_opts['regularization']=str(p_dic_best[key]) 
            else:
                ml_opts[key.lower()]=str(p_dic_best[key]) # add best param to 
            # save best param to db as json string
        print ""
        j_str=json.dumps(ml_opts)
        json2save["param_str"]=j_str

        print "INFO: Average accuracy with CV = ", cv, ": ", ave_accuracy
        
        ######## print complete report #######
        print "INFO: Grid scores on development set:"
        all_results = models.collect()
        for i in range(0, len(all_results)):
            (ave_accu_i, (clf_i, p_dic_i, std2_i)) = all_results[i]
            print "INFO: ",ave_accu_i, " for ", p_dic_i
            print "INFO: %0.3f (+/-%0.03f) for " % (ave_accu_i, std2_i), p_dic_i
            #outstr='%s,%0.3f,%0.03f,%s' % ( p_dic_i, ave_accu_i, std2_i, "Selected" if p_dic_i==p_dic_best else "")
            outj={}
            outj["param"]=p_dic_i
            outj["average_accuracy"]="%0.3f" % (ave_accu_i)
            outj["std_deviation"]="%0.3f" % (std2_i)
            outj["selected"]="%s" % ("Selected" if p_dic_i==p_dic_best else "")
            
            cv_grid.append(outj)
        print " "
        
        t1 = time()
        
        ############# END run with SPARK######
        print 'INFO: Grid search with SPARK running time: %f' %(t1-t0)
    
    ##################################################################################
    #print "cv_grid=",cv_grid
    #json2save["cv_grid_title"]='param,average_accuracy,std_deviation,selected' 
    json2save["cv_grid_data"]=cv_grid
    json2save['clf_best']=str(clf_best).replace("\n","").replace("    ","")
    cv_result=json.dumps(json2save)
    #print "INFO: cv_result=",cv_result
    filter='{"rid":'+row_id_str+',"key":"cv_result"}'
    upsert_flag=True
    ## write to mongoDB.myml.dataset_info, ignore doc with duplicated key
    # db.dataset_info.createIndex({"rid":1,"key":1},{unique:true})
    ret=query_mongo.upsert_doc_t(mongo_tuples,filter,cv_result,upsert_flag)
    print "INFO: Upsert count for cv_result: ret=",ret
 
    ##################################################################################
    ##########Retrain with best model for training set and output results#############
    ##################################################################################
    print "INFO: **********Retrain with best model for training set and output results************"
    
    clf_best.fit(X_train_sparse, labels_train)
    #### save clf_best for future use ####
    #joblib.dump(clf_best, model_data_folder + row_id_str+'.pkl')
    joblib.dump(clf_best, model_fname) 
    
    ### Evaluating the model on testing data
    labels_pred = clf_best.predict(X_test_sparse)
    accuracy = clf_best.score(X_test_sparse, labels_test)
    print "INFO: Accuracy = ", accuracy
    
    
    ######################################the rest of the code is the same as train_sklean.py (replace clf with clf_best)#####################################################################
    clf=clf_best
    print "INFO: model type=",type(clf)," clf=",clf

    # get data from model ================================
    coef=None
    intercept=None
    try:
        if type(clf) in ( classes.SVC , classes.NuSVC) : # kernel SVC/NuSVC do not expose coef_
            col_num=clf.support_vectors_.shape[1]
        else: #linear only
            # coef_ is only available when using a linear kernel
            col_num = len(clf.coef_[0])
            coef=clf.coef_[0]
            intercept=clf.intercept_[0] # only get 1st item?
            #print "**model:clf.coef_[0] =",clf.coef_[0]
    except Exception as e:
        print "WARNING: Can't get clf.coef_[0]. e=",e,", get total features from meta-data"
        col_num = 0 #how to get feature number for sparse array? 
    print "INFO: total feature # in the model: ", col_num

    jfeat_coef_dict={}
    # create feature coefficient file ================================
    if coef is None:
        print "WARNING: model weights not found!"    
    else:
        feat_filename=os.path.join(local_out_dir,row_id_str+"_feat_coef.json")
        print "INFO: feat_filename=",feat_filename
        # save coef_arr to mongo & create jfeat_coef_dict===
        jfeat_coef_dict=ml_util.ml_save_coef_build_feat_coef(row_id_str, mongo_tuples, coef, intercept, feat_filename, ds_id)
    #print "INFO: jfeat_coef_dict=", jfeat_coef_dict
    print "INFO: jfeat_coef_dict len=", len(jfeat_coef_dict )


    # filename for false pred 
    false_pred_fname=os.path.join(local_out_dir,row_id_str+"_false_pred.json")
    print "INFO: false_pred_fname=", false_pred_fname

    # build files for false pred & score graph
    (score_arr_0, score_arr_1, max_score,min_score)=ml_build_false_pred(X_test_sparse,coef,intercept
        , labels_test, labels_pred, test_hash_list, model_name, jfeat_coef_dict, false_pred_fname) 

    # save pred output
    pred_out_arr=[]
    for i in range(0,len(labels_test)):
        pred_out_arr.append((labels_test[i], labels_pred[i], test_hash_list[i]))
    pred_ofname=os.path.join(local_out_dir,row_id_str+"_pred_output.pkl")
    print "INFO: pred_ofname=", pred_ofname
    ml_util.ml_pickle_save(pred_out_arr,pred_ofname)
    
    ###################################################
    ### generate label names (family names) ###########
    ### connect to database to get the column list which contains all column number of the corresponding feature####
    ###################################################
    
    if labelnameflag == 1:
        key = "dic_name_label"
        jstr_filter='{"rid":'+row_id_str+',"key":"'+key+'"}'
        jstr_proj='{"value":1}'

        # get parent dataset's data
        if ds_id != row_id_str:
            jstr_filter='{"rid":'+ds_id+',"key":"'+key+'"}'
        
        doc=query_mongo.find_one_t(mongo_tuples, jstr_filter, jstr_proj)
        dic_list = doc['value']
        
        label_dic = {}
        for i in range(0, len(dic_list)):
            for key in dic_list[i]:
                label_dic[dic_list[i][key]] = key.encode('UTF8')
        print "INFO: label_dic:", label_dic
    else:
        label_dic = {}
        label_set = set(labels_list_all)
        for label_value in label_set:
            label_dic[int(label_value)] = str(int(label_value))
        print "INFO: ******generated label_dic:", label_dic 
    
    labels_list = []
    for key in sorted(label_dic):
        labels_list.append(label_dic[key])
    
    ### generate sample numbers of each family in testing data###
    testing_sample_number = len(labels_test)
    print "INFO: testing_sample_number=", testing_sample_number
    test_cnt_dic = {}
    for key in label_dic:
        test_cnt_dic[key] = 0
    for i in range (0, testing_sample_number):
        for key in label_dic:
            if labels_test[i] == key:
                test_cnt_dic[key] = test_cnt_dic[key] + 1
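    # Equivalent shortcut (illustrative): collections.Counter(labels_test)
    # produces the same nonzero per-label counts in a single pass.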
    print "INFO: Number of samples in each label is=", test_cnt_dic
    
    ###############################################
    ###########plot prediction result figure#######
    ###############################################
    pred_fname=os.path.join(local_out_dir,row_id_str+"_1"+".png")
    true_fname=os.path.join(local_out_dir,row_id_str+"_2"+".png")
    pred_xlabel='Prediction (Single Run)'
    true_xlabel='True Labels (Single Run)'
    test_cnt_dic=ml_util.ml_plot_predict_figures(labels_pred.tolist(), labels_test.tolist(), labels_list, label_dic, testing_sample_count 
        , pred_xlabel, pred_fname, true_xlabel, true_fname)
    print "INFO: figure files: ", pred_fname, true_fname
    print "INFO: Number of samples in each label is=", test_cnt_dic

    roc_auc=None
    #fscore=None 
    perf_measures=None
    class_count=len(labels_list)
    dataset_info={"training_fraction":training_fraction, "class_count":class_count,"dataset_count":sample_count}
    #############################################################
    ###################for 2 class only (plot ROC curve)#########
    #############################################################
    if len(labels_list) == 2:

        # build data file for score graph
        score_graph_fname=os.path.join(local_out_dir,row_id_str+"_score_graph.json")
        print "INFO: score_graph_fname=", score_graph_fname
        ml_build_pred_score_graph(score_arr_0,score_arr_1,model_name, score_graph_fname,max_score,min_score)

            
        do_ROC=True
        reverse_label_dic = dict((v,k) for k, v in label_dic.items())
        if 'clean' in reverse_label_dic:
            flag_clean = reverse_label_dic['clean']
        elif 'benign' in reverse_label_dic:
            flag_clean = reverse_label_dic['benign']
        elif '0' in reverse_label_dic:
            flag_clean = 0
        else:
            print "No ROC curve generated: 'clean' or '0' must be a label for indicating negative class!"
            do_ROC=False
            
        if do_ROC:
            # calculate fscore  ==========
            perf_measures=ml_util.calculate_fscore(labels_test, labels_pred)
            print "INFO: perf_measures=",perf_measures
            
            confidence_score = clf_best.decision_function(X_test_sparse)
                    
            if flag_clean == 0:
                scores = [x for x in confidence_score]
                s_labels = [x for x in labels_test]
                testing_N = test_cnt_dic[0]
                testing_P = test_cnt_dic[1]
            else:
                scores = [-x for x in confidence_score]
                s_labels = [1-x for x in labels_test]
                testing_N = test_cnt_dic[1]
                testing_P = test_cnt_dic[0]
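            # When 'clean' maps to label 1, scores are negated and labels
            # complemented (1-x) so the negative class is always 0 for ROC.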
                
            # create ROC data file ======== ==== 
            roc_auc=ml_create_roc_files(row_id_str, scores, s_labels, testing_N, testing_P
                , local_out_dir, row_id_str)
                
            perf_measures["roc_auc"]=roc_auc
            
                
    # only update db for web request
    if fromweb=="1": 
        #print "database update"
        str_sql="UPDATE atdml_document set "+"accuracy = '"+str(accuracy*100)+"%" \
            +"', status = 'learned', processed_date ='"+str(datetime.datetime.now()) \
            +"',ml_opts='"+j_str \
            +"', perf_measures='"+json.dumps(perf_measures) \
            +"', dataset_info='"+json.dumps(dataset_info) \
            +"' where id="+row_id_str
        ret=exec_sqlite.exec_sql(str_sql)
        print "INFO: Data update done! ret=", str(ret)
    else:
        print "INFO: accuracy = '"+str(accuracy*100)+"%"
    
    print 'INFO: total running time: %f' %(t1-t00)
    
    print 'INFO: Finished!'
    return 0
def train(row_id_str, ds_id, hdfs_feat_dir, local_out_dir, ml_opts_jstr,
          excluded_feat_cslist, sp_master, spark_rdd_compress,
          spark_driver_maxResultSize, sp_exe_memory, sp_core_max, zipout_dir,
          zipcode_dir, zip_file_name, mongo_tuples, labelnameflag, fromweb,
          training_fraction, jobname):

    if not os.path.exists(local_out_dir):
        os.makedirs(local_out_dir)

    # zip func in other files for Spark workers ================= ================
    zip_file_path = ml_build_zip_file(zipout_dir,
                                      zipcode_dir,
                                      zip_file_name,
                                      prefix='zip_feature_util')
    print "INFO: zip_file_path=", zip_file_path

    # get_spark_context
    sc = ml_util.ml_get_spark_context(sp_master, spark_rdd_compress,
                                      spark_driver_maxResultSize,
                                      sp_exe_memory, sp_core_max, jobname,
                                      [zip_file_path])

    t0 = time()
    t00 = t0

    # check if ml_opts.has_excluded_feat ==1 ===================================
    has_excluded_feat = 0
    ml_opts = {}
    if not ml_opts_jstr is None:
        ml_opts = json.loads(ml_opts_jstr)
        if "has_excluded_feat" in ml_opts:
            has_excluded_feat = ml_opts["has_excluded_feat"]

    #print "has_excluded_feat=",has_excluded_feat,",excluded_feat_cslist=",excluded_feat_cslist

    # get excluded feature list from mongo ========== ===
    if str(has_excluded_feat) == "1" and excluded_feat_cslist is None:
        excluded_feat_cslist = ml_util.ml_get_excluded_feat(
            row_id_str, mongo_tuples)
    print "INFO: excluded_feat_cslist=", excluded_feat_cslist
    ### generate Labeled point
    libsvm_data_file = os.path.join(hdfs_feat_dir, "libsvm_data")
    print "INFO: libsvm_data_file:", libsvm_data_file

    # load feature count file
    feat_count_file = libsvm_data_file + "_feat_count"
    feature_count = zip_feature_util.get_feature_count(sc, feat_count_file)
    print "INFO: feature_count=", feature_count

    # load sample RDD from text file
    #   also exclude selected features in sample ================ =====
    # format (LabeledPoint,hash) from str2LabeledPoint_hash()
    #samples_rdd = MLUtils.loadLibSVMFile(sc, libsvm_data_file)
    samples_rdd, feature_count = zip_feature_util.get_sample_rdd(
        sc, libsvm_data_file, feature_count, excluded_feat_cslist)

    # get distinct label list
    labels_list_all = samples_rdd.map(
        lambda p: p[0].label).distinct().collect()

    ### generate training and testing data
    training_rdd, testing_rdd = samples_rdd.randomSplit(
        [training_fraction, 1 - training_fraction])
    training_rdd = training_rdd.map(lambda p: p[0])  # keep LabeledPoint only
    training_rdd.cache()
    training_sample_count = training_rdd.count()
    training_lbl_cnt_list = training_rdd.map(
        lambda p: (p.label, 1)).reduceByKey(add).collect()
    testing_rdd.cache()
    testing_sample_count = testing_rdd.count()
    testing_lbl_cnt_list = testing_rdd.map(
        lambda p: (p[0].label, 1)).reduceByKey(add).collect()
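    # 'add' here is presumably operator.add (the usual reduceByKey reducer),
    # producing per-label sample counts as (label, count) pairs.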
    sample_count = training_sample_count + testing_sample_count

    t1 = time()
    print "INFO: training sample count=", training_sample_count, ", testing sample count=", testing_sample_count
    print "INFO: training label list=", training_lbl_cnt_list, ", testing label list=", testing_lbl_cnt_list
    print "INFO: labels_list_all=", labels_list_all
    print "INFO: training and testing samples generated!"
    print 'INFO: running time: %f' % (t1 - t0)
    t0 = t1

    ##############################################
    ########### Grid Search with CV ##############
    ##############################################

    ### get the parameters for cross validation and grid search ###
    (cv, model_name, param_dict) = generate_param(ml_opts)

    ### generate label names (family names) #####
    ### connect to database to get the column list which contains all column number of the corresponding feature####
    if labelnameflag == 1:
        label_dic = ml_util.ml_get_label_dict(row_id_str, mongo_tuples, ds_id)
        print "INFO: label_dic:", label_dic

    else:
        label_dic = {}
        label_set = set(labels_list_all)
        for label_value in label_set:
            label_dic[int(label_value)] = str(int(label_value))
        print "INFO: generated label_dic:", label_dic

    labels_list = []
    for key in sorted(label_dic):
        labels_list.append(label_dic[key])
    #print "labels:", labels_list
    class_num = len(labels_list)
    if class_num > 2:
        print "INFO: Multi-class classification! Number of classes = ", class_num

    #### generate training and testing rdd(s) for CV#####
    split_prob = 1.0 / float(cv)
    split_prob_list = []
    for i in range(0, cv):
        split_prob_list.append(split_prob)

    list_rdd = training_rdd.randomSplit(split_prob_list)
    list_train_rdd = []
    list_test_rdd = []
    for i in range(0, cv):
        list_rdd[i].cache()
    for i in range(0, cv):
        tr_rdd = sc.emptyRDD()
        for j in range(0, cv):
            if j == i:
                pass
            else:
                tr_rdd = tr_rdd + list_rdd[j]
        tr_rdd.cache()
        list_train_rdd.append(tr_rdd)
        list_test_rdd.append(list_rdd[i])
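    # This builds manual k-fold CV: fold i tests on list_rdd[i] and trains on
    # the union of the remaining cv-1 splits.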

    all_comb_list_of_dic = get_all_combination_list_of_dic(param_dict)
    print "INFO: Total number of searching combinations:", len(
        all_comb_list_of_dic)

    ### loop for all parameter combinations and search the best parameters with CV###
    results = []
    for p in range(0, len(all_comb_list_of_dic)):
        params = all_comb_list_of_dic[p]
        C = params['C']
        iteration_num = params['iterations']
        regularization = params['regType']

        scores = []
        for i in range(0, cv):
            train_rdd = list_train_rdd[i]
            test_rdd = list_test_rdd[i]
            train_number = train_rdd.count()
            regP = C / float(train_number)

            ### build model ###
            if model_name == "linear_svm_with_sgd":
                #print "====================1: Linear SVM============="
                model_classification = SVMWithSGD.train(
                    train_rdd,
                    regParam=regP,
                    iterations=iteration_num,
                    regType=regularization)  # regParam = 1/(sample_number*C)
            elif model_name == "logistic_regression_with_lbfgs":
                #print "====================2: LogisticRegressionWithLBFGS============="
                model_classification = LogisticRegressionWithLBFGS.train(
                    train_rdd,
                    regParam=regP,
                    iterations=iteration_num,
                    regType=regularization,
                    numClasses=class_num)  # regParam = 1/(sample_number*C)
            elif model_name == "logistic_regression_with_sgd":
                #print "====================3: LogisticRegressionWithSGD============="
                model_classification = LogisticRegressionWithSGD.train(
                    train_rdd,
                    regParam=regP,
                    iterations=iteration_num,
                    regType=regularization)  # regParam = 1/(sample_number*C)
            else:
                print "ERROR: Training model selection error: no valid ML model selected!"
                return

            ### Evaluating the model on testing data
            labelsAndPreds = test_rdd.map(
                lambda p: (p.label, model_classification.predict(p.features)))
            labelsAndPreds.cache()
            test_sample_number = test_rdd.count()
            testErr = labelsAndPreds.filter(
                lambda (v, p): v != p).count() / float(test_sample_number)
            accuracy = 1 - testErr
            #print "Accuracy = ", accuracy
            scores.append(accuracy)

        ss = np.asarray(scores)
        #print "%0.3f (+/-%0.03f) for " % (ss.mean(), ss.std() * 2), params
        results.append((ss.mean(), ss.std() * 2, params))

    sorted_results = sorted(results, key=lambda x: x[0], reverse=1)
    (best_accuracy, best_std2, best_param) = sorted_results[0]
    print "INFO: ml_opts_jstr=", ml_opts_jstr
    print "INFO: best_param=", best_param

    #ml_opts=json.loads(ml_opts_jstr);
    print "INFO: ml_opts=", ml_opts

    ##############################################
    ######output Grid Search results##############
    ##############################################
    json2save = {}
    json2save["rid"] = int(row_id_str)
    json2save["key"] = "cv_result"
    #json2save["param_str"]=ml_opts_jstr
    json2save["param_dic"] = param_dict
    cv_grid = []
    print ""
    print "INFO: =====Grid Search Results for SPARK ======"
    print "INFO: Best parameters set found for ", model_name, " is: "
    for key in best_param:
        print "INFO:", key, "=", best_param[key]
        if key.lower() == "regtype":
            ml_opts['regularization'] = str(best_param[key])
        else:
            ml_opts[key.lower()] = str(best_param[key])  # add best param to ml_opts
    ml_opts_jstr = json.dumps(ml_opts)
    json2save["param_str"] = ml_opts_jstr
    print "INFO: Average accuracy with CV = ", cv, ": ", best_accuracy
    print ""
    print "INFO: Grid scores on development set:"
    for i in range(0, len(sorted_results)):
        (ave_accu_i, std2_i, param_i) = sorted_results[i]
        print "%0.3f (+/-%0.03f) for " % (ave_accu_i, std2_i), param_i
        #outstr='%s,%0.3f,%0.03f,%s' % (param_i,ave_accu_i, std2_i,"Selected" if param_i==best_param else "")
        outj = {}
        outj["param"] = param_i
        outj["average_accuracy"] = "%0.3f" % (ave_accu_i)
        outj["std_deviation"] = "%0.3f" % (std2_i)
        outj["selected"] = "%s" % ("Selected" if param_i == best_param else "")
        cv_grid.append(outj)
    print " "

    t1 = time()
    print 'INFO: Grid Search with CV run time: %f' % (t1 - t0)
    t0 = time()

    ##################################################################################
    json2save["cv_grid_data"] = cv_grid
    cv_result = json.dumps(json2save)
    print "INFO: cv_result=", cv_result
    filter = '{"rid":' + row_id_str + ',"key":"cv_result"}'
    upsert_flag = True
    ## write to mongoDB.myml.dataset_info, ignore doc with duplicated key
    # db.dataset_info.createIndex({"rid":1,"key":1},{unique:true})
    ret = query_mongo.upsert_doc_t(mongo_tuples, filter, cv_result,
                                   upsert_flag)
    print "INFO: Upsert count for mllib cv_result: ret=", ret

    ############################################################################################
    ########### retrain with all training data and generate the final model with results #######
    ############################################################################################
    C = best_param['C']
    iteration_num = best_param['iterations']
    regularization = best_param['regType']
    regP = C / float(training_sample_count)

    ######################################the rest of the code is the same as train_MLlib.py #####################################################################

    if model_name == "linear_svm_with_sgd":
        ### 1: linearSVM
        print "INFO: ====================1: Linear SVM============="
        model_classification = SVMWithSGD.train(
            training_rdd,
            regParam=regP,
            iterations=iteration_num,
            regType=regularization)  # regParam = 1/(sample_number*C)
        #print model_classification
    elif model_name == "logistic_regression_with_lbfgs":
        ### 2: LogisticRegressionWithLBFGS
        print "INFO: ====================2: LogisticRegressionWithLBFGS============="
        model_classification = LogisticRegressionWithLBFGS.train(
            training_rdd,
            regParam=regP,
            iterations=iteration_num,
            regType=regularization,
            numClasses=class_num)  # regParam = 1/(sample_number*C)
    elif model_name == "logistic_regression_with_sgd":
        ### 3: LogisticRegressionWithSGD
        print "INFO: ====================3: LogisticRegressionWithSGD============="
        model_classification = LogisticRegressionWithSGD.train(
            training_rdd,
            regParam=regP,
            iterations=iteration_num,
            regType=regularization)  # regParam = 1/(sample_number*C)
    else:
        print "INFO: Training model selection error: no valid ML model selected!"
        return

    print "INFO: model type=", type(model_classification)

    # create feature coefficient file ================================
    coef_arr = None
    intercept = None
    if model_classification.weights is None:
        print "WARNING: model weights not found!"
    else:
        coef_arr = model_classification.weights.toArray().tolist()
        # save to mongo
        key = "coef_arr"
        ret = ml_util.save_json_t(row_id_str, key, coef_arr, mongo_tuples)
        # save intercept to mongo
        key = "coef_intercept"
        intercept = model_classification.intercept
        ret = ml_util.save_json_t(row_id_str, key, intercept, mongo_tuples)

        # feature list + coef file =============
        feat_filename = os.path.join(local_out_dir,
                                     row_id_str + "_feat_coef.json")
        print "INFO: feat_filename=", feat_filename

        # create feature list + coef file =============================================== ============
        # expect a dict of {"fid":(coef, feature_raw_string)}
        jret = ml_util.build_feat_list_t(row_id_str, feat_filename, None, None,
                                         coef_arr, ds_id, mongo_tuples)

        # special featuring for IN or libsvm
        if jret is None:
            jret = ml_util.build_feat_coef_raw_list_t(row_id_str,
                                                      feat_filename, coef_arr,
                                                      ds_id, mongo_tuples)
        if jret is None:
            print "WARNING: Cannot create sample list for testing dataset. "

        jfeat_coef_dict = jret
        print "INFO: coef_arr len=", len(
            coef_arr), ", feature_count=", feature_count
        # for multi-class
        if len(coef_arr) != feature_count:
            jfeat_coef_dict = {}
            print "WARNING: feature list can't be shown for multi-class classification"

        # Calculate prediction and Save testing dataset
        bt_coef_arr = sc.broadcast(coef_arr)
        bt_intercept = sc.broadcast(intercept)
        bt_jfeat_coef_dict = sc.broadcast(jfeat_coef_dict)
        ### Evaluating the model on testing dataset: label, predict label, score, feature list
        print "INFO: intercept=", intercept
        print "INFO: coef_arr len=", len(coef_arr)
        print "INFO: jfeat_coef_dict len=", len(jfeat_coef_dict)

        # get prediction of testing dataset : (tlabel, plabel, score, libsvm, raw feat str, hash) ==============================
        if len(coef_arr) == feature_count:
            testing_pred_rdd = testing_rdd.map(lambda p: (
                 p[0].label \
                ,model_classification.predict(p[0].features) \
                ,zip_feature_util.calculate_hypothesis(p[0].features, bt_coef_arr.value, bt_intercept.value, model_name) \
                ,p[0].features \
                ,p[1] \
            ) ).cache()
        else:  # for multi-class, no prediction score;, TBD for better solution: how to display multiple weights for each class
            testing_pred_rdd = testing_rdd.map(lambda p: (
                 p[0].label \
                ,model_classification.predict(p[0].features) \
                ,0 \
                ,p[0].features \
                ,p[1] \
            ) ).cache()

        # save false prediction to local file
        false_pred_fname = os.path.join(local_out_dir,
                                        row_id_str + "_false_pred.json")
        print "INFO: false_pred_fname=", false_pred_fname
        false_pred_data=testing_pred_rdd.filter(lambda p: p[0] != p[1])\
            .map(lambda p: (p[0],p[1],p[2] \
            ,zip_feature_util.get_dict_coef_raw4feat(zip_feature_util.sparseVector2dict(p[3]), bt_jfeat_coef_dict.value)
            ,p[4]  ) ) \
            .collect()
        print "INFO: false predicted count=", len(false_pred_data)
        false_pred_arr = []
        with open(false_pred_fname, "w") as fp:
            for sp in false_pred_data:
                jsp = {
                    "tlabel": sp[0],
                    "plabel": sp[1],
                    "score": sp[2],
                    "feat": sp[3],
                    "hash": sp[4]
                }
                #print "jsp=",jsp
                false_pred_arr.append(jsp)
            fp.write(json.dumps(false_pred_arr))

        # save prediction results, format: label, prediction, hash
        pred_ofname = os.path.join(local_out_dir,
                                   row_id_str + "_pred_output.pkl")
        print "INFO: pred_ofname=", pred_ofname
        pred_out_arr = testing_pred_rdd.map(lambda p:
                                            (p[0], p[1], p[4])).collect()
        ml_util.ml_pickle_save(pred_out_arr, pred_ofname)

    ### Evaluating the model on testing data
    #labelsAndPreds = testing_rdd.map(lambda p: (p.label, model_classification.predict(p.features)))
    labelsAndPreds = testing_pred_rdd.map(lambda p: (p[0], p[1]))
    labelsAndPreds.cache()
    #testing_sample_count = testing_rdd.count()
    testErr = labelsAndPreds.filter(lambda (v, p): v != p).count() / float(
        testing_sample_count)
    accuracy = 1 - testErr
    print "INFO: Accuracy = ", accuracy

    ### Save model
    #save_dir = config.get('app', 'HADOOP_MASTER')+'/user/hadoop/yigai/row_6/'
    #save_dir = config.get('app', 'HADOOP_MASTER')+config.get('app', 'HDFS_MODEL_DIR')+'/'+row_id_str
    save_dir = os.path.join(config.get('app', 'HADOOP_MASTER'),
                            config.get('app', 'HDFS_MODEL_DIR'), row_id_str)
    try:
        hdfs.ls(save_dir)
        #print "find hdfs folder"
        hdfs.rmr(save_dir)
        #print "all files removed"
    except IOError as e:
        print "WARNING: I/O error({0}): {1}".format(
            e.errno, e.strerror), ". At HDFS=", save_dir
    except:
        print "WARNING: Unexpected error:", sys.exc_info(
        )[0], ". At HDFS=", save_dir

    model_classification.save(sc, save_dir)

    ###load model if needed
    #sameModel = SVMModel.load(sc, save_dir)

    t1 = time()
    print 'INFO: training run time: %f' % (t1 - t0)
    t0 = t1

    ###############################################
    ###########plot prediction result figure#######
    ###############################################

    labels = labelsAndPreds.collect()
    true_label_list = [x for x, _ in labels]
    pred_label_list = [x for _, x in labels]

    pred_fname = os.path.join(local_out_dir, row_id_str + "_1" + ".png")
    true_fname = os.path.join(local_out_dir, row_id_str + "_2" + ".png")
    pred_xlabel = 'Prediction (Single Run)'
    true_xlabel = 'True Labels (Single Run)'
    test_cnt_dic = ml_util.ml_plot_predict_figures(
        pred_label_list, true_label_list, labels_list, label_dic,
        testing_sample_count, pred_xlabel, pred_fname, true_xlabel, true_fname)

    plt.show()
    perf_measures = None
    dataset_info = {
        "training_fraction": training_fraction,
        "class_count": class_num,
        "dataset_count": sample_count
    }
    #############################################################
    ###################for 2 class only (plot ROC curve)#########
    #############################################################
    if len(labels_list) == 2:

        do_ROC = True
        reverse_label_dic = dict((v, k) for k, v in label_dic.items())
        if 'clean' in reverse_label_dic:
            flag_clean = reverse_label_dic['clean']
        elif 'benign' in reverse_label_dic:
            flag_clean = reverse_label_dic['benign']
        elif '0' in reverse_label_dic:
            flag_clean = 0
        else:
            print "WARNING: No ROC curve generated: 'clean' or '0' must be a label for indicating negative class!"
            do_ROC = False

        # build data file for score graph
        score_graph_fname = os.path.join(local_out_dir,
                                         row_id_str + "_score_graph.json")
        print "INFO: score_graph_fname=", score_graph_fname

        # build score_arr_0, score_arr_1
        #    format: tlabel, plabel, score, libsvm, raw feat str, hash
        graph_arr = testing_pred_rdd.map(lambda p:
                                         (int(p[0]), float(p[2]))).collect()
        score_arr_0 = []
        score_arr_1 = []
        max_score = 0
        min_score = 0
        for p in graph_arr:
            if p[0] == 0:
                score_arr_0.append(p[1])
            else:
                score_arr_1.append(p[1])
            # save max,min score
            if p[1] > max_score:
                max_score = p[1]
            elif p[1] < min_score:
                min_score = p[1]
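        # Note: max_score/min_score start at 0, which suits decision-function
        # style scores centered on zero; an all-positive or all-negative score
        # range would leave one bound pinned at 0.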

        ml_build_pred_score_graph(score_arr_0, score_arr_1, model_name,
                                  score_graph_fname, max_score, min_score)
        #print "score_arr_0=",score_arr_0
        #print "score_arr_1=",score_arr_1
        #print "max_score=",max_score
        #print "min_score=",min_score

        if do_ROC:

            perf_measures = ml_util.calculate_fscore(true_label_list,
                                                     pred_label_list)
            print "RESULT: perf_measures=", perf_measures
            model_classification.clearThreshold()
            scoreAndLabels = testing_rdd.map(lambda p: (
                model_classification.predict(p[0].features), int(p[0].label)))
            #metrics = BinaryClassificationMetrics(scoreAndLabels)
            #areROC = metrics.areaUnderROC
            #print areROC
            scoreAndLabels_list = scoreAndLabels.collect()
            if flag_clean == 0:
                scores = [x for x, _ in scoreAndLabels_list]
                s_labels = [x for _, x in scoreAndLabels_list]
                testing_N = test_cnt_dic[0]
                testing_P = test_cnt_dic[1]
            else:
                scores = [-x for x, _ in scoreAndLabels_list]
                s_labels = [1 - x for _, x in scoreAndLabels_list]
                testing_N = test_cnt_dic[1]
                testing_P = test_cnt_dic[0]
            #print scores
            #print s_labels
            # create ROC data file ======== ====
            roc_auc = ml_create_roc_files(row_id_str, scores, s_labels,
                                          testing_N, testing_P, local_out_dir,
                                          row_id_str)

            perf_measures["roc_auc"] = roc_auc

    # only update db for web request
    if fromweb == "1":
        #print "database update"
        str_sql="UPDATE atdml_document set "+"accuracy = '"+str(accuracy*100)+"%" \
            +"', status = 'learned', processed_date ='"+str(datetime.datetime.now()) \
            +"',ml_opts='"+ml_opts_jstr \
            +"', perf_measures='"+json.dumps(perf_measures) \
            +"', dataset_info='"+json.dumps(dataset_info) \
            +"' where id="+row_id_str
        ret = exec_sqlite.exec_sql(str_sql)
        print "INFO: Data update done! ret=", str(ret)
    else:
        print "INFO: accuracy = '" + str(accuracy * 100) + "%"

    t1 = time()
    print 'INFO: total run time: %f' % (t1 - t00)

    print 'INFO: Finished!'
    return 0
Example #4
def pca(row_id_str, ds_id, hdfs_feat_dir, local_out_dir  
    , sp_master, spark_rdd_compress, spark_driver_maxResultSize, sp_exe_memory, sp_core_max
    , zipout_dir, zipcode_dir, zip_file_name
    , mongo_tuples, fromweb, pca_jstr
    , jobname, model_data_folder ): 
    
    # create zip files for Spark workers ================= ================
    zip_file_path = ml_build_zip_file(zipout_dir, zipcode_dir, zip_file_name, prefix='zip_feature_util')
    print "INFO: zip_file_path=",zip_file_path

    
    # init Spark context ====
    sc=ml_util.ml_get_spark_context(sp_master
        , spark_rdd_compress
        , spark_driver_maxResultSize
        , sp_exe_memory
        , sp_core_max
        , jobname
        , [zip_file_path])     
    

    
    pca_param=json.loads(pca_jstr)
    if "k" in pca_param:
        k=pca_param["k"]
    else:
        k=None
    if "threshold" in pca_param:
        threshold=pca_param["threshold"]
    else:
        threshold=None         
    if "lib" in pca_param:
        lib=pca_param["lib"]
    else:
        lib='mllib'
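    # Illustrative pca_jstr, matching the keys parsed above (values are
    # examples only): '{"k":10, "threshold":0.9, "lib":"mllib"}'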
  
    ret=-1
    # start here =================================================================== ===============
    t0 = time()
    
    # source libsvm filename  
    libsvm_data_file = os.path.join(hdfs_feat_dir , "libsvm_data")
    print "INFO: libsvm_data_file=", libsvm_data_file
    
    # load sample RDD from text file   
    # format Row(label, features, hash) from get_sample_dataframe() 
    samples_df, feature_count = zip_feature_util.get_sample_dataframe(sc, libsvm_data_file, 0, None)
    print "INFO: feature_count=",feature_count

    #df_pcaed format: hash,label, features 
    (df_pcaed, k, pca_model)=PCA_transform(sc, samples_df,feature_count, threshold, k) 
    print "INFO: Doing PCA... threshold=",threshold,",k=",k
    #print "df_pcaed=",df_pcaed.first()
    #print "k=",k
    #print "pca_model=",pca_model
    #print "pc=",pca_model.pc

    # pca model filename ============================= ===============
    if model_data_folder is None:
        if row_id_str != ds_id:
            # get from parent dataset
            model_data_folder  = os.path.join(config.get('app', 'HADOOP_MASTER'),config.get('app', 'HDFS_MODEL_DIR'), ds_id+"_pca")
        else:
            model_data_folder  = os.path.join(config.get('app', 'HADOOP_MASTER'),config.get('app', 'HDFS_MODEL_DIR'), row_id_str+"_pca")
            
    # create HDFS folder
    try:
        hdfs.mkdir(model_data_folder)
    except IOError as e:
        print "WARNING: I/O error({0}): {1}".format(e.errno, e.strerror),". At HDFS=", save_dir
    except:
        print "WARNING: Unexpected error:", sys.exc_info()[0] ,". At HDFS=", save_dir
    
        
    if not threshold is None:
        #pca_fname=os.path.join(hdfs_feat_dir , row_id_str+'_pca_'+str(threshold)+'.ml')
        pca_fname=os.path.join(model_data_folder , 'pca_model_'+str(threshold))
        libsvm_data_pca = os.path.join(hdfs_feat_dir , "libsvm_data_pca_"+str(threshold)+'.ml')
    else:
        pca_fname=os.path.join(model_data_folder , 'pca_model_'+str(k))
        libsvm_data_pca = os.path.join(hdfs_feat_dir , "libsvm_data_pca_"+str(k)+'.ml')
    
    # save pca model to HDFS ===============
    print "INFO: pca_fname=",pca_fname
    pca_model.write().overwrite().save(pca_fname)
    
    # save pca data to HDFS ============================= ===============
    print "INFO: libsvm_data_pca=",libsvm_data_pca
    # construct libsvm string
    libsvm_rdd=df_pcaed.rdd.map(lambda p: p[0]+" "+str(int(p[1]))+zip_feature_util.dv2libsvm(p[2].toArray()))
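    # Each output line is "<hash> <label><libsvm features>"; dv2libsvm is
    # assumed to render a dense vector as " 1:v1 2:v2 ..." (1-based indices).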
    
    # clean up old libsvm file ============================= ===============
    try:
        hdfs.rmr(libsvm_data_pca)
    except IOError as e:
        print "WARNING: I/O error({0}): {1}".format(e.errno, e.strerror)
    except:
        print "WARNING: Unexpected error at rmr():", sys.exc_info()[0]     

    # overwrite pca file at hdfs
    libsvm_rdd.saveAsTextFile(libsvm_data_pca)

    
    t1 = time()
    print 'INFO: PCA processing time: %f' %(t1-t0)
    
    ### insert pca_param into mongoDB  ###
    filter='{"rid":'+row_id_str+',"key":"pca_param"}'
    if not threshold is None:
        pca_param["threshold"]=threshold
    if not k is None:
        pca_param["k"]=k
    
    print "INFO: pca_param=",pca_param
    upsert_flag=True
    jstr_insert = '{ "rid":'+row_id_str+',"key":"pca_param", "value":'+json.dumps(pca_param)+'}'
    ret=query_mongo.upsert_doc_t(mongo_tuples,filter,jstr_insert,upsert_flag)
    print "INFO: Upsert count for pca_param=",ret
    
    # only update db for web request   ===========
    if fromweb=="1": 
        #print "database update"
        str_sql="UPDATE atdml_document set "  \
            +" status = 'pca-ed', processed_date ='"+str(datetime.datetime.now()) \
            +"' , ml_pca_opts = '"+json.dumps(pca_param) \
            +"' where id="+row_id_str
        ret=exec_sqlite.exec_sql(str_sql)
        print "INFO: Update Sqlite DB done! ret=", str(ret)

    
    t1 = time()
    print 'INFO: running time: %f' %(t1-t0)
    
    #print 'Finished!'
    return 0
def feat_extraction(row_id_str,
                    hdfs_dir_list,
                    hdfs_feat_dir,
                    model_data_folder,
                    sp_master,
                    spark_rdd_compress,
                    spark_driver_maxResultSize,
                    sp_exe_memory,
                    sp_core_max,
                    zipout_dir,
                    zipcode_dir,
                    zip_file_name,
                    mongo_tuples,
                    fromweb,
                    label_arr,
                    metadata_count,
                    label_idx,
                    data_idx,
                    pattern_str,
                    ln_delimitor,
                    data_field_list,
                    jkey_dict,
                    jobname,
                    num_gram,
                    feature_count_threshold,
                    token_dict=None,
                    HDFS_RETR_DIR=None,
                    remove_duplicated="N",
                    cust_featuring=None,
                    cust_featuring_params=None,
                    local_out_dir=None,
                    filter_ratio=None,
                    binary_flag=False):

    # zip func in other files for Spark workers ================= ================
    zip_file_path = ml_util.ml_build_zip_file(zipout_dir,
                                              zipcode_dir,
                                              zip_file_name,
                                              user_custom=cust_featuring)

    # get_spark_context
    spark = ml_util.ml_get_spark_session(sp_master, spark_rdd_compress,
                                         spark_driver_maxResultSize,
                                         sp_exe_memory, sp_core_max, jobname,
                                         zip_file_path)
    if spark:
        sc = spark.sparkContext
    # log time ================================================================ ================
    t0 = time()

    # input filename
    input_filename = "*"
    ext_type = '.gz'
    gz_list = None

    # single hdfs file
    if not ',' in hdfs_dir_list:  # single dir having *.gz ==== =========
        # read raw data from HDFS as .gz format ==========
        hdfs_files = os.path.join(hdfs_dir_list, input_filename + ext_type)
        # check if gz files in hdfs ============
        try:
            gz_list = hdfs.ls(hdfs_dir_list)
            print "INFO: check hdfs folder=", hdfs_dir_list

        except IOError as e:
            print "WARNING: I/O error({0}): {1}".format(e.errno, e.strerror)
        except:
            print "WARNING: Error at checking HDFS file:", sys.exc_info()[0]
        # use whole folder
        #print "gz_list",gz_list
        if gz_list is None or len(gz_list) == 0:
            print "ERROR: No file found by ", input_filename + ext_type  #,", use",hdfs_dir_list,"instead"
            return -2
        elif len(gz_list) == 1:
            # use dir as filename
            hdfs_files = hdfs_dir_list[0:-1]

    else:  # multiple dirs ==== =========
        hdfs_files = ""
        cnt = 0
        temp_lbl_list = []
        comma = ""
        print "INFO: before label_arr=", label_arr

        # check each folder
        for dr in hdfs_dir_list.split(','):
            #print "****=",dr
            if not len(dr) > 0:
                continue
            try:
                # remove space etc.
                dr = dr.strip()
                fdr = os.path.join(HDFS_RETR_DIR, dr)
                # ls didn't like "*"
                if '*' in fdr:
                    #gz_list=hdfs.ls(fdr.replace("*",""))
                    dn = os.path.dirname(fdr).strip()
                    bn = os.path.basename(fdr).strip()
                    #print "dn=",dn,",bn=",bn
                    # get all names under folder and do filtering
                    gz_list = fnmatch.filter(hdfs.ls(dn), '*' + bn)
                else:
                    gz_list = hdfs.ls(fdr)
                cnt = cnt + len(gz_list)

                if len(gz_list) > 0:
                    hdfs_files = hdfs_files + comma + fdr
                    comma = ","
            except IOError as e:
                print "WARNING: I/O error({0}): {1}".format(
                    e.errno, e.strerror)
            except:
                print "WARNING: Error at checking HDFS file:", sys.exc_info(
                )[0]
        # use whole folder
        if cnt is None or cnt == 0:
            print "ERROR: No file found at", hdfs_files
            return -2
        else:
            print "INFO: total file count=", cnt
        # set convert flag only when multiple dir and label_arr has dirty label

        if not label_arr is None and len(
                label_arr) == 2 and label_arr[1] == "dirty":
            convert2dirty = "Y"
    print "INFO: hdfs_dir_list=", hdfs_dir_list
    print "INFO: hdfs_files=", hdfs_files

    cust_featuring_jparams = None
    # custom featuring
    if not cust_featuring is None and len(cust_featuring) > 0:
        # load user module =======
        user_func, cust_featuring_jparams = get_user_custom_func(
            cust_featuring, cust_featuring_params)
        # TBD: apply user_func

        all_hashes_cnt_dic = None
        all_hash_str_dic = None
        all_hashes_seq_dic = None
    else:
        print "ERROR: custom featuring type is needed"
        return -1
    print "INFO: cust_featuring=", cust_featuring, "cust_featuring_jparams=", cust_featuring_jparams

    dnn_flag = False
    has_header = None
    label_col = None
    label_index = None
    # get featuring params
    if cust_featuring_jparams:
        if 'label_index' in cust_featuring_jparams:  # idx number for label, 0 based
            label_index = cust_featuring_jparams['label_index']
        if 'has_header' in cust_featuring_jparams:  # True/False
            has_header = eval(cust_featuring_jparams['has_header'])
            if has_header == 1:
                has_header = True
        if 'dnn_flag' in cust_featuring_jparams:  # True/False
            dnn_flag = cust_featuring_jparams['dnn_flag']
            if dnn_flag == 1:
                dnn_flag = True
            elif dnn_flag == 0:
                dnn_flag = False

    if label_index is None:
        label_index = 0
    elif not isinstance(label_index, int):
        label_index = eval(label_index)

    print "INFO: label_index=", label_index, ",has_header=", has_header, ",dnn_flag=", dnn_flag

    # read as DataFrame ===============================================
    df = spark.read.csv(hdfs_files, header=has_header)

    df.show()
    print "INFO: col names=", df.columns

    # get column name for label
    label_col = None
    for i, v in enumerate(df.columns):
        if i == label_index:
            label_col = v

    # get all distinct labels into an array  =============== provided by parameter?
    if label_arr is None and not label_col is None:
        label_arr = sorted([
            rw[label_col] for rw in df.select(label_col).distinct().collect()
        ])
    print "INFO: label_arr=", label_arr

    label_dic = {}
    # convert label_arr to dict: {label: number}
    for idx, label in enumerate(sorted(label_arr)):
        if not label in label_dic:
            label_dic[label] = idx  # starting from 0, value = idx; e.g., clean:0, dirty:1

    # add params for dataframe conversion
    cust_featuring_jparams["label_dict"] = label_dic
    # label_index is already an int at this point
    cust_featuring_jparams["label_index"] = label_index
    featuring_params = json.dumps(cust_featuring_jparams)
    # convert DataFrame row to libsvm string
    libsvm_rdd = df.rdd.map(lambda x: user_func(list(x), featuring_params))
    print "INFO: sample df row=", (libsvm_rdd.collect()[0])
    print "INFO: featuring_params=", featuring_params

    total_input_count = df.count()
    print "INFO: Total input sample count=", total_input_count
    #print "INFO: feature_count_threshold=",feature_count_threshold

    #get all hashes and total occurring count ===============
    #   all_hashes_cnt_dic: {'col index': total count,... }
    # build all_hashes_cnt_dic
    cnt_df = df.select(
        [count(when(~isnull(c), c)).alias(c) for c in df.columns])
    #cnt_df.show()
    cnt_arr = cnt_df.rdd.map(lambda x: list(x)).collect()
    feat_sample_count_arr = cnt_arr[0]
    #print "feat_sample_count_arr=",feat_sample_count_arr

    if all_hashes_cnt_dic is None:
        all_hashes_cnt_dic = {}
        idx = 1
        for i, v in enumerate(feat_sample_count_arr):
            if i != label_index:
                all_hashes_cnt_dic[idx] = v
                idx += 1
    #print "all_hashes_cnt_dic=",all_hashes_cnt_dic

    #get all hashes and their extracted string  ===============
    #   all_hash_str_dic: {hash:'str1', ...
    if all_hash_str_dic is None:
        # convert header to dict {index: string}; exclude the label column
        all_hash_str_dic = {}
        idx = 1
        for i, v in enumerate(df.schema.names):
            if i != label_index:
                all_hash_str_dic[idx] = v
                idx += 1
    #print "all_hash_str_dic=",all_hash_str_dic

    # save labels to hdfs as text file==================================== ============
    hdfs_folder = hdfs_feat_dir  #+ "/"   # "/" is needed to create the folder correctly
    print "INFO: hdfs_folder=", hdfs_folder
    try:
        hdfs.mkdir(hdfs_folder)
    except IOError as e:
        print "WARNING: I/O error({0}): {1}".format(e.errno, e.strerror)
    except:
        print "WARNING: Unexpected error at mkdir:", sys.exc_info()[0]

    # clean up metadata_file
    metadata_file = os.path.join(hdfs_folder, metadata)  #"metadata"
    print "INFO: metadata_file=", metadata_file
    try:
        hdfs.rmr(metadata_file)
    except IOError as e:
        print "WARNING: I/O error({0}): {1}".format(e.errno, e.strerror)
    except:
        print "WARNING: Unexpected error at rmr():", sys.exc_info()[0]
    sc.parallelize(label_arr, 1).saveAsTextFile(metadata_file)

    #remap all hash values to continuous key/feature number ==============
    #     all_hashes_seq_dic: { hash : sequential_numb }
    if all_hashes_seq_dic is None:
        all_hashes_seq_dic = {}
        # csv column index as sequential number
        remap2seq(all_hash_str_dic, all_hashes_seq_dic)
    #print "all_hashes_seq_dic=",all_hashes_seq_dic
    total_feature_numb = len(all_hashes_seq_dic)
    print "INFO: Total feature count=", len(all_hashes_seq_dic)

    # save feat_sample_count_arr data ==================================== ============
    filter = '{"rid":' + row_id_str + ',"key":"feat_sample_count_arr"}'
    upsert_flag = True
    jo_insert = {}
    jo_insert["rid"] = eval(row_id_str)
    jo_insert["key"] = "feat_sample_count_arr"
    jo_insert["value"] = feat_sample_count_arr
    jstr_insert = json.dumps(jo_insert)
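    # jstr_insert serializes to one mongo doc, e.g. (hypothetical values):
    #   {"rid": 42, "key": "feat_sample_count_arr", "value": [120, 87, 95]}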
    ret = query_mongo.upsert_doc_t(mongo_tuples, filter, jstr_insert,
                                   upsert_flag)
    print "INFO: Upsert count for feat_sample_count_arr=", ret
    # insert failed, save to local
    if ret == 0:
        # drop old record in mongo
        ret = query_mongo.delete_many(mongo_tuples, None, filter)
        out_dir = os.path.join(local_out_dir, row_id_str)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)  # create the per-row subfolder, not just local_out_dir
        fsca_hs = os.path.join(out_dir,
                               row_id_str + "_feat_sample_count_arr.pkl")
        print "WARNING: save feat_sample_count_arr to local"
        ml_util.ml_pickle_save(feat_sample_count_arr, fsca_hs)

    # get rdd statistics info
    # remove duplicated libsvm string; only keep the first duplicated item, assume space following key_idx
    if remove_duplicated == "Y":
        libsvm_rdd=libsvm_rdd \
            .map(lambda x: ( ','.join(x.split(' ')[metadata_count:]), x)) \
            .groupByKey().map(lambda x: list(x[1])[0] ) \
            .cache()
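        # Toy walk-through of the dedup above (hypothetical rows, metadata_count=2):
        #   "h1 0 1:1 5:1" -> key "1:1,5:1"
        #   "h2 0 1:1 5:1" -> key "1:1,5:1"  (same feature string; row dropped)
        #   "h3 1 2:1"     -> key "2:1"
        # groupByKey keeps one arbitrary row per distinct feature string.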
    cnt_list = libsvm_rdd.map(lambda x: (x.split(' ')[1], 1)).reduceByKey(
        add).collect()
    stats = libsvm_rdd.map(
        lambda x: len(x.split(' ')[metadata_count:])).stats()
    feat_count_max = stats.max()
    feat_count_stdev = stats.stdev()
    feat_count_mean = stats.mean()
    sample_count = stats.count()
    print "INFO: Non-Duplicated libsvm data: sample count=", sample_count, ",Feat count mean=", feat_count_mean, ",Stdev=", feat_count_stdev
    print "INFO:   ,max feature count=", feat_count_max
    print "INFO: Non-Duplicated Label count list=", cnt_list

    # clean up libsvm data ==================================== ============
    libsvm_data_file = os.path.join(hdfs_folder,
                                    libsvm_alldata_filename)  #"libsvm_data"
    print "INFO: libsvm_data_file=", libsvm_data_file
    try:
        hdfs.rmr(libsvm_data_file)
    except IOError as e:
        print "WARNING: I/O error({0}): {1} at libsvm_data_file clean up".format(
            e.errno, e.strerror)
    except:
        print "WARNING: Unexpected error at libsvm file clean up:", sys.exc_info(
        )[0]

    #codec = "org.apache.hadoop.io.compress.GzipCodec"
    #libsvm_rdd.saveAsTextFile(libsvm_data_file, codec)
    libsvm_rdd.saveAsTextFile(libsvm_data_file)  # TBD encrypted

    feat_count_file = libsvm_data_file + "_feat_count"
    print "INFO: feat_count_file=", feat_count_file
    try:
        hdfs.rmr(feat_count_file)
    except IOError as e:
        print "WARNING: I/O error({0}): {1} at feat_count clean up".format(
            e.errno, e.strerror)
    except:
        print "WARNING: Unexpected error at libsvm feature count clean up:", sys.exc_info(
        )[0]
    sc.parallelize([total_feature_numb], 1).saveAsTextFile(feat_count_file)

    # TBD ???  output text for DNN:[meta-data1,meta-data2,..., [feature tokens]] ================= DNN ===========
    if dnn_flag:  # special flag to tokenize and keep input orders
        print "INFO: processing data for DNN..."
        # create token dict
        # str_hash_dict: string to hash
        # all_hashes_seq_dic: hash to seq id
        if token_dict is None or len(token_dict) == 0:
            token_dict = {}
            str_hash_dict = {v: k for k, v in all_hash_str_dic.iteritems()}
            for k, v in str_hash_dict.iteritems():
                token_dict[k] = int(all_hashes_seq_dic[str(v)])
            #print "token_dict=",len(token_dict),token_dict

        # TBD here: need to implement non-binary feature
        dnn_rdd=df.rdd \
            .map(lambda x: tokenize_by_dict(x, data_idx, token_dict,label_idx, label_dic)) \
            .filter(lambda x: len(x) > metadata_count) \
            .filter(lambda x: type(x[metadata_count]) is list)
        #.cache()
        # filter duplication here
        #print dnn_rdd.take(3)

        dnn_data_file = os.path.join(hdfs_folder,
                                     dnn_alldata_filename)  #"dnn_data"
        print "INFO: dnn_data_file=", dnn_data_file
        try:
            hdfs.rmr(dnn_data_file)
        except IOError as e:
            print "WARNING: I/O error({0}): {1} at dnn_data_file clean up".format(
                e.errno, e.strerror)
        except:
            print "WARNING: Unexpected error at libsvm file clean up:", sys.exc_info(
            )[0]

        # clean up data
        dnn_npy_gz_file = os.path.join(hdfs_folder, row_id_str + "_dnn_")
        print "INFO: dnn_npy_gz_file=", dnn_npy_gz_file
        try:
            hdfs.rmr(dnn_npy_gz_file + "data.npy.gz")
            hdfs.rmr(dnn_npy_gz_file + "label.npy.gz")
            hdfs.rmr(dnn_npy_gz_file + "info.npy.gz")
        except IOError as e:
            print "WARNING: I/O error({0}): {1} at dnn_npy clean up".format(
                e.errno, e.strerror)
        except:
            print "WARNING: Unexpected error at dnn_npy file clean up:", sys.exc_info(
            )[0]
        # save new data
        try:
            dnn_rdd.saveAsTextFile(dnn_data_file)
        except:
            print "WARNING: Unexpected error at saving dnn data:", sys.exc_info(
            )[0]
        # show data statistics
        try:
            stats = dnn_rdd.map(lambda p: len(p[metadata_count])).stats()
            feat_count_max = stats.max()
            feat_count_stdev = stats.stdev()
            feat_count_mean = stats.mean()
            sample_count = stats.count()
            print "INFO: DNN data: sample count=", sample_count, ",Feat count mean=", feat_count_mean, ",Stdev=", feat_count_stdev
            print "INFO:   ,max feature count=", feat_count_max
        except:
            print "WARNING: Unexpected error at getting stats of dnn_rdd:", sys.exc_info(
            )[0]

    # clean up pca data in hdfs ============ ========================
    pca_files = '*' + libsvm_alldata_filename + "_pca_*"
    #print "INFO: pca_files=", pca_files
    try:
        f_list = hdfs.ls(hdfs_folder)
        if len(f_list) > 0:
            df_list = fnmatch.filter(f_list, pca_files)
            for f in df_list:
                print "INFO: rm ", f
                hdfs.rmr(f)
    except IOError as e:
        print "WARNING: I/O error({0}): {1}".format(e.errno, e.strerror)
    except:
        print "WARNING: Unexpected error at libsvm pca file clean up:", sys.exc_info(
        )[0]

    # clean up pca data in web local ============ ========================
    pca_fname = os.path.join(model_data_folder, row_id_str + '_pca_*.pkl*')
    print "INFO: pca_fname=", pca_fname

    try:
        for fl in glob.glob(pca_fname):
            print "INFO: remove ", fl
            os.remove(fl)
    except OSError as e:
        print("Error: %s - %s." % (e.filename, e.strerror))

def feat_extr_ngram(row_id_str,
                    hdfs_dir_list,
                    hdfs_feat_dir,
                    model_data_folder,
                    sp_master,
                    spark_rdd_compress,
                    spark_driver_maxResultSize,
                    sp_exe_memory,
                    sp_core_max,
                    zipout_dir,
                    zipcode_dir,
                    zip_file_name,
                    mongo_tuples,
                    fromweb,
                    label_arr,
                    metadata_count,
                    label_idx,
                    data_idx,
                    pattern_str,
                    ln_delimitor,
                    data_field_list,
                    jkey_dict,
                    jobname,
                    num_gram,
                    feature_count_threshold,
                    token_dict=None,
                    HDFS_RETR_DIR=None,
                    remove_duplicated="N",
                    cust_featuring=None,
                    cust_featuring_params=None,
                    local_out_dir=None,
                    filter_ratio=None,
                    binary_flag=True):

    # zip func in other files for Spark workers ================= ================
    zip_file_path = ml_util.ml_build_zip_file(zipout_dir,
                                              zipcode_dir,
                                              zip_file_name,
                                              user_custom=cust_featuring)
    # get_spark_context
    sc = ml_util.ml_get_spark_context(sp_master, spark_rdd_compress,
                                      spark_driver_maxResultSize,
                                      sp_exe_memory, sp_core_max, jobname,
                                      [zip_file_path])
    # log time ================================================================ ================
    t0 = time()

    # input filename
    input_filename = "*"
    ext_type = '.gz'
    gz_list = None
    convert2dirty = "N"
    if ',' not in hdfs_dir_list:  # single dir having *.gz ==== =========
        # read raw data from HDFS as .gz format ==========
        rdd_files = os.path.join(hdfs_dir_list, input_filename + ext_type)
        # check if gz files in hdfs ============
        try:
            gz_list = hdfs.ls(hdfs_dir_list)
            print "INFO: check hdfs folder=", hdfs_dir_list

        except IOError as e:
            print "WARNING: I/O error({0}): {1}".format(e.errno, e.strerror)
        except:
            print "WARNING: Error at checking HDFS file:", sys.exc_info()[0]
        # use whole folder
        if gz_list is None or len(gz_list) == 0:
            print "ERROR: No file found by ", input_filename + ext_type  #,", use",hdfs_dir_list,"instead"
            return -2
        elif len(gz_list) == 1:
            # only one entry: use the dir string itself (minus trailing char, e.g. "/") as filename
            rdd_files = hdfs_dir_list[0:-1]

    else:  # multiple dirs ==== =========
        rdd_files = ""
        cnt = 0
        temp_lbl_list = []
        comma = ""
        print "INFO: before label_arr=", label_arr

        # check each folder
        for dr in hdfs_dir_list.split(','):
            #print "****=",dr
            if len(dr) == 0:
                continue
            try:
                # remove space etc.
                dr = dr.strip()
                fdr = os.path.join(HDFS_RETR_DIR, dr)
                #print "fdr=",fdr
                # ls didn't like "*"
                if '*' in fdr:
                    #gz_list=hdfs.ls(fdr.replace("*",""))
                    dn = os.path.dirname(fdr).strip()
                    bn = os.path.basename(fdr).strip()
                    #print "dn=",dn,",bn=",bn
                    # get all names under folder and do filtering
                    gz_list = fnmatch.filter(hdfs.ls(dn), '*' + bn)
                    #print "gz_list=",gz_list
                else:
                    gz_list = hdfs.ls(fdr)
                cnt = cnt + len(gz_list)

                if len(gz_list) > 0:
                    rdd_files = rdd_files + comma + fdr
                    comma = ","
            except IOError as e:
                print "WARNING: I/O error({0}): {1}".format(
                    e.errno, e.strerror)
            except:
                print "WARNING: Error at checking HDFS file:", sys.exc_info(
                )[0]
        # use whole folder
        if cnt is None or cnt == 0:
            print "ERROR: No file found at", rdd_files
            return -2
        else:
            print "INFO: total file count=", cnt
        # set convert flag only when multiple dir and label_arr has dirty label
        #if label_arr is None: # create label arr if None
        #    label_arr=temp_lbl_list
        if label_arr is not None and len(label_arr) == 2 and label_arr[1] == "dirty":
            convert2dirty = "Y"
    print "INFO: rdd_files=", rdd_files

    txt_rdd = sc.textFile(rdd_files)  #, use_unicode=False

    total_input_count = txt_rdd.count()
    print "INFO: Total input sample count=", total_input_count
    # debug only
    #for x in txt_rdd.collect():
    #    print "t=",x
    print "INFO: hdfs_dir_list=", hdfs_dir_list
    print "INFO: label_arr=", label_arr
    print "INFO: feature_count_threshold=", feature_count_threshold

    #jkey_dict={"meta_list":["label","md5","mdate"], "data_key":"logs"}
    #   this dict depends on the format of input data
    if data_field_list is not None:
        jkey_dict = json.loads(jkey_dict)

        data_key = jkey_dict["data_key"]
        meta_list = jkey_dict["meta_list"]

        metadata_count = len(meta_list)
        data_idx = metadata_count
        print "INFO: jkey_dict=", jkey_dict
        print "INFO: meta_list=", meta_list
        print "INFO: data_key=", data_key
        print "INFO: data_field_list=", data_field_list
        print "INFO: metadata_count=", metadata_count

        featured_rdd = txt_rdd \
            .map(lambda x: preprocess_json(x,meta_list,data_key,data_field_list)) \
            .filter(lambda x: len(x) > metadata_count) \
            .filter(lambda x: type(x[metadata_count]) is list) \
            .map(lambda x: feature_extraction_ngram(x, data_idx, MAX_FEATURES, num_gram)) \
            .filter(lambda x: len(x) > metadata_count) \
            .filter(lambda x: type(x[metadata_count]) is dict) \
            .filter(lambda x: type(x[metadata_count+1]) is dict) \
            .filter(lambda x: len(x[metadata_count])> int(feature_count_threshold) ) \
            .cache()

        #print "INFO: featured_rdd="
        #for x in featured_rdd.collect():
        #    print "INFO: **** f=",x
    # user custom code for featuring  ============================================= ==========
    #   input txt_rdd format (string):  each text row for each sample
    #   output featured_rdd format (list):[meta-data1,meta-data2,..., hash_cnt_dic, hash_str_dic]
    elif cust_featuring is not None and len(cust_featuring) > 0:
        user_module = None
        user_func = None
        user_func_dnn = None
        # load user module =======
        try:
            modules = map(__import__, [CUSTOM_PREFIX + cust_featuring])
            user_module = modules[0]
            user_func = getattr(user_module, CUSTOM_FUNC)
        except Exception as e:
            print "ERROR: module=", CUSTOM_PREFIX + cust_featuring
            print "ERROR: user module error.", e.__doc__, e.message
            return -101
        try:
            jparams = json.loads(cust_featuring_params)
            if jparams and 'n-gram' in jparams:
                num_gram = jparams['n-gram']
            elif jparams and 'ngram' in jparams:
                num_gram = jparams['ngram']
            if jparams and 'binary_flag' in jparams:
                bf = jparams['binary_flag']
                # parse bool safely instead of eval() on user input
                binary_flag = bf if isinstance(bf, bool) else str(bf).strip().lower() in ("true", "1")
        except Exception as e:
            print "ERROR: user params error.", e.__doc__, e.message
            return -200

        # convert features into an array. output format: [ meta1,meta2,..., [feat1,feat2,...]]
        tmp_rdd = txt_rdd.map(lambda x: user_func(x, cust_featuring_params)) \
            .filter(lambda x: len(x) > metadata_count) \
            .filter(lambda x: type(x[metadata_count]) is list).cache()
        print " tmp_rdd cnt=", tmp_rdd.count(
        ), ",ix=", data_idx, ",max f=", MAX_FEATURES, "ngram=", num_gram
        print "take(1) rdd=", tmp_rdd.take(1)

        # TBD for multivariant output format: [ meta1,meta2,..., [[feat1,feat2,...],[feat1,feat2,...],...]]

        # TBD only for num_gram available
        # for traditional ML, features go into a dict
        # output format: [ meta1,meta2,..., hash_cnt_dic, hash_str_dic]
        featured_rdd = tmp_rdd \
            .map(lambda x: feature_extraction_ngram(x, data_idx, MAX_FEATURES, num_gram)) \
            .filter(lambda x: len(x) > metadata_count) \
            .filter(lambda x: type(x[metadata_count]) is dict) \
            .filter(lambda x: type(x[metadata_count+1]) is dict) \
            .filter(lambda x: len(x[metadata_count])> int(feature_count_threshold) ) \
            .cache()

        all_hashes_cnt_dic = None
        all_hash_str_dic = None
        all_hashes_seq_dic = None
    else:
        print "INFO: pattern_str=", pattern_str + "<--"
        print "INFO: ln_delimitor=", ln_delimitor + "<--"
        print "INFO: label_idx=", label_idx
        print "INFO: data_idx=", data_idx
        print "INFO: metadata_count=", metadata_count
        print "INFO: filter_ratio=", filter_ratio

        # filter top and least percentage of feature
        if filter_ratio is not None and 0 < filter_ratio < 1:
            # check total count here before continue
            upper_cnt = total_input_count * (1 - filter_ratio)
            lower_cnt = total_input_count * filter_ratio
            # set limit for lower bound: if total count is large, lower_cnt may exclude all features
            #   max lower count = min( MAX_FILTER_LOWER_CNT, total_input_count/100 )
            if MAX_FILTER_LOWER_CNT is not None and lower_cnt > MAX_FILTER_LOWER_CNT:
                lower_cnt = min(MAX_FILTER_LOWER_CNT, total_input_count / 100)

            print "INFO: filtering by count, upper bound=", upper_cnt, ",lower bound=", lower_cnt
            # find unique feature, count them, remove them if in highest and lowest % and then create a dict
            f_feat_set = Set(txt_rdd.map(lambda x: x.split(ln_delimitor)).flatMap(lambda x: Set(x[metadata_count:])) \
                .map(lambda x: (x, 1)).reduceByKey(lambda a, b: a + b) \
                .filter(lambda x: lower_cnt <= x[1] <= upper_cnt) \
                .map(lambda x: x[0]).collect())

            print "INFO: f_feat_set len=", len(f_feat_set)
            broadcast_f_set = sc.broadcast(f_feat_set)

            #txt_rdd=txt_rdd.map(lambda x: filter_by_list(x, metadata_count,ln_delimitor, broadcast_f_list.value ))
            txt_rdd=txt_rdd.map(lambda x: x.split(ln_delimitor)) \
                        .map(lambda x: x[:metadata_count]+ [w for w in x[metadata_count:] if w and w in broadcast_f_set.value]) \
                        .map(lambda x: ln_delimitor.join(x))

        # preprocess by pattern matching and then extract n-gram features   #.encode('UTF8')
        #   input txt_rdd format (string):  meta-data1\tmeta-data2\t...\tdataline1\tdataline2\t...datalineN\n
        #   output featured_rdd format (list):[meta-data1,meta-data2,..., hash_cnt_dic, hash_str_dic]
        #       hash_cnt_dic: {hash,hash:count,...}  hash_str_dic: {hash: 'str1',... }
        tmp_rdd = txt_rdd \
            .map(lambda x: preprocess_pattern(x, metadata_count, pattern_str, ln_delimitor \
                                                , label_idx, label_arr, convert2dirty )) \
            .filter(lambda x: len(x) > metadata_count) \
            .filter(lambda x: type(x[metadata_count]) is list) #.cache() memory issue...
        #tmp_rdd_count=tmp_rdd.count()
        #print "INFO: After preprocessing count=",tmp_rdd_count
        featured_rdd = tmp_rdd \
            .map(lambda x: feature_extraction_ngram(x, data_idx, MAX_FEATURES, num_gram)) \
            .filter(lambda x: len(x) > metadata_count) \
            .filter(lambda x: type(x[metadata_count]) is dict) \
            .filter(lambda x: type(x[metadata_count+1]) is dict) \
            .filter(lambda x: len(x[metadata_count])> int(feature_count_threshold) ) \
            .cache()
        #feat_rdd_count=featured_rdd.count()
        #print "INFO: After featuring count=",feat_rdd_count

        all_hashes_cnt_dic = None
        all_hash_str_dic = None
        all_hashes_seq_dic = None

    #get all hashes and total occurring count ===============
    #   all_hashes_cnt_dic: {'hash,hash': total count,... }
    if all_hashes_cnt_dic is None:
        #all_hashes_cnt_dic = featured_rdd.map(lambda x: x[metadata_count]).reduce(lambda a, b: combine_dic_cnt(a, b))
        all_hashes_cnt_dic = dict(
            featured_rdd.flatMap(lambda x: x[metadata_count].items()).
            reduceByKey(lambda a, b: a + b).collect())

    #get all hashes and their extracted string  ===============
    #   all_hash_str_dic: {hash:'str1', ...
    if all_hash_str_dic is None:
        #all_hash_str_dic = featured_rdd.map(lambda x: x[metadata_count+1]).reduce(lambda a, b: combine_dic(a, b))
        all_hash_str_dic = dict(
            featured_rdd.flatMap(
                lambda x: x[metadata_count + 1].items()).distinct().collect())

    # get all labels into an array  =============== provided by parameter?
    if label_arr is None:
        # will force "clean" be 0 here
        label_arr = sorted(
            featured_rdd.map(
                lambda x: x[label_idx].lower()).distinct().collect())
        # debug only
        print "INFO: label_arr.=", json.dumps(sorted(label_arr))

    # save labels to hdfs as text file==================================== ============
    hdfs_folder = hdfs_feat_dir  #+ "/"   # "/" is needed to create the folder correctly
    print "INFO: hdfs_folder=", hdfs_folder
    try:
        hdfs.mkdir(hdfs_folder)
    except IOError as e:
        print "WARNING: I/O error({0}): {1}".format(e.errno, e.strerror)
    except:
        print "WARNING: Unexpected error at mkdir:", sys.exc_info()[0]

    # clean up metadata_file
    metadata_file = os.path.join(hdfs_folder, metadata)  #"metadata"
    print "INFO: metadata_file=", metadata_file
    try:
        hdfs.rmr(metadata_file)
    except IOError as e:
        print "WARNING: I/O error({0}): {1}".format(e.errno, e.strerror)
    except:
        print "WARNING: Unexpected error at rmr():", sys.exc_info()[0]
    sc.parallelize(label_arr, 1).saveAsTextFile(metadata_file)

    #remap all hash values to continuous key/feature number ==============
    #     all_hashes_seq_dic: { hash : sequential_numb }
    if all_hashes_seq_dic is None:
        all_hashes_seq_dic = {}
        remap2seq(
            all_hashes_cnt_dic,
            all_hashes_seq_dic)  #all_hashes_seq_dic has continuous key number
    #print "all_hashes_seq_dic=",all_hashes_seq_dic
    total_feature_numb = len(all_hashes_seq_dic)
    print "INFO: Total feature count=", len(all_hashes_seq_dic)

    # featured_rdd (list):    [meta-data1,meta-data2,..., hash_cnt_dic, hash_str_dic]
    # seq_featured_rdd(list): [meta-data1,meta-data2,..., hash_cnthsh_dict, hash_str_dic] (feat id in sorted sequence)
    # hash_cnt_dic: {hash: count}  hash_str_dic: {hash: 'str1,str2...' }
    #     set binary_flag to True, all feature:value will be 1
    broadcast_dic = sc.broadcast(all_hashes_seq_dic)
    seq_featured_rdd = featured_rdd.map(lambda x: convert2seq(
        x, label_idx, data_idx, broadcast_dic.value, binary_flag=binary_flag)
                                        ).cache()
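    # Effect of binary_flag (hypothetical dicts): with hash_cnt_dic {h1: 3, h2: 1}
    # and seq ids {h1: '1', h2: '2'}, convert2seq yields {1: 3, 2: 1} when
    # binary_flag=False, but {1: 1, 2: 1} (presence only) when binary_flag=True.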

    # get hash_cnthsh_dict then flatMap and reduce to (feat id, count)
    ct_rdd = seq_featured_rdd \
        .flatMap(lambda x: x[data_idx].items()) \
        .reduceByKey(lambda a, b: a + b)
    # sorted by feature id as int
    feat_sample_count_arr = ct_rdd.sortBy(lambda x: int(x[0])).map(
        lambda x: x[1]).collect()
    # sort after collect may fail when rdd is huge
    #feat_sample_count_arr=[]
    #for i in sorted(ct_rdd.collect(), key=lambda t: int(t[0])):
    #    feat_sample_count_arr.append(i[1])
    print "INFO: feat_sample_count_arr len=", len(feat_sample_count_arr)

    # save feat_sample_count_arr data ==================================== ============
    filter = '{"rid":' + row_id_str + ',"key":"feat_sample_count_arr"}'
    upsert_flag = True
    jo_insert = {}
    jo_insert["rid"] = eval(row_id_str)
    jo_insert["key"] = "feat_sample_count_arr"
    jo_insert["value"] = feat_sample_count_arr
    jstr_insert = json.dumps(jo_insert)
    ret = query_mongo.upsert_doc_t(mongo_tuples, filter, jstr_insert,
                                   upsert_flag)
    print "INFO: Upsert count for feat_sample_count_arr=", ret
    # insert failed, save to local
    if ret == 0:
        # drop old record in mongo
        ret = query_mongo.delete_many(mongo_tuples, None, filter)
        out_dir = os.path.join(local_out_dir, row_id_str)
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)  # create the per-row subfolder, not just local_out_dir
        fsca_hs = os.path.join(out_dir,
                               row_id_str + "_feat_sample_count_arr.pkl")
        print "WARNING: save feat_sample_count_arr to local"
        ml_util.ml_pickle_save(feat_sample_count_arr, fsca_hs)

    # save feature data; TBD. not used. ==================================== ============

    #libsvm_rdd=seq_featured_rdd.map(lambda x: convert2libsvm(x,label_idx,data_idx,label_arr))
    # put hash to the front of each row, assume hash is after label
    libsvm_rdd = seq_featured_rdd.map(
        lambda x: x[label_idx + 1] + " " + convert2libsvm(
            x, label_idx, data_idx, label_arr))
    # debug only
    #print "libsvm_rdd="
    #for i in libsvm_rdd.collect():
    #    print i
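    # Resulting row shape (hypothetical): "md5hash 1 1:1 7:1 42:1"
    #   i.e. the hash at x[label_idx+1] is prepended, then convert2libsvm emits
    #   "<label> <feat_id>:<value> ..." with ids drawn from all_hashes_seq_dic.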

    # get rdd statistics info
    stats = featured_rdd.map(lambda p: len(p[metadata_count])).stats()
    feat_count_max = stats.max()
    feat_count_stdev = stats.stdev()
    feat_count_mean = stats.mean()
    sample_count = stats.count()
    print "INFO: libsvm data: sample count=", sample_count, ",Feat count mean=", feat_count_mean, ",Stdev=", feat_count_stdev
    print "INFO:   ,max feature count=", feat_count_max
    # find sample count
    lbl_arr = featured_rdd.map(lambda x: (x[label_idx], 1)).reduceByKey(
        add).collect()
    print "INFO: Sample count by label=", lbl_arr

    # remove duplicated libsvm string; only keep the first duplicated item, assume space following key_idx
    if remove_duplicated == "Y":
        libsvm_rdd=libsvm_rdd \
            .map(lambda x: ( ','.join(x.split(' ')[metadata_count:]), x)) \
            .groupByKey().map(lambda x: list(x[1])[0] ) \
            .cache()
        cnt_list = libsvm_rdd.map(lambda x: (x.split(' ')[1], 1)).reduceByKey(
            add).collect()
        stats = libsvm_rdd.map(
            lambda x: len(x.split(' ')[metadata_count:])).stats()
        feat_count_max = stats.max()
        feat_count_stdev = stats.stdev()
        feat_count_mean = stats.mean()
        sample_count = stats.count()
        print "INFO: Non-Duplicated libsvm data: sample count=", sample_count, ",Feat count mean=", feat_count_mean, ",Stdev=", feat_count_stdev
        print "INFO:   ,max feature count=", feat_count_max
        print "INFO: Non-Duplicated Label count list=", cnt_list

    # clean up libsvm data ==================================== ============
    libsvm_data_file = os.path.join(hdfs_folder,
                                    libsvm_alldata_filename)  #"libsvm_data"
    print "INFO: libsvm_data_file=", libsvm_data_file
    try:
        #hdfs.ls(save_dir)
        #print "find hdfs folder"
        hdfs.rmr(libsvm_data_file)
        #if num_gram == 1:
        #   hdfs.rmr(dnn_data_file)
        #print "all files removed"
    except IOError as e:
        print "WARNING: I/O error({0}): {1} at libsvm_data_file clean up".format(
            e.errno, e.strerror)
    except:
        print "WARNING: Unexpected error at libsvm file clean up:", sys.exc_info(
        )[0]

    #codec = "org.apache.hadoop.io.compress.GzipCodec"
    #libsvm_rdd.saveAsTextFile(libsvm_data_file, codec)
    libsvm_rdd.saveAsTextFile(libsvm_data_file)  # TBD encrypted

    feat_count_file = libsvm_data_file + "_feat_count"
    print "INFO: feat_count_file=", feat_count_file
    try:
        hdfs.rmr(feat_count_file)
    except IOError as e:
        print "WARNING: I/O error({0}): {1} at feat_count clean up".format(
            e.errno, e.strerror)
    except:
        print "WARNING: Unexpected error at libsvm feature count clean up:", sys.exc_info(
        )[0]
    sc.parallelize([total_feature_numb], 1).saveAsTextFile(feat_count_file)

    label_dic = {}
    # assign each label a number
    for idx, label in enumerate(sorted(label_arr)):
        if label not in label_dic:
            label_dic[label] = idx  # starting from 0, e.g., clean:0, dirty:1

    # output text for DNN:[meta-data1,meta-data2,..., [feature tokens]] ================= DNN ===========
    if num_gram == 1:  # special flag to tokenize and keep input orders
        print "INFO: processing data for DNN..."
        # create token dict
        # str_hash_dict: string to hash
        # all_hashes_seq_dic: hash to seq id
        if token_dict is None or len(token_dict) == 0:
            token_dict = {}
            str_hash_dict = {v: k for k, v in all_hash_str_dic.iteritems()}
            for k, v in str_hash_dict.iteritems():
                token_dict[k] = int(all_hashes_seq_dic[str(v)])
            #print "token_dict=",len(token_dict),token_dict

        dnn_rdd = tmp_rdd \
            .map(lambda x: tokenize_by_dict(x, data_idx, token_dict,label_idx, label_dic)) \
            .filter(lambda x: len(x) > metadata_count) \
            .filter(lambda x: type(x[metadata_count]) is list)
        #.cache()
        # filter duplication here
        #print dnn_rdd.take(3)

        dnn_data_file = os.path.join(hdfs_folder,
                                     dnn_alldata_filename)  #"dnn_data"
        print "INFO: dnn_data_file=", dnn_data_file
        try:
            hdfs.rmr(dnn_data_file)
        except IOError as e:
            print "WARNING: I/O error({0}): {1} at dnn_data_file clean up".format(
                e.errno, e.strerror)
        except:
            print "WARNING: Unexpected error at libsvm file clean up:", sys.exc_info(
            )[0]

        # clean up data
        dnn_npy_gz_file = os.path.join(hdfs_folder, row_id_str + "_dnn_")
        print "INFO: dnn_npy_gz_file=", dnn_npy_gz_file
        try:
            hdfs.rmr(dnn_npy_gz_file + "data.npy.gz")
            hdfs.rmr(dnn_npy_gz_file + "label.npy.gz")
            hdfs.rmr(dnn_npy_gz_file + "info.npy.gz")
        except IOError as e:
            print "WARNING: I/O error({0}): {1} at dnn_npy clean up".format(
                e.errno, e.strerror)
        except:
            print "WARNING: Unexpected error at dnn_npy file clean up:", sys.exc_info(
            )[0]
        # save new data
        try:
            dnn_rdd.saveAsTextFile(dnn_data_file)
        except:
            print "WARNING: Unexpected error at saving dnn data:", sys.exc_info(
            )[0]
        # show data statistics
        try:
            stats = dnn_rdd.map(lambda p: len(p[metadata_count])).stats()
            feat_count_max = stats.max()
            feat_count_stdev = stats.stdev()
            feat_count_mean = stats.mean()
            sample_count = stats.count()
            print "INFO: DNN data: sample count=", sample_count, ",Feat count mean=", feat_count_mean, ",Stdev=", feat_count_stdev
            print "INFO:   ,max feature count=", feat_count_max
        except:
            print "WARNING: Unexpected error at getting stats of dnn_rdd:", sys.exc_info(
            )[0]

    # clean up pca data in hdfs ============ ========================
    pca_files = '*' + libsvm_alldata_filename + "_pca_*"
    #print "INFO: pca_files=", pca_files
    try:
        f_list = hdfs.ls(hdfs_folder)
        if len(f_list) > 0:
            df_list = fnmatch.filter(f_list, pca_files)
            for f in df_list:
                print "INFO: rm ", f
                hdfs.rmr(f)
    except IOError as e:
        print "WARNING: I/O error({0}): {1}".format(e.errno, e.strerror)
    except:
        print "WARNING: Unexpected error at libsvm pca file clean up:", sys.exc_info(
        )[0]

    # clean up pca data in web local ============ ========================
    pca_fname = os.path.join(model_data_folder, row_id_str + '_pca_*.pkl*')
    print "INFO: pca_fname=", pca_fname

    try:
        for fl in glob.glob(pca_fname):
            print "INFO: remove ", fl
            os.remove(fl)
    except OSError as e:
        print("Error: %s - %s." % (e.filename, e.strerror))