def train(row_id_str,
          ds_id,
          hdfs_feat_dir,
          local_out_dir,
          ml_opts_jstr,
          excluded_feat_cslist,
          sp_master,
          spark_rdd_compress,
          spark_driver_maxResultSize,
          sp_exe_memory,
          sp_core_max,
          zipout_dir,
          zipcode_dir,
          zip_file_name,
          mongo_tuples,
          labelnameflag,
          fromweb,
          training_fraction,
          jobname,
          model_data_folder,
          random_seed=None):
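    """Train a scikit-learn classifier for a single run (no grid search).

    Loads libsvm-formatted samples from HDFS via Spark, collects them to the
    driver, splits them into training/testing sets, fits the model described
    by ml_opts_jstr, saves it with joblib, writes false-prediction / score /
    ROC artifacts to local_out_dir, and (when fromweb == "1") updates the
    atdml_document record in SQLite.

    Returns 0 on success, -1 on error.
    """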

    # create zip files for Spark workers ================= ================
    zip_file_path = ml_build_zip_file(zipout_dir,
                                      zipcode_dir,
                                      zip_file_name,
                                      prefix='zip_feature_util')
    print "INFO: zip_file_path=", zip_file_path

    # ML model filename ====
    model_fname = os.path.join(model_data_folder, row_id_str + '.pkl')
    print "INFO: model_data_folder=", model_data_folder
    # create out folders and clean up old model files ====
    ml_util.ml_prepare_output_dirs(row_id_str, local_out_dir,
                                   model_data_folder, model_fname)

    # init Spark context ====
    sc = ml_util.ml_get_spark_context(sp_master, spark_rdd_compress,
                                      spark_driver_maxResultSize,
                                      sp_exe_memory, sp_core_max, jobname,
                                      [zip_file_path])

    # start here =================================================================== ===============
    t0 = time()

    # check if ml_opts.has_excluded_feat ==1 ===================================
    has_excluded_feat = 0
    if ml_opts_jstr is not None:
        ml_opts = json.loads(ml_opts_jstr)
        if "has_excluded_feat" in ml_opts:
            has_excluded_feat = ml_opts["has_excluded_feat"]

    # get excluded feature list from mongo ========== ===
    if str(has_excluded_feat) == "1" and excluded_feat_cslist is None:
        excluded_feat_cslist = ml_util.ml_get_excluded_feat(
            row_id_str, mongo_tuples)
    print "INFO: excluded_feat_cslist=", excluded_feat_cslist

    # source libsvm filename
    libsvm_data_file = os.path.join(hdfs_feat_dir, "libsvm_data")
    print "INFO: libsvm_data_file=", libsvm_data_file

    # load feature count file
    feat_count_file = libsvm_data_file + "_feat_count"
    feature_count = zip_feature_util.get_feature_count(sc, feat_count_file)
    print "INFO: feature_count=", feature_count

    # load sample RDD from text file
    #   also exclude selected features in sample ================ =====
    # format (LabeledPoint,hash) from str2LabeledPoint_hash()
    #samples_rdd = MLUtils.loadLibSVMFile(sc, libsvm_data_file)
    samples_rdd, feature_count = zip_feature_util.get_sample_rdd(
        sc, libsvm_data_file, feature_count, excluded_feat_cslist)

    # collect all data to local for processing ===============
    all_data = samples_rdd.collect()
    sample_count = len(all_data)

    if random_seed is not None and int(random_seed) > 0:
        np.random.seed(int(random_seed))
        all_data = sorted(all_data, key=lambda x: x[1])

    # 2-D array
    features_list = [x.features.toArray() for x, _ in all_data]
    # label array
    labels_list_all = [x.label for x, _ in all_data]
    # hash array
    hash_list_all = [x for _, x in all_data]

    # convert to np array
    labels_list_all = array(labels_list_all)
    features_array = np.array(features_list)
    hash_list_all = np.array(hash_list_all)
    #print "features_list=",features_list

    # generate sparse matrix (csr) for all samples
    features_sparse_mtx = csr_matrix(features_array)

    # if ensemble is on, do special split here
    ### randomly split the samples into training and testing data ===============
    X_train_sparse, X_test_sparse, labels_train, labels_test, train_hash_list, test_hash_list = \
            cross_validation.train_test_split(features_sparse_mtx, labels_list_all, hash_list_all, test_size=(1-training_fraction) )
    # X_test_sparse is scipy.sparse.csr.csr_matrix
    testing_sample_count = len(labels_test)
    training_sample_count = len(labels_train)
    training_lbl_cnt_list = Counter(labels_train)
    testing_lbl_cnt_list = Counter(labels_test)
    print "INFO: training sample count=", training_sample_count, ", testing sample count=", testing_sample_count, ",sample_count=", sample_count
    print "INFO: training label list=", training_lbl_cnt_list, ", testing label list=", testing_lbl_cnt_list
    print "INFO: train_hash_list count=", len(
        train_hash_list), ", test_hash_list count=", len(test_hash_list)

    # when a random_seed is provided, print the first few training hashes for verification
    if random_seed is not None:
        for i in train_hash_list[:4]:
            print i

    #print "INFO: labels_list_all=",labels_list_all # too big
    t1 = time()
    print 'INFO: running time: %f' % (t1 - t0)

    ###############################################
    ###########build learning model ==================================================== ===============
    ###############################################

    ### parse parameters and generate the model ###
    (clf, model_name) = parse_param_and_get_model(ml_opts)
    if model_name == "none":
        print "ERROR: model name not found!"
        return -1

    #####fit the model to training dataset  ===============
    try:
        clf.fit(X_train_sparse, labels_train)
    except Exception:
        print "ERROR: clf.fit() failed: clf=", clf
        print "ERROR: sys.exc_info:", sys.exc_info()[0]
        return -1

    print "INFO: model type=", type(clf), " clf=", clf
    #### save clf for future use ================== ===============
    joblib.dump(clf, model_fname)

    # get data from model ================================
    coef = None
    intercept = None
    # get column size =====
    try:
        if type(clf) in (classes.SVC, classes.NuSVC):  # SVC/NuSVC have no coef_ for non-linear kernels
            col_num = clf.support_vectors_.shape[1]
        else:  # linear models only
            # coef_ is only available when using a linear kernel
            col_num = len(clf.coef_[0])
            coef = clf.coef_[0]
            intercept = clf.intercept_[0]  # only get 1st item?
            #print "**model:clf.coef_[0] =",clf.coef_[0]
            # save coef_ to Mongo
    except Exception as e:
        print "Warning: Can't get clf.coef_[0]. e=", e, ", get total features from meta-data"
        col_num = 0  #how to get feature number for sparse array?
    print "INFO: total feature # in the model: ", col_num

    jfeat_coef_dict = {}
    # create feature coefficient file ================================
    if coef is None:
        print "WARNING: model weights not found!"
    else:
        feat_filename = os.path.join(local_out_dir,
                                     row_id_str + "_feat_coef.json")
        print "INFO: feat_filename=", feat_filename
        # save coef_arr to mongo ===
        #jfeat_coef_dict=save_coef2db(row_id_str, mongo_tuples, coef, intercept, feat_filename, ds_id)
        jfeat_coef_dict = ml_util.ml_save_coef_build_feat_coef(
            row_id_str, mongo_tuples, coef, intercept, feat_filename, ds_id)
    #print "INFO: jfeat_coef_dict=", jfeat_coef_dict
    print "INFO: jfeat_coef_dict len=", len(jfeat_coef_dict)

    ### Evaluating the model on testing dataset  ===============
    labels_pred = clf.predict(X_test_sparse)
    accuracy = clf.score(X_test_sparse, labels_test)
    print "INFO: Accuracy = ", accuracy

    # filename for false prediction samples  ===============
    false_pred_fname = os.path.join(local_out_dir,
                                    row_id_str + "_false_pred.json")
    print "INFO: false_pred_fname=", false_pred_fname
    # build files for false pred & score graph
    (score_arr_0, score_arr_1, max_score,
     min_score) = ml_build_false_pred(X_test_sparse,
                                      coef,
                                      intercept,
                                      labels_test,
                                      labels_pred,
                                      test_hash_list,
                                      model_name,
                                      jfeat_coef_dict,
                                      false_pred_fname,
                                      row_id_str=row_id_str,
                                      ds_id=ds_id,
                                      mongo_tuples=mongo_tuples)

    # save pred output
    pred_out_arr = []
    for i in range(0, len(labels_test)):
        pred_out_arr.append(
            (labels_test[i], labels_pred[i], test_hash_list[i]))
    pred_ofname = os.path.join(local_out_dir, row_id_str + "_pred_output.pkl")
    print "INFO: pred_ofname=", pred_ofname
    ml_util.ml_pickle_save(pred_out_arr, pred_ofname)

    ###################################################
    ### generate label names (family names) ==================================================== ===============
    ###################################################
    if labelnameflag == 1:
        label_dic = ml_util.ml_get_label_dict(row_id_str, mongo_tuples, ds_id)
        print "INFO: label_dic =", label_dic
    else:
        label_dic = {}
        label_set = set(labels_list_all)
        for label_value in label_set:
            label_dic[int(label_value)] = str(int(label_value))
        print "INFO: label_dic=", label_dic

    labels_list = []
    for key in sorted(label_dic):
        labels_list.append(label_dic[key])

    ###############################################
    ###########plot prediction result figures ==================================================== ===============
    ###############################################
    pred_fname = os.path.join(local_out_dir, row_id_str + "_1" + ".png")
    true_fname = os.path.join(local_out_dir, row_id_str + "_2" + ".png")
    pred_xlabel = 'Prediction (Single Run)'
    true_xlabel = 'True Labels (Single Run)'
    test_cnt_dic = ml_util.ml_plot_predict_figures(labels_pred.tolist(),
                                                   labels_test.tolist(),
                                                   labels_list, label_dic,
                                                   testing_sample_count,
                                                   pred_xlabel, pred_fname,
                                                   true_xlabel, true_fname)
    print "INFO: figure files: ", pred_fname, true_fname
    #print "INFO: Number of samples in each label is=", test_cnt_dic

    roc_auc = None
    #fscore=None
    perf_measures = None
    class_count = len(labels_list)
    dataset_info = {
        "training_fraction": training_fraction,
        "class_count": class_count,
        "dataset_count": sample_count
    }
    #############################################################
    ###################for 2 class only (plot ROC curve) ==================================================== ===============
    #############################################################
    if class_count == 2:

        # build data file for score graph
        score_graph_fname = os.path.join(local_out_dir,
                                         row_id_str + "_score_graph.json")
        print "INFO: score_graph_fname=", score_graph_fname
        ml_build_pred_score_graph(score_arr_0, score_arr_1, model_name,
                                  score_graph_fname, max_score, min_score)

        do_ROC = True
        # clean is 0; dirty is 1
        reverse_label_dic = dict((v, k) for k, v in label_dic.items())
        if 'clean' in reverse_label_dic:
            flag_clean = reverse_label_dic['clean']
        elif 'benign' in reverse_label_dic:
            flag_clean = reverse_label_dic['benign']
        elif '0' in reverse_label_dic:
            flag_clean = 0
        else:
            print "WARNING: No ROC curve generated: 'clean' or '0' must be a label for indicating negative class!"
            do_ROC = False

        if do_ROC:
            # calculate fscore  ==========
            perf_measures = ml_util.calculate_fscore(labels_test, labels_pred)
            #fscore=perf_measures["fscore"]
            #acc=perf_measures["accuracy"]
            #phi=perf_measures["phi"]
            print "INFO: perf_measures=", perf_measures

            confidence_score = clf.decision_function(X_test_sparse)
            #print "INFO:confidence_score=",confidence_score

            if flag_clean == 0:
                scores = [x for x in confidence_score]
                s_labels = [x for x in labels_test]
                testing_N = test_cnt_dic[0]
                testing_P = test_cnt_dic[1]
            else:
                scores = [-x for x in confidence_score]
                s_labels = [1 - x for x in labels_test]
                testing_N = test_cnt_dic[1]
                testing_P = test_cnt_dic[0]

            # create ROC data file ======== ====
            roc_auc = ml_create_roc_files(row_id_str, scores, s_labels,
                                          testing_N, testing_P, local_out_dir,
                                          row_id_str)

            perf_measures["roc_auc"] = roc_auc

    # only update db for web request ==================================================== ===============
    if fromweb == "1":
        #print "database update"
        str_sql="UPDATE atdml_document set "+"accuracy = '"+str(accuracy*100)+"%" \
            +"', status = 'learned', processed_date ='"+str(datetime.datetime.now()) \
            +"', perf_measures='"+json.dumps(perf_measures) \
            +"', dataset_info='"+json.dumps(dataset_info) \
            +"' where id="+row_id_str
        ret = exec_sqlite.exec_sql(str_sql)
        print "INFO: Sqlite update done! ret=", str(ret)
    else:
        print "INFO: accuracy = '" + str(accuracy * 100) + "%"

    t1 = time()
    print 'INFO: running time: %f' % (t1 - t0)

    print 'INFO: Train Finished!'
    return 0
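
# A minimal sketch of how the single-run scikit-learn train() above might be
# invoked; every value below is hypothetical (paths, Spark settings, the
# ml_opts JSON content, and mongo_tuples all depend on the deployment):
#
#   train(row_id_str="123", ds_id="123",
#         hdfs_feat_dir="/user/hadoop/features/123",
#         local_out_dir="/tmp/ml_out/123",
#         ml_opts_jstr='{"c":"1"}',
#         excluded_feat_cslist=None,
#         sp_master="spark://master:7077", spark_rdd_compress="True",
#         spark_driver_maxResultSize="1g", sp_exe_memory="2g", sp_core_max="4",
#         zipout_dir="/tmp/zip_out", zipcode_dir=".", zip_file_name="feature_util.zip",
#         mongo_tuples=mongo_tuples, labelnameflag=1, fromweb="0",
#         training_fraction=0.8, jobname="train_123",
#         model_data_folder="/tmp/ml_out/123/model", random_seed=None)
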
def train(row_id_str, ds_id, hdfs_feat_dir, local_out_dir, ml_opts_jstr,
          excluded_feat_cslist, sp_master, spark_rdd_compress,
          spark_driver_maxResultSize, sp_exe_memory, sp_core_max, zipout_dir,
          zipcode_dir, zip_file_name, mongo_tuples, labelnameflag, fromweb,
          training_fraction, jobname):
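    """Train a Spark MLlib classifier with a manual grid search over
    C / iterations / regType using k-fold cross validation on the training RDD,
    then retrain the best parameter set on the full training data.

    Saves CV results to MongoDB, the final model to HDFS, prediction /
    false-prediction / score / ROC artifacts to local_out_dir, and (when
    fromweb == "1") updates the atdml_document record in SQLite.

    Returns 0 on success.
    """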

    if not os.path.exists(local_out_dir):
        os.makedirs(local_out_dir)

    # zip func in other files for Spark workers ================= ================
    zip_file_path = ml_build_zip_file(zipout_dir,
                                      zipcode_dir,
                                      zip_file_name,
                                      prefix='zip_feature_util')
    print "INFO: zip_file_path=", zip_file_path

    # get_spark_context
    sc = ml_util.ml_get_spark_context(sp_master, spark_rdd_compress,
                                      spark_driver_maxResultSize,
                                      sp_exe_memory, sp_core_max, jobname,
                                      [zip_file_path])

    t0 = time()
    t00 = t0

    # check if ml_opts.has_excluded_feat ==1 ===================================
    has_excluded_feat = 0
    ml_opts = {}
    if ml_opts_jstr is not None:
        ml_opts = json.loads(ml_opts_jstr)
        if "has_excluded_feat" in ml_opts:
            has_excluded_feat = ml_opts["has_excluded_feat"]

    #print "has_excluded_feat=",has_excluded_feat,",excluded_feat_cslist=",excluded_feat_cslist

    # get excluded feature list from mongo ========== ===
    if str(has_excluded_feat) == "1" and excluded_feat_cslist is None:
        excluded_feat_cslist = ml_util.ml_get_excluded_feat(
            row_id_str, mongo_tuples)
    print "INFO: excluded_feat_cslist=", excluded_feat_cslist
    ### generate Labeled point
    libsvm_data_file = os.path.join(hdfs_feat_dir, "libsvm_data")
    print "INFO: libsvm_data_file:", libsvm_data_file

    # load feature count file
    feat_count_file = libsvm_data_file + "_feat_count"
    feature_count = zip_feature_util.get_feature_count(sc, feat_count_file)
    print "INFO: feature_count=", feature_count

    # load sample RDD from text file
    #   also exclude selected features in sample ================ =====
    # format (LabeledPoint,hash) from str2LabeledPoint_hash()
    #samples_rdd = MLUtils.loadLibSVMFile(sc, libsvm_data_file)
    samples_rdd, feature_count = zip_feature_util.get_sample_rdd(
        sc, libsvm_data_file, feature_count, excluded_feat_cslist)
    #samples_rdd = MLUtils.loadLibSVMFile(sc, libsvm_data_file)

    # get distinct label list
    labels_list_all = samples_rdd.map(
        lambda p: p[0].label).distinct().collect()

    ### generate training and testing data
    training_rdd, testing_rdd = samples_rdd.randomSplit(
        [training_fraction, 1 - training_fraction])
    training_rdd = training_rdd.map(lambda p: p[0])  # keep LabeledPoint only
    training_rdd.cache()
    training_sample_count = training_rdd.count()
    training_lbl_cnt_list = training_rdd.map(
        lambda p: (p.label, 1)).reduceByKey(add).collect()
    testing_rdd.cache()
    testing_sample_count = testing_rdd.count()
    testing_lbl_cnt_list = testing_rdd.map(
        lambda p: (p[0].label, 1)).reduceByKey(add).collect()
    sample_count = training_sample_count + testing_sample_count

    t1 = time()
    print "INFO: training sample count=", training_sample_count, ", testing sample count=", testing_sample_count
    print "INFO: training label list=", training_lbl_cnt_list, ", testing label list=", testing_lbl_cnt_list
    print "INFO: labels_list_all=", labels_list_all
    print "INFO: training and testing samples generated!"
    print 'INFO: running time: %f' % (t1 - t0)
    t0 = t1

    ##############################################
    ########### Grid Search with CV ##############
    ##############################################

    ### get the parameters for cross validation and grid search ###
    (cv, model_name, param_dict) = generate_param(ml_opts)

    ### generate label names (family names) #####
    ### connect to database to get the column list which contains all column number of the corresponding feature####
    if labelnameflag == 1:
        label_dic = ml_util.ml_get_label_dict(row_id_str, mongo_tuples, ds_id)
        print "INFO: label_dic:", label_dic

    else:
        label_dic = {}
        label_set = set(labels_list_all)
        for label_value in label_set:
            label_dic[int(label_value)] = str(int(label_value))
        print "INFO: generated label_dic:", label_dic

    labels_list = []
    for key in sorted(label_dic):
        labels_list.append(label_dic[key])
    #print "labels:", labels_list
    class_num = len(labels_list)
    if class_num > 2:
        print "INFO: Multi-class classification! Number of classes = ", class_num

    #### generate training and testing rdd(s) for CV#####
    split_prob = 1.0 / float(cv)
    split_prob_list = [split_prob] * cv

    list_rdd = training_rdd.randomSplit(split_prob_list)
    list_train_rdd = []
    list_test_rdd = []
    for i in range(0, cv):
        list_rdd[i].cache()
    for i in range(0, cv):
        # union all folds except fold i to form the training RDD for this fold
        tr_rdd = sc.emptyRDD()
        for j in range(0, cv):
            if j != i:
                tr_rdd = tr_rdd + list_rdd[j]  # RDD "+" is union
        tr_rdd.cache()
        list_train_rdd.append(tr_rdd)
        list_test_rdd.append(list_rdd[i])

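    # get_all_combination_list_of_dic is expected to expand param_dict (e.g.
    # {"C": [...], "iterations": [...], "regType": [...]}) into a list of dicts,
    # one per parameter combination, which is searched exhaustively below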
    all_comb_list_of_dic = get_all_combination_list_of_dic(param_dict)
    print "INFO: Total number of searching combinations:", len(
        all_comb_list_of_dic)

    ### loop for all parameter combinations and search the best parameters with CV###
    results = []
    for p in range(0, len(all_comb_list_of_dic)):
        params = all_comb_list_of_dic[p]
        C = params['C']
        iteration_num = params['iterations']
        regularization = params['regType']

        scores = []
        for i in range(0, cv):
            train_rdd = list_train_rdd[i]
            test_rdd = list_test_rdd[i]
            train_number = train_rdd.count()
            regP = C / float(train_number)

            ### build model ###
            if model_name == "linear_svm_with_sgd":
                #print "====================1: Linear SVM============="
                model_classification = SVMWithSGD.train(
                    train_rdd,
                    regParam=regP,
                    iterations=iteration_num,
                    regType=regularization)  # regParam = 1/(sample_number*C)
            elif model_name == "logistic_regression_with_lbfgs":
                #print "====================2: LogisticRegressionWithLBFGS============="
                model_classification = LogisticRegressionWithLBFGS.train(
                    train_rdd,
                    regParam=regP,
                    iterations=iteration_num,
                    regType=regularization,
                    numClasses=class_num)  # regParam = 1/(sample_number*C)
            elif model_name == "logistic_regression_with_sgd":
                #print "====================3: LogisticRegressionWithSGD============="
                model_classification = LogisticRegressionWithSGD.train(
                    train_rdd,
                    regParam=regP,
                    iterations=iteration_num,
                    regType=regularization)  # regParam = 1/(sample_number*C)
            else:
                print "ERROR: Training model selection error: no valid ML model selected!"
                return

            ### Evaluating the model on testing data
            labelsAndPreds = test_rdd.map(
                lambda p: (p.label, model_classification.predict(p.features)))
            labelsAndPreds.cache()
            test_sample_number = test_rdd.count()
            testErr = labelsAndPreds.filter(
                lambda (v, p): v != p).count() / float(test_sample_number)
            accuracy = 1 - testErr
            #print "Accuracy = ", accuracy
            scores.append(accuracy)

        ss = np.asarray(scores)
        #print "%0.3f (+/-%0.03f) for " % (ss.mean(), ss.std() * 2), params
        results.append((ss.mean(), ss.std() * 2, params))

    sorted_results = sorted(results, key=lambda x: x[0], reverse=True)
    (best_accuracy, best_std2, best_param) = sorted_results[0]
    print "INFO: ml_opts_jstr=", ml_opts_jstr
    print "INFO: best_param=", best_param

    #ml_opts=json.loads(ml_opts_jstr);
    print "INFO: ml_opts=", ml_opts

    ##############################################
    ######output Grid Search results##############
    ##############################################
    json2save = {}
    json2save["rid"] = int(row_id_str)
    json2save["key"] = "cv_result"
    #json2save["param_str"]=ml_opts_jstr
    json2save["param_dic"] = param_dict
    cv_grid = []
    print ""
    print "INFO: =====Grid Search Results for SPARK ======"
    print "INFO: Best parameters set found for ", model_name, " is: "
    for key in best_param:
        print "INFO:", key, "=", best_param[key]
        if key.lower() == "regtype":
            ml_opts['regularization'] = str(best_param[key])
        else:
            ml_opts[key.lower()] = str(best_param[key])  # add best param to ml_opts
    ml_opts_jstr = json.dumps(ml_opts)
    json2save["param_str"] = ml_opts_jstr
    print "INFO: Average accuracy with CV = ", cv, ": ", best_accuracy
    print ""
    print "INFO: Grid scores on development set:"
    for i in range(0, len(sorted_results)):
        (ave_accu_i, std2_i, param_i) = sorted_results[i]
        print "%0.3f (+/-%0.03f) for " % (ave_accu_i, std2_i), param_i
        #outstr='%s,%0.3f,%0.03f,%s' % (param_i,ave_accu_i, std2_i,"Selected" if param_i==best_param else "")
        outj = {}
        outj["param"] = param_i
        outj["average_accuracy"] = "%0.3f" % (ave_accu_i)
        outj["std_deviation"] = "%0.3f" % (std2_i)
        outj["selected"] = "%s" % ("Selected" if param_i == best_param else "")
        cv_grid.append(outj)
    print " "

    t1 = time()
    print 'INFO: Grid Search with CV run time: %f' % (t1 - t0)
    t0 = time()

    ##################################################################################
    json2save["cv_grid_data"] = cv_grid
    cv_result = json.dumps(json2save)
    print "INFO: cv_result=", cv_result
    filter = '{"rid":' + row_id_str + ',"key":"cv_result"}'
    upsert_flag = True
    ## write to mongoDB.myml.dataset_info, ignore doc with duplicated key
    # db.dataset_info.createIndex({"rid":1,"key":1},{unique:true})
    ret = query_mongo.upsert_doc_t(mongo_tuples, filter, cv_result,
                                   upsert_flag)
    print "INFO: Upsert count for mllib cv_result: ret=", ret

    ############################################################################################
    ########### retrain with all training data and generate the final model with results #######
    ############################################################################################
    C = best_param['C']
    iteration_num = best_param['iterations']
    regularization = best_param['regType']
    regP = C / float(training_sample_count)

    ######################################the rest of the code is the same as train_MLlib.py #####################################################################

    if model_name == "linear_svm_with_sgd":
        ### 1: linearSVM
        print "INFO: ====================1: Linear SVM============="
        model_classification = SVMWithSGD.train(
            training_rdd,
            regParam=regP,
            iterations=iteration_num,
            regType=regularization)  # regParam = 1/(sample_number*C)
        #print model_classification
    elif model_name == "logistic_regression_with_lbfgs":
        ### 2: LogisticRegressionWithLBFGS
        print "INFO: ====================2: LogisticRegressionWithLBFGS============="
        model_classification = LogisticRegressionWithLBFGS.train(
            training_rdd,
            regParam=regP,
            iterations=iteration_num,
            regType=regularization,
            numClasses=class_num)  # regParam = 1/(sample_number*C)
    elif model_name == "logistic_regression_with_sgd":
        ### 3: LogisticRegressionWithSGD
        print "INFO: ====================3: LogisticRegressionWithSGD============="
        model_classification = LogisticRegressionWithSGD.train(
            training_rdd,
            regParam=regP,
            iterations=iteration_num,
            regType=regularization)  # regParam = 1/(sample_number*C)
    else:
        print "INFO: Training model selection error: no valid ML model selected!"
        return

    print "INFO: model type=", type(model_classification)

    # create feature coefficient file ================================
    coef_arr = None
    intercept = None
    if model_classification.weights is None:
        print "WARNING: model weights not found!"
    else:
        coef_arr = model_classification.weights.toArray().tolist()
        # save to mongo
        key = "coef_arr"
        ret = ml_util.save_json_t(row_id_str, key, coef_arr, mongo_tuples)
        # save intercept to mongo
        key = "coef_intercept"
        intercept = model_classification.intercept
        ret = ml_util.save_json_t(row_id_str, key, intercept, mongo_tuples)

        # feature list + coef file =============
        feat_filename = os.path.join(local_out_dir,
                                     row_id_str + "_feat_coef.json")
        print "INFO: feat_filename=", feat_filename

        # create feature list + coef file =============================================== ============
        # expect a dict of {"fid":(coef, feature_raw_string)}
        jret = ml_util.build_feat_list_t(row_id_str, feat_filename, None, None,
                                         coef_arr, ds_id, mongo_tuples)

        # special featuring for IN or libsvm
        if jret is None:
            jret = ml_util.build_feat_coef_raw_list_t(row_id_str,
                                                      feat_filename, coef_arr,
                                                      ds_id, mongo_tuples)
        if jret is None:
            print "WARNING: Cannot create sample list for testing dataset. "

        jfeat_coef_dict = jret
        print "INFO: coef_arr len=", len(
            coef_arr), ", feature_count=", feature_count
        # for multi-class
        if len(coef_arr) != feature_count:
            jfeat_coef_dict = {}
            print "WARNING: feature list can't be shown for multi-class classification"

        # Calculate prediction and Save testing dataset
        bt_coef_arr = sc.broadcast(coef_arr)
        bt_intercept = sc.broadcast(intercept)
        bt_jfeat_coef_dict = sc.broadcast(jfeat_coef_dict)
        ### Evaluating the model on testing dataset: label, predict label, score, feature list
        print "INFO: intercept=", intercept
        print "INFO: coef_arr len=", len(coef_arr)
        print "INFO: jfeat_coef_dict len=", len(jfeat_coef_dict)

        # get prediction of testing dataset : (tlabel, plabel, score, libsvm, raw feat str, hash) ==============================
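        # (zip_feature_util.calculate_hypothesis is assumed to return the raw
        #  decision score, i.e. dot(coef, x) + intercept, for the given model_name)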
        if len(coef_arr) == feature_count:
            testing_pred_rdd = testing_rdd.map(lambda p: (
                 p[0].label \
                ,model_classification.predict(p[0].features) \
                ,zip_feature_util.calculate_hypothesis(p[0].features, bt_coef_arr.value, bt_intercept.value, model_name) \
                ,p[0].features \
                ,p[1] \
            ) ).cache()
        else:  # for multi-class, no prediction score; TBD: better way to display per-class weights
            testing_pred_rdd = testing_rdd.map(lambda p: (
                 p[0].label \
                ,model_classification.predict(p[0].features) \
                ,0 \
                ,p[0].features \
                ,p[1] \
            ) ).cache()

        # save false prediction to local file
        false_pred_fname = os.path.join(local_out_dir,
                                        row_id_str + "_false_pred.json")
        print "INFO: false_pred_fname=", false_pred_fname
        false_pred_data=testing_pred_rdd.filter(lambda p: p[0] != p[1])\
            .map(lambda p: (p[0],p[1],p[2] \
            ,zip_feature_util.get_dict_coef_raw4feat(zip_feature_util.sparseVector2dict(p[3]), bt_jfeat_coef_dict.value)
            ,p[4]  ) ) \
            .collect()
        print "INFO: false predicted count=", len(false_pred_data)
        false_pred_arr = []
        with open(false_pred_fname, "w") as fp:
            for sp in false_pred_data:
                jsp = {
                    "tlabel": sp[0],
                    "plabel": sp[1],
                    "score": sp[2],
                    "feat": sp[3],
                    "hash": sp[4]
                }
                #print "jsp=",jsp
                false_pred_arr.append(jsp)
            fp.write(json.dumps(false_pred_arr))

        # save prediction results, format: label, prediction, hash
        pred_ofname = os.path.join(local_out_dir,
                                   row_id_str + "_pred_output.pkl")
        print "INFO: pred_ofname=", pred_ofname
        pred_out_arr = testing_pred_rdd.map(lambda p:
                                            (p[0], p[1], p[4])).collect()
        ml_util.ml_pickle_save(pred_out_arr, pred_ofname)

    ### Evaluating the model on testing data
    #labelsAndPreds = testing_rdd.map(lambda p: (p.label, model_classification.predict(p.features)))
    labelsAndPreds = testing_pred_rdd.map(lambda p: (p[0], p[1]))
    labelsAndPreds.cache()
    #testing_sample_count = testing_rdd.count()
    testErr = labelsAndPreds.filter(lambda (v, p): v != p).count() / float(
        testing_sample_count)
    accuracy = 1 - testErr
    print "INFO: Accuracy = ", accuracy

    ### Save model
    #save_dir = config.get('app', 'HADOOP_MASTER')+'/user/hadoop/yigai/row_6/'
    #save_dir = config.get('app', 'HADOOP_MASTER')+config.get('app', 'HDFS_MODEL_DIR')+'/'+row_id_str
    save_dir = os.path.join(config.get('app', 'HADOOP_MASTER'),
                            config.get('app', 'HDFS_MODEL_DIR'), row_id_str)
    try:
        hdfs.ls(save_dir)
        #print "find hdfs folder"
        hdfs.rmr(save_dir)
        #print "all files removed"
    except IOError as e:
        print "WARNING: I/O error({0}): {1}".format(
            e.errno, e.strerror), ". At HDFS=", save_dir
    except:
        print "WARNING: Unexpected error:", sys.exc_info(
        )[0], ". At HDFS=", save_dir

    model_classification.save(sc, save_dir)

    ###load model if needed
    #sameModel = SVMModel.load(sc, save_dir)

    t1 = time()
    print 'INFO: training run time: %f' % (t1 - t0)
    t0 = t1

    ###############################################
    ###########plot prediction result figure#######
    ###############################################

    labels = labelsAndPreds.collect()
    true_label_list = [x for x, _ in labels]
    pred_label_list = [x for _, x in labels]

    pred_fname = os.path.join(local_out_dir, row_id_str + "_1" + ".png")
    true_fname = os.path.join(local_out_dir, row_id_str + "_2" + ".png")
    pred_xlabel = 'Prediction (Single Run)'
    true_xlabel = 'True Labels (Single Run)'
    test_cnt_dic = ml_util.ml_plot_predict_figures(
        pred_label_list, true_label_list, labels_list, label_dic,
        testing_sample_count, pred_xlabel, pred_fname, true_xlabel, true_fname)

    plt.show()
    perf_measures = None
    dataset_info = {
        "training_fraction": training_fraction,
        "class_count": class_num,
        "dataset_count": sample_count
    }
    #############################################################
    ###################for 2 class only (plot ROC curve)#########
    #############################################################
    if len(labels_list) == 2:

        do_ROC = True
        reverse_label_dic = dict((v, k) for k, v in label_dic.items())
        if 'clean' in reverse_label_dic:
            flag_clean = reverse_label_dic['clean']
        elif 'benign' in reverse_label_dic:
            flag_clean = reverse_label_dic['benign']
        elif '0' in reverse_label_dic:
            flag_clean = 0
        else:
            print "WARNING: No ROC curve generated: 'clean' or '0' must be a label for indicating negative class!"
            do_ROC = False

        # build data file for score graph
        score_graph_fname = os.path.join(local_out_dir,
                                         row_id_str + "_score_graph.json")
        print "INFO: score_graph_fname=", score_graph_fname

        # build score_arr_0, score_arr_1
        #    format: tlabel, plabel, score, libsvm, raw feat str, hash
        graph_arr = testing_pred_rdd.map(lambda p:
                                         (int(p[0]), float(p[2]))).collect()
        score_arr_0 = []
        score_arr_1 = []
        max_score = 0
        min_score = 0
        for p in graph_arr:
            if p[0] == 0:
                score_arr_0.append(p[1])
            else:
                score_arr_1.append(p[1])
            # save max,min score
            if p[1] > max_score:
                max_score = p[1]
            elif p[1] < min_score:
                min_score = p[1]

        ml_build_pred_score_graph(score_arr_0, score_arr_1, model_name,
                                  score_graph_fname, max_score, min_score)
        #print "score_arr_0=",score_arr_0
        #print "score_arr_1=",score_arr_1
        #print "max_score=",max_score
        #print "min_score=",min_score

        if do_ROC:

            perf_measures = ml_util.calculate_fscore(true_label_list,
                                                     pred_label_list)
            print "RESULT: perf_measures=", perf_measures
            model_classification.clearThreshold()
            scoreAndLabels = testing_rdd.map(lambda p: (
                model_classification.predict(p[0].features), int(p[0].label)))
            #metrics = BinaryClassificationMetrics(scoreAndLabels)
            #areROC = metrics.areaUnderROC
            #print areROC
            scoreAndLabels_list = scoreAndLabels.collect()
            if flag_clean == 0:
                scores = [x for x, _ in scoreAndLabels_list]
                s_labels = [x for _, x in scoreAndLabels_list]
                testing_N = test_cnt_dic[0]
                testing_P = test_cnt_dic[1]
            else:
                scores = [-x for x, _ in scoreAndLabels_list]
                s_labels = [1 - x for _, x in scoreAndLabels_list]
                testing_N = test_cnt_dic[1]
                testing_P = test_cnt_dic[0]
            #print scores
            #print s_labels
            # create ROC data file ======== ====
            roc_auc = ml_create_roc_files(row_id_str, scores, s_labels,
                                          testing_N, testing_P, local_out_dir,
                                          row_id_str)

            perf_measures["roc_auc"] = roc_auc

    # only update db for web request
    if fromweb == "1":
        #print "database update"
        str_sql="UPDATE atdml_document set "+"accuracy = '"+str(accuracy*100)+"%" \
            +"', status = 'learned', processed_date ='"+str(datetime.datetime.now()) \
            +"',ml_opts='"+ml_opts_jstr \
            +"', perf_measures='"+json.dumps(perf_measures) \
            +"', dataset_info='"+json.dumps(dataset_info) \
            +"' where id="+row_id_str
        ret = exec_sqlite.exec_sql(str_sql)
        print "INFO: Data update done! ret=", str(ret)
    else:
        print "INFO: accuracy = '" + str(accuracy * 100) + "%"

    t1 = time()
    print 'INFO: total run time: %f' % (t1 - t00)

    print 'INFO: Finished!'
    return 0
def train(row_id_str, ds_id, hdfs_feat_dir, local_out_dir, ml_opts_jstr, excluded_feat_cslist
    , sp_master, spark_rdd_compress, spark_driver_maxResultSize, sp_exe_memory, sp_core_max
    , zipout_dir, zipcode_dir, zip_file_name
    , mongo_tuples, labelnameflag, fromweb
    , training_fraction, jobname, model_data_folder ): 
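    """Train a scikit-learn classifier with grid search + cross validation.

    The search runs either through sklearn GridSearchCV (api == "centralized")
    or distributed across Spark workers, then the best estimator is refit on
    the full training set. CV results go to MongoDB, the model to joblib,
    prediction/ROC artifacts to local_out_dir, and (when fromweb == "1") the
    atdml_document record in SQLite is updated. Returns 0 on success, -1 on error.
    """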
    

    # zip func in other files for Spark workers ================= ================
    zip_file_path = ml_build_zip_file(zipout_dir, zipcode_dir, zip_file_name, prefix='zip_feature_util')
    print "INFO: zip_file_path=",zip_file_path
    

    # ML model filename ====
    model_fname=os.path.join(model_data_folder, row_id_str+'.pkl')
    print "INFO: model_data_folder=",model_data_folder    
    # create out folders and clean up old model files ====
    ml_util.ml_prepare_output_dirs(row_id_str,local_out_dir,model_data_folder,model_fname)   

    # init Spark context ====
    sc=ml_util.ml_get_spark_context(sp_master
        , spark_rdd_compress
        , spark_driver_maxResultSize
        , sp_exe_memory
        , sp_core_max
        , jobname
        , [zip_file_path]) 

    
    t0 = time()
    t00 = t0
    
    # check if ml_opts.has_excluded_feat ==1 ===================================
    has_excluded_feat=0
    if ml_opts_jstr is not None:
        ml_opts=json.loads(ml_opts_jstr)
        if "has_excluded_feat" in ml_opts:
            has_excluded_feat=ml_opts["has_excluded_feat"]

    # get excluded feature list from mongo ========== ===
    if str(has_excluded_feat) == "1" and excluded_feat_cslist is None:
        excluded_feat_cslist=ml_util.ml_get_excluded_feat(row_id_str, mongo_tuples)
    print "INFO: excluded_feat_cslist=",excluded_feat_cslist
            
    # source libsvm filename  
    libsvm_data_file = os.path.join(hdfs_feat_dir , "libsvm_data")
    print "INFO: libsvm_data_file=", libsvm_data_file

    # load feature count file
    feat_count_file=libsvm_data_file+"_feat_count"
    feature_count=zip_feature_util.get_feature_count(sc,feat_count_file)
    print "INFO: feature_count=",feature_count

    
    # load sample RDD from text file   
    #   also exclude selected features in sample ================ =====
    # format (LabeledPoint,hash) from str2LabeledPoint_hash() 
    #samples_rdd = MLUtils.loadLibSVMFile(sc, libsvm_data_file)
    samples_rdd,feature_count = zip_feature_util.get_sample_rdd(sc, libsvm_data_file, feature_count, excluded_feat_cslist)

    all_data = samples_rdd.collect()
    sample_count=len(all_data)
    # 2-D array
    features_list = [x.features.toArray() for x,_ in all_data]
    # label array
    labels_list_all = [x.label for x,_ in all_data]
    # hash array
    hash_list_all = [x for _,x in all_data]

    # convert to np array
    labels_list_all = array(labels_list_all)
    features_array = np.array(features_list)
    hash_list_all=np.array(hash_list_all)
    
    # generate sparse matrix (csr) for all samples
    features_sparse_mtx = csr_matrix(features_array)

    ### randomly split the samples into training and testing data ===============
    X_train_sparse, X_test_sparse, labels_train, labels_test, train_hash_list, test_hash_list = \
            cross_validation.train_test_split(features_sparse_mtx, labels_list_all, hash_list_all, test_size=(1-training_fraction) )
    # X_test_sparse is scipy.sparse.csr.csr_matrix
    testing_sample_count = len(labels_test)
    training_sample_count=len(labels_train)
    training_lbl_cnt_list=Counter(labels_train)
    testing_lbl_cnt_list=Counter(labels_test)
    
    print "INFO: training sample count=",training_sample_count,", testing sample count=",testing_sample_count,",sample_count=",sample_count
    print "INFO: training label list=",training_lbl_cnt_list,", testing label list=",testing_lbl_cnt_list
    print "INFO: train_hash_list count=",len(train_hash_list),", test_hash_list count=",len(test_hash_list)
    t1 = time()
    print 'INFO: running time: %f' %(t1-t0)
    
    ###############################################
    ###########build learning model################
    ###############################################
    
    ### parse parameters and generate the model ###
    (clf, model_name, api, cv, param_dic) = parse_param_and_get_model(ml_opts)
    if model_name == "none":
        print "ERROR: model name not found!"
        return -1

    #param_jobj=json.loads(ml_opts_jstr);
    #print "param_jobj=",param_jobj
        
    ########################################################
    ##########Grid Search with cross validation#############
    ########################################################    
    json2save={}
    json2save["rid"]=int(row_id_str)
    json2save["key"]="cv_result"
    #json2save["param_str"]=ml_opts_jstr
    json2save["param_dic"]=param_dic
    cv_grid=[]
    if api == "centralized":
        #########run with Scikit-learn API (for comparison)######
        print "INFO: ******************Grid Search with Scikit-learn API************"

        t0 = time()
        
        # Set the parameters by cross-validation
        #tuned_parameters = [{'C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, 100000]}]
        #tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4], \
        #                 'C': [1, 10, 100, 1000]}, \
        #                {'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]

        scores = ['accuracy']
        json2save["scores"]=scores
        #print json2save
        
        for score in scores: # for one item only? score=accuracy
            print("INFO: # Tuning hyper-parameters for %s" % score)
            #print()

            grid = grid_search.GridSearchCV(estimator=clf, param_grid=param_dic, cv=cv, scoring=score)
            grid.fit(X_train_sparse, labels_train)
            
            print "INFO: Best parameters set found on development set:"
            print "INFO: grid.best_params_=",grid.best_params_
            print "INFO: Grid scores on development set:" 
            for key in grid.best_params_:
                print "INFO: best_params["+key+"]=", grid.best_params_[key]
                if key.lower()=="regtype":
                    ml_opts['regularization']=str(grid.best_params_[key]) # add best param to 
                else:
                    ml_opts[key.lower()]=str(grid.best_params_[key]) # add best param to 
            # save best param to db as json string
            j_str=json.dumps(ml_opts)
            json2save["param_str"]=j_str
            print "INFO: grid_scores_ with params:"
            for params, mean_score, scores in grid.grid_scores_:
                print "INFO: %0.3f (+/-%0.03f) for %r" % (mean_score, scores.std() * 2, params)
                #outstr='%s,%0.3f,%0.03f,%s' % (params,mean_score, scores.std() * 2,"Selected" if params==grid.best_params_ else "")
                outj={}
                outj["param"]=params
                outj["average_accuracy"]="%0.3f" % (mean_score)
                outj["std_deviation"]="%0.3f" % (scores.std() * 2)
                outj["selected"]="%s" % ("Selected" if params==grid.best_params_ else "")
                
                cv_grid.append(outj)
        
        clf_best = grid.best_estimator_
        t1 = time()
        ############# END run with SKlearn ######
        print 'INFO: Grid Search with SKlearn running time: %f' %(t1-t0)
        t0 = time()
    else:
    
        #############run with SPARK######
        
        print "INFO: ******************Grid Search with SPARK************"
            
        all_comb_list_of_dic = get_all_combination_list_of_dic(param_dic) 
        print "INFO: Total number of searching combinations=", len(all_comb_list_of_dic) 
        #print "all_comb_list_of_dic: ", all_comb_list_of_dic
        params_rdd = sc.parallelize(all_comb_list_of_dic)
        
        ### broadcast clf, training data, testing data to all workers ###
        X_broadcast = sc.broadcast(X_train_sparse)
        y_broadcast = sc.broadcast(labels_train)
        clf_broadcast = sc.broadcast(clf)
        
        ### Grid Search with CV in multiple workers ###
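        # learn_with_params is expected to return (mean_cv_accuracy, (fitted_clf, param_dic, 2*std));
        # sortByKey(ascending=False) then puts the best-scoring parameter set first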
        models = params_rdd.map(lambda x: learn_with_params(clf_broadcast.value, X_broadcast.value, y_broadcast.value, cv, x)).sortByKey(ascending = False).cache()
        
        (ave_accuracy, (clf_best, p_dic_best, std2))  = models.first()
        # output results #

        print "INFO: Best parameters set found for ", model_name, " is: "
        print "INFO: ",
        for key in p_dic_best:
            print key, " = ", p_dic_best[key],
            if key.lower()=="regtype":
                ml_opts['regularization']=str(p_dic_best[key]) 
            else:
                ml_opts[key.lower()]=str(p_dic_best[key]) # add best param to ml_opts
            # save best param to db as json string
        print ""
        j_str=json.dumps(ml_opts)
        json2save["param_str"]=j_str

        print "INFO: Average accuracy with CV = ", cv, ": ", ave_accuracy
        
        ######## print complete report #######
        print "INFO: Grid scores on development set:"
        all_results = models.collect()
        for i in range(0, len(all_results)):
            (ave_accu_i, (clf_i, p_dic_i, std2_i)) = all_results[i]
            print "INFO: ",ave_accu_i, " for ", p_dic_i
            print "INFO: %0.3f (+/-%0.03f) for " % (ave_accu_i, std2_i), p_dic_i
            #outstr='%s,%0.3f,%0.03f,%s' % ( p_dic_i, ave_accu_i, std2_i, "Selected" if p_dic_i==p_dic_best else "")
            outj={}
            outj["param"]=p_dic_i
            outj["average_accuracy"]="%0.3f" % (ave_accu_i)
            outj["std_deviation"]="%0.3f" % (std2_i)
            outj["selected"]="%s" % ("Selected" if p_dic_i==p_dic_best else "")
            
            cv_grid.append(outj)
        print " "
        
        t1 = time()
        
        ############# END run with SPARK######
        print 'INFO: Grid search with SPARK running time: %f' %(t1-t0)
    
    ##################################################################################
    #print "cv_grid=",cv_grid
    #json2save["cv_grid_title"]='param,average_accuracy,std_deviation,selected' 
    json2save["cv_grid_data"]=cv_grid
    json2save['clf_best']=str(clf_best).replace("\n","").replace("    ","")
    cv_result=json.dumps(json2save)
    #print "INFO: cv_result=",cv_result
    filter='{"rid":'+row_id_str+',"key":"cv_result"}'
    upsert_flag=True
    ## write to mongoDB.myml.dataset_info, ignore doc with duplicated key
    # db.dataset_info.createIndex({"rid":1,"key":1},{unique:true})
    ret=query_mongo.upsert_doc_t(mongo_tuples,filter,cv_result,upsert_flag)
    print "INFO: Upsert count for cv_result: ret=",ret
 
    ##################################################################################
    ##########Retrain with best model for training set and output results#############
    ##################################################################################
    print "INFO: **********Retrain with best model for training set and output results************"
    
    clf_best.fit(X_train_sparse, labels_train)
    #### save clf_best for future use ####
    #joblib.dump(clf_best, model_data_folder + row_id_str+'.pkl')
    joblib.dump(clf_best, model_fname) 
    
    ### Evaluating the model on testing data
    labels_pred = clf_best.predict(X_test_sparse)
    accuracy = clf_best.score(X_test_sparse, labels_test)
    print "INFO: Accuracy = ", accuracy
    
    
    ######################################the rest of the code is the same as train_sklean.py (replace clf with clf_best)#####################################################################
    clf=clf_best
    print "INFO: model type=",type(clf)," clf=",clf

    # get data from model ================================
    coef=None
    intercept=None
    try:
        if type(clf) in (classes.SVC, classes.NuSVC): # SVC/NuSVC have no coef_ for non-linear kernels
            col_num=clf.support_vectors_.shape[1]
        else: # linear models only
            # coef_ is only available when using a linear kernel
            col_num = len(clf.coef_[0])
            coef=clf.coef_[0]
            intercept=clf.intercept_[0] # only get 1st item?
            #print "**model:clf.coef_[0] =",clf.coef_[0]
    except Exception as e:
        print "WARNING: Can't get clf.coef_[0]. e=",e,", get total features from meta-data"
        col_num = 0 #how to get feature number for sparse array? 
    print "INFO: total feature # in the model: ", col_num

    jfeat_coef_dict={}
    # create feature coefficient file ================================
    if coef is None:
        print "WARNING: model weights not found!"    
    else:
        feat_filename=os.path.join(local_out_dir,row_id_str+"_feat_coef.json")
        print "INFO: feat_filename=",feat_filename
        # save coef_arr to mongo & create jfeat_coef_dict===
        jfeat_coef_dict=ml_util.ml_save_coef_build_feat_coef(row_id_str, mongo_tuples, coef, intercept, feat_filename, ds_id)
    #print "INFO: jfeat_coef_dict=", jfeat_coef_dict
    print "INFO: jfeat_coef_dict len=", len(jfeat_coef_dict )


    # filename for false pred 
    false_pred_fname=os.path.join(local_out_dir,row_id_str+"_false_pred.json")
    print "INFO: false_pred_fname=", false_pred_fname

    # build files for false pred & score graph
    (score_arr_0, score_arr_1, max_score,min_score)=ml_build_false_pred(X_test_sparse,coef,intercept
        , labels_test, labels_pred, test_hash_list, model_name, jfeat_coef_dict, false_pred_fname) 

    # save pred output
    pred_out_arr=[]
    for i in range(0,len(labels_test)):
        pred_out_arr.append((labels_test[i], labels_pred[i], test_hash_list[i]))
    pred_ofname=os.path.join(local_out_dir,row_id_str+"_pred_output.pkl")
    print "INFO: pred_ofname=", pred_ofname
    ml_util.ml_pickle_save(pred_out_arr,pred_ofname)
    
    ###################################################
    ### generate label names (family names) ###########
    ### connect to database to get the column list which contains all column number of the corresponding feature####
    ###################################################
    
    if labelnameflag == 1:
        key = "dic_name_label"
        jstr_filter='{"rid":'+row_id_str+',"key":"'+key+'"}'
        jstr_proj='{"value":1}'

        # get parent dataset's data
        if ds_id != row_id_str:
            jstr_filter='{"rid":'+ds_id+',"key":"'+key+'"}'
        
        doc=query_mongo.find_one_t(mongo_tuples, jstr_filter, jstr_proj)
        dic_list = doc['value']
        
        label_dic = {}
        for i in range(0, len(dic_list)):
            for key in dic_list[i]:
                label_dic[dic_list[i][key]] = key.encode('UTF8')
        print "INFO: label_dic:", label_dic
    else:
        label_dic = {}
        label_set = set(labels_list_all)
        for label_value in label_set:
            label_dic[int(label_value)] = str(int(label_value))
        print "INFO: ******generated label_dic:", label_dic 
    
    labels_list = []
    for key in sorted(label_dic):
        labels_list.append(label_dic[key])
    
    ### generate sample numbers of each family in testing data###
    testing_sample_number = len(labels_test)
    print "INFO: testing_sample_number=", testing_sample_number
    test_cnt_dic = {}
    for key in label_dic:
        test_cnt_dic[key] = 0
    for i in range (0, testing_sample_number):
        for key in label_dic:
            if labels_test[i] == key:
                test_cnt_dic[key] = test_cnt_dic[key] + 1
    print "INFO: Number of samples in each label is=", test_cnt_dic
    
    ###############################################
    ###########plot prediction result figure#######
    ###############################################
    pred_fname=os.path.join(local_out_dir,row_id_str+"_1"+".png")
    true_fname=os.path.join(local_out_dir,row_id_str+"_2"+".png")
    pred_xlabel='Prediction (Single Run)'
    true_xlabel='True Labels (Single Run)'
    test_cnt_dic=ml_util.ml_plot_predict_figures(labels_pred.tolist(), labels_test.tolist(), labels_list, label_dic, testing_sample_count 
        , pred_xlabel, pred_fname, true_xlabel, true_fname)
    print "INFO: figure files: ", pred_fname, true_fname
    print "INFO: Number of samples in each label is=", test_cnt_dic

    roc_auc=None
    #fscore=None 
    perf_measures=None
    class_count=len(labels_list)
    dataset_info={"training_fraction":training_fraction, "class_count":class_count,"dataset_count":sample_count}
    #############################################################
    ###################for 2 class only (plot ROC curve)#########
    #############################################################
    if len(labels_list) == 2:

        # build data file for score graph
        score_graph_fname=os.path.join(local_out_dir,row_id_str+"_score_graph.json")
        print "INFO: score_graph_fname=", score_graph_fname
        ml_build_pred_score_graph(score_arr_0,score_arr_1,model_name, score_graph_fname,max_score,min_score)

            
        do_ROC=True
        reverse_label_dic = dict((v,k) for k, v in label_dic.items())
        if 'clean' in reverse_label_dic:
            flag_clean = reverse_label_dic['clean']
        elif 'benign' in reverse_label_dic:
            flag_clean = reverse_label_dic['benign']
        elif '0' in reverse_label_dic:
            flag_clean = 0
        else:
            print "No ROC curve generated: 'clean' or '0' must be a label for indicating negative class!"
            do_ROC=False
            
        if do_ROC:
            # calculate fscore  ==========
            perf_measures=ml_util.calculate_fscore(labels_test, labels_pred)
            print "INFO: perf_measures=",perf_measures
            
            confidence_score = clf_best.decision_function(X_test_sparse)
                    
            if flag_clean == 0:
                scores = [x for x in confidence_score]
                s_labels = [x for x in labels_test]
                testing_N = test_cnt_dic[0]
                testing_P = test_cnt_dic[1]
            else:
                scores = [-x for x in confidence_score]
                s_labels = [1-x for x in labels_test]
                testing_N = test_cnt_dic[1]
                testing_P = test_cnt_dic[0]
                
            # create ROC data file ======== ==== 
            roc_auc=ml_create_roc_files(row_id_str, scores, s_labels, testing_N, testing_P
                , local_out_dir, row_id_str)
                
            perf_measures["roc_auc"]=roc_auc
            
                
    # only update db for web request
    if fromweb=="1": 
        #print "database update"
        str_sql="UPDATE atdml_document set "+"accuracy = '"+str(accuracy*100)+"%" \
            +"', status = 'learned', processed_date ='"+str(datetime.datetime.now()) \
            +"',ml_opts='"+j_str \
            +"', perf_measures='"+json.dumps(perf_measures) \
            +"', dataset_info='"+json.dumps(dataset_info) \
            +"' where id="+row_id_str
        ret=exec_sqlite.exec_sql(str_sql)
        print "INFO: Data update done! ret=", str(ret)
    else:
        print "INFO: accuracy = '"+str(accuracy*100)+"%"
    
    print 'INFO: total running time: %f' %(t1-t00)
    
    print 'INFO: Finished!'
    return 0


def train(row_id_str,
          ds_id,
          hdfs_feat_dir,
          local_out_dir,
          ml_opts_jstr,
          excluded_feat_cslist,
          sp_master,
          spark_rdd_compress,
          spark_driver_maxResultSize,
          sp_exe_memory,
          sp_core_max,
          zipout_dir,
          zipcode_dir,
          zip_file_name,
          mongo_tuples,
          labelnameflag,
          fromweb,
          training_fraction,
          jobname,
          random_seed=None):
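    """Spark-based trainer: loads libsvm features from HDFS, splits them into
    training/testing RDDs, fits an MLlib linear model (linear SVM or logistic
    regression per ml_opts), evaluates accuracy/F-score/ROC on the testing split,
    writes figures and JSON artifacts to local_out_dir, saves the model to HDFS,
    and optionally updates the atdml_document record when called from the web UI.
    """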

    ### generate data folder and out folder, clean up if needed
    #local_out_dir = local_out_dir + "/"
    #if os.path.exists(local_out_dir):
    #    shutil.rmtree(local_out_dir)  # commented out to keep the sample list file
    if not os.path.exists(local_out_dir):
        os.makedirs(local_out_dir)

    # create zip files for Spark workers ================= ================
    zip_file_path = ml_util.ml_build_zip_file(zipout_dir,
                                              zipcode_dir,
                                              zip_file_name,
                                              prefix='zip_feature_util')
    print "INFO: zip_file_path=", zip_file_path

    # get_spark_context
    sc = ml_util.ml_get_spark_context(sp_master, spark_rdd_compress,
                                      spark_driver_maxResultSize,
                                      sp_exe_memory, sp_core_max, jobname,
                                      [zip_file_path])

    t0 = time()

    # check if ml_opts.has_excluded_feat ==1 ===================================
    has_excluded_feat = 0
    ml_opts = {}
    if ml_opts_jstr is not None:
        ml_opts = json.loads(ml_opts_jstr)
        if "has_excluded_feat" in ml_opts:
            has_excluded_feat = ml_opts["has_excluded_feat"]
    #print "has_excluded_feat=",has_excluded_feat,",excluded_feat_cslist=",excluded_feat_cslist

    # get excluded feature list from mongo ========== ===
    if str(has_excluded_feat) == "1" and excluded_feat_cslist is None:
        excluded_feat_cslist = ml_util.ml_get_excluded_feat(
            row_id_str, mongo_tuples)
    print "INFO: excluded_feat_cslist=", excluded_feat_cslist

    # filename for featured data
    libsvm_data_file = os.path.join(hdfs_feat_dir, "libsvm_data")
    print "INFO: libsvm_data_file:", libsvm_data_file

    # load feature count file
    feat_count_file = libsvm_data_file + "_feat_count"
    feature_count = zip_feature_util.get_feature_count(sc, feat_count_file)
    print "INFO: feature_count=", feature_count

    # load sample RDD from text file
    #   also exclude selected features in sample ================ =====
    # format (LabeledPoint,hash) from str2LabeledPoint_hash()
    #samples_rdd = MLUtils.loadLibSVMFile(sc, libsvm_data_file)
    samples_rdd, feature_count = zip_feature_util.get_sample_rdd(
        sc, libsvm_data_file, feature_count, excluded_feat_cslist)

    # get distinct label list
    labels_list_all = samples_rdd.map(
        lambda p: p[0].label).distinct().collect()

    # split samples to training and testing data, format (LabeledPoint,hash)
    if random_seed is not None:
        training_rdd, testing_rdd = samples_rdd.randomSplit(
            [training_fraction, 1 - training_fraction], seed=int(random_seed))
    else:
        training_rdd, testing_rdd = samples_rdd.randomSplit(
            [training_fraction, 1 - training_fraction])
    training_rdd = training_rdd.map(lambda p: p[0])  # keep LabeledPoint only
    training_rdd.cache()
    training_sample_count = training_rdd.count()
    training_lbl_cnt_list = training_rdd.map(
        lambda p: (p.label, 1)).reduceByKey(add).collect()
    testing_rdd.cache()
    testing_sample_count = testing_rdd.count()
    testing_lbl_cnt_list = testing_rdd.map(
        lambda p: (p[0].label, 1)).reduceByKey(add).collect()
    sample_count = training_sample_count + testing_sample_count

    # random_seed sanity check: print the first few testing-sample hashes (sorted)
    if random_seed is not None:
        all_t = testing_rdd.collect()
        all_t = sorted(all_t, key=lambda x: x[1])
        cnt = 0
        for i in all_t:
            print i[1]
            cnt = cnt + 1
            if cnt > 3:
                break

    t1 = time()
    print "INFO: training sample count=", training_sample_count, ", testing sample count=", testing_sample_count
    print "INFO: training label list=", training_lbl_cnt_list, ", testing label list=", testing_lbl_cnt_list
    print "INFO: labels_list_all=", labels_list_all
    print "INFO: training and testing samples generated!"
    print 'INFO: running time: %f' % (t1 - t0)
    t0 = t1

    ###############################################
    ###########build learning model################
    ###############################################

    ### get the parameters###
    print "INFO: ======Learning Algorithm and Parameters============="
    #ml_opts = json.loads(ml_opts_jstr)
    # 1: linear_svm_with_sgd; 2: logistic_regression_with_lbfgs; 3: logistic_regression_with_sgd
    model_name = ml_opts['learning_algorithm']
    iteration_num = 0
    if 'iterations' in ml_opts:
        iteration_num = ml_opts['iterations']
    C = 0
    if 'c' in ml_opts:
        C = eval(ml_opts['c'])
    regularization = ""
    if 'regularization' in ml_opts:
        regularization = ml_opts['regularization']

    print "INFO: Learning Algorithm: ", model_name
    print "INFO: C = ", C
    print "INFO: iterations = ", iteration_num
    print "INFO: regType = ", regularization
    regP = C / float(training_sample_count)
    print "INFO: Calculated: regParam = ", regP

    ### generate label names (family names) #####
    ### connect to database to get the column list which contains all column number of the corresponding feature####
    if labelnameflag == 1:
        '''
        key = "dic_name_label"
        jstr_filter='{"rid":'+row_id_str+',"key":"'+key+'"}'
        jstr_proj='{"value":1}'
 
        # get parent dataset's data
        if ds_id != row_id_str:
            jstr_filter='{"rid":'+ds_id+',"key":"'+key+'"}'
 
        doc=query_mongo.find_one_t(mongo_tuples, jstr_filter, jstr_proj)
        dic_list = doc['value']
        print "INFO: dic_list=",dic_list
        
        label_dic = {}
        for i in range(0, len(dic_list)):
            for key in dic_list[i]:
                label_dic[dic_list[i][key]] = key.encode('UTF8')
        '''
        label_dic = ml_util.ml_get_label_dict(row_id_str, mongo_tuples, ds_id)
        print "INFO: label_dic:", label_dic
    else:
        label_dic = {}
        label_set = set(labels_list_all)
        for label_value in label_set:
            label_dic[int(label_value)] = str(int(label_value))
        print "INFO: generated label_dic:", label_dic

    labels_list = []
    for key in sorted(label_dic):
        labels_list.append(label_dic[key])
    print "INFO: labels:", labels_list
    class_num = len(labels_list)
    if class_num > 2:
        print "INFO: Multi-class classification! Number of classes = ", class_num

    ### build model ###

    if model_name == "linear_svm_with_sgd":
        ### 1: linearSVM
        print "INFO: ====================1: Linear SVM============="
        model_classification = SVMWithSGD.train(
            training_rdd,
            regParam=regP,
            iterations=iteration_num,
            regType=regularization)  # regParam = 1/(sample_number*C)
        #print model_classification
    elif model_name == "logistic_regression_with_lbfgs":
        ### 2: LogisticRegressionWithLBFGS
        print "INFO: ====================2: LogisticRegressionWithLBFGS============="
        model_classification = LogisticRegressionWithLBFGS.train(
            training_rdd,
            regParam=regP,
            iterations=iteration_num,
            regType=regularization,
            numClasses=class_num)  # regParam = 1/(sample_number*C)
    elif model_name == "logistic_regression_with_sgd":
        ### 3: LogisticRegressionWithSGD
        print "INFO: ====================3: LogisticRegressionWithSGD============="
        model_classification = LogisticRegressionWithSGD.train(
            training_rdd,
            regParam=regP,
            iterations=iteration_num,
            regType=regularization)  # regParam = 1/(sample_number*C)
    else:
        print "INFO: Training model selection error: no valid ML model selected!"
        return

    print "INFO: model type=", type(model_classification)

    # create feature coefficient file ================================
    coef_arr = None
    intercept = None
    if model_classification.weights is None:
        print "WARNING: model weights not found!"
    else:
        coef_weights = model_classification.weights
        #print "coef_weights=",coef_weights
        #print type(coef_weights),coef_weights.shape
        coef_arr = coef_weights.toArray().tolist()
        # save coef_arr to mongo
        key = "coef_arr"
        ret = ml_util.save_json_t(row_id_str, key, coef_arr, mongo_tuples)

        # save coef_arr to local file
        if ret == 0:
            # drop old record in mongo
            filter = '{"rid":' + row_id_str + ',"key":"coef_arr"}'
            ret = query_mongo.delete_many(mongo_tuples, None, filter)
            if not os.path.exists(local_out_dir):
                os.makedirs(local_out_dir)
            fn_ca = os.path.join(local_out_dir, row_id_str,
                                 row_id_str + "_coef_arr.pkl")
            ml_util.ml_pickle_save(coef_arr, fn_ca)

        # save intercept to mongo
        intercept = model_classification.intercept
        key = "coef_intercept"
        ret = ml_util.save_json_t(row_id_str, key, intercept, mongo_tuples)

        # feature list + coef file =============
        feat_filename = os.path.join(local_out_dir,
                                     row_id_str + "_feat_coef.json")
        print "INFO: feat_filename=", feat_filename

        # create feature, coef & raw string file =============================================== ============
        # expect a dict of {"fid":(coef, feature_raw_string)}
        jret = ml_util.build_feat_list_t(row_id_str, feat_filename, None, None,
                                         coef_arr, ds_id, mongo_tuples)

        # special featuring for IN or libsvm
        if jret is None:
            jret = ml_util.build_feat_coef_raw_list_t(row_id_str,
                                                      feat_filename, coef_arr,
                                                      ds_id, mongo_tuples)
        if jret is None:
            print "WARNING: Cannot create sample list for testing dataset. "

        jfeat_coef_dict = jret
        print "INFO: coef_arr len=", len(
            coef_arr), ", feature_count=", feature_count
        # for multi-class
        if len(coef_arr) != feature_count:
            jfeat_coef_dict = {}
            print "WARNING: coef count didn't match feature count.  multi-class classification was not supported"

        # Calculate prediction and Save testing dataset
        bt_coef_arr = sc.broadcast(coef_arr)
        bt_intercept = sc.broadcast(intercept)
        bt_jfeat_coef_dict = sc.broadcast(jfeat_coef_dict)
        ### Evaluating the model on testing dataset: label, predict label, score, feature list
        print "INFO: intercept=", intercept
        print "INFO: coef_arr len=", len(coef_arr), type(coef_arr)
        print "INFO: jfeat_coef_dict len=", len(
            jfeat_coef_dict)  #, jfeat_coef_dict

        # get prediction of testing dataset : (tlabel, plabel, score, libsvm, raw feat str, hash) ==============================
        if len(coef_arr) == feature_count:
            testing_pred_rdd = testing_rdd.map(lambda p: (
                 p[0].label \
                ,model_classification.predict(p[0].features) \
                ,zip_feature_util.calculate_hypothesis(p[0].features, bt_coef_arr.value, bt_intercept.value, model_name) \
                ,p[0].features \
                ,p[1] \
            ) ).cache()
        else:  # for multi-class, no prediction score; TBD for better solution: how to display multiple weights for each class
            testing_pred_rdd = testing_rdd.map(lambda p: (
                 p[0].label \
                ,model_classification.predict(p[0].features) \
                ,"-" \
                ,p[0].features \
                ,p[1] \
            ) ).cache()
        ''',p[0].features.dot(bt_coef_arr.value)+bt_intercept.value \
        # Save testing dataset for analysis
        libsvm_testing_output = hdfs_feat_dir + "libsvm_testing_output_"+row_id_str
        print "INFO: libsvm_testing_output=", libsvm_testing_output
        try:
            hdfs.rmr(libsvm_testing_output)
        except IOError as e:
            print "WARNING: I/O error({0}): {1}".format(e.errno, e.strerror)
        except:
            print "WARNING: Unexpected error at libsvm_testing_output file clean up:", sys.exc_info()[0] 
        # save only false prediction?
        #testing_pred_rdd.filter(lambda p: p[0] != p[1]).saveAsTextFile(libsvm_testing_output)
        testing_pred_rdd.saveAsTextFile(libsvm_testing_output)
        
        '''
        #test_tmp=testing_pred_rdd.collect()

        # save false prediction to local file
        false_pred_fname = os.path.join(local_out_dir,
                                        row_id_str + "_false_pred.json")
        print "INFO: false_pred_fname=", false_pred_fname
        false_pred_data=testing_pred_rdd.filter(lambda p: p[0] != p[1])\
            .map(lambda p: (p[0],p[1],p[2] \
            ,zip_feature_util.get_dict_coef_raw4feat(zip_feature_util.sparseVector2dict(p[3]), bt_jfeat_coef_dict.value)
            ,p[4]  ) ) \
            .collect()
        print "INFO: false predicted count=", len(false_pred_data)
        false_pred_arr = []
        with open(false_pred_fname, "w") as fp:
            for sp in false_pred_data:
                jsp = {
                    "tlabel": sp[0],
                    "plabel": sp[1],
                    "score": sp[2],
                    "feat": sp[3],
                    "hash": sp[4]
                }
                #print "jsp=",jsp
                false_pred_arr.append(jsp)
            fp.write(json.dumps(false_pred_arr))

        # save prediction results, format: label, prediction, hash
        pred_ofname = os.path.join(local_out_dir,
                                   row_id_str + "_pred_output.pkl")
        print "INFO: pred_ofname=", pred_ofname
        pred_out_arr = testing_pred_rdd.map(lambda p:
                                            (p[0], p[1], p[4])).collect()
        ml_util.ml_pickle_save(pred_out_arr, pred_ofname)
        '''
        one_item= testing_pred_rdd.first()
        print "one_item=",one_item
        sparse_arr=one_item[3]

        dict_feat=zip_feature_util.sparseVector2dict(sparse_arr)
        print "len=",len(dict_feat),"dict_feat=",dict_feat
        dict_weit=zip_feature_util.add_coef2dict(coef_arr,dict_feat)
        print "len=",len(dict_weit),"dict_weit=",dict_weit
        '''
    # Calculate Accuracy. labelsAndPreds = (true_label,predict_label)
    labelsAndPreds = testing_pred_rdd.map(lambda p: (p[0], p[1]))
    labelsAndPreds.cache()
    testing_sample_number = testing_rdd.count()
    testErr = labelsAndPreds.filter(lambda (v, p): v != p).count() / float(
        testing_sample_number)
    accuracy = 1 - testErr
    print "INFO: Accuracy = ", accuracy

    ### Save model
    #save_dir = config.get('app', 'HADOOP_MASTER')+'/user/hadoop/yigai/row_6/'
    #save_dir = config.get('app', 'HADOOP_MASTER')+config.get('app', 'HDFS_MODEL_DIR')+'/'+row_id_str
    save_dir = os.path.join(config.get('app', 'HADOOP_MASTER'),
                            config.get('app', 'HDFS_MODEL_DIR'), row_id_str)
    try:
        hdfs.ls(save_dir)
        #print "find hdfs folder"
        hdfs.rmr(save_dir)
        #print "all files removed"
    except IOError as e:
        print "WARNING: I/O error({0}): {1}".format(
            e.errno, e.strerror), ". At HDFS=", save_dir
    except:
        print "WARNING: Unexpected error:", sys.exc_info(
        )[0], ". At HDFS=", save_dir
    model_classification.save(sc, save_dir)

    ###load model if needed
    #sameModel = SVMModel.load(sc, save_dir)

    t1 = time()
    print 'INFO: training run time: %f' % (t1 - t0)
    t0 = t1

    ###############################################
    ###########plot prediction result figure ==================================================== ===============
    ###############################################

    labels = labelsAndPreds.collect()
    true_label_list = [x for x, _ in labels]
    pred_label_list = [x for _, x in labels]

    pred_fname = os.path.join(local_out_dir, row_id_str + "_1" + ".png")
    true_fname = os.path.join(local_out_dir, row_id_str + "_2" + ".png")
    pred_xlabel = 'Prediction (Single Run)'
    true_xlabel = 'True Labels (Single Run)'
    test_cnt_dic = ml_util.ml_plot_predict_figures(
        pred_label_list, true_label_list, labels_list, label_dic,
        testing_sample_count, pred_xlabel, pred_fname, true_xlabel, true_fname)
    print "INFO: figure files: ", pred_fname, true_fname
    #print "INFO: Number of samples in each label is=", test_cnt_dic

    roc_auc = None
    perf_measures = None
    dataset_info = {
        "training_fraction": training_fraction,
        "class_count": class_num,
        "dataset_count": sample_count
    }
    #############################################################
    ###################for 2 class only (plot ROC curve) ==================================================== ===============
    #############################################################
    if len(labels_list) == 2:

        do_ROC = True
        reverse_label_dic = dict((v, k) for k, v in label_dic.items())
        if 'clean' in reverse_label_dic:
            flag_clean = reverse_label_dic['clean']
        elif 'benign' in reverse_label_dic:
            flag_clean = reverse_label_dic['benign']
        elif '0' in reverse_label_dic:
            flag_clean = 0
        else:
            print "INFO: No ROC curve generated: 'clean','benign' or '0' must be a label for indicating negative class!"
            do_ROC = False

        # build data file for score graph
        score_graph_fname = os.path.join(local_out_dir,
                                         row_id_str + "_score_graph.json")
        print "INFO: score_graph_fname=", score_graph_fname

        # build score_arr_0, score_arr_1
        #    format: tlabel, plabel, score, libsvm, raw feat str, hash
        graph_arr = testing_pred_rdd.map(lambda p:
                                         (int(p[0]), float(p[2]))).collect()
        score_arr_0 = []
        score_arr_1 = []
        max_score = 0
        min_score = 0
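        # bucket each test sample's score by its true label (0 vs. non-zero) and
        # track the overall min/max score for the graph's range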
        for p in graph_arr:
            if p[0] == 0:
                score_arr_0.append(p[1])
            else:
                score_arr_1.append(p[1])
            # save max,min score
            if p[1] > max_score:
                max_score = p[1]
            elif p[1] < min_score:
                min_score = p[1]

        ml_build_pred_score_graph(score_arr_0, score_arr_1, model_name,
                                  score_graph_fname, max_score, min_score)

        if do_ROC:

            perf_measures = ml_util.calculate_fscore(true_label_list,
                                                     pred_label_list)
            print "RESULT: perf_measures=", perf_measures
            '''
            # calculate fscore  ==========
            tp = labelsAndPreds.filter(lambda (v, p): v == 1 and p==1 ).count() 
            fp = labelsAndPreds.filter(lambda (v, p): v == 0 and p==1 ).count() 
            fn = labelsAndPreds.filter(lambda (v, p): v == 1 and p==0 ).count() 
            tn = labelsAndPreds.filter(lambda (v, p): v == 0 and p==0 ).count() 
            print "RESULT: tp=",tp,",fp=",fp,",fn=",fn,",tn=",tn
            precision=float(tp)/(tp+fp)
            recall=float(tp)/(tp+fn)
            print "RESULT: precision=",precision,",recall=",recall
            acc=(tp+tn)/(float(testing_sample_number))
            fscore=2*((precision*recall)/(precision+recall))
            print "RESULT: fscore=",fscore,",acc=",acc  
            '''
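            # clearThreshold() makes predict() return the raw score (margin/probability)
            # instead of a 0/1 label, which the ROC computation below relies on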
            model_classification.clearThreshold()
            scoreAndLabels = testing_rdd.map(lambda p: (
                model_classification.predict(p[0].features), int(p[0].label)))
            #metrics = BinaryClassificationMetrics(scoreAndLabels)
            #areROC = metrics.areaUnderROC
            #print areROC
            scoreAndLabels_list = scoreAndLabels.collect()

            if flag_clean == 0:
                scores = [x for x, _ in scoreAndLabels_list]
                s_labels = [x for _, x in scoreAndLabels_list]
                testing_N = test_cnt_dic[0]
                testing_P = test_cnt_dic[1]
            else:
                scores = [-x for x, _ in scoreAndLabels_list]
                s_labels = [1 - x for _, x in scoreAndLabels_list]
                testing_N = test_cnt_dic[1]
                testing_P = test_cnt_dic[0]
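            # when the negative ("clean") class is labeled 1, the branch above negates the
            # scores and flips the labels so label 1 always marks the positive class for ROC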

            # create ROC data file ======== ====
            roc_auc = ml_create_roc_files(row_id_str, scores, s_labels,
                                          testing_N, testing_P, local_out_dir,
                                          row_id_str)
            #, local_out_dir, file_name_given)

            perf_measures["roc_auc"] = roc_auc

    # only update db for web request ==================================================== ===============
    if fromweb == "1":
        #print "database update"
        str_sql="UPDATE atdml_document set "+"accuracy = '"+str(accuracy*100)+"%" \
            +"', status = 'learned', processed_date ='"+str(datetime.datetime.now()) \
            +"', perf_measures='"+json.dumps(perf_measures) \
            +"', dataset_info='"+json.dumps(dataset_info) \
            +"' where id="+row_id_str
        ret = exec_sqlite.exec_sql(str_sql)
        print "INFO: Data update done! ret=", str(ret)
    else:
        print "INFO: accuracy = '" + str(accuracy * 100) + "%"

    print 'INFO: Finished!'
    return 0