def selectFeaturesForKeyword(keyword, threshold = 0.223):
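    """Backward feature elimination for a single keyword.

    Starting from the full feature list, repeatedly remove one feature (as
    chosen by `eliminate`) and retrain, until `isImportant` accepts the
    remaining weights at the given `threshold` or only one feature is left.
    Returns a summary row (keyword, selected features, removed features,
    per-step accuracies) and a row of the weight vectors from each step.
    """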
    import Trainer, PythonVersionHandler
    from pyspark.mllib.regression import LabeledPoint
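    # Start from the full feature set (the last two entries of
    # Trainer.featuresList are excluded here) and train an initial model to
    # get the baseline weights and accuracy.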
    featureList = Trainer.featuresList[:-2]
    Trainer.setFeatureVector(featureList)
    trainData, testData, weights, accuracy = getTrainedWeights(keyword)
    removedFeatures = []
    accuracies = [accuracy]
    weightsRow = list(weights)
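    # Backward elimination: while no weight passes the importance threshold
    # and more than one feature remains, drop a feature and retrain.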
    while (not isImportant(weights, threshold = threshold)) and len(weights) > 1:
        index, featureList, removedFeature = eliminate(weights, featureList)
        removedFeatures.append(removedFeature)
        Trainer.setFeatureVector(featureList)
        # Bind the current index as a default argument so each mapper keeps its
        # own copy; a plain closure over `index` would see whatever value the
        # variable holds when Spark lazily evaluates the chained maps.
        def getReducedVector(lp, index = index):
            newFeatures = list(lp.features)
            newFeatures.pop(index)
            return LabeledPoint(lp.label, newFeatures)
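        # Project the eliminated column out of both RDDs; the maps are lazy,
        # so they only execute once a Spark action is triggered downstream.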
        trainData = trainData.map(getReducedVector)
        testData = testData.map(getReducedVector)
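        # Retrain on the reduced feature set and record the new test accuracy.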
        model = Trainer.trainPairWiseData(trainData, dataName = 'TrainData')
        accuracy = Trainer.evaluateModelOnData(model, testData, dataName = 'TestData')
        accuracies.append(accuracy)
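        # Keep the new weights and append them, behind an 'X' separator, to
        # the running weights row.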
        weights = list(model.weights)
        weightsRow.append('X')
        weightsRow.extend(weights)
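    # Log the surviving features, the elimination order and the per-step accuracies.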
    PythonVersionHandler.print_('Keyword: ' + keyword)
    PythonVersionHandler.print_('Selected features: ' + str(featureList))
    PythonVersionHandler.print_('Features removed, in elimination order: ' + str(removedFeatures))
    PythonVersionHandler.print_('Accuracies from each step: ' + str(accuracies))
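    # Assemble the summary row: keyword, selected features, an 'X' separator,
    # removed features, then the accuracies.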
    row = [keyword]
    row.extend(featureList)
    row.append('X')
    row.extend(removedFeatures)
    row.extend(accuracies)
    return row, weightsRow
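
# Example usage (a minimal sketch, not part of the original pipeline). It
# assumes an active SparkContext and that the Trainer / getTrainedWeights /
# isImportant / eliminate helpers are available as elsewhere in this module;
# 'camera' is a hypothetical keyword.
#
#     summaryRow, weightsRow = selectFeaturesForKeyword('camera', threshold = 0.223)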