def standardScaler(self):
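        """Demonstrates StandardScaler, MinMaxScaler, and MaxAbsScaler
        on the bundled LIBSVM sample data."""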
        from pyspark.ml.feature import StandardScaler, MinMaxScaler, MaxAbsScaler

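        # Load the sample dataset of sparse feature vectors in LIBSVM format.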
        dataFrame = self.session.read.format("libsvm").load(
            self.dataDir + "/data/mllib/sample_libsvm_data.txt")
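
        # StandardScaler: scale each feature to unit standard deviation;
        # withMean=False leaves the sparse vectors uncentered.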
        scaler = StandardScaler(inputCol="features",
                                outputCol="scaledFeatures",
                                withStd=True,
                                withMean=False)

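        # Compute summary statistics by fitting the StandardScaler.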
        scalerModel = scaler.fit(dataFrame)
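
        # Normalize each feature to have unit standard deviation.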
        scaledData = scalerModel.transform(dataFrame)
        scaledData.show()

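        # MinMaxScaler: linearly rescale each feature to the range
        # [min, max] (defaults to [0.0, 1.0]).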
        scaler = MinMaxScaler(inputCol="features", outputCol="scaledFeatures")

        # Compute summary statistics and generate MinMaxScalerModel
        scalerModel = scaler.fit(dataFrame)

        # Rescale each feature to the range [min, max].
        scaledData = scalerModel.transform(dataFrame)
        print("Features scaled to range: [%f, %f]" %
              (scaler.getMin(), scaler.getMax()))
        scaledData.select("features", "scaledFeatures").show()

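        # MaxAbsScaler: divide each feature by its maximum absolute value,
        # mapping values into [-1, 1] while preserving sparsity.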
        scaler = MaxAbsScaler(inputCol="features", outputCol="scaledFeatures")

        # Compute summary statistics and generate MaxAbsScalerModel
        scalerModel = scaler.fit(dataFrame)

        # Rescale each feature to the range [-1, 1].
        scaledData = scalerModel.transform(dataFrame)

        scaledData.select("features", "scaledFeatures").show()