Example #1
File: test_torch.py Project: jambit/sensAI
def test_MLPClassifier(irisDataSet, irisClassificationTestCase, testResources):
    featureNames = irisDataSet.getInputOutputData().inputs.columns
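    # one normalisation rule per input column, each defaulting to a StandardScaler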
    dftNorm = DFTNormalisation([DFTNormalisation.Rule(re.escape(f)) for f in featureNames], defaultTransformerFactory=sklearn.preprocessing.StandardScaler)
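    # MLP with hidden layers of sizes 50, 25 and 8; input scaling is handled by the
    # DFTNormalisation above, so the model's built-in normalisation is disabled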
    model = sensai.torch.models.MultiLayerPerceptronVectorClassificationModel(hiddenDims=(50,25,8), cuda=False, epochs=100, optimiser="adam",
            batchSize=200, normalisationMode=NormalisationMode.NONE, hidActivationFunction=torch.tanh) \
        .withName("torchMLPClassifier") \
        .withInputTransformers([dftNorm]) \
        .withFeatureGenerator(FeatureGeneratorTakeColumns())
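    # evaluate the model via the classification test case, requiring accuracy of at least 0.8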
    irisClassificationTestCase.testMinAccuracy(model, 0.8)
Example #2
def test_multiColumnSingleRule(self):
    arr = np.array([1, 5, 10])
    df = pd.DataFrame({"foo": arr, "bar": arr * 100})
    # a single rule matches both columns; independentColumns=False fits the
    # MaxAbsScaler on the combined values of foo and bar
    dft = DFTNormalisation([
        DFTNormalisation.Rule(
            r"foo|bar",
            transformer=sklearn.preprocessing.MaxAbsScaler(),
            independentColumns=False)
    ])
    df2 = dft.fitApply(df)
    # the shared maximum absolute value is 1000, so both columns are divided by it
    assert np.all(df2.foo == arr / 1000) and np.all(df2.bar == arr / 10)
Example #3
def test_arrayValued(self):
    arr = np.array([1, 5, 10])
    # each cell of column foo contains an entire array rather than a scalar
    df = pd.DataFrame({"foo": [arr, 2 * arr, 10 * arr]})
    # arrayValued=True makes the rule normalise over the arrays' flattened contents
    dft = DFTNormalisation([
        DFTNormalisation.Rule(
            r"foo|bar",
            transformer=sklearn.preprocessing.MaxAbsScaler(),
            arrayValued=True)
    ])
    df2 = dft.fitApply(df)
    # the overall maximum absolute value is 100 (from 10 * arr)
    assert np.all(df2.foo.iloc[0] == arr / 100) and np.all(df2.foo.iloc[-1] == arr / 10)