Example #1
# assumed imports; SCNN and Update come from the library these examples demonstrate
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_california_housing

def test():
    # data
    cal_housing = fetch_california_housing()
    X = cal_housing['data']
    Y = np.reshape(cal_housing['target'], (-1, 1))

    # train models
    iters = 100
    name = ["0", "1", "2", "3", "4"]
    model = [
        SCNN(8, 1, 0, update=Update.Rprop()),
        SCNN(8, 1, 1, update=Update.Rprop()),
        SCNN(8, 1, 2, update=Update.Rprop()),
        SCNN(8, 1, 3, update=Update.Rprop()),
        SCNN(8, 1, 4, update=Update.Rprop())
    ]
    error = np.zeros((len(model), iters))
    for i in range(iters):
        for m in range(len(model)):
            error[m, i] = model[m].partial_fit(X, Y)
        print(i + 1, "complete")

    # plot results
    plt.figure()
    plt.title('Error Curves')
    for m in range(len(model)):
        plt.semilogy(error[m], label=name[m])
    plt.legend()

    plt.show()
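In this example the five SCNN models appear to differ only in their third constructor argument (presumably the hidden-layer size, given the hiddenSize keyword used in Example #3), so the error curves compare models of increasing hidden size on the California housing data.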
Example #2
# LLS, Activation, Update, Error and Regularize are assumed to come from the same library;
# n_components and testMnist() are defined elsewhere (see the sketch after Example #3)
def linearMnist():
    model = LLS(n_components,
                10,
                outputAct=Activation.Softmax(),
                update=Update.Rprop(),
                error=Error.JsDivergence(),
                regularization=Regularize.Ridge())
    testMnist(model)
Example #3
def scnnMnist():
    model = SCNN(n_components,
                 10,
                 hiddenSize=1,
                 hiddenAct=Activation.Selu(),
                 iweight=Initialize.lecun_normal,
                 outputAct=Activation.Softmax(),
                 update=Update.Rprop(),
                 error=Error.JsDivergence(),
                 regularization=Regularize.Ridge())
    testMnist(model)
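Both MNIST examples above hand the constructed model to a testMnist() helper and rely on a module-level n_components, neither of which is shown in these snippets. As a rough, hypothetical sketch of what such a helper could look like, assuming MNIST is fetched via scikit-learn and reduced to n_components features with PCA (the names and values below are assumptions, not the project's actual code):

import numpy as np
from sklearn.datasets import fetch_openml
from sklearn.decomposition import PCA

n_components = 64  # assumed value; the real examples define this elsewhere

def testMnist(model, iters=100):
    # load MNIST and project it down to n_components features to match the model's input size
    mnist = fetch_openml('mnist_784', version=1, as_frame=False)
    X = PCA(n_components=n_components).fit_transform(mnist.data / 255.0)
    # one-hot targets, since the models use a softmax output with a JS-divergence error
    Y = np.eye(10)[mnist.target.astype(int)]
    # train incrementally, printing the running error as the other examples do
    for i in range(iters):
        err = model.partial_fit(X, Y)
        print(i + 1, "error:", err)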
Example #4
# LLS and Update are again assumed to come from the library; numpy and matplotlib as in Example #1
def test():
   # base data
   X = np.random.randn( 1000, 1 ) * 10 + 50
   Y = X * 2 - 10

   # add noise
   X += np.random.randn( 1000, 1 ) * 2
   Y += np.random.randn( 1000, 1 ) * 2

   # split
   trainX = X[ :900 ]
   trainY = Y[ :900 ]
   testX = X[ 900: ]
   testY = Y[ 900: ]

   # for prediction line
   plotX = np.array( [ min( X ), max( X ) ] )
   
   iters = 2000
   name = [ "RMSProp", "Momentum", "Nesterov", "SGD", "Rprop", "Adam" ]
   model = [ LLS( 1, 1, update=Update.RmsProp() ),
             LLS( 1, 1, update=Update.Momentum( 1e-7 ) ),
             LLS( 1, 1, update=Update.NesterovMomentum( 1e-7 ) ),
             LLS( 1, 1, update=Update.Sgd( 1e-7 ) ),
             LLS( 1, 1, update=Update.Rprop() ),
             LLS( 1, 1, update=Update.Adam() ) ]
   error = np.zeros( ( len( model ), iters ) )
   for i in range( iters ):
      for m in range( len( model ) ):
         error[ m, i ] = model[ m ].partial_fit( trainX, trainY )
      print( i + 1, "complete" )

   # plot results
   plt.figure()
   plt.title( 'Data Space' )
   plt.scatter( trainX, trainY, label='train' )
   plt.scatter( testX, testY, label='test' )
   plt.plot( plotX, model[ 4 ].predict( plotX ).x_, label='prediction' )
   plt.legend()

   plt.figure()
   plt.title( 'Error Curves' )
   for m in range( len( model ) ):
      plt.semilogy( error[ m ], label=name[ m ] )
   plt.legend()

   plt.show()
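These example functions do not run on import; presumably each example script invokes them from a main guard, along the lines of:

if __name__ == '__main__':
    test()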