# Build and train a simple MLP classifier on MNIST (OpenDeep tutorial script).
# NOTE(review): this chunk was de-duplicated and re-flowed from four overlapping,
# newline-stripped copies of the same script; `layer1` and `mnist` are defined
# earlier in the file, outside this chunk.

# hidden-layer activation: ReLU over layer1's 1000-unit output
layer1_act = Activation(inputs=((None, 1000), layer1.get_outputs()), activation='relu')
# create the softmax classifier (10 classes, returning class probabilities)
layer2 = Softmax(inputs=((None, 1000), layer1_act.get_outputs()), outputs=10, out_as_probs=True)
# create the mlp from the two layers
mlp = Prototype(layers=[layer1, layer1_act, layer2])
# define the loss function: negative log-likelihood against integer targets
loss = Neg_LL(inputs=mlp.get_outputs(), targets=vector("y", dtype="int64"), one_hot=False)
# plot the loss live in the browser when Bokeh is installed; otherwise skip plotting
if BOKEH_AVAILABLE:
    plot = Plot("mlp_mnist", monitor_channels=Monitor("loss", loss.get_loss()), open_browser=True)
else:
    plot = None
# make an optimizer to train it (AdaDelta is a good default)
optimizer = AdaDelta(dataset=mnist, loss=loss, epochs=20)
# perform training!
mlp.train(optimizer, plot=plot)
# test it on some images!
test_data, test_labels = mnist.test_inputs, mnist.test_targets
test_data = test_data[:25]
test_labels = test_labels[:25]
# use the run function!
# NOTE(review): one duplicated variant of this script indexed the result as
# mlp.run(test_data)[0] — confirm whether run() returns a list here.
preds = mlp.run(test_data)
print('-------')
print(argmax(preds, axis=1))
print(test_labels.astype('int32'))
print()
print()
# free the dataset, model, and optimizer now that we're done with them
del mnist
del mlp
del optimizer