コード例 #1
0
def test_mean_squared_derivative(backend_default):
    """Check the MeanSquared cost derivative against a NumPy reference.

    The expected gradient is (outputs - targets) scaled by the inverse of
    both tensor dimensions, matching the cost's normalization.
    """
    preds = np.asarray([0.5, 1.0, 0.0, 0.0001]).reshape(4, 1)
    labels = np.asarray([0.5, 0.0, 1.0, 0.2]).reshape(4, 1)
    # Normalize by the column count first, then the row count, exactly as
    # the reference formulation does.
    reference = (preds - labels) / preds.shape[1] / preds.shape[0]
    compare_tensors(MeanSquared(), preds, labels, reference,
                    deriv=True, tol=1e-8)
コード例 #2
0
def test_mean_squared_limits(backend_default):
    """Check the MeanSquared forward cost against a NumPy reference.

    Expected value is half the per-column mean of the squared residuals.
    """
    preds = np.asarray([0.5, 1.0, 0.0, 0.0001]).reshape(4, 1)
    labels = np.asarray([0.5, 0.0, 1.0, 0.2]).reshape(4, 1)
    residual = preds - labels
    # 0.5 * mean(residual^2) along axis 0; keepdims preserves the (1, 1)
    # shape the comparison helper expects.
    reference = np.mean(residual * residual, axis=0, keepdims=True) / 2.
    compare_tensors(MeanSquared(), preds, labels, reference, tol=1e-7)
コード例 #3
0
                 reset_cells=False),
            # Affine output layer maps recurrent features back to the input
            # feature dimensionality; Identity activation suggests a
            # regression-style output.
            Affine(train_set.nfeatures, init, bias=init, activation=Identity())
        ]
    else:
        # Alternate branch: LSTM with cell state reset between batches,
        # followed by RecurrentLast (keeps only the final time step) before
        # the affine output layer.
        layers = [
            LSTM(hidden,
                 init,
                 activation=Logistic(),
                 gate_activation=Tanh(),
                 reset_cells=True),
            RecurrentLast(),
            Affine(train_set.nfeatures, init, bias=init, activation=Identity())
        ]

    model = Model(layers=layers)
    # Mean squared error objective, consistent with the Identity output above.
    cost = GeneralizedCost(MeanSquared())
    # stochastic_round comes from the command-line args (presumably for
    # reduced-precision backends — confirm against the argparser).
    optimizer = RMSProp(stochastic_round=args.rounding)

    # Callbacks evaluate on valid_set; remaining options are forwarded from
    # the command-line configuration.
    callbacks = Callbacks(model, eval_set=valid_set, **args.callback_args)

    # fit model
    model.fit(train_set,
              optimizer=optimizer,
              num_epochs=args.epochs,
              cost=cost,
              callbacks=callbacks)

    # =======visualize how the model does on validation set==============
    # run the trained model on train and valid dataset and see how the outputs match
    # Flatten model outputs to (num_samples, nfeatures) for comparison.
    train_output = model.get_outputs(train_set).reshape(
        -1, train_set.nfeatures)
コード例 #4
0
    # Three-layer MLP: two ReLU hidden layers with dropout, then a 3-class
    # Softmax output.
    layers = [
        Affine(nout=50, init=w, bias=b, activation=Rectlin()),
        Dropout(keep=0.5),
        Affine(nout=50, init=w, bias=b, activation=Rectlin()),
        Dropout(keep=0.4),
        Affine(nout=3, init=w, bias=b, activation=Softmax()),
        # NOTE(review): Dropout placed *after* the Softmax output randomly
        # zeroes class probabilities at train time — confirm this is
        # intentional rather than a layer-ordering mistake.
        Dropout(keep=0.3)
    ]

    # Optimizer
    optimizer = GradientDescentMomentum(0.1,
                                        momentum_coef=0.9,
                                        stochastic_round=args.rounding)

    # Cost
    # NOTE(review): MeanSquared paired with a Softmax output is atypical for
    # classification (cross-entropy is the usual pairing) — confirm.
    cost = GeneralizedCost(costfunc=MeanSquared())

    model = Model(layers=layers)

    # Validation-set evaluation plus any callbacks configured on the CLI.
    callbacks = Callbacks(model, eval_set=val_iter, **args.callback_args)

    # Training
    model.fit(train_iter,
              optimizer=optimizer,
              num_epochs=1,
              cost=cost,
              callbacks=callbacks)

    # Evaluate misclassification rate on the validation set.
    evaluate(model, val_iter, Metric=Misclassification())