예제 #1
0
 def loss(y, t):
     """Regularized objective: log loss on (y, t) plus a weighted penalty term."""
     data_term = nn_plankton.log_loss(y, t)
     penalty = lambda_reg * reg_term
     return data_term + penalty
예제 #2
0
 def loss(y, t):
     """Log loss plus an elastic-net style penalty blending L1 and L2 by alpha."""
     mixed_penalty = alpha * L1 + (1 - alpha) * L2
     return nn_plankton.log_loss(y, t) + reg_param * mixed_penalty
 def loss(y, t):
     """Return the log loss of predictions y against targets t, with a
     lambda_reg-scaled regularization term added on top."""
     regularization = lambda_reg * reg_term
     fit = nn_plankton.log_loss(y, t)
     return fit + regularization
 def loss(y, t):
     """Log loss combined with a reg_param-scaled mix of L1 and L2 penalties."""
     combined = alpha * L1 + (1 - alpha) * L2
     total = nn_plankton.log_loss(y, t) + reg_param * combined
     return total
예제 #5
0
]
# Free the raw per-model predictions list; the stacked array lives on separately.
del predictions_list
# Report each model's standalone validation log loss next to its file name.
for i in range(n_models):
    print(individual_prediction_errors[i],
          os.path.basename(valid_predictions_paths[i]))
print()

# optimizing weights: search for the blend of per-model predictions that
# minimizes validation log loss. The weights are parameterized through a
# softmax so the effective blend stays positive and sums to one.
X = theano.shared(predictions_stack)  # source predictions; presumably (n_models, n_samples, n_classes) -- TODO confirm
t = theano.shared(utils.one_hot(t_valid))  # one-hot encoded validation targets
W = T.vector('W')  # unconstrained logits, one per model

# Reshape to (n_models, 1, 1) so the weights broadcast over the stacked predictions.
s = T.nnet.softmax(W).reshape((W.shape[0], 1, 1))
weighted_avg_predictions = T.sum(X * s,
                                 axis=0)  # T.tensordot(X, s, [[0], [0]])
error = nn_plankton.log_loss(weighted_avg_predictions, t)
grad = T.grad(error, W)

# Compile objective and gradient functions for the scipy optimizer.
f = theano.function([W], error)
g = theano.function([W], grad)

# All-zero logits => uniform initial blend; optimize with L-BFGS-B.
w_init = np.zeros(n_models, dtype=theano.config.floatX)
out, loss, _ = scipy.optimize.fmin_l_bfgs_b(f,
                                            w_init,
                                            fprime=g,
                                            pgtol=1e-09,
                                            epsilon=1e-08,
                                            maxfun=10000)

# Map the optimized logits back to normalized blend weights (softmax by hand).
weights = np.exp(out)
weights /= weights.sum()
# Report each model's standalone validation log loss before blending.
# NOTE: modernized from Python 2 (`print` statements, `xrange`) to Python 3
# syntax for consistency with the rest of this file; output is unchanged.
print("Individual prediction errors")
individual_prediction_errors = [utils.log_loss(p, t_valid) for p in predictions_list]
del predictions_list  # free the per-model list; the stacked array lives on separately
for i in range(n_models):
    print(individual_prediction_errors[i], os.path.basename(valid_predictions_paths[i]))
print()

# optimizing weights: search for the blend of per-model predictions that
# minimizes validation log loss. The weights are parameterized through a
# softmax so the effective blend stays positive and sums to one.
X = theano.shared(predictions_stack)  # source predictions; presumably (n_models, n_samples, n_classes) -- TODO confirm
t = theano.shared(utils.one_hot(t_valid))  # one-hot encoded validation targets
W = T.vector('W')  # unconstrained logits, one per model

# Reshape to (n_models, 1, 1) so the weights broadcast over the stacked predictions.
s = T.nnet.softmax(W).reshape((W.shape[0], 1, 1))
weighted_avg_predictions = T.sum(X * s, axis=0)  # T.tensordot(X, s, [[0], [0]])
error = nn_plankton.log_loss(weighted_avg_predictions, t)
grad = T.grad(error, W)

# Compile objective and gradient functions for the scipy optimizer.
f = theano.function([W], error)
g = theano.function([W], grad)

# All-zero logits => uniform initial blend; optimize with L-BFGS-B.
w_init = np.zeros(n_models, dtype=theano.config.floatX)
out, loss, _ = scipy.optimize.fmin_l_bfgs_b(f, w_init, fprime=g, pgtol=1e-09, epsilon=1e-08, maxfun=10000)

# Map the optimized logits back to normalized blend weights (softmax by hand).
weights = np.exp(out)
weights /= weights.sum()

print('Optimal weights')
for i in range(n_models):
    print(weights[i], os.path.basename(valid_predictions_paths[i]))
print()