# Example 1

# Wrap the data iterators in background threads
train_iter = threaded_iterator(train_iter)
valid_iter = threaded_iterator(valid_iter)


# Construct loss function & accuracy
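# Map continuous network outputs to the nearest integer grade in [0, 4].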
def discrete_predict(predictions):
    return T.cast(T.round(T.clip(predictions, 0, 4)), 'int32')

predictions = nn.layers.get_output(output_layer, deterministic=False)
mse_loss = squared_error(predictions, y).mean()
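# L2 weight decay: sum of squared regularizable parameters, scaled by l2_reg below.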
params = nn.layers.get_all_params(output_layer, regularizable=True)
regularization = sum(T.sum(p ** 2) for p in params)
train_loss = mse_loss + l2_reg * regularization
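# Discretize and one-hot encode predictions and targets so accuracy and
# quadratic kappa can be computed on integer grades.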
one_hot_pred = T.eye(num_class, dtype='int32')[discrete_predict(predictions)]
one_hot_target = T.eye(num_class, dtype='int32')[y]
train_accuracy = accuracy(one_hot_pred, one_hot_target)
train_kappa = quad_kappa(one_hot_pred, one_hot_target)


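# Validation graph: deterministic=True disables stochastic layers such as dropout.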
valid_predictions = nn.layers.get_output(output_layer, deterministic=True)
valid_loss = squared_error(valid_predictions, y).mean()
one_hot_pred_val = T.eye(num_class, dtype='int32')[discrete_predict(valid_predictions)]
valid_accuracy = accuracy(one_hot_pred_val, one_hot_target)
valid_kappa = quad_kappa(one_hot_pred_val, one_hot_target)

# Scale grads
all_params = nn.layers.get_all_params(output_layer, trainable=True)
all_grads = T.grad(train_loss, all_params)
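# Optional: rescale gradients so their combined norm does not exceed max_norm (left disabled).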
#scaled_grads = nn.updates.total_norm_constraint(all_grads, max_norm=5, return_norm=False)

# Construct update
updates = nn.updates.nesterov_momentum(all_grads, all_params, learning_rate=lr, momentum=momentum)
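

# Example 2
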
train_iter = threaded_iterator(train_iter)
valid_iter = threaded_iterator(valid_iter)


# Construct loss function & accuracy
predictions = nn.layers.get_output(output_layer)
train_loss = categorical_crossentropy(predictions, y)
train_loss = train_loss.mean()
#params = nn.layers.get_all_params(output_layer, regularizable=True)
#regularization = sum(T.sum(p ** 2) for p in params)
#l2_penalty = regularization * l2_reg
all_layers = nn.layers.get_all_layers(output_layer)
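# Lasagne helper: L2 penalty over the regularizable parameters of all layers.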
l2_penalty = nn.regularization.regularize_layer_params(all_layers, nn.regularization.l2) * l2_reg
train_loss = train_loss + l2_penalty
train_accuracy = accuracy(predictions, y)
train_kappa = quad_kappa(predictions, y)


valid_predictions = nn.layers.get_output(output_layer, deterministic=True)
valid_loss = categorical_crossentropy(valid_predictions, y)
valid_loss = valid_loss.mean()
valid_accuracy = accuracy(valid_predictions, y)
valid_kappa = quad_kappa(valid_predictions, y)

# Scale grads
all_params = nn.layers.get_all_params(output_layer, trainable=True)
all_grads = T.grad(train_loss, all_params)
#scaled_grads = nn.updates.total_norm_constraint(all_grads, max_norm=10, return_norm=False)

# Construct update
updates = nn.updates.nesterov_momentum(all_grads, all_params, learning_rate=lr, momentum=momentum)

# Example 3
def hybrid_loss(y, t):
    # Quadratic-kappa loss plus a clipped cross-entropy term
    log_loss = categorical_crossentropy(y, t).mean()
    kappa_loss = quad_kappa_loss(y, t, y_pow=2)
    return kappa_loss + 0.5 * T.clip(log_loss, 0.6, 10**3)


def discrete_predict(predictions):
    return T.round(T.clip(predictions, 0, 4))


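# Training graph: deterministic=False keeps stochastic layers (e.g. dropout) active.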
predictions = nn.layers.get_output(output_layer, deterministic=False)
train_log_loss, train_reg_loss, train_multi_loss = multi_task_loss(
    predictions, y)
params = nn.layers.get_all_params(output_layer, regularizable=True)
regularization = sum(T.sum(p**2) for p in params)
train_loss = train_multi_loss + l2_reg * regularization
train_accuracy = accuracy(predictions[:, :num_class], y)
train_kappa = quad_kappa(predictions[:, :num_class], y)

valid_predictions = nn.layers.get_output(output_layer, deterministic=True)
valid_log_loss, valid_reg_loss, valid_multi_loss = multi_task_loss(
    valid_predictions, y)
valid_accuracy = accuracy(valid_predictions[:, :num_class], y)
valid_kappa = quad_kappa(valid_predictions[:, :num_class], y)

# Scale grads
all_params = nn.layers.get_all_params(output_layer, trainable=True)
all_grads = T.grad(train_loss, all_params)
#scaled_grads = nn.updates.total_norm_constraint(all_grads, max_norm=5, return_norm=False)

# Construct update
updates = nn.updates.nesterov_momentum(all_grads,
                                        all_params,
                                        learning_rate=lr,
                                        momentum=momentum)
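

# Example 4

# Hybrid objective: quadratic-kappa loss plus a clipped cross-entropy term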
def hybrid_loss(y, t):
    log_loss = categorical_crossentropy(y, t).mean()
    kappa_loss = quad_kappa_loss(y, t, y_pow=2)
    return kappa_loss + 0.5 * T.clip(log_loss, 0.6, 10 ** 3)

def discrete_predict(predictions):
    return T.round(T.clip(predictions, 0, 4))


predictions = nn.layers.get_output(output_layer, deterministic=False)
train_log_loss, train_reg_loss, train_multi_loss = multi_task_loss(predictions, y)
params = nn.layers.get_all_params(output_layer, regularizable=True)
regularization = sum(T.sum(p ** 2) for p in params)
train_loss = train_multi_loss + l2_reg * regularization
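# Only the first num_class columns of the multi-task output are class scores;
# accuracy and kappa are computed on that slice.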
train_accuracy = accuracy(predictions[:, :num_class], y)
train_kappa = quad_kappa(predictions[:, :num_class], y)


valid_predictions = nn.layers.get_output(output_layer, deterministic=True)
valid_log_loss, valid_reg_loss, valid_multi_loss = multi_task_loss(valid_predictions, y)
valid_accuracy = accuracy(valid_predictions[:, :num_class], y)
valid_kappa = quad_kappa(valid_predictions[:, :num_class], y)

# Scale grads
all_params = nn.layers.get_all_params(output_layer, trainable=True)
all_grads = T.grad(train_loss, all_params)
#scaled_grads = nn.updates.total_norm_constraint(all_grads, max_norm=5, return_norm=False)

# Construct update
updates = nn.updates.nesterov_momentum(all_grads, all_params, learning_rate=lr, momentum=momentum)
#updates = nn.updates.adam(all_grads, all_params, learning_rate=0.0001)

# Example 5

# Construct loss function & accuracy
def discrete_predict(predictions):
    return T.cast(T.round(T.clip(predictions, 0, 4)), 'int32')


predictions = nn.layers.get_output(output_layer, deterministic=False)
mse_loss = squared_error(predictions, y).mean()
params = nn.layers.get_all_params(output_layer, regularizable=True)
regularization = sum(T.sum(p**2) for p in params)
train_loss = mse_loss + l2_reg * regularization
one_hot_pred = T.eye(num_class, dtype='int32')[discrete_predict(predictions)]
one_hot_target = T.eye(num_class, dtype='int32')[y]
train_accuracy = accuracy(one_hot_pred, one_hot_target)
train_kappa = quad_kappa(one_hot_pred, one_hot_target)

valid_predictions = nn.layers.get_output(output_layer, deterministic=True)
valid_loss = squared_error(valid_predictions, y).mean()
one_hot_pred_val = T.eye(num_class, dtype='int32')[discrete_predict(valid_predictions)]
valid_accuracy = accuracy(one_hot_pred_val, one_hot_target)
valid_kappa = quad_kappa(one_hot_pred_val, one_hot_target)

# Scale grads
all_params = nn.layers.get_all_params(output_layer, trainable=True)
all_grads = T.grad(train_loss, all_params)
#scaled_grads = nn.updates.total_norm_constraint(all_grads, max_norm=5, return_norm=False)

# Construct update
updates = nn.updates.nesterov_momentum(all_grads,
                                        all_params,
                                        learning_rate=lr,
                                        momentum=momentum)