Example #1
# Tail of the data-iterator construction; the head of this call was lost in
# extraction. The surviving keyword arguments configure mean/std
# normalization and a parallel augmentation function, with testing=True
# selecting test-time behaviour.
                           normalize=(mean, std),
                           process_func=parallel_augment,
                           testing=True)
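# `threaded_iterator` is not defined in this snippet. Below is a minimal
# sketch of what such a wrapper typically does: it exhausts the wrapped
# iterator on a background thread and prefetches batches into a bounded
# queue, so that data augmentation overlaps GPU compute. The queue size and
# sentinel protocol here are assumptions, not the original implementation.
import threading
from queue import Queue

def threaded_iterator(iterator, maxsize=8):
    q = Queue(maxsize=maxsize)
    sentinel = object()

    def producer():
        for batch in iterator:
            q.put(batch)
        q.put(sentinel)  # signal exhaustion to the consumer

    thread = threading.Thread(target=producer)
    thread.daemon = True  # do not block interpreter exit
    thread.start()
    while True:
        batch = q.get()
        if batch is sentinel:
            return
        yield batch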
# Transform the batch iterators into threaded iterators
train_iter = threaded_iterator(train_iter)
valid_iter = threaded_iterator(valid_iter)

# Count the number of trainable parameters in the model
num_params = nn.layers.count_params(output_layer, trainable=True)
print('Number of trainable parameters: {}'.format(num_params))

# Construct loss function & accuracy
predictions = nn.layers.get_output(output_layer)
train_log_loss = categorical_crossentropy(predictions, y)
train_log_loss = train_log_loss.mean()
train_kappa_loss = quad_kappa_loss(predictions, y, y_pow=y_pow)
params = nn.layers.get_all_params(output_layer, regularizable=True)
regularization = sum(T.sum(p**2) for p in params)
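# Hybrid objective: the kappa loss plus a scaled, clipped log-loss term. The
# clip means that once the cross-entropy drops below log_cutoff it only
# contributes a constant, so further gradients come from the kappa term (and
# the L2 penalty) alone.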
train_hybrid_loss = train_kappa_loss + log_scale * T.clip(
    train_log_loss, log_cutoff, 10**3) + l2_reg * regularization
train_accuracy = accuracy(predictions, y)
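# For context: `quad_kappa_loss` (not defined in this snippet) is a
# differentiable surrogate for the quadratic weighted kappa agreement
# metric. A plain NumPy version of the metric itself, for integer labels,
# is sketched below; this is an assumption about intent, not the Theano
# loss used above.
import numpy as np

def quadratic_weighted_kappa(rater_a, rater_b, num_class):
    # Observed confusion matrix of label pairs.
    observed = np.zeros((num_class, num_class))
    for a, b in zip(rater_a, rater_b):
        observed[a, b] += 1.0
    # Expected counts under independence: outer product of the marginal
    # histograms, scaled to the same total as `observed`.
    hist_a = np.bincount(rater_a, minlength=num_class)
    hist_b = np.bincount(rater_b, minlength=num_class)
    expected = np.outer(hist_a, hist_b) / float(len(rater_a))
    # Quadratic disagreement weights.
    idx = np.arange(num_class)
    weights = (idx[:, None] - idx[None, :]) ** 2 / float((num_class - 1) ** 2)
    return 1.0 - (weights * observed).sum() / (weights * expected).sum()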

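# Validation graph: deterministic=True disables dropout and other stochastic
# layers; the kappa term alone serves as the validation loss.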
valid_predictions = nn.layers.get_output(output_layer, deterministic=True)
valid_log_loss = categorical_crossentropy(valid_predictions, y)
valid_log_loss = valid_log_loss.mean()
valid_kappa_loss = quad_kappa_loss(valid_predictions, y)
valid_loss = valid_kappa_loss
valid_accuracy = accuracy(valid_predictions, y)

# Scale grads
all_params = nn.layers.get_all_params(output_layer, trainable=True)

def loss(y, t):
    return quad_kappa_loss(y, t, y_pow=y_pow)

predictions = nn.layers.get_output(output_layer, deterministic=True)
train_log_loss, train_reg_loss, train_multi_loss = multi_task_loss(predictions, y)
# train_loss1 = categorical_crossentropy(predictions[:, :num_class], y).mean()
# train_loss2 = categorical_crossentropy(predictions[:, num_class:], y).mean()
# train_hybrid_loss = hybrid_loss(predictions, y)
params = nn.layers.get_all_params(output_layer, regularizable=True)
regularization = sum(T.sum(p ** 2) for p in params)
# train_loss = train_loss1 + train_loss2 + l2_reg * regularization
train_loss = train_multi_loss + l2_reg * regularization
# train_loss = train_hybrid_loss + l2_reg * regularization
# train_accuracy = accuracy(predictions[:, num_class:], y)
# train_kappa = quad_kappa_loss(predictions[:, num_class:], y)
train_accuracy = accuracy(predictions[:, :num_class], y)
train_kappa = quad_kappa_loss(predictions[:, :num_class], y)
# train_accuracy = accuracy(predictions, y)
# train_kappa = quad_kappa_loss(predictions, y)
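# The slicing suggests the output layer concatenates two heads: columns
# [:num_class] hold the classification probabilities scored here, while the
# remaining columns feed the second term inside multi_task_loss (whose
# surviving return line appears further down).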


valid_predictions = nn.layers.get_output(output_layer, deterministic=True)
valid_log_loss, valid_reg_loss, valid_multi_loss = multi_task_loss(valid_predictions, y)
# valid_loss = categorical_crossentropy(valid_predictions[:, num_class:], y).mean()
# valid_loss = hybrid_loss(valid_predictions, y)
valid_accuracy = accuracy(valid_predictions[:, :num_class], y)
# valid_accuracy = accuracy(valid_predictions, y)
valid_kappa = quad_kappa_loss(valid_predictions[:, :num_class], y)
# valid_kappa = quad_kappa_loss(valid_predictions, y)

# Scale grads
all_params = nn.layers.get_all_params(output_layer, trainable=True)


def multi_task_loss(y, t):
    # Body lost in extraction: only the final line survived. The signature is
    # inferred from the call sites above; it returns the log loss, a second
    # loss term, and their fixed 3:1 weighted combination.
    return log_loss, reg_loss, 0.75 * log_loss + 0.25 * reg_loss


def hybrid_loss(y, t):
    log_loss = categorical_crossentropy(y, t).mean()
    kappa_loss = quad_kappa_loss(y, t, y_pow=2)
    return kappa_loss + 0.5 * T.clip(log_loss, 0.6, 10**3)


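# A plain cross-entropy objective. deterministic=False keeps dropout and
# other stochastic layers active for the training pass, in contrast to the
# deterministic validation pass below.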
predictions = nn.layers.get_output(output_layer, deterministic=False)
log_loss = categorical_crossentropy(predictions, y).mean()
params = nn.layers.get_all_params(output_layer, regularizable=True)
regularization = sum(T.sum(p**2) for p in params)
train_loss = log_loss + l2_reg * regularization
train_accuracy = accuracy(predictions, y)
train_kappa = quad_kappa_loss(predictions, y)

valid_predictions = nn.layers.get_output(output_layer, deterministic=True)
valid_loss = categorical_crossentropy(valid_predictions, y).mean()
valid_accuracy = accuracy(valid_predictions, y)
valid_kappa = quad_kappa_loss(valid_predictions, y)

# Scale grads
all_params = nn.layers.get_all_params(output_layer, trainable=True)
all_grads = T.grad(train_loss, all_params)
# scaled_grads = nn.updates.total_norm_constraint(all_grads, max_norm=5, return_norm=False)
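# To enable the clipping above: Lasagne's total_norm_constraint rescales the
# whole gradient list whenever its joint L2 norm exceeds max_norm, e.g.
#   all_grads = nn.updates.total_norm_constraint(all_grads, max_norm=5)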

# Construct update
updates = nn.updates.nesterov_momentum(all_grads,
                                       all_params,
                                       learning_rate=lr,
                                       momentum=momentum)
# updates = nn.updates.adam(all_grads, all_params, learning_rate=0.0001)
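# A possible next step, not shown in the source: compile the symbolic graphs
# into callable Theano functions. `input_var` stands for the InputLayer's
# input variable and is not defined in this snippet.
#
#   import theano
#   train_fn = theano.function([input_var, y], [train_loss, train_accuracy],
#                              updates=updates)
#   valid_fn = theano.function([input_var, y], [valid_loss, valid_accuracy])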