def init_multitask_autoencoder_learner(num_features,
                                       autoencoder_bottle_neck_feature_size,
                                       autoencoder_num_layers,
                                       shared_hidden_layer_size,
                                       user_dense_layer_hidden_size,
                                       num_classes,
                                       num_covariates,
                                       shared_layer_dropout_prob,
                                       user_head_dropout_prob,
                                       learning_rate,
                                       decay,
                                       class_weights,
                                       student_list):
    """Build a multi-task autoencoder learner with its losses and optimizer.

    Constructs a ``multitask_autoencoder.MultiTaskAutoEncoderLearner`` whose
    per-user heads are keyed by ``student_list`` ids prefixed with
    ``"student_"``, moves the model and class weights to GPU when CUDA is
    available, and wires up the training criteria.

    Args:
        num_features: Input feature dimension fed to the autoencoder.
        autoencoder_bottle_neck_feature_size: Size of the bottleneck layer.
        autoencoder_num_layers: Number of autoencoder layers.
        shared_hidden_layer_size: Width of the shared hidden layer.
        user_dense_layer_hidden_size: Width of each per-user dense layer.
        num_classes: Number of output classes for classification.
        num_covariates: Number of covariate inputs.
        shared_layer_dropout_prob: Dropout probability on the shared layer.
        user_head_dropout_prob: Dropout probability on the user heads.
        learning_rate: Adam learning rate.
        decay: Adam weight decay (L2 regularization strength).
        class_weights: Per-class weights for the classification loss; any
            sequence accepted by ``torch.tensor``.
        student_list: Ids used to create the per-student model heads.

    Returns:
        Tuple of ``(model, reconstruction_criterion,
        classification_criterion, optimizer)``.
    """
    # Cast explicitly to float32: CrossEntropyLoss requires floating-point
    # weights, and torch.tensor() on a list of ints would otherwise infer a
    # Long tensor and fail when the loss is evaluated.
    class_weights = torch.tensor(class_weights, dtype=torch.float32)

    model = multitask_autoencoder.MultiTaskAutoEncoderLearner(
        conversions.prepend_ids_with_string(student_list, "student_"),
        num_features,
        autoencoder_bottle_neck_feature_size,
        autoencoder_num_layers,
        shared_hidden_layer_size,
        user_dense_layer_hidden_size,
        num_classes,
        num_covariates,
        shared_layer_dropout_prob,
        user_head_dropout_prob)

    # Keep the loss weights on the same device as the model.
    if torch.cuda.is_available():
        model.cuda()
        class_weights = class_weights.cuda()

    # Summed (not averaged) L1 reconstruction error.
    reconstruction_criterion = torch.nn.L1Loss(reduction="sum")
    classification_criterion = torch.nn.CrossEntropyLoss(weight=class_weights)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learning_rate,
                                 weight_decay=decay)

    return model, reconstruction_criterion, classification_criterion, optimizer
# Cross-validation: for each split, install its train/val ids and build a
# fresh model, losses, and optimizer before training on that split.
for split_no, split in enumerate(splits):
    # Best validation score seen for this split and the epoch it occurred at.
    # NOTE(review): these (and validation_user_statistics_over_epochs) are
    # initialized but not used in this chunk — the loop body presumably
    # continues with the training/validation epochs; confirm downstream.
    best_split_score = -1
    epoch_at_best_score = 0

    # Point both the tensorified and raw data dicts at this split's ids;
    # test ids are cleared while cross-validating.
    tensorified_data['train_ids'] = split['train_ids']
    data['train_ids'] = split['train_ids']
    tensorified_data['val_ids'] = split['val_ids']
    data['val_ids'] = split['val_ids']
    tensorified_data['test_ids'] = []

    validation_user_statistics_over_epochs = []

    # Fresh model per split so no weights leak between folds.
    # NOTE(review): this duplicates init_multitask_autoencoder_learner
    # (except the cuda_enabled flag vs. torch.cuda.is_available()); consider
    # calling that helper instead to keep the two in sync.
    model = multitask_autoencoder.MultiTaskAutoEncoderLearner(
        conversions.prepend_ids_with_string(student_list, "student_"),
        num_features,
        autoencoder_bottle_neck_feature_size,
        autoencoder_num_layers,
        shared_hidden_layer_size,
        user_dense_layer_hidden_size,
        num_classes,
        num_covariates,
        shared_layer_dropout_prob,
        user_head_dropout_prob)
    if cuda_enabled:
        model.cuda()
        # NOTE(review): class_weights is moved to GPU on every fold; after the
        # first fold it is already a CUDA tensor (harmless no-op) — verify it
        # was a tensor before entering the loop.
        class_weights = class_weights.cuda()

    # Summed (not averaged) L1 reconstruction error.
    reconstruction_criterion = torch.nn.L1Loss(reduction="sum")
    classification_criterion = torch.nn.CrossEntropyLoss(
        weight=class_weights)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=learning_rate,
                                 weight_decay=decay)