def valid(weights):
    running_loss = 0
    correct = np.zeros(10)
    pos = np.zeros(10)
    total = 0
    predictions = [np.array([])] * 10
    actual = [np.array([])] * 10
    v_neg_weights, v_class_weights = weights
    v_neg_weights, v_class_weights = v_neg_weights.to(
        args.device), v_class_weights.to(args.device)

    with torch.no_grad():
        for batch, labels, seq_len in val_loader:
            # pass to GPU if available
            batch, labels = batch.to(args.device), labels.to(args.device)

            # run network (no optimizer.zero_grad() needed under torch.no_grad())
            outputs = model(batch, seq_len, labels.shape[1])

            # weight negative labels and classes, then mask out padded/invalid entries
            multiplier = (((labels == 0).double() * v_neg_weights)
                          + (labels == 1).double()) * v_class_weights
            mask = (multiplier > 0) * (labels <= 1) * (labels >= 0)
            loss = torch.mean(
                criterion(outputs.masked_select(mask),
                          labels.masked_select(mask))
                * multiplier.masked_select(mask))
            running_loss += loss.cpu().data.numpy()

            # validation accuracy
            for i in range(labels.shape[0]):
                targets = labels.data[i][:int(seq_len[i])].cpu().numpy()
                prob = outputs.data[i][:int(seq_len[i])].cpu().numpy()
                prediction = np.zeros(targets.shape)
                prediction[np.arange(prediction.shape[0]),
                           np.argmax(prob, axis=1)] = 1
                match = (targets == prediction)
                pos += np.sum(prediction, axis=0).astype(int)
                correct += np.sum(match, axis=0).astype(int)
                total += targets.shape[0]

                # for AUC
                for j in range(10):
                    actual[j] = np.concatenate(
                        (actual[j],
                         labels[i][:int(seq_len[i]), j].view(-1).cpu().numpy()))
                    predictions[j] = np.concatenate(
                        (predictions[j],
                         outputs[i][:int(seq_len[i]), j].view(-1).cpu().numpy()))

    val_losses[epoch] = running_loss / len(val_loader)
    val_acc[epoch] = correct / total
    val_freq[epoch] = pos / sum(pos)
    val_auc[epoch] = get_aucs(actual, predictions)
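
# A minimal sketch of the get_aucs helper used above, assuming it is meant to
# return one ROC AUC per class from the accumulated per-class label and score
# arrays. The real helper may differ, e.g. in how it treats classes where only
# one label value is present (which roc_auc_score rejects); here those are
# reported as NaN.
from sklearn.metrics import roc_auc_score


def get_aucs(actual, predictions):
    aucs = []
    for y_true, y_score in zip(actual, predictions):
        # roc_auc_score raises if y_true contains only a single class
        if np.unique(y_true).size < 2:
            aucs.append(np.nan)
        else:
            aucs.append(roc_auc_score(y_true, y_score))
    return np.array(aucs)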
def train(weights):
    running_loss = 0
    correct = np.zeros(10)
    pos = np.zeros(10)
    total = 0
    predictions = [np.array([])] * 10
    actual = [np.array([])] * 10
    t_neg_weights, t_class_weights = weights
    t_neg_weights, t_class_weights = t_neg_weights.to(
        args.device), t_class_weights.to(args.device)

    for batch, labels, seq_len in train_loader:
        # pass to GPU if available
        batch, labels = batch.to(args.device), labels.to(args.device)

        # run network
        optimizer.zero_grad()
        outputs = model(batch)

        # weight negative labels and classes, then mask out padded/invalid entries
        multiplier = (((labels == 0).double() * t_neg_weights)
                      + (labels == 1).double()) * t_class_weights
        mask = (multiplier > 0) * (labels <= 1) * (labels >= 0)
        loss = torch.mean(
            criterion(outputs.masked_select(mask),
                      labels.masked_select(mask))
            * multiplier.masked_select(mask))

        # adjust weights and record loss
        loss.backward()
        optimizer.step()
        running_loss += loss.cpu().data.numpy()

        # train accuracy
        for i in range(labels.shape[0]):
            targets = labels.data[i][:int(seq_len[i])].cpu().numpy()
            prob = outputs.data[i][:int(seq_len[i])].cpu().numpy()
            prediction = np.zeros(targets.shape)
            prediction[np.arange(prediction.shape[0]),
                       np.argmax(prob, axis=1)] = 1
            match = (targets == prediction)
            pos += np.sum(prediction, axis=0).astype(int)
            correct += np.sum(match, axis=0).astype(int)
            total += targets.shape[0]

            # for AUC
            for j in range(10):
                actual[j] = np.concatenate(
                    (actual[j],
                     labels[i][:int(seq_len[i]), j].view(-1).cpu().numpy()))
                predictions[j] = np.concatenate(
                    (predictions[j],
                     outputs[i][:int(seq_len[i]), j].detach().view(-1).cpu().numpy()))

    train_losses[epoch] = running_loss / len(train_loader)
    train_acc[epoch] = correct / total
    train_freq[epoch] = pos / sum(pos)
    train_auc[epoch] = get_aucs(actual, predictions)
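
# Illustrative only: one possible driver loop for the two routines above.
# args.epochs, the neg_weights/class_weights tensors, and the shapes of the
# preallocated per-epoch arrays (train_losses, ..., val_auc) are assumptions
# about the surrounding script, not part of the original code; both functions
# read the module-level `epoch` set by this loop.
num_classes = 10
train_losses = np.zeros(args.epochs)
val_losses = np.zeros(args.epochs)
train_acc = np.zeros((args.epochs, num_classes))
val_acc = np.zeros((args.epochs, num_classes))
train_freq = np.zeros((args.epochs, num_classes))
val_freq = np.zeros((args.epochs, num_classes))
train_auc = np.zeros((args.epochs, num_classes))
val_auc = np.zeros((args.epochs, num_classes))

weights = (neg_weights, class_weights)  # assumed to be precomputed elsewhere
for epoch in range(args.epochs):
    model.train()
    train(weights)
    model.eval()
    valid(weights)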