def __init__(self):
    super().__init__()
    num_classes = 9
    weight = torch.tensor([
        0.01030928, 0.00552486, 0.00344828, 0.01388889, 0.02222222,
        0.01204819, 0.02272727, 0.00307692, 0.00055249,
    ])
    self.criterion = nn.CrossEntropyLoss(weight=weight)
    # self.criterion = nn.CrossEntropyLoss()
    self.metrics = {
        "accuracy": Accuracy(),
        "recall_macro": Recall(num_classes=num_classes, average="macro"),
        "precision_macro": Precision(num_classes=num_classes, average="macro"),
    }
    self.classifier = nn.Sequential(
        nn.Linear(768, 256),
        nn.Dropout(0.5),
        nn.ReLU(),
        nn.Linear(256, 128),
        nn.Dropout(0.5),
        nn.ReLU(),
        nn.Linear(128, num_classes),
    )
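The constructor above only defines the head and a plain dict of metrics; as a minimal sketch (not from the source), a step that would drive them might look like the following, assuming each batch yields pooled 768-d features and integer labels. Note that a plain dict, unlike nn.ModuleDict, is not moved to the module's device automatically.

def training_step(self, batch, batch_idx):
    x, y = batch                      # x: [B, 768], y: [B] (assumed shapes)
    logits = self.classifier(x)       # [B, num_classes]
    loss = self.criterion(logits, y)  # weighted cross-entropy
    for name, metric in self.metrics.items():
        self.log(f"train_{name}", metric(logits.softmax(dim=-1), y))
    return loss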
def __init__(self, hparams):
    super(MaskRefineModel, self).__init__()
    self.save_hyperparameters()

    # Hyperparameters
    self.hparams = hparams
    self.lr = hparams.lr

    # Modules
    self.net = load_network(hparams)
    self.criterion = TverskyLoss(hparams.tversky_alpha, hparams.tversky_beta)

    # Metrics
    self.train_metrics = MetricCollection([Precision(), Recall(), F1(2)])
    self.val_metrics = MetricCollection([Precision(), Recall(), F1(2)])
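TverskyLoss is not a stock torch.nn module. A minimal sketch of the usual formulation — the Tversky index TP / (TP + alpha*FP + beta*FN) turned into a loss — assuming predicted probabilities and binary targets of the same shape; the source's actual implementation may differ:

import torch
import torch.nn as nn

class TverskyLoss(nn.Module):
    def __init__(self, alpha, beta, eps=1e-7):
        super().__init__()
        self.alpha, self.beta, self.eps = alpha, beta, eps

    def forward(self, probs, targets):
        probs, targets = probs.flatten(), targets.flatten()
        tp = (probs * targets).sum()           # soft true positives
        fp = (probs * (1 - targets)).sum()     # soft false positives
        fn = ((1 - probs) * targets).sum()     # soft false negatives
        return 1 - tp / (tp + self.alpha * fp + self.beta * fn + self.eps)

Setting alpha above beta penalizes false positives more heavily, and vice versa; alpha = beta = 0.5 recovers the Dice loss.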
def __init__(self, MODEL, TRAIN_DATA, TRAIN_CODES, DEV_DATA, DEV_CODES,
             TEST_DATA, TEST_CODES, HIDDEN_UNIT1, BATCH_SIZE, LR, EPS,
             EPOCHS, FREEZE_BERT=False):
    super(CorefClassifier, self).__init__()
    # self.save_hyperparameters()
    self.BEST_THRESHOLD = 0
    self.train_data = TRAIN_DATA
    self.train_codes = TRAIN_CODES
    self.dev_data = DEV_DATA
    self.dev_codes = DEV_CODES
    self.test_data = TEST_DATA
    self.test_codes = TEST_CODES
    self.model = AutoModel.from_pretrained(MODEL)
    self.hidden_unit1 = HIDDEN_UNIT1
    if self.hidden_unit1:
        self.hidden_layer1 = nn.Linear(768, self.hidden_unit1)
        self.hidden_layer2 = nn.Linear(self.hidden_unit1, 1)
    else:
        self.hidden_layer1 = nn.Linear(768, 1)
    self.lossfn = nn.BCELoss()
    self.batch_size = BATCH_SIZE
    self.lr = LR
    self.eps = EPS
    self.epochs = EPOCHS
    if FREEZE_BERT:
        for param in self.model.parameters():
            param.requires_grad = False

    # Metrics
    self.valid_metrics = MetricCollection([
        Accuracy(),
        Precision(num_classes=1, average='macro'),
        Recall(num_classes=1, average='macro'),
        F1(num_classes=1, average='macro'),
    ])
    self.test_metrics = MetricCollection([
        Accuracy(),
        Precision(num_classes=1, average='macro'),
        Recall(num_classes=1, average='macro'),
        F1(num_classes=1, average='macro'),
    ])
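Because nn.BCELoss expects probabilities in [0, 1] rather than raw logits, the forward pass must apply a sigmoid before the loss. A sketch consistent with the constructor above (the pooling choice and method body are illustrative assumptions, not the source's code):

def forward(self, input_ids, attention_mask):
    out = self.model(input_ids=input_ids, attention_mask=attention_mask)
    pooled = out[0][:, 0]                    # [CLS] token embedding, [B, 768]
    if self.hidden_unit1:
        h = torch.relu(self.hidden_layer1(pooled))
        logits = self.hidden_layer2(h)
    else:
        logits = self.hidden_layer1(pooled)
    return torch.sigmoid(logits).squeeze(-1)  # probabilities for BCELoss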
def __init__(self, hparams):
    super(EdgeCompleteModel, self).__init__()
    self.save_hyperparameters()

    # Hyperparameters
    self.hparams = hparams
    self.lr = hparams.lr

    # Modules
    self.mask_refine_net = MaskRefineNet(hparams.mask_refine_weights)
    self.mask_refine_net.freeze()
    self.net = load_network(hparams)
    self.tv_loss = TverskyLoss(hparams.tversky_alpha, hparams.tversky_beta)

    # Metrics
    self.train_metrics = MetricCollection([Precision(), Recall(), F1(2)])
    self.val_metrics = MetricCollection([Precision(), Recall(), F1(2)])
def __init__(self, **kwargs):
    super().__init__(**kwargs)
    self.model = nn.Module()
    self.criterion = F.cross_entropy
    self.metrics = nn.ModuleDict({
        'accuracy': Accuracy(),
        'precision': Precision(),
        'recall': Recall(),
    })
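Here the metrics live in an nn.ModuleDict, so they follow the module across devices and appear in its state_dict. A sketch of the usual update/compute cycle (illustrative; the bare nn.Module() placeholder above would need to be replaced by a real network for this to run):

def validation_step(self, batch, batch_idx):
    x, y = batch
    logits = self.model(x)
    self.log('val_loss', self.criterion(logits, y))
    for metric in self.metrics.values():
        metric.update(logits.softmax(dim=-1), y)  # accumulate state

def validation_epoch_end(self, outputs):
    for name, metric in self.metrics.items():
        self.log(f'val_{name}', metric.compute())  # aggregate over epoch
        metric.reset()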
def __init__(self):
    super().__init__()
    self.cnn = ConvNet()
    self.criterion = FocalLoss(alpha=0.5, gamma=2.0, reduction='mean')
    self.config = load_classifier_config()
    self.learning_rate = self.config['training']['learning_rates']
    self.batch_size = self.config['training']['batch_sizes']
    self.accuracy = Accuracy(num_classes=2)
    self.f1 = F1()
    self.recall = Recall()
    self.precision = Precision()
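FocalLoss is not part of torch.nn. A common binary formulation as a sketch — alpha balances the two classes and gamma down-weights easy examples; whether the source's version is binary or multiclass is not visible from the snippet:

import torch
import torch.nn as nn
import torch.nn.functional as F

class FocalLoss(nn.Module):
    def __init__(self, alpha=0.5, gamma=2.0, reduction='mean'):
        super().__init__()
        self.alpha, self.gamma, self.reduction = alpha, gamma, reduction

    def forward(self, logits, targets):
        bce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
        p_t = torch.exp(-bce)  # probability assigned to the true class
        alpha_t = self.alpha * targets + (1 - self.alpha) * (1 - targets)
        loss = alpha_t * (1 - p_t) ** self.gamma * bce
        return loss.mean() if self.reduction == 'mean' else loss.sum()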
def validation_epoch_end(self, outputs):
    avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
    gap_epoch = self.gap[self.val_mode].compute_final()
    acc_metric = Accuracy()
    recall_metric = Recall()
    precision_metric = Precision()
    val_acc = acc_metric.forward(
        pred=torch.stack([batch['preds'] for batch in outputs]),
        target=torch.stack([batch['targets'] for batch in outputs]))
    val_recall = recall_metric.forward(
        pred=torch.stack([batch['preds'] for batch in outputs]),
        target=torch.stack([batch['targets'] for batch in outputs]))
    val_precision = precision_metric.forward(
        pred=torch.stack([batch['preds'] for batch in outputs]),
        target=torch.stack([batch['targets'] for batch in outputs]))
    val_logs = {
        'val_loss': avg_loss,
        'val_gap': gap_epoch,
        'val_acc': val_acc,
        'val_recall': val_recall,
        'val_precision': val_precision,
    }
    # reset metrics every epoch
    self.gap[self.val_mode].reset_stats()
    return {
        'val_loss': avg_loss,
        'val_acc': val_acc,
        'val_recall': val_recall,
        'val_precision': val_precision,
        'log': val_logs,
        'progress_bar': {'val_acc': val_acc, 'gap': gap_epoch},
    }
def __init__(self):
    super().__init__()
    num_classes = len(emotion_dict)
    num_classes = 9  # hard-coded override of the line above
    weight = torch.tensor([
        0.01030928, 0.00552486, 0.00344828, 0.01388889, 0.02222222,
        0.01204819, 0.02272727, 0.00307692, 0.00055249,
    ])
    rnn_hidden_size = 256
    self.rnn = nn.GRU(768, rnn_hidden_size, 2, batch_first=True,
                      bidirectional=False, dropout=0.3)
    self.criterion = nn.CrossEntropyLoss(weight=weight)
    # self.criterion = nn.CrossEntropyLoss()
    self.metrics = {
        "accuracy": Accuracy(),
        "recall_macro": Recall(num_classes=num_classes, average="macro"),
        "precision_macro": Precision(num_classes=num_classes, average="macro"),
    }
    self.classifier = nn.Sequential(
        nn.ReLU(),
        nn.Linear(rnn_hidden_size * 1, 128),  # * 2 would be needed if the GRU were bidirectional
        nn.Dropout(0.3),
        nn.ReLU(),
        nn.Linear(128, 64),
        nn.Dropout(0.3),
        nn.ReLU(),
        nn.Linear(64, num_classes),
    )
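A sketch (illustrative, not from the source) of the forward pass the modules above imply, assuming the input is a [B, T, 768] sequence of contextual embeddings:

def forward(self, embeddings):
    _, h_n = self.rnn(embeddings)        # h_n: [num_layers, B, 256]
    last_hidden = h_n[-1]                # top layer's final state, [B, 256]
    return self.classifier(last_hidden)  # [B, num_classes] logits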
def test_v1_5_metric_precision_recall():
    AveragePrecision.__init__.warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        AveragePrecision()

    Precision.__init__.warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        Precision()

    Recall.__init__.warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        Recall()

    PrecisionRecallCurve.__init__.warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        PrecisionRecallCurve()

    pred = torch.tensor([0, 1, 2, 3])
    target = torch.tensor([0, 1, 1, 1])

    average_precision.warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        assert average_precision(pred, target) == torch.tensor(1.)

    precision.warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        assert precision(pred, target) == torch.tensor(0.5)

    recall.warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        assert recall(pred, target) == torch.tensor(0.5)

    precision_recall.warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        prec, rc = precision_recall(pred, target)
    assert prec == torch.tensor(0.5)
    assert rc == torch.tensor(0.5)

    precision_recall_curve.warned = False
    with pytest.deprecated_call(match='It will be removed in v1.5.0'):
        prec, rc, thrs = precision_recall_curve(pred, target)
    assert torch.equal(prec, torch.tensor([1., 1., 1., 1.]))
    assert torch.allclose(rc, torch.tensor([1., 0.6667, 0.3333, 0.]), atol=1e-4)
    assert torch.equal(thrs, torch.tensor([1, 2, 3]))
def __init__(self, num_classes):
    super().__init__()
    self.criterion = nn.CrossEntropyLoss()
    self.metrics = {
        "accuracy": Accuracy(),
        "recall_macro": Recall(num_classes=num_classes, average="macro"),
        "precision_macro": Precision(num_classes=num_classes, average="macro"),
    }
    self.model = models.resnet50(pretrained=True)
    ## Only the last layer is trained
    # for p in self.model.parameters():
    #     p.requires_grad = False
    self.num_ftrs = self.model.fc.in_features
    self.num_classes = num_classes
    self.model.fc = nn.Linear(self.num_ftrs, self.num_classes)
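If the commented-out freeze above is enabled, only the newly created fc head remains trainable, since it is constructed after the loop that clears requires_grad. A sketch of an optimizer that respects this (illustrative, not the source's; the learning rate is a placeholder):

def configure_optimizers(self):
    trainable = [p for p in self.model.parameters() if p.requires_grad]
    return torch.optim.Adam(trainable, lr=1e-3)  # lr is a placeholder value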
def __init__(self, hparams: dict):
    super().__init__()
    # self.model = models.resnet50(pretrained=True)
    self.model = models.wide_resnet50_2(pretrained=True)
    # self.model = models.densenet201(pretrained=True)
    self.model.fc = torch.nn.Linear(self.model.fc.in_features, 2)
    # self.model.classifier = torch.nn.Linear(self.model.classifier.in_features, 2)
    self.criterion = torch.nn.CrossEntropyLoss()
    # self.softmax = torch.nn.Softmax()

    # hyperparameter setting
    self.hparams = hparams
    self.init_lr = hparams['optimizer_lr']
    print(hparams)

    self.accuracy = Accuracy()
    self.prec = Precision(num_classes=1, is_multiclass=False)
    self.rc = Recall(num_classes=1, is_multiclass=False)
    self.f1 = F1(num_classes=1, multilabel=False)
    # self.confmat = ConfusionMatrix(num_classes=1)
    self.preds = []
    self.gts = []
def __init__(self, *, d_model, hidden_dim=2048, num_heads, dropout,
             num_layers, activation='relu', kernel_size, num_emb, emb_dim,
             learning_rate):
    super().__init__()
    encoder_layer = nn.TransformerEncoderLayer(d_model, num_heads,
                                               dim_feedforward=hidden_dim,
                                               dropout=dropout,
                                               activation=activation)
    self.dataset = DataModule()
    self.learning_rate = learning_rate
    train_size = 185452
    test_size = 39740
    val_size = 39740
    self.train_set, self.test_set, self.val_set = random_split(
        self.dataset, [train_size, test_size, val_size],
        generator=torch.Generator().manual_seed(42))
    self.train_loader = DataLoader(self.train_set, batch_size=4,
                                   num_workers=4, pin_memory=True)
    self.val_loader = DataLoader(self.val_set, batch_size=4,
                                 num_workers=4, pin_memory=True)
    self.emb = nn.Embedding(num_emb, emb_dim, padding_idx=0)
    self.task_list = [
        'eco', 'result', 'black', 'white', 'whiteelo', 'blackelo', 'decade'
    ]
    task_labels = {
        'eco': eco_dict,
        'result': result_dict,
        'black': names_dict,
        'white': names_dict,
        'whiteelo': elo_dict,
        'blackelo': elo_dict,
        'decade': year_dict,
    }
    self.train_metrics_list = []
    self.train_loss_metrics_list = []
    self.classifiers = nn.ModuleDict()
    self.train_metrics = nn.ModuleDict()
    self.val_metrics = nn.ModuleDict()
    self.loss = nn.ModuleDict()
    for task in self.task_list:
        labels = task_labels[task]
        xmetrics = pl.metrics.MetricCollection([
            Accuracy(),
            Precision(num_classes=len(labels), average='macro'),
            Recall(num_classes=len(labels), average='macro'),
        ])
        self.train_metrics[task] = xmetrics.clone()
        self.val_metrics[task] = xmetrics.clone()
        self.classifiers[task] = nn.Linear(emb_dim, len(labels))
        self.loss[task] = nn.CrossEntropyLoss()
    self.encoder = nn.TransformerEncoder(encoder_layer, num_layers)
    self.smax = torch.nn.Softmax(dim=1)
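A sketch of the multi-task forward pass these modules imply: one shared encoder, one linear head per task. The mean pooling and the assumption that d_model equals emb_dim are illustrative choices, not confirmed by the source:

def forward(self, tokens):
    x = self.emb(tokens)                 # [B, T, emb_dim]
    x = self.encoder(x.transpose(0, 1))  # nn.TransformerEncoder expects [T, B, E]
    pooled = x.mean(dim=0)               # [B, emb_dim]
    return {task: self.classifiers[task](pooled) for task in self.task_list}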
def __init__(self, *, d_model, hidden_dim=2048, num_heads, dropout,
             num_layers, activation='relu', kernel_size, num_emb, emb_dim,
             learning_rate, label_encoder, hparams):
    super().__init__()
    self.losses_weight = {
        'ECO': hparams.eco_loss_wt,
        'Result': hparams.result_loss_wt,
        'Black': hparams.black_loss_wt,
        'White': hparams.white_loss_wt,
        'WhiteElo2': hparams.whiteelo_loss_wt,
        'BlackElo2': hparams.blackelo_loss_wt,
        'Decade': hparams.decade_loss_wt,
    }
    self.learning_rate = learning_rate
    self.task_list = [
        'ECO', 'Result', 'Black', 'White', 'WhiteElo2', 'BlackElo2', 'Decade'
    ]
    self.train_metrics_list = []
    self.train_loss_metrics_list = []
    self.wt_loss_metrics_list = []
    self.classifiers = nn.ModuleDict()
    self.train_metrics = nn.ModuleDict()
    self.val_metrics = nn.ModuleDict()
    self.loss = nn.ModuleDict()
    for task in self.task_list:
        xmetrics = pl.metrics.MetricCollection([
            Accuracy(),
            Precision(num_classes=label_encoder[task].vocab_size, average='macro'),
            Recall(num_classes=label_encoder[task].vocab_size, average='macro'),
        ])
        self.train_metrics[task] = xmetrics.clone()
        self.val_metrics[task] = xmetrics.clone()
        self.classifiers[task] = nn.Linear(emb_dim, label_encoder[task].vocab_size)
        self.loss[task] = nn.CrossEntropyLoss()
    self.emb = nn.Embedding(num_emb, emb_dim, padding_idx=0)
    encoder_layer = nn.TransformerEncoderLayer(d_model, num_heads,
                                               dim_feedforward=hidden_dim,
                                               dropout=dropout,
                                               activation=activation)
    self.encoder = nn.TransformerEncoder(encoder_layer, num_layers)
    self.smax = torch.nn.Softmax(dim=1)
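A sketch of combining the per-task losses with self.losses_weight, assuming a forward like the sketch after the previous snippet that returns a dict of per-task logits, and labels supplied as a dict keyed by task (both are assumptions about the batch layout):

def training_step(self, batch, batch_idx):
    tokens, labels = batch  # labels: dict mapping task name -> [B] targets
    logits = self(tokens)
    total = 0.0
    for task in self.task_list:
        task_loss = self.loss[task](logits[task], labels[task])
        self.log(f'train_loss_{task}', task_loss)
        total = total + self.losses_weight[task] * task_loss
    return total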
def run_epoch(model, dataloader, criterion, optimizer=None, epoch=0,
              scheduler=None, device='cpu'):
    import pytorch_lightning.metrics.functional.classification as clmetrics
    from pytorch_lightning.metrics import Precision, Accuracy, Recall
    from sklearn.metrics import roc_auc_score, average_precision_score

    metrics = Accumulator()
    cnt = 0
    total_steps = len(dataloader)
    steps = 0
    running_corrects = 0
    # Instantiated but unused below; the epoch-level metrics are derived
    # from the accumulated confusion-matrix counts instead.
    accuracy = Accuracy()
    precision = Precision(num_classes=2)
    recall = Recall(num_classes=2)
    preds_epoch = []
    labels_epoch = []
    for inputs, labels in dataloader:
        steps += 1
        inputs = inputs.to(device)  # torch.Size([2, 1, 224, 224])
        labels = labels.to(device).unsqueeze(1).float()  # torch.Size([2, 1])

        outputs = model(inputs)  # [batch_size, nb_classes]
        loss = criterion(outputs, labels)

        if optimizer:
            loss.backward()
            optimizer.step()
            optimizer.zero_grad()

        preds_epoch.extend(torch.sigmoid(outputs).tolist())
        labels_epoch.extend(labels.tolist())
        threshold = 0.5
        prob = (torch.sigmoid(outputs) > threshold).long()
        conf = torch.flatten(
            clmetrics.confusion_matrix(prob, labels, num_classes=2))
        tn, fp, fn, tp = conf

        metrics.add_dict({
            'data_count': len(inputs),
            'loss': loss.item() * len(inputs),
            'tp': tp.item(),
            'tn': tn.item(),
            'fp': fp.item(),
            'fn': fn.item(),
        })
        cnt += len(inputs)
        if scheduler:
            scheduler.step()
        del outputs, loss, inputs, labels, prob

    logger.info(f'cnt = {cnt}')
    metrics['loss'] /= cnt

    def safe_div(x, y):
        if y == 0:
            return 0
        return x / y

    _TP, _TN, _FP, _FN = metrics['tp'], metrics['tn'], metrics['fp'], metrics['fn']
    acc = (_TP + _TN) / cnt
    sen = safe_div(_TP, (_TP + _FN))  # sensitivity == recall
    spe = safe_div(_TN, (_FP + _TN))
    prec = safe_div(_TP, (_TP + _FP))
    metrics.add('accuracy', acc)
    metrics.add('sensitivity', sen)
    metrics.add('specificity', spe)
    metrics.add('precision', prec)

    auc = roc_auc_score(labels_epoch, preds_epoch)
    aupr = average_precision_score(labels_epoch, preds_epoch)
    metrics.add('auroc', auc)
    metrics.add('aupr', aupr)
    logger.info(metrics)
    return metrics, preds_epoch, labels_epoch
class HPALit(pl.LightningModule):

    def __init__(self, params):
        super().__init__()
        self.hparams = params
        self.lr = self.hparams.lr
        self.save_hyperparameters()
        self.model = Net(name=self.hparams.model)
        self.criterion = torch.nn.BCEWithLogitsLoss()
        self.train_accuracy = Accuracy(subset_accuracy=True)
        self.val_accuracy = Accuracy(subset_accuracy=True)
        self.train_recall = Recall()
        self.val_recall = Recall()
        self.train_df = pd.read_csv(
            f'data_preprocessing/train_fold_{self.hparams.fold}.csv')
        self.valid_df = pd.read_csv(
            f'data_preprocessing/valid_fold_{self.hparams.fold}.csv')
        self.train_transforms = A.Compose([
            A.Rotate(),
            A.HorizontalFlip(p=0.5),
            A.VerticalFlip(p=0.5),
            A.Resize(width=self.hparams.img_size, height=self.hparams.img_size),
            A.Normalize(),
            ToTensorV2(),
        ])
        self.valid_transforms = A.Compose([
            A.Resize(width=self.hparams.img_size, height=self.hparams.img_size),
            A.Normalize(),
            ToTensorV2(),
        ])
        self.train_dataset = CellDataset(data_dir=self.hparams.data_dir,
                                         csv_file=self.train_df,
                                         transform=self.train_transforms)
        self.val_dataset = CellDataset(data_dir=self.hparams.data_dir,
                                       csv_file=self.valid_df,
                                       transform=self.valid_transforms)

    def train_dataloader(self):
        return DataLoader(
            self.train_dataset,
            batch_size=self.hparams.batch_size,
            shuffle=True,
            num_workers=self.hparams.n_workers,
            pin_memory=True,
        )

    def val_dataloader(self):
        return DataLoader(
            self.val_dataset,
            batch_size=self.hparams.batch_size,
            shuffle=False,
            num_workers=self.hparams.n_workers,
            pin_memory=True,
        )

    def configure_optimizers(self):
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            optimizer, 'min', factor=0.5, patience=2, eps=1e-6)
        lr_scheduler = {
            'scheduler': scheduler,
            'interval': 'epoch',
            'monitor': 'valid_loss_epoch',
        }
        return [optimizer], [lr_scheduler]

    def training_step(self, batch, batch_idx):
        x, y = batch
        pred = self.model(x)
        train_loss = self.criterion(pred, y)
        self.train_accuracy(torch.sigmoid(pred), y.type(torch.int))
        self.train_recall(torch.sigmoid(pred), y.type(torch.int))
        return {'loss': train_loss}

    def training_epoch_end(self, outputs):
        train_loss_epoch = torch.stack([x['loss'] for x in outputs]).mean()
        self.log('train_loss_epoch', train_loss_epoch)
        self.log('train_acc_epoch', self.train_accuracy.compute())
        self.log('train_recall_epoch', self.train_recall.compute())
        self.train_accuracy.reset()
        self.train_recall.reset()

    def validation_step(self, batch, batch_idx):
        x, y = batch
        pred = self.model(x)
        val_loss = self.criterion(pred, y)
        self.val_accuracy(torch.sigmoid(pred), y.type(torch.int))
        self.val_recall(torch.sigmoid(pred), y.type(torch.int))
        return {'valid_loss': val_loss}

    def validation_epoch_end(self, outputs):
        val_loss_epoch = torch.stack([x['valid_loss'] for x in outputs]).mean()
        self.log('valid_loss_epoch', val_loss_epoch)
        self.log('valid_acc_epoch', self.val_accuracy.compute())
        self.log('valid_recall_epoch', self.val_recall.compute())
        self.val_accuracy.reset()
        self.val_recall.reset()
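Since the module defines its own dataloaders, trainer.fit can be called with the module alone. A sketch of driving it (get_params and the max_epochs value are placeholders, not from the source):

if __name__ == '__main__':
    params = get_params()  # hypothetical helper returning the hparams namespace
    model = HPALit(params)
    trainer = pl.Trainer(gpus=1, max_epochs=10)  # placeholder settings
    trainer.fit(model)  # dataloaders come from the module itself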
def __init__(self, num_classes, average='macro', **kwargs):
    super(L_Recall, self).__init__()
    self.measure = Recall(num_classes=num_classes, average=average)
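As written the wrapper only stores the metric. If L_Recall subclasses nn.Module (an assumption; the base class is not shown), a delegating forward would make it callable like the metric it wraps:

def forward(self, preds, target):
    return self.measure(preds, target)  # delegate to the wrapped Recall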