Example #1
    def __init__(self,
                 in_channels: int,
                 out_channels: int,
                 hidden_channels: int = 64,
                 num_layers: int = 3,
                 dropout: float = 0.5):
        super().__init__()
        self.save_hyperparameters()

        self.convs = ModuleList()
        for _ in range(num_layers):
            mlp = Sequential(
                Linear(in_channels, 2 * hidden_channels),
                BatchNorm1d(2 * hidden_channels),
                ReLU(inplace=True),
                Linear(2 * hidden_channels, hidden_channels),
                BatchNorm1d(hidden_channels),
                ReLU(inplace=True),
            )
            conv = GINConv(mlp, train_eps=True)
            self.convs.append(conv)
            in_channels = hidden_channels

        self.classifier = Sequential(
            Linear(hidden_channels, hidden_channels),
            BatchNorm1d(hidden_channels),
            ReLU(inplace=True),
            Dropout(p=dropout),
            Linear(hidden_channels, out_channels),
        )

        self.train_acc = Accuracy()
        self.val_acc = Accuracy()
        self.test_acc = Accuracy()
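A minimal usage sketch for the module above (assuming PyTorch Geometric batches with x, edge_index, batch and y, a global_add_pool readout, and F as torch.nn.functional; none of these appear in the original example):

    # Hypothetical training_step wiring the convs, classifier and metric together.
    def training_step(self, data, batch_idx):
        x = data.x
        for conv in self.convs:
            x = conv(x, data.edge_index)
        x = global_add_pool(x, data.batch)  # graph-level readout (assumed)
        logits = self.classifier(x)
        loss = F.cross_entropy(logits, data.y)
        self.train_acc(logits.argmax(dim=-1), data.y)
        self.log("train_acc", self.train_acc, on_step=False, on_epoch=True)
        return loss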
Example #2
    def __init__(self):
        super().__init__()

        # UC Merced images are (3, 256, 256) (channels, height, width)
        self.conv1 = nn.Conv2d(in_channels=3,
                               out_channels=16,
                               kernel_size=3,
                               stride=1,
                               padding=0)
        self.conv2 = nn.Conv2d(in_channels=16,
                               out_channels=32,
                               kernel_size=5,
                               stride=1,
                               padding=0)
        self.conv3 = nn.Conv2d(in_channels=32,
                               out_channels=48,
                               kernel_size=3,
                               stride=2,
                               padding=0)
        self.conv4 = nn.Conv2d(in_channels=48,
                               out_channels=64,
                               kernel_size=3,
                               stride=2,
                               padding=0)
        self.pool = nn.MaxPool2d(2, 2)
        self.fc1 = nn.Linear(64 * 3 * 3, 128)

        self.train_acc = Accuracy()
        self.val_acc = Accuracy(compute_on_step=False)
        self.test_acc = Accuracy(compute_on_step=False)
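The 64 * 3 * 3 input size of fc1 follows if self.pool is applied after every convolution: 256 -> 254 (conv1) -> 127 (pool) -> 123 (conv2) -> 61 -> 30 (conv3, stride 2) -> 15 -> 7 (conv4, stride 2) -> 3. A forward sketch under that assumption (the original forward is not shown):

    # Assumed forward pass consistent with the layer sizes above.
    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))  # (16, 254, 254) -> (16, 127, 127)
        x = self.pool(F.relu(self.conv2(x)))  # (32, 123, 123) -> (32, 61, 61)
        x = self.pool(F.relu(self.conv3(x)))  # (48, 30, 30)   -> (48, 15, 15)
        x = self.pool(F.relu(self.conv4(x)))  # (64, 7, 7)     -> (64, 3, 3)
        x = torch.flatten(x, 1)               # 64 * 3 * 3 = 576 features
        return self.fc1(x)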
Example #3
 def __init__(self, only_digits=False, input_channels=1, *args, **kwargs):
     model = CNN_OriginalFedAvg(only_digits=only_digits, input_channels=input_channels)
     super().__init__(*args, model=model, **kwargs)
     self.model = model
     # self.model.apply(init_weights)
     self.accuracy = Accuracy()
     self.train_accuracy = Accuracy()
Example #4
def segmentation_model_attack(model,
                              model_type,
                              config,
                              num_classes=NUM_CLASSES):
    """Salt and pepper augmentation of segmentation images, return accuracy - difference between that and normal is
    a measure of resiliency"""

    if model_type == "pt":
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        test = perturbed_pt_gis_test_data()
        test_set = PT_GISDataset(test)
        testloader = DataLoader(test_set, batch_size=int(config['batch_size']))
        accuracy = Accuracy()
        for sample in tqdm(testloader):
            cuda_in = sample[0].to(device)
            out = model(cuda_in)
            output = out.to('cpu').squeeze(1)
            accuracy(output, sample[1])
            # cuda_in = cuda_in.detach()
            # label = label.detach()
        return accuracy.compute().item()
    elif model_type == "tf":
        x_test, y_test = perturbed_tf_gis_test_data()
        test_acc = model.evaluate(x_test,
                                  y_test,
                                  batch_size=config['batch_size'])
        return test_acc
    else:
        print("Unknown model type, failure.")
        return None
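A short usage sketch for the function above (clean_accuracy and the config values are placeholders, not part of this example):

# Hypothetical: resiliency is the drop from clean to perturbed accuracy.
perturbed_acc = segmentation_model_attack(model, "pt", {"batch_size": 8})
resiliency = clean_accuracy - perturbed_acc  # clean_accuracy measured elsewhere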
Example #5
    def __init__(self,
                 encoder,
                 DATA_PATH,
                 VAL_PATH,
                 hidden_dim,
                 image_size,
                 seed,
                 cpus,
                 transform=SimCLRTransform,
                 **classifier_hparams):
        super().__init__()

        self.DATA_PATH = DATA_PATH
        self.VAL_PATH = VAL_PATH
        self.transform = transform
        self.image_size = image_size
        self.cpus = cpus
        self.seed = seed

        self.batch_size = classifier_hparams['batch_size']
        self.classifier_hparams = classifier_hparams

        self.linear_layer = SSLEvaluator(
            n_input=encoder.embedding_size,
            n_classes=self.classifier_hparams['num_classes'],
            p=self.classifier_hparams['dropout'],
            n_hidden=hidden_dim)

        self.train_acc = Accuracy()
        self.val_acc = Accuracy(compute_on_step=False)

        self.encoder = encoder

        self.save_hyperparameters()
Example #6
    def __init__(self, num_channel, num_class, chunk_size=2048):
        super().__init__()

        self.num_channel = num_channel
        self.chunk_size = chunk_size
        self.network = nn.Sequential(
            nn.Conv1d(self.num_channel, 16, 3),
            nn.Conv1d(16, 32, 3, padding=1),
            nn.MaxPool1d(2, stride=2),
            nn.Conv1d(32, 32, 3, padding=1),
            nn.MaxPool1d(2, stride=2),
            nn.Conv1d(32, 32, 3, padding=1),
            nn.MaxPool1d(2, stride=2),
            nn.Conv1d(32, 64, 3, padding=1),
            nn.MaxPool1d(2, stride=2),
            nn.Conv1d(64, 64, 3, padding=1),
            nn.MaxPool1d(2, stride=2),
            nn.Flatten(),
            nn.Linear(4032, 1024),
            nn.ReLU(),
            nn.Linear(1024, 256),
            nn.ReLU(),
            nn.Linear(256, 64),
            nn.ReLU(),
            nn.Linear(64, num_class)
        )
        self.train_acc = Accuracy()
        self.valid_acc = Accuracy()
        self.test_acc = Accuracy()
        self.train_f1 = FBeta(num_classes=num_class)
        self.valid_f1 = FBeta(num_classes=num_class)
        self.test_f1 = FBeta(num_classes=num_class)

        self.predictions = []
        self.targets = []
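The nn.Linear(4032, 1024) size can be sanity-checked against the default chunk_size=2048: the first unpadded conv leaves 2046 samples and the five stride-2 poolings reduce that to 63, so the flattened size is 64 * 63 = 4032. A quick check (the class name Net is an assumption):

# Run only the layers before Flatten and inspect the shape.
model = Net(num_channel=1, num_class=10)
out = model.network[:11](torch.randn(2, 1, 2048))
print(out.shape)  # torch.Size([2, 64, 63]); 64 * 63 = 4032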
Example #7
    def __init__(self, vocab_size, embedding_dim=32):
        super().__init__()

        self.embeddings_layer = nn.Embedding(vocab_size, embedding_dim)
        self.loss = nn.BCEWithLogitsLoss()
        self.valid_accuracy = Accuracy()
        self.test_accuracy = Accuracy()
Example #8
    def __init__(self, lr: float = 0.01, num_blocks: int = 5):
        super().__init__()
        self.lr = lr
        self.num_blocks = num_blocks

        self.train_acc = Accuracy()
        self.valid_acc = Accuracy()
        self.test_acc = Accuracy()
Example #9
class IrisClassification(pl.LightningModule):
    def __init__(self, **kwargs):
        super().__init__()

        self.train_acc = Accuracy()
        self.val_acc = Accuracy()
        self.test_acc = Accuracy()
        self.args = kwargs

        self.fc1 = nn.Linear(4, 10)
        self.fc2 = nn.Linear(10, 10)
        self.fc3 = nn.Linear(10, 3)
        self.cross_entropy_loss = nn.CrossEntropyLoss()

        self.lr = kwargs.get("lr", 0.01)
        self.momentum = kwargs.get("momentum", 0.9)
        self.weight_decay = kwargs.get("weight_decay", 0.1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)  # no activation: CrossEntropyLoss expects raw logits
        return x

    def configure_optimizers(self):
        return torch.optim.SGD(self.parameters(),
                               lr=self.lr,
                               momentum=self.momentum,
                               weight_decay=self.weight_decay)

    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self.forward(x)
        loss = self.cross_entropy_loss(logits, y)
        self.train_acc(torch.argmax(logits, dim=1), y)
        self.log("train_acc",
                 self.train_acc.compute(),
                 on_step=False,
                 on_epoch=True)
        self.log("loss", loss)
        return {"loss": loss}

    def validation_step(self, batch, batch_idx):
        x, y = batch
        logits = self.forward(x)
        loss = F.cross_entropy(logits, y)
        self.val_acc(torch.argmax(logits, dim=1), y)
        self.log("val_acc", self.val_acc.compute())
        self.log("val_loss", loss, sync_dist=True)

    def test_step(self, batch, batch_idx):
        x, y = batch
        logits = self.forward(x)
        loss = F.cross_entropy(logits, y)
        self.test_acc(torch.argmax(logits, dim=1), y)
        self.log("test_loss", loss)
        self.log("test_acc", self.test_acc.compute())
Example #10
    def __init__(self, ft_embedding_dim, hidden_dim=64):
        super().__init__()

        self.lstm_layer = nn.LSTM(ft_embedding_dim, hidden_dim, batch_first=True, bidirectional=True)
        self.dropout_layer = nn.Dropout(0.2)
        self.out_layer = nn.Linear(hidden_dim * 2, 1)

        self.loss = nn.BCEWithLogitsLoss()
        self.valid_accuracy = Accuracy()
        self.test_accuracy = Accuracy()
Example #11
def str_accuracy(m: Accuracy, detail: bool = False):
    # snapshot the counters: compute() may sync or alter the metric's state
    backup = m.correct, m.total
    metric = m.compute()
    m.correct, m.total = backup
    if math.isnan(metric) or math.isinf(metric):
        return 'N/A'
    elif not detail:
        return f'{metric * 100:.2f}%'
    else:
        return f'{metric * 100:.2f}%(= {m.correct}/{m.total})'
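Usage sketch for str_accuracy, showing that the metric's state survives formatting:

acc = Accuracy()
acc(torch.tensor([0, 1, 1]), torch.tensor([0, 1, 0]))
print(str_accuracy(acc))               # '66.67%'
print(str_accuracy(acc, detail=True))  # '66.67%(= 2/3)'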
Example #12
def test_wrong_params(top_k, threshold):
    preds, target = _input_mcls_prob.preds, _input_mcls_prob.target

    with pytest.raises(ValueError):
        acc = Accuracy(threshold=threshold, top_k=top_k)
        acc(preds, target)
        acc.compute()

    with pytest.raises(ValueError):
        accuracy(preds, target, threshold=threshold, top_k=top_k)
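A test like this is normally driven by a pytest.mark.parametrize decorator supplying invalid argument combinations; a plausible sketch (the exact values are an assumption):

# Hypothetical parametrization: top_k must be positive, threshold within [0, 1].
@pytest.mark.parametrize("top_k, threshold", [(0, 0.5), (None, 1.5)])
def test_wrong_params(top_k, threshold):
    ...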
Example #13
    def __init__(self):
        super().__init__()
        for i in range(3):
            setattr(self, f"layer_{i}", nn.Linear(32, 32))
            setattr(self, f"layer_{i}a", torch.nn.ReLU())
        setattr(self, "layer_end", nn.Linear(32, 3))

        self.train_acc = Accuracy()
        self.valid_acc = Accuracy()
        self.test_acc = Accuracy()
Example #14
    def __init__(self, **kwargs):
        super().__init__()

        self.train_acc = Accuracy()
        self.val_acc = Accuracy()
        self.test_acc = Accuracy()
        self.args = kwargs

        self.fc1 = nn.Linear(4, 10)
        self.fc2 = nn.Linear(10, 10)
        self.fc3 = nn.Linear(10, 3)
        self.cross_entropy_loss = nn.CrossEntropyLoss()
Example #15
 def __init__(self,
              num_classes,
              *args,
              weights=None,
              pretrain=True,
              **kwargs):
     model = torchvision.models.resnet18(pretrained=pretrain)
     model.fc = Linear(in_features=512, out_features=num_classes, bias=True)
     super().__init__(*args, model=model, **kwargs)
     self.model = model
     self.accuracy = Accuracy()
     self.train_accuracy = Accuracy()
     self.criterion = CrossEntropyLoss(weight=weights)
Example #16
    def __init__(
        self,
        num_layers: int = 2,
        hidden_channels: int = 128,
        heads: int = 8,
        groups: int = 16,
        dropout: float = 0.8,
        cached: bool = False,
        num_features: int = None,
        num_classes: int = None,
    ):
        super().__init__()

        assert num_features is not None
        assert num_classes is not None

        # utils from Lightning to save __init__ arguments
        self.save_hyperparameters()
        hparams = self.hparams

        # Instantiate metrics
        self.val_acc = Accuracy(num_classes=hparams["num_classes"])
        self.test_acc = Accuracy(num_classes=hparams["num_classes"])

        # Define DNA graph convolution model
        self.hidden_channels = hparams["hidden_channels"]
        self.lin1 = nn.Linear(hparams["num_features"],
                              hparams["hidden_channels"])

        # Create ModuleList to hold all convolutions
        self.convs = nn.ModuleList()

        # Iterate through the number of layers
        for _ in range(hparams["num_layers"]):

            # Create a DNA convolution. This graph convolution relies on a
            # multi-head attention mechanism to route information, similar to
            # Transformers:
            # https://github.com/rusty1s/pytorch_geometric/blob/master/torch_geometric/nn/conv/dna_conv.py#L172
            self.convs.append(
                DNAConv(
                    hparams["hidden_channels"],
                    hparams["heads"],
                    hparams["groups"],
                    dropout=hparams["dropout"],
                    cached=hparams["cached"],
                ))
        # classification MLP
        self.lin2 = nn.Linear(hparams["hidden_channels"],
                              hparams["num_classes"],
                              bias=False)
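DNAConv layers consume the stack of all previous hidden states rather than a single tensor, so a matching forward pass has to accumulate them. A sketch in the style of the PyTorch Geometric DNA example (the dropout placement and log_softmax output are assumptions):

    # Assumed forward pass; not part of the original example.
    def forward(self, x, edge_index):
        x = F.relu(self.lin1(x))
        x_all = x.view(-1, 1, self.hidden_channels)  # (num_nodes, 1, C)
        for conv in self.convs:
            x = F.relu(conv(x_all, edge_index))
            x_all = torch.cat([x_all, x.view(-1, 1, self.hidden_channels)], dim=1)
        x = x_all[:, -1]  # representation from the last layer
        x = F.dropout(x, p=self.hparams["dropout"], training=self.training)
        return torch.log_softmax(self.lin2(x), dim=1)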
Example #17
    def __init__(self, cfg: Config) -> None:
        super().__init__()  # type: ignore

        self.logger: Union[LoggerCollection, WandbLogger, Any]
        self.wandb: Run

        self.cfg = cfg

        self.model = ConvNet(self.cfg)
        self.criterion = nn.CrossEntropyLoss()

        # Metrics
        self.train_acc = Accuracy()
        self.val_acc = Accuracy()
Example #18
    def __init__(self, conf: DictConfig):
        super().__init__()

        self.trn_metric = Accuracy()
        self.val_metric = Accuracy()
        self.tst_metric = Accuracy()
        self.save_hyperparameters(conf)

        # instantiate objects
        self.model = Net(self.hparams)
        self.criterion = instantiate(self.hparams.loss)
        self.mixfunction = instantiate(self.hparams.mixmethod)
        if self.mixfunction is not None:
            _logger.info(f"Training with {self.mixfunction}")
Example #19
 def __init__(self,
              num_classes,
              *args,
              weights=None,
              pretrain=True,
              **kwargs):
     model = torchvision.models.mobilenet_v2(pretrained=pretrain)
     model.classifier = Sequential(
         Dropout(p=0.2, inplace=False),
         Linear(in_features=1280, out_features=num_classes, bias=True))
     super().__init__(*args, model=model, **kwargs)
     self.model = model
     self.accuracy = Accuracy()
     self.train_accuracy = Accuracy()
     self.criterion = CrossEntropyLoss(weight=weights)
Example #20
    def __init__(self):
        super().__init__()

        num_classes = 9

        weight = torch.tensor([
            0.01030928,
            0.00552486,
            0.00344828,
            0.01388889,
            0.02222222,
            0.01204819,
            0.02272727,
            0.00307692,
            0.00055249,
        ])

        self.criterion = nn.CrossEntropyLoss(weight=weight)
        # unweighted alternative: self.criterion = nn.CrossEntropyLoss()
        # ModuleDict (metrics are nn.Modules) so they follow the model's device
        self.metrics = nn.ModuleDict({
            "accuracy": Accuracy(),
            "recall_macro": Recall(num_classes=num_classes, average="macro"),
            "precision_macro": Precision(num_classes=num_classes,
                                         average="macro"),
        })

        self.classifier = nn.Sequential(
            nn.Linear(768, 256),
            nn.Dropout(0.5),
            nn.ReLU(),
            nn.Linear(256, 128),
            nn.Dropout(0.5),
            nn.ReLU(),
            nn.Linear(128, num_classes),
        )
Example #21
    def __init__(self, max_batch_len, learning_rate, **kwargs):
        super().__init__()
        self.input_size = max_batch_len
        self.feature_size = 12
        self.flatten_size = self.input_size * self.feature_size
        self.hidden_size = 128
        self.d = 10
        self.learning_rate = learning_rate

        self.criterion_class = torch.nn.BCELoss()
        self.criterion_ae = torch.nn.MSELoss()

        self.encoder = nn.Sequential(
            nn.Linear(self.flatten_size, self.hidden_size), nn.ReLU(),
            nn.Linear(self.hidden_size, self.d))

        self.decoder = nn.Sequential(
            nn.Linear(self.d, self.hidden_size), nn.ReLU(),
            nn.Linear(self.hidden_size, self.flatten_size))

        self.classifier = nn.Sequential(nn.Linear(self.d, 1), nn.Dropout(0.3),
                                        nn.Sigmoid())

        self.save_hyperparameters()
        self.accuracy = Accuracy()
Example #22
def test_topk_accuracy(preds, target, exp_result, k, subset_accuracy):
    topk = Accuracy(top_k=k, subset_accuracy=subset_accuracy)

    for batch in range(preds.shape[0]):
        topk(preds[batch], target[batch])

    assert topk.compute() == exp_result

    # Test functional
    total_samples = target.shape[0] * target.shape[1]

    preds = preds.view(total_samples, 4, -1)
    target = target.view(total_samples, -1)

    assert accuracy(preds, target, top_k=k,
                    subset_accuracy=subset_accuracy) == exp_result
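For intuition, a small concrete top_k case (assuming a torchmetrics version whose functional accuracy accepts probability predictions):

# Two samples, three classes; true labels are 0 and 2.
preds = torch.tensor([[0.6, 0.3, 0.1],
                      [0.2, 0.5, 0.3]])
target = torch.tensor([0, 2])
print(accuracy(preds, target, top_k=1))  # tensor(0.5): sample 2 misses at k=1
print(accuracy(preds, target, top_k=2))  # tensor(1.0): class 2 is in its top-2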
Example #23
    def __init__(
        self,
        num_classes,
        backbone="resnet18",
        num_features: int = None,
        pretrained=True,
        loss_fn: Callable = F.cross_entropy,
        optimizer: Type[torch.optim.Optimizer] = torch.optim.SGD,
        metrics: Union[Callable, Mapping, Sequence, None] = Accuracy(),
        learning_rate: float = 1e-3,
    ):
        super().__init__(
            model=None,
            loss_fn=loss_fn,
            optimizer=optimizer,
            metrics=metrics,
            learning_rate=learning_rate,
        )

        self.save_hyperparameters()

        self.backbone, num_features = backbone_and_num_features(
            backbone, pretrained)

        self.head = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),
            nn.Flatten(),
            nn.Linear(num_features, num_classes),
        )
Example #24
    def __init__(self, encoder, DATA_PATH, batch_size, val_split, hidden_dims,
                 train_transform, val_transform, num_workers, **kwargs):
        super().__init__()

        self.DATA_PATH = DATA_PATH
        self.val_split = val_split
        self.batch_size = batch_size
        self.hidden_dims = hidden_dims
        self.train_transform = train_transform
        self.val_transform = val_transform
        self.num_workers = num_workers

        # data stuff
        shutil.rmtree('split_data', ignore_errors=True)
        if not (path.isdir(f"{self.DATA_PATH}/train")
                and path.isdir(f"{self.DATA_PATH}/val")):
            splitfolders.ratio(self.DATA_PATH,
                               output=f"split_data",
                               ratio=(1 - self.val_split, self.val_split),
                               seed=10)
            self.DATA_PATH = 'split_data'
            print('automatically split data into train and validation folders')

        self.num_classes = len(os.listdir(f'{self.DATA_PATH}/train'))

        # model stuff
        self.eval_acc = Accuracy()
        self.encoder, self.embedding_size = load_encoder(encoder, kwargs)
        self.fc1 = nn.Linear(self.embedding_size, self.hidden_dims)
        self.fc2 = nn.Linear(self.hidden_dims, self.num_classes)
Example #25
    def __init__(self, device, num_classes, topK=3):
        if not pl:
            return

        self.device = device
        self.topK = topK

        # https://github.com/PyTorchLightning/metrics/blob/master/torchmetrics/classification/f_beta.py#L221
        #mdmc_average = "samplewise"
        mdmc_average = "global"

        val_metrics = {
            'hamming_dist': HammingDistance() if HammingDistance is not None else None,
            'iou': IoU(num_classes=num_classes),
            'auroc': AUROC(num_classes=num_classes),
            'f1': F1(num_classes=num_classes,
                     multilabel=True,
                     mdmc_average=mdmc_average),
            'avg_precision': AveragePrecision(num_classes=num_classes),
            # 'acc': Accuracy(num_classes=num_classes, mdmc_average=mdmc_average)
        }

        for k in range(1, topK + 1):
            val_metrics["top%d" % k] = Accuracy(top_k=k)
            val_metrics["top%d_f1" % k] = F1(top_k=k)

        # drop unavailable metrics; ModuleDict cannot hold None values
        val_metrics = {k: v for k, v in val_metrics.items() if v is not None}
        self.val_metrics = torch.nn.ModuleDict(val_metrics).to(self.device)

        self.class_names = list(range(num_classes))
        self.label_binarizer = MultiLabelBinarizer(classes=self.class_names)
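A sketch of how such a metric dict is typically updated and read out during validation (preds and targets are placeholders, not part of this example):

# Hypothetical per-batch update and end-of-epoch readout.
for metric in self.val_metrics.values():
    metric.update(preds, targets)
results = {name: m.compute() for name, m in self.val_metrics.items()}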
Example #26
    def __init__(
        self,
        embedding_dim: Optional[int] = None,
        backbone: str = "swav-imagenet",
        pretrained: bool = True,
        loss_fn: Callable = F.cross_entropy,
        optimizer: Type[torch.optim.Optimizer] = torch.optim.SGD,
        metrics: Union[Callable, Mapping, Sequence, None] = Accuracy(),
        learning_rate: float = 1e-3,
        pooling_fn: Callable = torch.max
    ):
        super().__init__(
            model=None,
            loss_fn=loss_fn,
            optimizer=optimizer,
            metrics=metrics,
            learning_rate=learning_rate,
        )

        self.save_hyperparameters()
        self.backbone_name = backbone
        self.embedding_dim = embedding_dim
        assert pooling_fn in [torch.mean, torch.max]
        self.pooling_fn = pooling_fn

        self.backbone, num_features = backbone_and_num_features(backbone, pretrained)

        if embedding_dim is None:
            self.head = nn.Identity()
        else:
            self.head = nn.Sequential(
                nn.Flatten(),
                nn.Linear(num_features, embedding_dim),
            )
            rank_zero_warn('embedding_dim is not None. Remember to finetune first!')
Example #27
 def __init__(self, model, criterion, optimizer, scheduler=None):
     super().__init__()
     self.model = model
     self.criterion = criterion
     self.optimizer = optimizer
     self.scheduler = scheduler
     self.metric = Accuracy()
Example #28
    def __init__(self, seq_len, max_batch_len, learning_rate, **kwargs):
        super().__init__()
        self.n_features = 12
        self.hidden_size = 256
        self.num_layers = 5
        self.dropout = 0.5
        self.seq_len = seq_len
        self.max_batch_size = max_batch_len
        self.criterion = torch.nn.BCELoss()
        self.learning_rate = learning_rate
        self.batch_size = int(self.max_batch_size / self.seq_len)

        self.output_size = 1
        self.fc_size_1 = 50
        self.fc_size_2 = 10

        self.lstm = nn.LSTM(input_size=self.n_features,
                            hidden_size=self.hidden_size,
                            num_layers=self.num_layers,
                            dropout=self.dropout,
                            batch_first=True)

        self.activation = nn.LeakyReLU()
        self.fc1 = nn.Linear(self.hidden_size, self.fc_size_1)
        self.fc2 = nn.Linear(self.fc_size_1, self.fc_size_2)
        self.fc3 = nn.Linear(self.fc_size_2, self.output_size)

        self.sigmoid = nn.Sigmoid()

        self.save_hyperparameters()
        self.accuracy = Accuracy()
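A forward pass consistent with these layers (an assumed sketch; the original method is not shown) would route the last time step's LSTM output through the fully connected stack:

    # Assumed forward; uses the final time step's hidden output.
    def forward(self, x):
        out, _ = self.lstm(x)                # (batch, seq_len, hidden_size)
        out = self.activation(self.fc1(out[:, -1]))
        out = self.activation(self.fc2(out))
        return self.sigmoid(self.fc3(out))   # probability for BCELoss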
Example #29
    def __init__(self, cfg, fold=0):
        super().__init__()
        self.fold = fold
        self.cfg = cfg
        trn_params = cfg['train_params']

        self.batch_size = get_or_default(trn_params, 'batch_size', 16)
        self.num_workers = get_or_default(trn_params, 'num_workers', 2)
        self.aug_type = get_or_default(cfg, 'aug', '0')
        self.margin_start = get_or_default(cfg, 'margin_start', 10)
        self.csv_path = get_or_default(cfg, 'csv_path',
                                       'input/train_folds.csv')
        self.trn_path = get_or_default(cfg, 'image_path', 'input/train')
        self.le = LabelEncoder()
        train = pd.read_csv(self.csv_path)
        self.le.fit(train.label_group)
        model = get_or_default(cfg, 'model', 'ShopeeModelTimm')
        num_classes = len(self.le.classes_)

        if model == 'ShopeeModelTimm':
            self.model = ShopeeModelTimm(num_classes, backbone=cfg['backbone'])
        elif model == 'ShopeeModelResnext':
            self.model = ShopeeModelResnext(num_classes=num_classes)
        else:
            raise Exception('unsupported model {}'.format(model))

        self.crit = nn.CrossEntropyLoss()
        self.acc = Accuracy()
        print('using fold', self.fold)
Example #30
 def __init__(self, num_classes: int, *args, **kwargs):
     super().__init__(model=OmniglotModel(num_classes=num_classes),
                      *args,
                      **kwargs)
     self.model.apply(init_weights)
     self.criterion = nn.CrossEntropyLoss(reduction='sum')
     self.accuracy = Accuracy()