Example #1
    def __init__(self, hid):
        super().__init__()

        sizes = (4096, *hid, 4430)
        for n, (i, j) in enumerate(zip(sizes, sizes[1:]), start=1):
            layer = Linear(i, j)
            kaiming_normal_(layer.weight)  # He init pairs well with the ReLU below
            zeros_(layer.bias)
            setattr(self, f'l{n}', layer)

        self.act = ReLU(inplace=True)
        self.head = Softmax(dim=1)  # implicit dim is deprecated; pass it explicitly

        self.__size = len(sizes) - 1

        # one top-k accuracy per cutoff k, keyed by str(k)
        metrics = MetricCollection(
            {str(n): Accuracy(top_k=n)
             for n in [1, 5, 10, 25, 50, 100]})

        self.train_metrics = metrics.clone(prefix='train_')
        self.valid_metrics = metrics.clone(prefix='valid_')
        self.test_metrics = metrics.clone(prefix='test_')

        # interleave linear layers with the shared ReLU; the final layer stays linear
        layers = []
        for n in range(1, self.__size):
            layers.append(getattr(self, f'l{n}'))
            layers.append(self.act)
        layers.append(getattr(self, f'l{self.__size}'))

        self.body = Sequential(*layers)
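
The cloned collections share metric definitions but keep independent state per split. Below is a minimal usage sketch, not part of the source: it assumes the class is a LightningModule, batches are (x, y) pairs, and torch.nn.functional is imported as F.

    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self.body(x)
        loss = F.cross_entropy(logits, y)  # cross-entropy expects raw logits
        probs = self.head(logits)          # softmax probabilities for the top-k metrics
        self.log_dict(self.train_metrics(probs, y))  # MetricCollection returns a dict
        return loss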
Example #2
    def __init__(self,
                 emb_vec=None,
                 model_name='awe',
                 input_dim=1200,
                 hidden_dim=512,
                 classes=3,
                 disable_nonlinear=False):  # input dim should be vectors.dim * 4
        super().__init__()
        if emb_vec is not None:  # '!=' on a tensor would compare elementwise
            self.emb_vec = nn.Embedding.from_pretrained(emb_vec, freeze=True)
        else:
            self.emb_vec = nn.Embedding(33635, 300)
        self.emb_dim = 300  #self.emb_vec.embedding_dim
        self.input_dim = 2048 * 4
        if model_name == 'awe':
            self.model = AverageEmbeddings()
            self.input_dim = 300 * 4
        elif model_name == 'lstm':
            self.model = LSTMR(self.emb_dim, 2048)  #2048 for lstm
        elif model_name == 'bilstm':
            self.model = BILSTM(self.emb_dim, 2048)  #2048 for lstm
            self.input_dim = 2048 * 2 * 4
        elif model_name == 'bilstm-max':
            self.model = BILSTMMaxPool(self.emb_dim, 2048)  #2048 for lstm
            self.input_dim = 2048 * 2 * 4
        # https://github.com/ihsgnef/InferSent-1/blob/6bea0ef38358a4b7f918cfacfc46d8607b516dc8/train_nli.py
        # Default dropout is 0?
        self.dropout = 0
        self.hidden_dim = hidden_dim
        self.classes = classes
        if disable_nonlinear:
            self.classifier = nn.Sequential(
                nn.Linear(self.input_dim, self.hidden_dim),
                nn.Linear(self.hidden_dim, self.hidden_dim),
                nn.Linear(self.hidden_dim, self.classes))
        else:
            self.classifier = nn.Sequential(
                nn.Dropout(p=self.dropout),
                nn.Linear(self.input_dim, self.hidden_dim),
                nn.Tanh(),
                nn.Dropout(p=self.dropout),
                nn.Linear(self.hidden_dim, self.hidden_dim),
                nn.Tanh(),
                nn.Dropout(p=self.dropout),
                nn.Linear(self.hidden_dim, self.classes),
            )

        self.loss = nn.CrossEntropyLoss()
        self.softmax = nn.Softmax(dim=1)

        self.start_time = time.time()
        ##Metrics
        metrics = MetricCollection([Accuracy()])

        self.train_metrics = metrics.clone(prefix='train_')
        self.valid_metrics = metrics.clone(prefix='val_')
        self.train_acc = pl.metrics.Accuracy()
        self.valid_acc = pl.metrics.Accuracy()
        self.test_acc = pl.metrics.Accuracy()
        self.prev_val_acc = 0
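
The input dims above are all multiples of four because the classifier follows the InferSent recipe: it consumes the concatenation of u, v, |u - v|, and u * v. A hypothetical forward pass illustrating this (the encoder call signatures are assumptions, not from the source):

    def forward(self, premise, hypothesis):
        u = self.model(self.emb_vec(premise))     # encoded premise
        v = self.model(self.emb_vec(hypothesis))  # encoded hypothesis
        # [u, v, |u - v|, u * v] -> 4 * encoder_dim == self.input_dim
        features = torch.cat([u, v, torch.abs(u - v), u * v], dim=1)
        return self.classifier(features)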
Example #3
def main(args):
    # Setup experiment
    if not torch.cuda.is_available():
        raise NotImplementedError("Training on CPU is not supported.")

    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    args.experiment = args.experiment or args.model.replace('_', '-')
    args.experiment_dir = os.path.join(
        args.output_dir, args.dataset,
        args.experiment + ("-cumulative" if args.all else ""))
    os.makedirs(args.experiment_dir, exist_ok=True)

    tb_writer = TensorBoardLogger(save_dir=args.experiment_dir)
    seed_everything(42, workers=True)

    train_loaders, valid_loaders = data.build_dataset(
        args.dataset,
        args.data_path,
        blurry=args.blurry,
        batch_size=args.batch_size)

    model = GCL(args, train_loaders)
    trainer = Trainer(gpus=-1,
                      distributed_backend='ddp',
                      max_epochs=len(valid_loaders),
                      reload_dataloaders_every_epoch=True,
                      plugins=DDPPlugin(find_unused_parameters=False),
                      logger=tb_writer)

    # for task_id in range(num_tasks):
    # print(f"task:    {task_id}")
    # train_loader = DataLoader(train_loaders[task_id], batch_size=10)

    metrics = MetricCollection(
        [Accuracy(), F1(args.num_classes, average='macro')])
    model.train_metrics = metrics.clone(prefix=f'train{model.curr_index}_')
    trainer.fit(model)
    trainer.train_loop.reset_train_val_dataloaders(model)
    if model.curr_index:
        # sanity check: collect labels to confirm the train loader was reloaded
        labels = [batch[1] for batch in trainer.train_dataloader]
        print("Check if train loader is reloaded", labels)

    #! TEST
    test_loaders = [
        DataLoader(ds, batch_size=len(ds))
        for ds in valid_loaders[:model.curr_index + 1]
    ]
    model.test_metrics = nn.ModuleList(
        [metrics.clone(prefix=f'valid{i}_') for i in range(len(test_loaders))])

    trainer.test(model, test_dataloaders=test_loaders)
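
With a list of test dataloaders, Lightning passes dataloader_idx into test_step, which lines up with the nn.ModuleList of per-task clones built above. A sketch of the matching step, assuming the model's forward returns class scores (this method is not part of the source):

    def test_step(self, batch, batch_idx, dataloader_idx=0):
        x, y = batch
        preds = self(x)
        # each task logs through its own clone: 'valid0_', 'valid1_', ...
        self.log_dict(self.test_metrics[dataloader_idx](preds, y))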
Example #4
    def __init__(
            self,
            arch: str,
            optcfg: DictConfig,
            arch_ckpt: Optional[str] = None,
            schcfg: Optional[DictConfig] = None,
            **kwargs,
    ):
        super().__init__()

        self.schcfg = schcfg
        self.optcfg = optcfg
        self.save_hyperparameters()

        # prefer a fine-tuned checkpoint over the base architecture when given
        if arch_ckpt:
            arch = arch_ckpt
        self.transformer = AutoModelForSequenceClassification.from_pretrained(arch, num_labels=7)

        # loss function
        self.criterion = nn.CrossEntropyLoss()

        # metrics
        mc = MetricCollection({
            "accuracy": Accuracy(threshold=0.0),
            "recall": Recall(threshold=0.0, num_classes=7, average='macro'),
            "precision": Precision(threshold=0.0, num_classes=7, average='macro'),
            "f1": F1(threshold=0.0, num_classes=7, average='macro'),
            "macro_auc": AUROC(num_classes=7, average='macro'),
            # "weighted_auc": AUROC(num_classes=7, average='weighted')
        })
        # ModuleDict registers each per-phase clone as a proper submodule;
        # a subscripted ModuleDict[...] annotation would be evaluated at runtime and fail
        self.metrics = ModuleDict({
            f"{phase}_metric": mc.clone()
            for phase in ["train", "valid", "test"]
        })
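
Because the three collections differ only in their dictionary key, a single helper can serve all phases. A sketch under the assumption that batches follow the usual Hugging Face layout with a labels key (the helper name is hypothetical):

    def _shared_step(self, batch, phase):
        out = self.transformer(**batch)
        loss = self.criterion(out.logits, batch["labels"])
        self.log_dict(self.metrics[f"{phase}_metric"](out.logits, batch["labels"]))
        return loss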
Example #5
    def __init__(
        self,
        num_classes: int,
        effdet_backbone: str = "tf_efficientdet_d4",
        strides: List[int] = [8, 16, 32, 64, 128],
        sizes: List[Tuple[int, int]] = [(-1, 64), (64, 128), (128, 256), (256, 512), (512, 10_000_000)],
        threshold: Optional[float] = None,
        nms_threshold: Optional[float] = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.save_hyperparameters()
        self.num_classes = int(num_classes)
        self.strides = [int(x) for x in strides]
        self.sizes = [(int(x), int(y)) for x, y in sizes]

        # TODO train this from scratch using combustion EfficientDet
        # self._model = EffDetFCOS.from_predefined(
        #    compound_coeff, self.num_classes, fpn_levels=[3, 5, 7, 8, 9], strides=self.strides
        # )

        self._model = create_model(effdet_backbone, pretrained=True)
        del self._model.box_net
        del self._model.class_net

        fpn_filters = self._model.config.fpn_channels
        num_repeats = 4

        self.fcos = FCOSDecoder(fpn_filters, self.num_classes, num_repeats, self.strides)

        self.threshold = float(threshold) if threshold is not None else 0.05
        self.nms_threshold = float(nms_threshold) if nms_threshold is not None else 0.1
        self._criterion = FCOSLoss(self.strides, self.num_classes, radius=1, interest_range=self.sizes)

        # metrics: average precision at IoU 0.25 / 0.50 / 0.75
        metrics = MetricCollection({
            f"ap{thresh}": BoxAveragePrecision(iou_threshold=thresh / 100, compute_on_step=True)
            for thresh in (25, 50, 75)
        })
        self.val_metrics = metrics.clone(prefix="val/")
        self.test_metrics = metrics.clone(prefix="test/")

        # freeze backbone
        for param in self._model.backbone.parameters():
            param.requires_grad = False
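
A quick sanity check that the freeze took effect: every backbone parameter should report requires_grad == False while the FCOS head stays trainable. Illustrative only; the class name is an assumption.

    model = EffDetFCOSModule(num_classes=2)  # hypothetical class name
    assert all(not p.requires_grad for p in model._model.backbone.parameters())
    assert any(p.requires_grad for p in model.fcos.parameters())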