def __init__(self, pre=True):
        super().__init__()
        self.encoder = torchvision.models.inception_v3(pretrained=pre)

        conv1 = BasicConv2d(4, 32, kernel_size=3, stride=2)
        if pre:
            w = self.encoder.Conv2d_1a_3x3.conv.weight
            conv1.conv.weight = nn.Parameter(torch.cat((w, 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :])), dim=1))
        self.encoder.Conv2d_1a_3x3 = conv1
        self.encoder.AuxLogits = InceptionAux(768, num_class())
        self.encoder.fc = nn.Linear(2048, num_class())
        def __init__(self):
            super().__init__()
            num_feat = 2048
            self.dropch = torch.nn.Dropout2d(0.5)
            self.net = torch.nn.Sequential(
                torch.nn.Linear(num_class() * len(models), num_feat),
                torch.nn.LeakyReLU(),
                torch.nn.Dropout(0.2),
                torch.nn.Linear(num_feat, num_class())
                # torch.nn.Sigmoid()
            )
            self.w = torch.zeros((1, len(models), num_class()), requires_grad=True, device="cuda")
            self.w_model = torch.zeros((1, len(models), 1), requires_grad=True, device="cuda")

            self.net2 = torch.nn.Sequential(
                torch.nn.Linear(3 * num_class(), num_class()),
            )
            self.w2 = torch.zeros((1, 3, 1), requires_grad=True, device="cuda")
    def __init__(self, pre=True):
        super().__init__()
        self.encoder = pretrainedmodels.__dict__['inceptionv4'](num_classes=1000, pretrained='imagenet')
        conv1 = BasicConv2d(4, 32, kernel_size=3, stride=2)
        if pre:
            w = self.encoder.features[0].conv.weight
            conv1.conv.weight = nn.Parameter(torch.cat((w, 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :])), dim=1))
        self.encoder.features[0].conv = conv1
        self.last_linear = nn.Linear(1536, num_class())
    def __init__(self, pre=True):
        super().__init__()
        self.encoder = pretrainedmodels.__dict__['senet154'](
            num_classes=1000, pretrained='imagenet')
        conv = nn.Conv2d(4, 64, kernel_size=3, stride=2, bias=False)
        if pre:
            w = self.encoder.layer0[0].weight
            conv.weight = nn.Parameter(
                torch.cat((w, 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :])), dim=1))
        self.encoder.layer0[0] = conv
        self.last_linear = nn.Linear(2048, num_class())
    def __init__(self, pre=True):
        super().__init__()
        self.encoder = pretrainedmodels.__dict__['nasnetalarge'](
            num_classes=1000, pretrained='imagenet')
        conv0 = nn.Conv2d(4, 96, kernel_size=3, stride=2, bias=False)
        if pre:
            w = self.encoder.conv0.conv.weight
            conv0.weight = nn.Parameter(
                torch.cat((w, 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :])), dim=1))
        self.encoder.conv0.conv = conv0
        self.last_linear = nn.Linear(4032, num_class())
        self.use_relu = True
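# The same 4-channel adaptation appears in each of the constructors above: the
# pretrained RGB first-conv weights are kept, and a fourth input channel is
# initialized as the mean of the red and blue kernels. A standalone sketch of
# that step follows; `extend_rgb_weight_to_4ch` is an illustrative helper name,
# not part of the original code.
import torch
import torch.nn as nn

def extend_rgb_weight_to_4ch(w):
    # w: pretrained conv weight of shape (out_channels, 3, kH, kW)
    extra_ch = 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :])    # mean of R and B kernels
    return nn.Parameter(torch.cat((w, extra_ch), dim=1))  # (out_channels, 4, kH, kW)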
    def get_ohs(id_list, preds_list):
        assert len(id_list) == len(preds_list)
        ohs = []
        for uuid, x in zip(id_list, preds_list):
            try:
                lb = [int(i) for i in dataset.loc[uuid]['Target'].split()]
            except KeyError:
                lb = [int(i) for i in dataset_hpa.loc[uuid]['Target'].split()]

            lb_onehot = np.eye(num_class(), dtype=float)[lb].sum(axis=0)
            ohs.append(lb_onehot)
        ohs = np.array(ohs, dtype=float)
        return ohs
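    # For example, a Target string of "0 5" becomes a multi-hot vector with ones at
    # indices 0 and 5: np.eye(num_class(), dtype=float)[[0, 5]].sum(axis=0).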
    def __init__(self, pre=True):
        super().__init__()
        encoder = torchvision.models.vgg16_bn(pretrained=pre)

        self.conv1 = nn.Conv2d(4, 64, kernel_size=3, padding=1)
        if pre:
            w = encoder.features[0].weight
            self.conv1.weight = nn.Parameter(
                torch.cat((w, 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :])), dim=1))

        self.layers = encoder.features[1:]
        self.fc = encoder.classifier[:-1]

        self.out = nn.Linear(4096, num_class())
    def _replace(self, pre, feat_size=1024):
        conv1 = nn.Conv2d(4,
                          64,
                          kernel_size=7,
                          stride=2,
                          padding=3,
                          bias=False)
        if pre:
            w = self.encoder.features[0].weight
            conv1.weight = nn.Parameter(
                torch.cat((w, 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :])), dim=1))
        self.encoder.features[0] = conv1
        self.encoder.classifier = nn.Linear(feat_size, num_class())
        self.dropout = nn.Dropout2d(C.get()['dropout'])
    def set(self, encoder, feat_size, pre):
        self.conv1 = nn.Conv2d(4,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        if pre:
            w = encoder.conv1.weight
            self.conv1.weight = nn.Parameter(
                torch.cat((w, 0.5 * (w[:, :1, :, :] + w[:, 2:, :, :])), dim=1))

        self.bn1 = encoder.bn1
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = encoder.layer1
        self.layer2 = encoder.layer2
        self.layer3 = encoder.layer3
        self.layer4 = encoder.layer4
        self.dropout = nn.Dropout2d(C.get()['dropout'])

        self.out = nn.Conv2d(feat_size, num_class(), kernel_size=3, padding=1)
        def __init__(self):
            super().__init__()
            num_feat = 1024
            self.dropch = torch.nn.Dropout2d(0.5)
            self.net = torch.nn.Sequential(
                torch.nn.Dropout(0.5),
                torch.nn.Linear(input_size, num_feat),
                torch.nn.BatchNorm1d(num_feat),
                torch.nn.ReLU6(),
            )

            self.net2 = torch.nn.Sequential(
                torch.nn.Dropout(0.5),
                torch.nn.Linear(num_feat, num_feat),
                torch.nn.BatchNorm1d(num_feat),
                torch.nn.ReLU6(),
                torch.nn.Linear(num_feat, num_feat),
                torch.nn.BatchNorm1d(num_feat),
                torch.nn.ReLU6(),

                # torch.nn.Sigmoid()
            )

            self.out = torch.nn.Linear(num_feat, num_class())
        with open(valid_pkl_s % model_name, 'rb') as f:
            d_valid = pickle.load(f)
        with open(test_pkl_s % model_name, 'rb') as f:
            d_test = pickle.load(f)
        valid_preds.append(
            np.expand_dims(np.concatenate(d_valid[key], axis=0), 1))
        test_preds.append(
            np.expand_dims(np.concatenate(d_test[key], axis=0), 1))

    valid_merged = np.concatenate(valid_preds, axis=1)
    test_merged = np.concatenate(test_preds, axis=1)

    valid_ohs = []
    for uuid, val_x in zip(ids_cvalid, valid_merged):
        lb = [int(x) for x in dataset.loc[uuid]['Target'].split()]
        lb_onehot = np.eye(num_class(), dtype=float)[lb].sum(axis=0)
        valid_ohs.append(lb_onehot)
    valid_ohs = np.array(valid_ohs, dtype=float)

    mskf = MultilabelStratifiedKFold(n_splits=5, shuffle=True, random_state=200)
    train_index, test_index = next(
        mskf.split(list(range(len(valid_ohs))), valid_ohs))
    valid_t = valid_merged[train_index]
    valid_v = valid_merged[test_index]
    valid_ohs_t = valid_ohs[train_index]
    valid_ohs_v = valid_ohs[test_index]

    # train w for linear combination
    bs = 512
    w = torch.zeros((1, len(models), num_class()), requires_grad=True)
    generator = tqdm(range(200000))
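    # The excerpt stops right after creating `w` and `generator`; the rest of the
    # optimization loop is not shown. A minimal sketch of a plausible continuation,
    # assuming minibatch SGD on `w` with the `f1_loss` helper used elsewhere in this
    # file (the softmax over the model axis and the optimizer settings are assumptions):
    inp_t, lb_t = torch.Tensor(valid_t), torch.Tensor(valid_ohs_t)
    optimizer = torch.optim.SGD([w], lr=0.01, momentum=0.9)
    for _ in generator:
        idx = np.random.choice(len(inp_t), bs)
        inp, lb = inp_t[idx], lb_t[idx]
        # per-class weights over models, normalized so each class's weights sum to 1
        weighted = torch.sum(torch.softmax(w, dim=1) * inp, dim=1)
        loss = f1_loss(weighted, lb)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()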
    def one_hot(uuid):
        lb = [int(x) for x in labels.loc[uuid]['Target'].split()]
        lb = np.eye(num_class(), dtype=float)[lb].sum(axis=0)
        return lb
            epoch = checkpoint['epoch'] + 1

        for curr_epoch in range(epoch, C.get()['epoch'] + 1):
            # ----- train -----
            lr_schedule.step(metrics=prev_train_loss)
            model.train()
            train_result = run_epoch(model,
                                     d_train,
                                     optimizer=optimizer,
                                     title='train@%03d' % curr_epoch)
            prev_train_loss = train_result['loss']
            writer.add_scalar('loss/train', prev_train_loss, curr_epoch)
            writer.add_scalar('f1_best/train',
                              np.max(train_result['f1_scores']), curr_epoch)
            p, r = train_result['stats']
            for class_idx in range(num_class()):
                writer.add_scalar('precision_train/class%d' % class_idx,
                                  p[class_idx], curr_epoch)
                writer.add_scalar('recall_train/class%d' % class_idx,
                                  r[class_idx], curr_epoch)

            del train_result

            # ----- eval on valid/test -----
            model.eval()
            if curr_epoch % 10 == 0:
                valid_result = run_epoch(model,
                                         d_valid,
                                         title='valid',
                                         aug=False)
                valid_loss = valid_result['loss']
            d_valid = pickle.load(f)
        with open(test_pkl_s % model_name, 'rb') as f:
            d_test = pickle.load(f)
        valid_preds.append(np.expand_dims(np.concatenate(d_valid[key], axis=0), 1))
        test_preds.append(np.expand_dims(np.concatenate(d_test[key], axis=0), 1))

    valid_merged = np.concatenate(valid_preds, axis=1)
    test_merged = np.concatenate(test_preds, axis=1)

    split_idx = 1900
    valid_t = valid_merged[:split_idx]
    valid_v = valid_merged[split_idx:]
    valid_ohs = []
    for uuid, val_x in zip(ids_cvalid, valid_merged):
        lb = [int(x) for x in dataset.loc[uuid]['Target'].split()]
        lb_onehot = np.eye(num_class(), dtype=float)[lb].sum(axis=0)
        valid_ohs.append(lb_onehot)
    valid_ohs = np.array(valid_ohs, dtype=float)
    valid_ohs_t = valid_ohs[:split_idx]
    valid_ohs_v = valid_ohs[split_idx:]

    # train w for linear combination
    bs = 256
    w = torch.ones(len(models), requires_grad=True)
    generator = tqdm(range(100000))
    optimizer = torch.optim.SGD([w], lr=0.005, momentum=0.9, weight_decay=1e-5, nesterov=True)

    inp_v, lb_v = torch.Tensor(valid_v), torch.Tensor(valid_ohs_v)
    weighted_inp = torch.mean(inp_v, dim=1)
    loss = f1_loss(weighted_inp, lb_v)
    print('val loss=%.4f' % loss)
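    # Note: `weighted_inp` above is the plain equal-weight mean over the model axis,
    # i.e. the unweighted ensemble baseline; the loop that actually optimizes `w` is
    # truncated in this excerpt. A hedged sketch of how the learned per-model scalar
    # weights could then be applied (the softmax normalization is an assumption):
    w_norm = torch.softmax(w, dim=0).view(1, -1, 1)   # (1, len(models), 1)
    weighted_pred = torch.sum(inp_v * w_norm, dim=1)  # (N, num_class())
    print('weighted val loss=%.4f' % f1_loss(weighted_pred, lb_v))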
# param['objective'] = 'multi:softmax'
param['objective'] = 'multi:softprob'
# scale weight of positive examples
param['eta'] = 0.05
# param['max_depth'] = 1
param['silent'] = 1
param['nthread'] = 8
param['base_score'] = 0.5
param['booster'] = 'gblinear'
param['lambda'] = 0.00001
param['alpha'] = 0.00005
param['feature_selector'] = 'shuffle'
# param['n_estimators'] = 200
param['subsample'] = 0.8
param['colsample_bynode'] = 0.85
param['num_class'] = num_class()
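# A minimal usage sketch for the parameter dict above (illustrative only; the helper
# name and dummy arguments are not part of the original pipeline): with multi:softprob
# the labels must be integer class indices, and Booster.predict() then returns an
# (n_samples, num_class) probability matrix.
def _xgb_train_sketch(X, y, num_round=50):
    import xgboost as xgb
    dtrain = xgb.DMatrix(X, label=y)  # X: feature matrix, y: integer class labels
    booster = xgb.train(param, dtrain, num_boost_round=num_round)
    return booster.predict(dtrain)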

if __name__ == '__main__':
    ensemble_181228_m3 = [
        'densenet121_hpa',
        'densenet169_hpa',
        'inceptionv4_lr0.0001',
    ]

    ensemble_181228 = [
        'densenet121_hpa', 'densenet169_hpa', 'inceptionv4_lr0.0001',
        'inception_hpa', 'resnet50_hpa', 'resnet34_hpa'
    ]

    ensemble_181229 = [
        'inception_fold0', 'inception_fold1', 'inception_fold2',