Example #1
def to_dict(self):
    # Serialize the attrs instance, then fold the (field, key_path) pairs in
    # `to_fields` into a nested dict with update_in (see Example #4 for the
    # full method that builds `to_fields`).
    d = asdict(self)
    return reduce(
        lambda acc, f: update_in(acc, f[1], lambda _: d[f[0].name]),
        to_fields,
        {},
    )
Example #2
def or_bitfields(bitfields: List[Bitfield]) -> Bitfield:
    # Transpose so each slice holds the byte at one position across all bitfields.
    byte_slices = zip_longest(*bitfields)

    if len(set(len(b) for b in bitfields)) != 1:
        raise ValueError("The bitfield sizes are different")

    # reduce(operator.or_) is curried: it ORs each byte slice when mapped over it.
    return Bitfield(bytes(map(reduce(operator.or_), byte_slices)))
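For context, a minimal standalone sketch of the folding step above, assuming `reduce` is the curried toolz version; plain `bytes` stand in for the `Bitfield` type here.

import operator
from itertools import zip_longest

from toolz.curried import reduce

# Three equal-length "bitfields" as raw bytes.
bitfields = [bytes([0b0001, 0b0000]), bytes([0b0100, 0b0010]), bytes([0b0000, 0b1000])]

# zip_longest(*bitfields) yields one tuple per byte position; the curried
# reduce(operator.or_) folds each tuple into a single OR-ed byte.
byte_slices = zip_longest(*bitfields)
combined = bytes(map(reduce(operator.or_), byte_slices))

assert combined == bytes([0b0101, 0b1010])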
Example #3
def createIndexConverter(
        self, indexMap: IndexMap, inShape: Shape, outShape: Shape, boole: bool = False
) -> IndexConverter:
    if indexMap == {}:
        return NullIndexConverter()
    if boole and set(indexMap.values()) == {(0,)}:
        return UnitIndexConverter(len(self.outAst.shape), 1)
    try:
        # Fold over the input dimensions, accumulating one indice converter
        # per dimension; the seed entry is sliced off afterwards.
        indiceConverters = list(reduce(
            lambda ls, i: ls + [self.getIndiceConverter(valmap(lambda v, i=i: v[i], indexMap), ls, boole)],
            range(len(inShape)), [[(0, 0, 0, 1)] * len(outShape)]))[1:]
        if not indiceConverters:
            return ZeroIndexConverter()
        genCoeffs = lambda func, func0=identity: [func0([func(c) for c in l]) for l in indiceConverters]
        linearCoeffs = genCoeffs(nth(0))
        bs = genCoeffs(nth(1), sum)
        indexModCoeffs = genCoeffs(nth(2))
        indexModValues = genCoeffs(nth(3))
        if boole:
            return LinearIndiceConverter(linearCoeffs[0], bs[0], inShape[0], indexModCoeffs[0], indexModValues[0])
        else:
            return LinearIndexConverter(linearCoeffs, bs, inShape, indexModCoeffs, indexModValues)
    except LinearError:
        if all(s != -1 for s in outShape):
            return FixIndexConverter(indexMap)
        else:
            raise TransformError("Failed to find a transform.")
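Example #3 uses a fully applied reduce as an accumulating scan: every step appends a converter computed from the list built so far, and the seed entry is sliced off with `[1:]`. A stripped-down sketch of that shape (the arithmetic is invented purely for illustration; a fully applied toolz reduce behaves like functools.reduce):

from functools import reduce

# Each step appends a value derived from the accumulated list so far;
# dropping the seed leaves one entry per input index, as in Example #3.
states = reduce(lambda ls, i: ls + [ls[-1] + i], range(4), [0])[1:]

assert states == [0, 1, 3, 6]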
Example #4
def to_dict(self,
            convert_values: bool = False) -> MutableMapping[str, Any]:
    # Pair each attrs field with the nested key path declared in its metadata,
    # keeping only fields that actually declare one.
    to_fields = curried.pipe(
        fields(self.__class__),
        curried.map(lambda a: (a, curried.get_in([to_key], a.metadata))),
        curried.filter(lambda f: f[1]),
        list,
    )

    if convert_values:
        d = asdict(self)
    else:
        d = {
            a.name: getattr(self, a.name)
            for a in fields(self.__class__)
        }

    if not to_fields:
        return d

    # Fold the annotated fields into a nested dict, one key path at a time.
    return curried.reduce(
        lambda acc, f: curried.update_in(acc, f[1], lambda _: d[f[0].name]),
        to_fields,
        {},
    )
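A self-contained sketch of the pattern in Examples #1 and #4, with a hypothetical attrs class and `to_key` marker; note that, as in the snippet above, fields without a key path in their metadata are dropped because the fold starts from an empty dict.

from typing import Any, MutableMapping

import attr
from attr import asdict, fields
from toolz import curried

to_key = 'to'  # hypothetical metadata key naming the target key path

@attr.s(auto_attribs=True)
class User:
    name: str = ''
    city: str = attr.ib(default='', metadata={to_key: ['address', 'city']})

def to_dict(obj) -> MutableMapping[str, Any]:
    to_fields = curried.pipe(
        fields(obj.__class__),
        curried.map(lambda a: (a, curried.get_in([to_key], a.metadata))),
        curried.filter(lambda f: f[1]),
        list,
    )
    d = asdict(obj)
    if not to_fields:
        return d
    # update_in writes d[field] at the nested key path taken from the metadata.
    return curried.reduce(
        lambda acc, f: curried.update_in(acc, f[1], lambda _: d[f[0].name]),
        to_fields,
        {},
    )

assert to_dict(User(name='a', city='Paris')) == {'address': {'city': 'Paris'}}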
Example #5
def validate(predicts, dataset, batch_size):
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        pin_memory=True,
        shuffle=False,
    )
    y_preds = np.array(predicts).mean(axis=0).argmax(axis=1)
    # Flatten the per-batch label lists with a curried reduce, then turn the
    # result into a single array of ground-truth labels.
    y_trues = pipe(
        loader,
        map(lambda x: x['label'].cpu().detach().tolist()),
        reduce(lambda x, y: x + y),
        np.array,
    )

    score = iou(
        y_preds,
        y_trues,
    )
    tn, fp, fn, tp = confusion_matrix(y_trues, y_preds).ravel()
    return {
        'TPR': tp / (tp + fn),
        'FNR': fn / (tp + fn),
        'FPR': fp / (fp + tn),
        'acc': (tp + tn) / (tp + tn + fp + fn),
        'pre': tp / (tp + fp),
        'iou': tp / (fn + tp + fp),
    }
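The `y_trues` pipeline above flattens per-batch label lists by folding list concatenation. A minimal sketch with plain dicts standing in for the DataLoader samples (the batch structure is assumed):

import numpy as np
from toolz.curried import map, pipe, reduce

# Stand-ins for per-batch {'label': ...} samples yielded by the loader.
batches = [{'label': [0, 1]}, {'label': [1, 1]}, {'label': [0, 0]}]

y_trues = pipe(
    batches,
    map(lambda x: x['label']),       # one label list per batch
    reduce(lambda x, y: x + y),      # fold list concatenation
    np.array,
)

assert y_trues.tolist() == [0, 1, 1, 1, 0, 0]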
Example #6
def predict(
    model_dirs,
    dataset,
    out_path,
    batch_size=512,
):

    device = torch.device("cuda")
    models = pipe(model_dirs, map(lambda x: os.path.join(x, '*.pt')),
                  map(glob.glob), concat, map(torch.load),
                  map(lambda x: x.eval().to(device)), list)
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=False,
        pin_memory=True,
    )

    rows = []

    y_preds = []
    y_ids = []
    with torch.no_grad():
        for sample in loader:
            ids = sample['id']
            palsar_x = sample['palsar'].to(device)

            normal_outputs = pipe(
                models,
                map(lambda x: x(palsar_x)[0]),
                list,
            )
            output = pipe(
                [*normal_outputs],
                map(lambda x: x.softmax(dim=1)),
                reduce(lambda x, y: (x + y) / 2),
                lambda x: x.argmax(dim=1),
            )
            y_ids += ids
            y_preds += output.cpu().detach().tolist()

        rows = pipe(zip(y_ids, y_preds),
                    map(lambda x: {
                        'id': x[0],
                        'label': x[1]
                    }), list)
        df = pd.DataFrame(rows)
        df.to_csv(out_path, sep='\t', header=False, index=False)
        return out_path
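The ensemble averaging in Examples #6 and #7 is a pairwise running fold rather than an arithmetic mean over all models: for exactly two models it equals the mean, but with more models earlier outputs are down-weighted. A small numeric sketch of what the curried reduce computes, with plain floats standing in for the softmax tensors:

from toolz.curried import reduce

# Stand-ins for one class's softmax score from three models.
outputs = [0.2, 0.4, 0.9]

# Pairwise fold: ((0.2 + 0.4) / 2 + 0.9) / 2 = 0.6, not the plain mean 0.5.
pairwise = reduce(lambda x, y: (x + y) / 2)(outputs)

assert abs(pairwise - 0.6) < 1e-9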
Example #7
def validate(
    models,
    loader,
):
    models = pipe(models, map(lambda x: x.eval()), list)

    device = torch.device("cuda")
    y_preds = []
    y_trues = []
    sum_loss = 0
    batch_len = 0
    for sample in loader:
        with torch.no_grad():
            palsar_x = sample['palsar'].to(device)
            landsat_y = sample['landsat'].to(device)
            labels = sample['label'].to(device)
            label_preds = pipe(models,
                               map(lambda x: x(palsar_x)[0].softmax(dim=1)),
                               reduce(lambda x, y: (x + y) / 2))
            y_preds += label_preds.argmax(dim=1).cpu().detach().tolist()
            y_trues += labels.cpu().detach().tolist()
            batch_len += 1

    score = iou(
        y_preds,
        y_trues,
    )

    tn, fp, fn, tp = confusion_matrix(y_trues, y_preds).ravel()
    return {
        'TPR': tp / (tp + fn),
        'FNR': fn / (tp + fn),
        'FPR': fp / (fp + tn),
        'acc': (tp + tn) / (tp + tn + fp + fn),
        'pre': tp / (tp + fp),
        'iou': tp / (fn + tp + fp),
    }
Example #8
def test_reduce():
    assert reduce(add)((1, 2, 3)) == 6
Example #9
def test_reduce():
    assert reduce(add)((1, 2, 3)) == 6
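For reference, the two tests above exercise the curried form of reduce: `reduce(add)` is partially applied and folds whichever sequence it is later given. A minimal sketch, assuming `reduce` comes from `toolz.curried` and `add` from `operator`:

from operator import add

from toolz.curried import reduce

fold_sum = reduce(add)               # still waiting for the sequence
assert fold_sum((1, 2, 3)) == 6      # equivalent to functools.reduce(add, (1, 2, 3))
assert reduce(add, (1, 2, 3)) == 6   # the fully applied form works as well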
Example #10
def predict(
    model_paths,
    dataset,
    log_dir,
    hdf5_path,
    log_interval=100,
):

    dataset.df.sort_index(inplace=True)
    loader = DataLoader(dataset, batch_size=1, shuffle=False)
    device = torch.device('cpu')
    if torch.cuda.is_available():
        device = torch.device("cuda")

    models = pipe(model_paths, map(torch.load), map(lambda x: x.to(device)),
                  list)

    for m in models:
        m.eval()
    df = pd.DataFrame()

    sample_ids = []
    rle_masks = []
    scores = []

    n_itr = 0
    images = []
    ids = []

    with torch.no_grad():
        for sample in loader:
            sample_id = sample['id'][0]
            image = sample['image'].to(device)
            ids.append(str(sample_id))

            normal_outputs = pipe(
                models,
                map(lambda x: x(image)[0]),
                list,
            )
            images.append(normal_outputs[0][0, 1, :, :])

            flipped_outputs = pipe(
                models,
                map(lambda x: x(image.flip([3]))[0].flip([3])),
                list,
            )
            output = pipe([*normal_outputs, *flipped_outputs],
                          map(lambda x: x.softmax(dim=1)),
                          reduce(lambda x, y: x + y / 2),
                          lambda x: F.softmax(x, dim=1),
                          lambda x: x.argmax(dim=1).float())

            sample_ids.append(sample_id)
            rle_masks.append(rl_enc(output.cpu().numpy().reshape(101, 101)))

            log_images = [image[0], output]
            if 'mask' in sample.keys():
                mask = sample['mask'].to(device)[0]
                log_images.append(mask)
                score = iou(output.cpu().numpy(), mask.cpu().numpy())
                scores.append(score)

            if n_itr % log_interval == 0:
                with SummaryWriter(log_dir) as w:
                    w.add_image(f"predict",
                                vutils.make_grid(log_images, scale_each=True),
                                n_itr)

            n_itr += 1

        df['id'] = sample_ids
        df['rle_mask'] = rle_masks
        if len(scores) > 0:
            df['score'] = scores
            score = df['score'].mean()
            with SummaryWriter(log_dir) as w:
                w.add_text('score', f'score: {score}')
        df = df.set_index('id')
        ids = pipe(ids, map(lambda x: x.encode('ascii', 'ignore')), list)

        images = torch.stack(images).cpu().numpy().astype(np.float16)
        with h5py.File(hdf5_path, 'w') as f:
            f.create_dataset('id', data=ids)
            f.create_dataset('mask', data=images)
        return df
Example #11
def train_multi(
    model_dir,
    sets,
    model_type,
    model_kwargs,
    epochs,
    batch_size,
    log_dir,
    landsat_weight,
    lr,
    num_ensamble,
    neg_scale,
):

    model_dir = Path(model_dir)
    model_dir.mkdir()

    device = torch.device("cuda")
    Model = getattr(mdl, model_type)

    models = pipe(range(num_ensamble),
                  map(lambda _: Model(**model_kwargs).to(device).train()),
                  list)

    optimizers = pipe(
        models,
        map(lambda x: optim.Adam(x.parameters(), amsgrad=True, lr=lr)),
        list,
    )

    model_paths = pipe(
        range(num_ensamble),
        map(lambda x: model_dir / f'{x}.pt'),
        list,
    )
    check_model_paths = pipe(
        range(num_ensamble),
        map(lambda x: model_dir / f'{x}_check.pt'),
        list,
    )

    pos_set = pipe(range(neg_scale), map(lambda _: sets['train_pos']),
                   reduce(lambda x, y: x + y))
    train_pos_loader = DataLoader(
        pos_set,
        batch_size=batch_size // 2,
        shuffle=True,
        pin_memory=True,
    )
    train_neg_loaders = pipe(
        range(num_ensamble),
        map(lambda x: DataLoader(
            sets['train_neg'],
            batch_size=batch_size // 2,
            pin_memory=True,
            sampler=ChunkSampler(
                epoch_size=len(pos_set),
                len_indices=len(sets['train_neg']),
                shuffle=True,
                start_at=x,
            ),
        )),
        list,
    )
    val_set = sets['val_neg'] + sets['val_pos']

    val_loader = DataLoader(
        val_set,
        batch_size=batch_size,
        pin_memory=True,
        shuffle=False,
    )

    batch_len = len(train_pos_loader)

    max_val_score = 0
    max_iou_train = 0
    min_vial_loss = 0
    mean_train_pos_loss = 0
    mean_train_neg_loss = 0
    mean_val_pos_loss = 0
    mean_val_neg_loss = 0
    min_train_pos_loss = 1
    for epoch in range(epochs):
        sum_train_loss = 0
        sum_val_loss = 0
        sum_train_score = 0
        sum_val_score = 0
        val_probs = []
        val_labels = []
        train_probs = []
        train_labels = []

        traineds = pipe(
            zip(models, optimizers, train_neg_loaders),
            map(lambda x: train_epoch(
                model=x[0],
                optimizer=x[1],
                neg_loader=x[2],
                pos_loader=train_pos_loader,
                criterion=criterion(landsat_weight),
                device=device,
            )),
            list,
        )
        train_loss = pipe(traineds, map(lambda x: x[1]), list, np.mean)
        models = pipe(
            traineds,
            map(lambda x: x[0]),
            list,
        )

        metrics = validate(
            models=models,
            loader=val_loader,
        )

        with SummaryWriter(log_dir) as w:
            w.add_scalars('loss', {
                'train': train_loss,
            }, epoch)
            w.add_scalars('score', {**metrics}, epoch)

            if max_val_score <= metrics['iou']:
                max_val_score = metrics['iou']
                w.add_text('iou', f"val: {metrics['iou']}, epoch: {epoch}",
                           epoch)
                pipe(zip(models, model_paths),
                     map(lambda x: torch.save(x[0], x[1])), list)

    return model_dir
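The `pos_set` construction in Example #11 oversamples the positive split by folding `+` over `neg_scale` copies; for torch datasets, `+` builds a `ConcatDataset`. A minimal sketch with a toy dataset class (the class is illustrative, not part of the example):

from toolz.curried import map, pipe, reduce
from torch.utils.data import Dataset

class ToyDataset(Dataset):
    def __init__(self, items):
        self.items = items

    def __len__(self):
        return len(self.items)

    def __getitem__(self, idx):
        return self.items[idx]

neg_scale = 3
train_pos = ToyDataset([0, 1, 2])

# Fold Dataset.__add__ to repeat the positive set neg_scale times (ConcatDataset).
pos_set = pipe(range(neg_scale), map(lambda _: train_pos),
               reduce(lambda x, y: x + y))

assert len(pos_set) == neg_scale * len(train_pos)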