def reduce_dim_chunk(encoder,
                     data_root,
                     wsis,
                     chunk,
                     batch_size=512,
                     device=0):

    data_loader = get_loaders(batch_size,
                              data_root,
                              chunk=chunk,
                              gpus=[device],
                              is_train=False,
                              with_name=True,
                              wsis=wsis)

    vecs = []
    paths = []
    for imgs, paths_ in tqdm(data_loader, desc="Reduced"):
        # 3 * 224 * 224 -> 256 * 1 * 1
        encoded = encoder(imgs.to(f"cuda:{device}"))
        encoded = encoded.cpu().detach().numpy().reshape(len(imgs), -1)
        vecs.extend(encoded)
        paths.extend(paths_)

    return vecs, paths
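# For context: `get_loaders` is assumed here to return a DataLoader yielding
# (images, paths) batches. A minimal, purely illustrative call of
# `reduce_dim_chunk` could look like this; the ResNet-18 trunk, the 1x1
# projection to 256 channels, and all paths/ids below are assumptions, not
# part of the original code.
import torch
import torch.nn as nn
from torchvision import models

backbone = models.resnet18(pretrained=False)
encoder = nn.Sequential(
    *list(backbone.children())[:-1],      # conv trunk + global average pool -> (N, 512, 1, 1)
    nn.Conv2d(512, 256, kernel_size=1),   # project to 256 channels -> (N, 256, 1, 1)
).to("cuda:0").eval()

with torch.no_grad():
    vecs, paths = reduce_dim_chunk(encoder,
                                   data_root="patches/",   # hypothetical patch directory
                                   wsis=["wsi_001"],       # hypothetical WSI identifiers
                                   chunk=0)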
Example #2
def main(args: Namespace) -> None:
    input_shape = (1, int(args.crop_size[0] * args.scale), int(args.crop_size[1] * args.scale))
    print('Input shape', 'x'.join(map(str, input_shape)), '[CxHxW]')

    set_global_seed(args.seed)

    train_loader, test_loader = get_loaders(args)
    loaders = OrderedDict([('train', train_loader), ('valid', test_loader)])

    model = m46(input_shape=input_shape, model_type=args.model_type)
    criterion = model.loss_function
    optimizer = torch.optim.Adam(lr=2e-5, betas=(0.5, 0.999), params=model.parameters())

    output_key = 'probs' if args.model_type == 'gender' else 'preds'
    runner = SupervisedRunner(input_key='image', output_key=output_key,
                              input_target_key='label',
                              device=args.device if is_available() else tdevice('cpu')
                              )
    callbacks = [clb.CriterionCallback(input_key='label', output_key=output_key)]
    if args.model_type == 'gender':
        callbacks += [clb.AccuracyCallback(prefix='accuracy', input_key='label',
                                           output_key=output_key, accuracy_args=[1],
                                           threshold=.5, num_classes=1, activation='none')]
    runner.train(
        model=model, criterion=criterion, optimizer=optimizer,
        scheduler=None, loaders=loaders, logdir=str(args.logdir),
        num_epochs=args.n_epoch, verbose=True, main_metric='loss',
        valid_loader='valid', callbacks=callbacks, minimize_metric=True,
        checkpoint_data={'params': model.init_params}
    )
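# The `input_key`/`input_target_key` arguments imply that the loaders yield
# dict-style batches. A minimal compatible Dataset might look like the sketch
# below; only the dict keys come from the example above, the class name and
# stored fields are assumptions.
from torch.utils.data import Dataset


class DictDataset(Dataset):
    def __init__(self, images, labels):
        self.images, self.labels = images, labels

    def __len__(self):
        return len(self.images)

    def __getitem__(self, idx):
        # keys must match SupervisedRunner(input_key='image', input_target_key='label')
        return {'image': self.images[idx], 'label': self.labels[idx]}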
def main():
    global args

    args = parse_args()

    path_to_load = Path(args.load).expanduser()
    if path_to_load.is_file():
        print(f"=> Loading checkpoint '{path_to_load}'")
        checkpoint = torch.load(
            path_to_load, map_location=lambda storage, loc: storage.cuda(0))
        print(f"=> Loaded checkpoint '{path_to_load}'")
    else:
        raise FileNotFoundError(
            f"=> No checkpoint found at '{path_to_load}'")

    args = checkpoint["args"]

    model = get_model(args)

    model.cuda()

    work_dir = path_to_load.parent

    state_dict = copy.deepcopy(checkpoint["state_dict"])
    for p in checkpoint["state_dict"]:
        if p.startswith("module."):
            state_dict[p[len("module."):]] = state_dict.pop(p)

    model.load_state_dict(state_dict)

    x = torch.rand(2, 3, 256 * 6, 256 * 6).cuda()
    model = model.eval()
    if "efficientnet" in args.network.name:
        model.set_swish(memory_efficient=False)

    with torch.no_grad():
        traced_model = torch.jit.trace(model, x)

    traced_model.save(str(work_dir / f"model_{path_to_load.stem}.pt"))
    del traced_model
    del model

    dev_loader = get_loaders(args, test_only=True)
    metrics = {"score": Score(), "acc": Accuracy()}

    model = (torch.jit.load(str(
        work_dir / f"model_{path_to_load.stem}.pt")).cuda().eval())

    with torch.no_grad():
        for metric in metrics.values():
            metric.clean()

        epoch_step(dev_loader,
                   "[ Validating dev.. ]",
                   model=model,
                   metrics=metrics)
        for key, metric in metrics.items():
            print(f"{key} dev {metric.evaluate()}")
Example #4
def main(cfg):
    torch.cuda.empty_cache()
    torch.manual_seed(cfg.param.seed)

    # Training settings
    cwd = Path(hydra.utils.get_original_cwd())
    wsi_dir = cwd/cfg.dir.wsi
    patch_dir = cwd/cfg.dir.patch
    ckpt = Checkpoint(
        cwd, cfg.gpus, cfg.dir.resume, cfg.dir.save_to, cfg.log.save_model)

    device = torch.device(
        f"cuda:{cfg.gpus[0]}" if cfg.gpus[0] != -1 else "cpu")

    model = build_model(gpus=cfg.gpus)
    optimizer = RAdam(model.parameters(), lr=cfg.param.lr)
    scheduler = StepLR(optimizer, step_size=1, gamma=cfg.param.gamma)
    if cfg.dir.resume:
        model, optimizer, scheduler = ckpt.load_state(
            model, optimizer, scheduler)
    criterion = get_loss_fn()

    train_wsi, test_wsi = split_wsi(
        wsi_dir, ckpt.save_to, cwd, ratio=cfg.data.ratio,
        projects=cfg.data.projects, strategies=cfg.data.strategies,
        limit=cfg.data.limit)
    for epoch in range(ckpt.start_epoch, cfg.param.epochs + 1):
        split_data(
            patch_dir, ckpt.save_to, train_wsi, test_wsi, cfg.data.chunks,
            epoch, cfg.dir.resume)
        for chunk in range(ckpt.start_chunk, cfg.data.chunks):
            data_loader = get_loaders(
                cfg.param.batch_size, ckpt.save_to, chunk, cfg.gpus)
            train(
                model, device, data_loader, optimizer, scheduler, criterion,
                epoch, cfg.param.epochs, chunk, cfg.data.chunks, ckpt)

        ckpt.start_chunk = 0
        scheduler.step()
        ckpt.save(model, optimizer, scheduler, epoch, chunk, loss=False)

    ckpt.close_writer()
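# This function uses `hydra.utils.get_original_cwd()`, so it is presumably run
# through a Hydra entry point. A typical wrapper is sketched below; the
# config_path and config_name values are assumptions.
import hydra
from omegaconf import DictConfig


@hydra.main(config_path="config", config_name="config")
def main(cfg: DictConfig) -> None:
    ...  # training body as in the example above


if __name__ == "__main__":
    main()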
Example #5
def main(args):
    # Set up dataset
    train_loader, test_loader = get_loaders(args.batch_size)

    model = Siamese().cuda()
    opt = optim.SGD(model.parameters(),
                    lr=args.lr,
                    momentum=0.9)
    scheduler = optim.lr_scheduler.MultiStepLR(opt, [5, 10], 0.1)
    cudnn.benchmark = True

    print("\t".join(["Epoch", "TrainLoss", "TestLoss"]))
    for e in range(args.epochs):
        scheduler.step()
        model.train()
        train_loss, train_n = 0, 0
        for x1, x2, y in tqdm(train_loader, total=len(train_loader), leave=False):
            x1, x2 = Variable(x1.cuda()), Variable(x2.cuda())
            y = Variable(y.float().cuda()).view(y.size(0), 1)

            o1, o2 = model(x1), model(x2)
            loss = contractive_loss(o1, o2, y)
            opt.zero_grad()
            loss.backward()
            opt.step()
            train_loss += loss.data[0] * y.size(0)
            train_n += y.size(0)

        model.eval()
        test_loss, test_n = 0, 0
        for x1, x2, y in tqdm(test_loader, total=len(test_loader), leave=False):
            x1, x2 = Variable(x1.cuda()), Variable(x2.cuda())
            y = Variable(y.float().cuda()).view(y.size(0), 1)

            o1, o2 = model(x1), model(x2)
            loss = contractive_loss(o1, o2, y)
            test_loss += loss.data[0] * y.size(0)
            test_n += y.size(0)
        if (e + 1) % 5 == 0:
            torch.save(model, "./checkpoint/{}.tar".format(e+1))
        print("{}\t{:.6f}\t{:.6f}".format(e, train_loss / train_n, test_loss / test_n))
    runner = SupervisedRunner(device='cuda',
                              input_key="image",
                              input_target_key="mask")
    scheduler = OneCycleLR(optimizer,
                           max_lr=0.0016,
                           steps_per_epoch=1,
                           epochs=num_epochs)
    # scheduler = OneCycleLRWithWarmup(
    #     optimizer,
    #     num_steps=num_epochs,
    #     lr_range=(0.0016, 0.0000001),
    #     init_lr = learning_rate,
    #     warmup_steps=15
    # )
    loaders = get_loaders(preprocessing_fn, batch_size=8)

    callbacks = [
        # Each criterion is calculated separately.
        CriterionCallback(input_key="mask",
                          prefix="loss_dice",
                          criterion_key="dice"),
        CriterionCallback(input_key="mask",
                          prefix="loss_iou",
                          criterion_key="iou"),
        CriterionCallback(input_key="mask",
                          prefix="loss_bce",
                          criterion_key="bce"),
        ClasswiseIouCallback(input_key="mask",
                             prefix='clswise_iou',
                             classes=CLASSES.keys()),
Example #7
def main(args):
    # CUDA
    if args.use_cuda:
        use_cuda = torch.cuda.is_available()
        device = torch.device("cuda:0" if use_cuda else "cpu")
    else:
        use_cuda = False
        device = "cpu"
    args.device = device
    print("Using CUDA: ", use_cuda)

    # Random seed
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    np.random.seed(args.seed)

    # Episodic memory system: Pre-train, test, analyze (hub retrieval)
    meta = True  # meta-learning for episodic memory system
    episodic_system = EpisodicSystem().to(device)
    data = get_loaders(batch_size=args.bs_episodic,
                       meta=meta,
                       use_images=False,
                       image_dir=args.image_dir,
                       n_episodes=args.N_episodic)
    train_data, train_loader, test_data, test_loader = data
    episodic_train_losses = train(meta, episodic_system, train_loader, args)
    episodic_train_acc = test(meta, episodic_system, train_loader, args)
    episodic_test_acc = test(meta, episodic_system, test_loader, args)
    episodic_analysis = analyze_episodic(episodic_system, test_data, args)
    print("Episodic system training accuracy:", episodic_train_acc)
    print("Episodic system testing accuracy:", episodic_test_acc)
    episodic_results = {
        'loss': episodic_train_losses,
        'train_acc': episodic_train_acc,
        'test_acc': episodic_test_acc,
        'analysis': episodic_analysis
    }

    # Cortical system: Train, test, analyze (PCA, correlation)
    meta = False  # cortical learning is vanilla
    cortical_system = CorticalSystem(use_images=args.use_images).to(device)
    data = get_loaders(batch_size=args.bs_cortical,
                       meta=False,
                       use_images=args.use_images,
                       image_dir=args.image_dir,
                       n_episodes=None)
    train_data, train_loader, test_data, test_loader = data
    cortical_train_losses = train(meta, cortical_system, train_loader, args)
    cortical_train_acc = test(meta, cortical_system, train_loader, args)
    cortical_test_acc = test(meta, cortical_system, test_loader, args)
    cortical_analysis = analyze_cortical(cortical_system, test_data, args)
    print("Cortical system training accuracy:", cortical_train_acc)
    print("Cortical system testing accuracy:", cortical_test_acc)
    cortical_results = {
        'loss': cortical_train_losses,
        'train_acc': cortical_train_acc,
        'test_acc': cortical_test_acc,
        'analysis': cortical_analysis
    }

    # Save results
    results = {'Episodic': episodic_results, 'Cortical': cortical_results}
    with open(args.out_file, 'wb') as f:
        pickle.dump(results, f)
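# The saved pickle can be inspected afterwards, e.g. (the file name below
# stands in for whatever args.out_file was set to):
import pickle

with open("results.pkl", "rb") as f:   # hypothetical out_file value
    results = pickle.load(f)
print(results["Episodic"]["test_acc"], results["Cortical"]["test_acc"])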
Example #8
def test_score(beam_size,
               encoder,
               decoder,
               imgs_path,
               df_path,
               vocab,
               return_results=False):

    loader = get_loaders(1,
                         imgs_path,
                         df_path,
                         transform,
                         vocab,
                         test=True,
                         n_workers=8)
    vocab_size = len(vocab)

    references = list()
    hypotheses = list()

    # For each image
    for i, (image, caps, caplens, allcaps) in enumerate(
            tqdm(loader, desc="EVALUATING AT BEAM SIZE " + str(beam_size))):

        k = beam_size

        # Move to GPU device, if available
        image = image.to(device)  # (1, 3, 256, 256)

        # Encode
        encoder_out = encoder(
            image)  # (1, enc_image_size, enc_image_size, encoder_dim)
        enc_image_size = encoder_out.size(1)
        encoder_dim = encoder_out.size(3)

        # Flatten encoding
        encoder_out = encoder_out.view(
            1, -1, encoder_dim)  # (1, num_pixels, encoder_dim)
        num_pixels = encoder_out.size(1)

        # We'll treat the problem as having a batch size of k
        encoder_out = encoder_out.expand(
            k, num_pixels, encoder_dim)  # (k, num_pixels, encoder_dim)

        # Tensor to store top k previous words at each step; now they're just <start>
        k_prev_words = torch.LongTensor([[vocab.stoi['<sos>']]] * k).to(
            device)  # (k, 1)

        # Tensor to store top k sequences; now they're just <start>
        seqs = k_prev_words  # (k, 1)

        # Tensor to store top k sequences' scores; now they're just 0
        top_k_scores = torch.zeros(k, 1).to(device)  # (k, 1)

        # Lists to store completed sequences and scores
        complete_seqs = list()
        complete_seqs_scores = list()

        # Start decoding
        step = 1
        h, c = decoder.init_hidden_state(encoder_out)

        # s is a number less than or equal to k, because sequences are removed from this process once they hit <end>
        while True:

            embeddings = decoder.embedding(k_prev_words).squeeze(
                1)  # (s, embed_dim)

            awe, _ = decoder.attention(encoder_out,
                                       h)  # (s, encoder_dim), (s, num_pixels)

            gate = decoder.sigmoid(
                decoder.f_beta(h))  # gating scalar, (s, encoder_dim)
            awe = gate * awe

            h, c = decoder.decode_step(torch.cat([embeddings, awe], dim=1),
                                       (h, c))  # (s, decoder_dim)

            scores = decoder.fc(h)  # (s, vocab_size)
            scores = F.log_softmax(scores, dim=1)

            # Add
            scores = top_k_scores.expand_as(scores) + scores  # (s, vocab_size)

            # For the first step, all k points will have the same scores (since same k previous words, h, c)
            if step == 1:
                top_k_scores, top_k_words = scores[0].topk(k, 0)  # (s)
            else:
                # Unroll and find top scores, and their unrolled indices
                top_k_scores, top_k_words = scores.view(-1).topk(k, 0)  # (s)

            # Convert unrolled indices to actual indices of scores
            prev_word_inds = top_k_words // vocab_size  # (s)
            next_word_inds = top_k_words % vocab_size  # (s)

            #             print(top_k_scores, top_k_words)
            # Add new words to sequences
            seqs = torch.cat(
                [seqs[prev_word_inds],
                 next_word_inds.unsqueeze(1)], dim=1)  # (s, step+1)

            # Which sequences are incomplete (didn't reach <end>)?
            incomplete_inds = [
                ind for ind, next_word in enumerate(next_word_inds)
                if next_word != vocab.stoi['<eos>']
            ]
            complete_inds = list(
                set(range(len(next_word_inds))) - set(incomplete_inds))

            # Set aside complete sequences
            if len(complete_inds) > 0:
                complete_seqs.extend(seqs[complete_inds].tolist())
                complete_seqs_scores.extend(top_k_scores[complete_inds])
            k -= len(complete_inds)  # reduce beam length accordingly

            # Proceed with incomplete sequences
            if k == 0:
                break
            seqs = seqs[incomplete_inds]
            h = h[prev_word_inds[incomplete_inds]]
            c = c[prev_word_inds[incomplete_inds]]
            encoder_out = encoder_out[prev_word_inds[incomplete_inds]]
            top_k_scores = top_k_scores[incomplete_inds].unsqueeze(1)
            k_prev_words = next_word_inds[incomplete_inds].unsqueeze(1)

            # Break if things have been going on too long
            if step > 50:
                break
            step += 1

        if len(complete_seqs_scores) == 0:
            continue
        i = complete_seqs_scores.index(max(complete_seqs_scores))
        seq = complete_seqs[i]

        # References
        img_caps = allcaps[0].tolist()
        img_captions = list(
            map(
                lambda c: [
                    w for w in c if w not in {
                        vocab.stoi['<sos>'], vocab.stoi['<eos>'], vocab.stoi[
                            '<pad>']
                    }
                ], img_caps))  # remove <sos>, <eos> and <pad> tokens
        references.append(img_captions)

        # Hypotheses
        hypotheses.append([
            w for w in seq if w not in
            {vocab.stoi['<sos>'], vocab.stoi['<eos>'], vocab.stoi['<pad>']}
        ])

        assert len(references) == len(hypotheses)

    # Calculate BLEU-4 scores
#     bleu4 = corpus_bleu(references, hypotheses)
    b1, b2, b3, b4 = print_scores(references, hypotheses, vocab=vocab)

    if return_results:
        return references, hypotheses

    return b1, b2, b3, b4
def main():
    global args

    args = parse_args()
    print(args)

    init_dist(args)

    (train_loader, train_sampler), dev_loader = get_loaders(args)

    model = get_model(args)
    # model = model.to(memory_format=torch.channels_last)
    if args.dist.sync_bn:
        print("using apex synced BN")
        model = apex.parallel.convert_syncbn_model(model)

    model.cuda()

    criterion = get_criterion(args).cuda()

    opt = get_opt(args, model, criterion)

    scaler = torch.cuda.amp.GradScaler()

    # For distributed training, wrap the model with apex.parallel.DistributedDataParallel.
    # This must be done AFTER the call to amp.initialize.  If model = DDP(model) is called
    # before model, ... = amp.initialize(model, ...), the call to amp.initialize may alter
    # the types of model's parameters in a way that disrupts or destroys DDP's allreduce hooks.
    if args.dist.distributed:
        # By default, apex.parallel.DistributedDataParallel overlaps communication with
        # computation in the backward pass.
        # model = DDP(model)
        # delay_allreduce delays all communication to the end of the backward pass.
        model = apex.parallel.DistributedDataParallel(model,
                                                      delay_allreduce=True)

    best_score = 0
    metrics = {"score": Score(), "acc": Accuracy()}

    history = {k: {k_: [] for k_ in ["train", "dev"]} for k in ["loss"]}
    history.update({k: {v: [] for v in ["train", "dev"]} for k in metrics})

    work_dir = Path(args.general.work_dir) / f"{args.train.fold}"
    if args.dist.local_rank == 0 and not work_dir.exists():
        work_dir.mkdir(parents=True)

    # Optionally load model from a checkpoint
    if args.train.load:

        def _load():
            path_to_load = Path(args.train.load).expanduser()
            if path_to_load.is_file():
                print(f"=> loading model '{path_to_load}'")
                checkpoint = torch.load(
                    path_to_load,
                    map_location=lambda storage, loc: storage.cuda(args.dist.gpu),
                )
                model.load_state_dict(checkpoint["state_dict"])
                print(f"=> loaded model '{path_to_load}'")
            else:
                print(f"=> no model found at '{path_to_load}'")

        _load()

    scheduler = None
    if args.opt.scheduler == "cos":
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            opt, T_max=args.opt.T_max, eta_min=max(args.opt.lr * 1e-2, 1e-6))

    # Optionally resume from a checkpoint
    if args.train.resume:
        # Use a local scope to avoid dangling references
        def _resume():
            nonlocal history, best_score
            path_to_resume = Path(args.train.resume).expanduser()
            if path_to_resume.is_file():
                print(f"=> loading resume checkpoint '{path_to_resume}'")
                checkpoint = torch.load(
                    path_to_resume,
                    map_location=lambda storage, loc: storage.cuda(args.dist.gpu),
                )
                args.train.start_epoch = checkpoint["epoch"] + 1
                history = checkpoint["history"]
                best_score = max(history["score"]["dev"])
                model.load_state_dict(checkpoint["state_dict"])
                opt.load_state_dict(checkpoint["opt_state_dict"])
                scheduler.load_state_dict(checkpoint["sched_state_dict"])
                scaler.load_state_dict(checkpoint["scaler"])
                print(
                    f"=> resume from checkpoint '{path_to_resume}' (epoch {checkpoint['epoch']})"
                )
            else:
                print(f"=> no checkpoint found at '{path_to_resume}'")

        _resume()

    def saver(path):
        torch.save(
            {
                "epoch": epoch,
                "best_score": best_score,
                "history": history,
                "state_dict": model.state_dict(),
                "opt_state_dict": opt.state_dict(),
                "sched_state_dict": scheduler.state_dict() if scheduler is not None else None,
                "scaler": scaler.state_dict(),
                "args": args,
            },
            path,
        )

    for epoch in range(args.train.start_epoch, args.train.epochs + 1):

        if args.dist.distributed:
            train_sampler.set_epoch(epoch)

        for metric in metrics.values():
            metric.clean()

        loss = epoch_step(
            train_loader,
            f"[ Training {epoch}/{args.train.epochs}.. ]",
            model=model,
            criterion=criterion,
            metrics=metrics,
            scaler=scaler,
            opt=opt,
            batch_accum=args.train.batch_accum,
        )
        history["loss"]["train"].append(loss)
        for k, metric in metrics.items():
            history[k]["train"].append(metric.evaluate())

        if not args.train.ft:
            with torch.no_grad():
                for metric in metrics.values():
                    metric.clean()
                loss = epoch_step(
                    dev_loader,
                    f"[ Validating {epoch}/{args.train.epochs}.. ]",
                    model=model,
                    criterion=criterion,
                    metrics=metrics,
                    scaler=scaler,
                    opt=None,
                )
                history["loss"]["dev"].append(loss)
                for k, metric in metrics.items():
                    history[k]["dev"].append(metric.evaluate())
        else:
            history["loss"]["dev"].append(loss)
            for k, metric in metrics.items():
                history[k]["dev"].append(metric.evaluate())

        if scheduler is not None:
            scheduler.step()

        if args.dist.local_rank == 0:
            if history["score"]["dev"][-1] > best_score:
                best_score = history["score"]["dev"][-1]
                saver(work_dir / "best.pth")

            saver(work_dir / "last.pth")
            plot_hist(history, work_dir)

    return 0
Example #10
if config.args.task in config.augment_testing_lst:
    task_dir = os.path.join(task_dir, config.args.task, config.args.runname,
                            datetime.datetime.now().strftime('%Y%m%d_%H%M%S'))

os.makedirs(task_dir, exist_ok=True)
logger.set_logger(os.path.join(task_dir, 'log_' + str(config.args.runname) + '.log'))
configfile = os.path.join(task_dir, 'conf_' + str(config.args.runname) + '.config')

config.log.info(f'==> Created subdir for run at: {task_dir}')

# args validation
if config.args.task == config.fairness_training:
    if not config.args.ignore_weights and not config.args.ignore_sampling_weights \
            and not config.args.ignore_loss_weights:
        config.log.warning("Both sampling and loss use weights; it is advised to ignore weights for one of them")
if config.args.task == config.augment_testing_odin:
    config.args.batch_size = 1

# save configuration parameters
with open(configfile, 'w') as f:
    for arg in vars(config.args):
        f.write('{}: {}\n'.format(arg, getattr(config.args, arg)))

config.log.info(f'Running task {config.args.task}...')
config.log.info('==> Loading dataset...')
train_loader, eval_loader, test_loader = dataset.get_loaders()

# run the task, usually training a model
task.run_task(run_dir, task_dir, train_loader, eval_loader, test_loader)
Example #11
def main():
    args = get_args()
    model = SiameseSVMNet()
    if args.cuda:
        model = model.cuda()
    criterion = SVMLoss(args.C)
    optimizer = torch.optim.Adam(model.parameters())
    train_loader, validate_loader, test_data = get_loaders(args)

    def training(epoch):
        print('Epoch', epoch + 1)
        model.train()
        for batch_idx, (x0, x1, label) in enumerate(train_loader):
            if args.cuda:
                x0, x1, label = x0.cuda(), x1.cuda(), label.cuda()
            x0, x1, label = Variable(x0), Variable(x1), Variable(label)
            optimizer.zero_grad()
            output = model(x0, x1)
            loss = criterion(output, label)
            loss.backward()
            optimizer.step()
            if batch_idx % 100 == 0:
                print("\n Batch:  ", batch_idx, " / ", len(train_loader),
                      " --- Loss: ", loss.data[0])

    def validate():
        model.eval()
        acc = 0
        for batch_idx, (x0, x1, label) in enumerate(validate_loader):
            if args.cuda:
                x0, x1, label = x0.cuda(), x1.cuda(), label.cuda()
            x0, x1, label = Variable(x0), Variable(x1), Variable(label)
            output = model(x0, x1)
            acc += compute_accuracy(output, label).numpy()[0]

        acc = 100.0 * acc / len(validate_loader.dataset)
        print('\nValidation set: Accuracy: {}%\n'.format(acc))
        return acc

    def test(n, k):
        model.eval()
        clf = svm.SVC(C=args.C, kernel='linear')
        featuremodel = model.get_FeatureNet()
        if args.cuda:
            featuremodel = featuremodel.cuda()
        # choose classes
        acc = 0
        for i in range(args.test_number):
            random.seed(i)
            temp_ = list(range(1623 - 1200))
            random.shuffle(temp_)
            choosen_classes = temp_[:n]

            X_train = []
            y_train = []
            y_test = []
            X_test = []
            for cl in choosen_classes:
                for i in range(k):
                    X_train.append(test_data[cl * 20 + i][0])
                    if args.cuda:
                        X_train[-1] = X_train[-1].cuda()
                    y_train.append(cl)
                for i in range(k, 20):
                    X_test.append(test_data[cl * 20 + i][0])
                    if args.cuda:
                        X_test[-1] = X_test[-1].cuda()
                    y_test.append(cl)

            # calculate features
            train_features = []
            test_features = []
            for train_point in X_train:
                train_features.append(
                    featuremodel(Variable(train_point)).cpu().data.numpy())
            for test_point in X_test:
                test_features.append(
                    featuremodel(Variable(test_point)).cpu().data.numpy())

            # create features
            train_features = np.array(train_features)
            train_features = np.reshape(train_features,
                                        (train_features.shape[0], 4096))
            test_features = np.array(test_features)
            test_features = np.reshape(test_features,
                                       (test_features.shape[0], 4096))

            # predict with SVM
            clf.fit(train_features, y_train)
            pred = clf.predict(test_features)
            acc += accuracy_score(y_test, pred)

        acc = 100.0 * acc / args.test_number
        print('\nTest set: {} way {} shot Accuracy: {:.4f}%'.format(n, k, acc))
        return acc

    best_val = 0.0
    test_results = []
    for ep in range(args.epochs):
        training(ep)
        val = validate()
        if val > best_val:
            best_val = val
            test_results = []
            test_results.append(test(5, 1))
            test_results.append(test(5, 5))
            test_results.append(test(20, 1))
            test_results.append(test(20, 5))

    # Print best results
    print('\nResult: 5 way 1 shot Accuracy: {:.4f}%'.format(test_results[0]))
    print('\nResult: 5 way 5 shot Accuracy: {:.4f}%'.format(test_results[1]))
    print('\nResult: 20 way 1 shot Accuracy: {:.4f}%'.format(test_results[2]))
    print('\nResult: 20 way 5 shot Accuracy: {:.4f}%\n'.format(test_results[3]))
Example #12
def main():
    args = parser.parse_args()

    if args.seed is None:
        args.seed = random.randint(1, 10000)
    print("Random Seed: ", args.seed)
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.gpus:
        torch.cuda.manual_seed_all(args.seed)

    time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    if args.evaluate:
        args.results_dir = './tmp'
    if args.save == '':
        args.save = time_stamp
    save_path = os.path.join(args.results_dir, args.save)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    if args.gpus is not None:
        args.gpus = [int(i) for i in args.gpus.split(',')]
        device = 'cuda:' + str(args.gpus[0])
        cudnn.benchmark = True
    else:
        device = 'cpu'

    if args.type == 'float64':
        dtype = torch.float64
    elif args.type == 'float32':
        dtype = torch.float32
    elif args.type == 'float16':
        dtype = torch.float16
    else:
        raise ValueError('Wrong type!')  # TODO int8

    # define net model
    # model = MobileNet2(input_size=args.input_size, scale=args.scaling, num_classes=args.num_class)
    model = resnet34(pretrained=False, modelpath=args.data_root, num_classes=args.num_class)
    num_parameters = sum([l.nelement() for l in model.parameters()])
    print(model)
    print('number of parameters: {}'.format(num_parameters))
    # print('FLOPs: {}'.format(
    #     flops_benchmark.count_flops(MobileNet2,
    #                                 args.batch_size // len(args.gpus) if args.gpus is not None else args.batch_size,
    #                                 device, dtype, args.input_size, 3, args.scaling)))

    # define loss function (criterion) and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    if args.gpus is not None:
        model = torch.nn.DataParallel(model, args.gpus)
    model.to(device=device, dtype=dtype)
    criterion.to(device=device, dtype=dtype)

    optimizer = torch.optim.SGD(model.parameters(), args.learning_rate, momentum=args.momentum, weight_decay=args.decay,
                                nesterov=True)

    best_test = 0

    # optionally resume from a checkpoint
    data = None
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location=device)
            args.start_epoch = checkpoint['epoch'] - 1
            best_test = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        elif os.path.isdir(args.resume):
            checkpoint_path = os.path.join(args.resume, 'checkpoint.pth.tar')
            csv_path = os.path.join(args.resume, 'results.csv')
            print("=> loading checkpoint csv '{}'".format(checkpoint_path))
            checkpoint = torch.load(checkpoint_path, map_location=device)
            args.start_epoch = checkpoint['epoch'] - 1
            best_test = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(checkpoint_path, checkpoint['epoch']))
            data = []
            with open(csv_path) as csvfile:
                reader = csv.DictReader(csvfile)
                for row in reader:
                    data.append(row)
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    if args.evaluate:
        test_loader = get_test_loaders(args.data_root, args.batch_size, args.input_size, args.workers)
        loss, top1, top5 = test(model, test_loader, criterion, device, dtype, classes)
        print("loss:{}, top1:{}, top5:{}".format(loss, top1, top5))
        # TODO
        return

    train_loader, val_loader = get_loaders(args.data_root, args.batch_size, args.batch_size, args.input_size,
                                           args.workers)
    if args.find_clr:
        find_bounds_clr(model, train_loader, optimizer, criterion, device, dtype, min_lr=args.min_lr,
                        max_lr=args.max_lr, step_size=args.epochs_per_step * len(train_loader), mode=args.mode,
                        save_path=save_path)
        return

    if args.clr:
        scheduler = CyclicLR(optimizer, base_lr=args.min_lr, max_lr=args.max_lr,
                             step_size=args.epochs_per_step * len(train_loader), mode=args.mode)
    else:
        scheduler = MultiStepLR(optimizer, milestones=args.schedule, gamma=args.gamma)

    csv_logger = CsvLogger(filepath=save_path, data=data)
    csv_logger.save_params(sys.argv, args)

    claimed_acc1 = None
    claimed_acc5 = None
    if args.input_size in claimed_acc_top1:
        if args.scaling in claimed_acc_top1[args.input_size]:
            claimed_acc1 = claimed_acc_top1[args.input_size][args.scaling]
            claimed_acc5 = claimed_acc_top5[args.input_size][args.scaling]
            csv_logger.write_text(
                'Claimed accuracies are: {:.2f}% top-1, {:.2f}% top-5'.format(claimed_acc1 * 100., claimed_acc5 * 100.))
    train_network(args.start_epoch, args.epochs, scheduler, model, train_loader, val_loader, optimizer, criterion,
                  device, dtype, args.batch_size, args.log_interval, csv_logger, save_path, claimed_acc1, claimed_acc5,
                  best_test)
def main():
    args = get_args()
    print(args)

    gpus = args.gpus
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = 'cuda'

    n_gpu = len(gpus.split(','))
    set_seeds(args.seed)
    WELL_TYPE = 'treatment'
    data_path = args.data_path
    with_plates = args.with_plates
    model_name = args.model_name
    exp_suffix = args.exp_suffix
    bss = list(range(32, 129))
    cell_types = ['HEPG2', 'HUVEC', 'RPE', 'U2OS']
    root = Path('_'.join([model_name, exp_suffix]))
    cell_to_model = {
        'HEPG2': root / 'HEPG2' / 'seq_train_dev',
        'HUVEC': root / 'HUVEC' / 'seq_train_dev',
        'RPE': root / 'RPE' / 'seq_train_dev',
        'U2OS': root / 'U2OS' / 'seq_train_dev',
    }

    dev_tgs, dev_predictions, dev_predictions_fixed = [], [], []
    test_ids, predictions, predictions_fixed = [], [], []
    all_predictions = []
    for CELL_TYPE in cell_types:
        criterion = nn.BCEWithLogitsLoss()

        df_ft = rio.combine_metadata(base_path=data_path)
        df_ft.reset_index(inplace=True)
        df_ft = df_ft[(df_ft.well_type == WELL_TYPE)
                      & (df_ft.dataset == 'train')].copy()
        signle_df_ft = df_ft[df_ft['cell_type'] == CELL_TYPE].copy()

        NUM_CLASSES_FT = len(signle_df_ft.sirna.unique())

        signle_df_ft.sirna = signle_df_ft.sirna.apply(np.int64)

        train_exp_names_ft = sorted(signle_df_ft.experiment.unique())
        dev_exp_names_ft = train_exp_names_ft[-1:]
        train_exp_names_ft = train_exp_names_ft[:-1]
        print(train_exp_names_ft)
        print(dev_exp_names_ft)

        model_ft2 = get_model(name=model_name,
                              num_classes=NUM_CLASSES_FT,
                              with_plates=with_plates).to(device)
        path_to_pretrained2 = cell_to_model[CELL_TYPE] / 'model.pt'
        state_dict = torch.load(path_to_pretrained2)['state_dict']
        model_ft2.load_state_dict(state_dict, strict=False)
        FP16 = args.fp16 and IS_APEX
        if FP16:
            model_ft2 = apex.amp.initialize(model_ft2, opt_level='O1')
        if n_gpu > 1:
            model_ft2 = nn.parallel.DataParallel(model_ft2)

        loc_tgs = []
        loc_dev_preds = []
        loc_dev_preds_fixed = []
        for exp in dev_exp_names_ft:
            print(f'exp: {exp}')
            dev_predictions_bs = []
            for bs in bss:
                print(f'batch: {bs}')
                train_loaders_ft, dev_loaders1_ft, dev_loaders2_ft = get_loaders(
                    signle_df_ft,
                    train_exp_names_ft, [exp],
                    root=data_path,
                    batch_size=bs * n_gpu,
                    with_plates=with_plates)
                with torch.no_grad():
                    loss, acc, preds1, targets1, plates1 = epoch_step(
                        dev_loaders1_ft,
                        f'[ Validating {CELL_TYPE} 1 ({exp}/{bs}).. ]',
                        net=model_ft2,
                        criterion=criterion,
                        device=device,
                        with_preds=True,
                        opt=None,
                        fp16=FP16)
                    print(f'loss site 1: {loss:.4} ({len(preds1)})')
                    print(f'acc site 1: {acc:.4}')

                    loss, acc, preds2, targets2, plates2 = epoch_step(
                        dev_loaders2_ft,
                        f'[ Validating {CELL_TYPE} 2 ({exp}/{bs}).. ]',
                        net=model_ft2,
                        criterion=criterion,
                        device=device,
                        with_preds=True,
                        opt=None,
                        fp16=FP16)
                    print(f'loss site 2: {loss:.4}')
                    print(f'acc site 2: {acc:.4}')

                    assert (targets1 == targets2).all()
                    assert (plates1 == plates2).all()
                    preds = np.mean(np.stack([preds1, preds2]), axis=0)
                    dev_predictions_bs.append(preds)
                    acc = (preds.argmax(-1) == targets1).mean()
                    print(f'acc: {acc:.4}')

                print()
            loc_tgs.extend(targets1)
            preds = np.mean(np.array(dev_predictions_bs), axis=0)
            print(
                f'mean over batches: {(preds.argmax(-1) == targets1).mean():.4} ({len(preds)})'
            )
            loc_dev_preds.extend(preds.argmax(-1))
            fixed_preds = fix_preds(preds)
            assert len(fixed_preds) == len(preds), f'{len(fixed_preds)}'
            print(
                f'mean over batches (fixed): {(fixed_preds.c.values == targets1).mean():.4}'
            )
            loc_dev_preds_fixed.extend(fixed_preds.c.values)

        dev_tgs.extend(loc_tgs)
        dev_predictions.extend(loc_dev_preds)
        dev_predictions_fixed.extend(loc_dev_preds_fixed)

        test_df = rio.combine_metadata(base_path=data_path)
        test_df.reset_index(inplace=True)
        test_df = test_df[(test_df.well_type == WELL_TYPE)
                          & (test_df.dataset == 'test')].copy()
        to_test = test_df[test_df['cell_type'] == CELL_TYPE].copy()

        loc_ids = []
        loc_preds = []
        loc_preds_fixed = []
        loc_preds_all = []
        for exp in to_test.experiment.unique():
            print(f'exp: {exp}')
            predictions_bs = []
            for bs in bss:
                print(f'batch: {bs}')
                test_loaders1, test_loaders2 = get_test_loaders(
                    to_test, [exp],
                    root=data_path,
                    batch_size=bs * n_gpu,
                    with_plates=with_plates)
                with torch.no_grad():
                    preds1, ids1, plates1 = predict(
                        test_loaders1,
                        f'[ Testing {CELL_TYPE} 1 ({exp}/{bs}).. ]',
                        net=model_ft2,
                        device=device)
                    print(f'len {len(preds1)}')
                    preds2, ids2, plates2 = predict(
                        test_loaders2,
                        f'[ Testing {CELL_TYPE} 2 ({exp}/{bs}).. ]',
                        net=model_ft2,
                        device=device)

                    assert (ids1 == ids2).all()
                    assert (plates1 == plates2).all()
                    preds = np.mean(np.stack([preds1, preds2]), axis=0)
                    assert len(ids1) == len(preds)
                    predictions_bs.append(preds)

            loc_ids.extend(ids1)

            preds = np.mean(np.array(predictions_bs), axis=0)
            loc_preds.extend(preds.argmax(-1))
            fixed_preds = fix_preds(preds)
            assert len(fixed_preds) == len(preds)
            loc_preds_fixed.extend(fixed_preds.c.values)

            loc_preds_all.extend(preds)

        test_ids.extend(loc_ids)
        predictions.extend(loc_preds)
        predictions_fixed.extend(loc_preds_fixed)
        all_predictions.extend(loc_preds_all)

        assert len(test_ids) == len(predictions) == len(predictions_fixed)

    dev_tgs, dev_predictions, dev_predictions_fixed = map(
        np.array, [dev_tgs, dev_predictions, dev_predictions_fixed])
    all_predictions = np.array(all_predictions)
    print(f'acc           : {(dev_tgs == dev_predictions).mean():.4}')
    print(f'acc (fixed)   : {(dev_tgs == dev_predictions_fixed).mean():.4}')
    to_sub = pd.DataFrame(
        zip(test_ids, predictions, predictions_fixed, *all_predictions.T),
        columns=['id_code', 'sirna', 'sirna_fixed'] +
                [f'p_{i}' for i in range(NUM_CLASSES_FT)])
    to_sub.to_csv(f'submission_SUB_ACC16_p.csv', index=False)

    # plate "leak"
    train_csv = pd.read_csv(data_path / 'train.csv')
    test_csv = pd.read_csv(data_path / 'test.csv')
    test_csv = pd.merge(test_csv, to_sub, how='left', on='id_code')
    sub = pd.read_csv(f'submission_SUB_ACC16_p.csv')
    assert (test_csv.id_code.values == sub.id_code.values).all()
    plate_groups = np.zeros((NUM_CLASSES_FT, 4), int)
    for sirna in range(NUM_CLASSES_FT):
        grp = train_csv.loc[train_csv.sirna ==
                            sirna, :].plate.value_counts().index.values
        assert len(grp) == 3
        plate_groups[sirna, 0:3] = grp
        plate_groups[sirna, 3] = 10 - grp.sum()

    all_test_exp = test_csv.experiment.unique()

    group_plate_probs = np.zeros((len(all_test_exp), 4))
    for idx in range(len(all_test_exp)):
        preds = sub.loc[test_csv.experiment == all_test_exp[idx],
                        'sirna_fixed'].values
        pp_mult = np.zeros((len(preds), NUM_CLASSES_FT))
        pp_mult[range(len(preds)), preds] = 1

        sub_test = test_csv.loc[test_csv.experiment == all_test_exp[idx], :]
        assert len(pp_mult) == len(sub_test)

        for j in range(4):
            mask = np.repeat(plate_groups[np.newaxis, :, j], len(pp_mult), axis=0) == \
                   np.repeat(sub_test.plate.values[:, np.newaxis], NUM_CLASSES_FT, axis=1)

            group_plate_probs[idx,
                              j] = np.array(pp_mult)[mask].sum() / len(pp_mult)
    exp_to_group = group_plate_probs.argmax(1)

    def select_plate_group(pp_mult, idx):
        sub_test = test_csv.loc[test_csv.experiment == all_test_exp[idx], :]
        assert len(pp_mult) == len(sub_test)
        mask = np.repeat(plate_groups[np.newaxis, :, exp_to_group[idx]], len(pp_mult), axis=0) != \
               np.repeat(sub_test.plate.values[:, np.newaxis], NUM_CLASSES_FT, axis=1)
        pp_mult[mask] = 0

        return pp_mult

    for idx in range(len(all_test_exp)):
        indices = (test_csv.experiment == all_test_exp[idx])

        preds = test_csv[indices].copy()
        preds = preds[[f'p_{i}' for i in range(NUM_CLASSES_FT)]].values

        preds = select_plate_group(preds, idx)
        sub.loc[indices, 'sirna_leak'] = preds.argmax(1)

        preds_fixed = fix_preds(preds)
        assert len(preds_fixed) == len(preds)
        sub.loc[indices, 'sirna_leak_fixed'] = preds_fixed.c.values

    sub.to_csv(f'submission_SUB_ACC16_p_leak.csv', index=False)
def main():
    args = get_args()
    print(args)

    gpus = args.gpus
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    device = 'cuda'

    n_gpu = len(gpus.split(','))
    set_seeds(args.seed)
    WELL_TYPE = 'treatment'
    CELL_TYPE = args.cell_type
    data_path = args.data_path
    with_plates = args.with_plates
    model_name = args.model_name
    exp_suffix = args.exp_suffix
    FP16 = args.fp16 and IS_APEX
    batch_size = args.batch_size
    epochs = args.epochs
    lr = args.lr

    criterion = nn.BCEWithLogitsLoss()

    df = rio.combine_metadata(base_path=data_path)
    df.reset_index(inplace=True)
    df = df[(df.well_type != WELL_TYPE)].copy()
    signle_df = df[df['cell_type'] == CELL_TYPE].copy()
    NUM_CLASSES = len(signle_df.sirna.unique())

    mapping = {
        cl: ind
        for ind, cl in enumerate(sorted(signle_df.sirna.unique()))
    }
    signle_df.sirna = signle_df.sirna.apply(lambda x: mapping[x])

    train_exp_names = sorted(
        signle_df[signle_df.dataset == 'train'].experiment.unique())
    dev_exp_names = sorted(
        signle_df[signle_df.dataset == 'test'].experiment.unique())

    train_loaders, dev_loaders1, dev_loaders2 = get_loaders(
        signle_df,
        train_exp_names,
        dev_exp_names,
        root=data_path,
        batch_size=batch_size,
        n_gpu=n_gpu,
        with_plates=with_plates)

    path_to_exp = Path('_'.join([model_name, exp_suffix])) / CELL_TYPE / 'seq_pretrain'
    if not path_to_exp.exists():
        path_to_exp.mkdir(parents=True)

    model = get_model(name=model_name,
                      num_classes=NUM_CLASSES,
                      with_plates=with_plates).to(device)
    opt = torch.optim.Adam(model.parameters(), lr=lr, amsgrad=True)
    if FP16:
        model, opt = apex.amp.initialize(model, opt, opt_level='O1')
    if n_gpu > 1:
        model = nn.parallel.DataParallel(model)

    scheduler = None
    train_model((train_loaders, dev_loaders1, dev_loaders2),
                model=model,
                criterion=criterion,
                opt=opt,
                path=path_to_exp,
                device=device,
                fp16=FP16,
                epochs=epochs,
                scheduler=scheduler)

    # pretrain head
    path_to_pretrained = path_to_exp / 'model.pt'
    df_ft = rio.combine_metadata(base_path=data_path)
    df_ft.reset_index(inplace=True)
    df_ft = df_ft[(df_ft.well_type == WELL_TYPE)
                  & (df_ft.dataset == 'train')].copy()
    signle_df_ft = df_ft[df_ft['cell_type'] == CELL_TYPE].copy()

    NUM_CLASSES_FT = len(signle_df_ft.sirna.unique())

    signle_df_ft.sirna = signle_df_ft.sirna.apply(np.int64)

    train_exp_names_ft = sorted(signle_df_ft.experiment.unique())
    dev_exp_names_ft = train_exp_names_ft[-1:]
    train_exp_names_ft = train_exp_names_ft[:-1]

    train_loaders_ft, dev_loaders1_ft, dev_loaders2_ft = get_loaders(
        signle_df_ft,
        train_exp_names_ft,
        dev_exp_names_ft,
        root=data_path,
        batch_size=batch_size,
        n_gpu=n_gpu,
        with_plates=with_plates)
    path_to_exp_ft = Path('_'.join([model_name, exp_suffix])) / CELL_TYPE / 'seq_train_head'
    if not path_to_exp_ft.exists():
        path_to_exp_ft.mkdir(parents=True)
    model_ft = get_model(name=model_name,
                         num_classes=NUM_CLASSES_FT,
                         with_plates=with_plates).to(device)
    state_dict = torch.load(path_to_pretrained)['state_dict']
    state_dict.pop('classifier.weight')
    state_dict.pop('classifier.bias')
    model_ft.load_state_dict(state_dict, strict=False)
    for n, p in model_ft.named_parameters():
        if not n.startswith('classifier'):
            p.requires_grad = False
    opt_ft = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                     model_ft.parameters()),
                              lr=lr,
                              amsgrad=True)

    if FP16:
        model_ft, opt_ft = apex.amp.initialize(model_ft,
                                               opt_ft,
                                               opt_level='O1')
    if n_gpu > 1:
        model_ft = nn.parallel.DataParallel(model_ft)
    scheduler = None
    train_model((train_loaders_ft, dev_loaders1_ft, dev_loaders2_ft),
                model=model_ft,
                criterion=criterion,
                opt=opt_ft,
                path=path_to_exp_ft,
                device=device,
                fp16=FP16,
                epochs=epochs + 50,
                scheduler=scheduler)

    # finetune whole model
    path_to_exp_ft2 = Path('_'.join([model_name, exp_suffix])) / CELL_TYPE / 'seq_train'
    if not path_to_exp_ft2.exists():
        path_to_exp_ft2.mkdir(parents=True)
    model_ft2 = get_model(name=model_name,
                          num_classes=NUM_CLASSES_FT,
                          with_plates=with_plates).to(device)
    path_to_pretrained2 = path_to_exp_ft / 'model.pt'
    state_dict = torch.load(path_to_pretrained2)['state_dict']
    model_ft2.load_state_dict(state_dict)
    opt_ft2 = torch.optim.Adam(model_ft2.parameters(), lr=lr, amsgrad=True)
    if FP16:
        model_ft2, opt_ft2 = apex.amp.initialize(model_ft2,
                                                 opt_ft2,
                                                 opt_level='O1')
    if n_gpu > 1:
        model_ft2 = nn.parallel.DataParallel(model_ft2)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(opt_ft2,
                                                     milestones=[120, 150],
                                                     gamma=args.gamma)
    train_model((train_loaders_ft, dev_loaders1_ft, dev_loaders2_ft),
                model=model_ft2,
                criterion=criterion,
                opt=opt_ft2,
                path=path_to_exp_ft2,
                device=device,
                fp16=FP16,
                epochs=epochs + 75,
                scheduler=scheduler)

    # finetune on validation
    path_to_exp_ft3 = Path('_'.join([model_name, exp_suffix])) / CELL_TYPE / 'seq_train_dev'
    if not path_to_exp_ft3.exists():
        path_to_exp_ft3.mkdir(parents=True)
    opt_ft2 = torch.optim.Adam(model_ft2.parameters(), lr=1e-5, amsgrad=True)
    if FP16:
        model_ft2, opt_ft2 = apex.amp.initialize(model_ft2,
                                                 opt_ft2,
                                                 opt_level='O1')
    if n_gpu > 1:
        model_ft2 = nn.parallel.DataParallel(model_ft2)
    train_model(
        (list(it.chain(train_loaders_ft, dev_loaders1_ft,
                       dev_loaders2_ft)), dev_loaders1_ft, dev_loaders2_ft),
        model=model_ft2,
        criterion=criterion,
        opt=opt_ft2,
        path=path_to_exp_ft3,
        device=device,
        fp16=FP16,
        epochs=15,
        scheduler=None)
Example #15
                                  p=1),
    albu.GridDistortion(p=1),
    # albu.RGBShift(p=1),
    ToTensor(),
])

valid_transforms = compose([pre_transforms(), post_transforms()])

batch_size = 4  # 16

loaders = get_loaders(
    images=ALL_IMAGES,
    masks=ALL_MASKS,
    random_state=SEED,
    train_transforms_fn=train_transforms,
    valid_transforms_fn=valid_transforms,
    batch_size=batch_size,
    valid_size=0.2,
    train_mask_path=mask_path,
    valid_mask_path=mask_path,
    # num_workers=2,
)

# %%
model = smp.Unet(
    encoder_name="resnet50",
    encoder_weights="imagenet",
    in_channels=3,
    classes=1,
)

criterion = {