Example #1
def eval_with_mean_accuracy(args, config, device):
    noisy_phrases = AmericanNationalCorpusDataset(
        config,
        transform_raw_phrase=ObliterateLetters(
            obliterate_ratio=config['replace_with_noise_probability']),
        transform_sample_dict=ToTensor())
    real_phrases = AmericanNationalCorpusDataset(
        config, transform_raw_phrase=None, transform_sample_dict=ToTensor())
    test_noisy_data_loader = DataLoader(noisy_phrases,
                                        batch_size=config['batch_size'],
                                        num_workers=config['num_workers'],
                                        shuffle=False)
    test_real_data_loader = DataLoader(real_phrases,
                                       batch_size=config['batch_size'],
                                       num_workers=config['num_workers'],
                                       shuffle=False)
    generator, _ = load_models(args, config, device)
    acc = measure_accuracy(generator, test_real_data_loader,
                           test_noisy_data_loader, device)
    print(f'Mean Accuracy: {acc:.2f}')
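measure_accuracy is not shown in these examples; below is a hedged sketch of what it might look like, inferred from the per-step accuracy computed in the training loop of Example #9. The real helper may differ, and the zipping of the two loaders is an assumption that both iterate the same phrases in the same order (shuffle=False).

import numpy as np
import torch

def measure_accuracy(generator, real_loader, noisy_loader, device):
    scores = []
    generator.eval()
    with torch.no_grad():
        for real_batch, noisy_batch in zip(real_loader, noisy_loader):
            gen_input = noisy_batch['concat_phrase'].to(device)
            fake = generator(gen_input).cpu()
            # Per-character accuracy: compare argmax predictions to the clean phrase.
            p_fake = fake.argmax(axis=-1)
            p_real = real_batch['raw_phrase'].argmax(axis=-1)
            scores.append((p_fake == p_real).float().mean().item())
    return float(np.mean(scores))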
Example #2
def remove_background(input_image):
    # --------- 1. get image path and name ---------
    # model_name='u2net'#u2netp
    # model_dir = './saved_models/'+ model_name + '/' + model_name + '.pth'
    Output_Image_Path = './test_data/u2net_results/Alpha_Blending/out1.png'
    app.logger.debug('remove_background function called')

    # --------- 2. dataloader ---------
    # eval_transforms = transforms.Compose([RescaleT(320), ToTensor()])
    # # --------- 3. model define ---------
    # if (model_name == 'u2net'):
    #     print("...load U2NET---173.6 MB")
    #     net = U2NET(3, 1)
    # elif (model_name == 'u2netp'):
    #     print("...load U2NEP---4.7 MB")
    #     net = U2NETP(3, 1)
    # model_load_start_time = time.time()
    # net.load_state_dict(torch.load(model_dir, map_location='cpu'))
    # # net.load_state_dict(torch.load(model_dir))
    # print("Model Load Time: %s seconds ---" % (time.time() - model_load_start_time))
    #
    # net.eval()
    eval_transforms = transforms.Compose([ToTensor()])
    # --------- 4. inference for each image ---------
    # The below code takes the uploaded image and creates an alpha mask

    # Convert Pillow image to OpenCV
    opencv_image = cv2.cvtColor(np.array(input_image), cv2.COLOR_RGB2BGRA)

    # Put opencv_image into a dict so the u2net tensor functions will work
    input_image_dict = {
        'imidx': np.array([[0]]),
        'image': opencv_image,
        'label': opencv_image
    }

    # Transform the image so it matches the size the model was trained on
    image_tensor = eval_transforms(input_image_dict)
    input = Variable(image_tensor['image'])
    d1, d2, d3, d4, d5, d6, d7 = net(input[None, ...].float())

    # normalization
    pred = d1[:, 0, :, :]
    pred = normPRED(pred)

    predict = pred
    predict = predict.squeeze()
    predict_np = predict.cpu().data.numpy()
    del d1, d2, d3, d4, d5, d6, d7
    return predict_np
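The returned mask is a float saliency map in [0, 1]. Below is a hedged usage sketch, not taken from the source, of applying it as an alpha channel on the uploaded image; the resize step and the output path reuse assumptions visible in the function above.

import cv2
import numpy as np

mask = remove_background(input_image)  # float saliency mask in [0, 1]
rgba = cv2.cvtColor(np.array(input_image), cv2.COLOR_RGB2BGRA)
# Resize the mask back to the original resolution and use it as transparency.
mask = cv2.resize(mask.astype(np.float32), (rgba.shape[1], rgba.shape[0]))
rgba[:, :, 3] = (mask * 255).astype(np.uint8)
cv2.imwrite('./test_data/u2net_results/Alpha_Blending/out1.png', rgba)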
Example #3
def show_examples(args, config, device='cpu', shuffle=False):
    generator, _ = load_models(args, config, device=device)

    noisy_phrases = AmericanNationalCorpusDataset(
        config,
        transform_raw_phrase=ObliterateLetters(
            obliterate_ratio=config['replace_with_noise_probability']),
        transform_sample_dict=ToTensor())

    noisy_data_loader = DataLoader(noisy_phrases,
                                   batch_size=1,
                                   num_workers=1,
                                   shuffle=shuffle)

    with torch.no_grad():
        for x in itertools.islice(noisy_data_loader, 5):
            _input = x['concat_phrase'].to(device)
            out = generator.forward(_input).cpu()
            print('#' * 40)
            print(noisy_phrases.show(x['raw_phrase']))
            print(noisy_phrases.show(out))
            print('#' * 40)
Example #4
    args = parser.parse_args()

    device = torch.device("cuda")
    model = AdaptativeAutoEncoder(args.filters).to(device=device)
    model.load_state_dict(torch.load(args.model_path))
    bias = model.encode.bias.cpu().detach().numpy()
    weights = model.encode.weight.cpu().detach().numpy()
    ranks = ranking_list(bias)

    results_path = '/'.join(
        args.model_path.split('/')[:-1:]) + '/visualizations'
    if not path.exists(results_path):
        os.makedirs(results_path)

    dataset = MriBrainDataset(args.data_path, args.caps_path, ToTensor())
    columns = ["diagnosis"]
    for i in range(len(ranks)):
        columns.append('hidden-' + str(i))
    hidden_df = pd.DataFrame(index=dataset.subjects_list(), columns=columns)

    for sub in range(len(dataset)):
        sample = dataset[sub]
        img_tensor = sample['image'].view(1, 1, 121, 145, 121).cuda()
        name = sample['name']
        diagnosis = 'diag-' + str(sample['diagnosis'])
        hidden_df.loc[name, 'diagnosis'] = diagnosis
        sub_path = path.join(results_path, diagnosis, name)
        if not path.exists(sub_path) and sub < args.max:
            os.makedirs(sub_path)
Example #5
parser.add_argument('--test-split-ratio', type=float, default=0.2, metavar='N',
                    help='split ratio for the test set')
parser.add_argument('--model-path', type=str, default=MODEL_PATH, metavar='PATH',
                    help='model save path')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

torch.manual_seed(args.seed)

device = torch.device("cuda" if args.cuda else "cpu")

kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}

face_dataset = AnimationFaceDataset('data', transform=transforms.Compose([
    Normalize(),
    ToTensor()
]))

test_size = int(len(face_dataset) * args.test_split_ratio)
lengths = [len(face_dataset) - test_size, test_size]
train_set, test_set = random_split(face_dataset, lengths)

train_loader = torch.utils.data.DataLoader(
    train_set, batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
    test_set, batch_size=args.batch_size, shuffle=True, **kwargs)


version_info = 'epoch_{}_KL_{}'
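version_info appears to mirror the checkpoint filenames loaded in Example #8; a minimal, assumed usage sketch follows, where epoch, kl_weight, and model are hypothetical placeholders rather than names from the source.

checkpoint_path = 'model/model_' + version_info.format(epoch, kl_weight) + '.pt'
torch.save(model.state_dict(), checkpoint_path)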

Example #6
def main():
    # data_dir = './train_data/'
    train_image_dir = './train_data/DUTS/DUTS-TR-Image/'
    train_label_dir = './train_data/DUTS/DUTS-TR-Mask/'

    model_dir = './saved_models/'

    resume_train = True
    saved_model_path = model_dir + 'model.pth'

    validation = True
    save_every = 1
    epoch_num = 100000
    batch_size_train = 16
    batch_size_val = 1
    train_num = 0
    val_num = 0

    if validation:
        val_image_dir = 'test_data/val/images/'
        val_label_dir = 'test_data/val/gts/'
        prediction_dir = './val_results/'

        val_img_name_list = sorted(glob.glob(val_image_dir + '*.jpg'))
        val_lbl_name_list = sorted(glob.glob(val_label_dir + '*.png'))

        val_dataset = DatasetLoader(img_name_list=val_img_name_list,
                                    lbl_name_list=val_lbl_name_list,
                                    transform=transforms.Compose(
                                        [Rescale(256),
                                         ToTensor()]))

        val_dataloader = DataLoader(val_dataset,
                                    batch_size=1,
                                    shuffle=False,
                                    num_workers=4)

    train_img_name_list = glob.glob(train_image_dir + '*.jpg')
    train_lbl_name_list = []

    for img_path in train_img_name_list:
        img_path = img_path.replace('.jpg', '.png')
        img_path = img_path.replace('DUTS-TR-Image', 'DUTS-TR-Mask')
        train_lbl_name_list.append(img_path)

    if len(train_img_name_list) == 0 or (validation and len(val_img_name_list) == 0):
        print('0 images found.')
        assert False

    print('Train images: ', len(train_img_name_list))
    print('Train labels: ', len(train_lbl_name_list))

    train_num = len(train_img_name_list)

    dataset = DatasetLoader(img_name_list=train_img_name_list,
                            lbl_name_list=train_lbl_name_list,
                            transform=transforms.Compose([
                                RandomHorizontalFlip(0.5),
                                RandomVerticalFlip(0.5),
                                Rescale(300),
                                RandomCrop(256),
                                ToTensor()
                            ]))
    dataloader = DataLoader(dataset,
                            batch_size=batch_size_train,
                            shuffle=True,
                            num_workers=4)

    model = MYNet(3, 1)
    model.cuda()

    from torchsummary import summary
    summary(model, input_size=(3, 256, 256))

    # optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.00001, nesterov=False)
    optimizer = optim.Adam(model.parameters(),
                           lr=0.01,
                           betas=(0.9, 0.999),
                           eps=1e-08,
                           weight_decay=0)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=[200000, 350000],
                                               gamma=0.1,
                                               last_epoch=-1)

    # scheduler = torch.optim.lr_scheduler.CyclicLR(optimizer, base_lr=0.0001,
    #     max_lr=0.01, step_size_up=8000, mode='triangular2')

    i_num_tot = 0
    loss_output = 0.0
    loss_pre_ref = 0.0
    i_num_epoch = 0
    epoch_init = 0

    if resume_train:
        print('Loading checkpoint: ', saved_model_path)
        checkpoint = torch.load(saved_model_path)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        epoch_init = checkpoint['epoch'] + 1
        i_num_tot = checkpoint['i_num_tot'] + 1
        i_num_epoch = checkpoint['i_num_epoch']
        loss_output = checkpoint['loss_output']
        # loss_pre_ref = checkpoint['loss_pre_ref']

    log_file = open('logs/log.txt', 'a+')
    log_file.write(str(model) + '\n')
    log_file.close()

    print('Training...')
    _s = time.time()
    for epoch in range(epoch_init, epoch_num):
        model.train()
        print('Epoch {}...'.format(epoch))
        _time_epoch = time.time()
        for i, data in enumerate(dataloader):
            i_num_tot += 1
            i_num_epoch += 1

            inputs, labels = data

            inputs = inputs.cuda()
            labels = labels.cuda()

            optimizer.zero_grad()

            out = model(inputs)
            loss = muti_bce_loss_fusion(out, labels)

            loss[0].backward()
            optimizer.step()
            scheduler.step()

            loss_output += loss[0].item()
            # loss_pre_ref += loss[1].item()

            del out, inputs, labels

        print('Epoch time: {}'.format(time.time() - _time_epoch))
        if epoch % save_every == 0:  # save the model every X epochs
            state_dic = {
                'epoch': epoch,
                'i_num_tot': i_num_tot,
                'i_num_epoch': i_num_epoch,
                'loss_output': loss_output,
                # 'loss_pre_ref': loss_pre_ref,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict(),
            }
            torch.save(state_dic, model_dir + 'model.pth')

        log = '[epoch: {:d}/{:d}, ite: {:d}] loss_output: {:.6f}, l: {:.6f}\n'.format(
            epoch, epoch_num, i_num_tot, loss_output / i_num_epoch,
            loss[0].item())

        del loss

        loss_output = 0
        loss_pre_ref = 0
        i_num_epoch = 0
        log_file = open('logs/log.txt', 'a+')
        log_file.write(log + '\n')
        log_file.close()
        print(log)

        if validation:
            model.eval()
            # val_i_num_tot = 0
            val_i_num_epoch = 0
            val_loss_output = 0
            # val_loss_pre_ref = 0
            val_log_file = open('logs/log_val.txt', 'a+')
            print('Evaluating...')
            with torch.no_grad():
                for val_i, val_data in enumerate(val_dataloader):
                    # val_i_num_tot += 1
                    val_i_num_epoch += 1

                    val_inputs, val_labels = val_data

                    val_inputs = val_inputs.cuda()
                    val_labels = val_labels.cuda()

                    val_out = model(val_inputs)

                    val_loss = muti_bce_loss_fusion(val_out, val_labels)

                    val_loss_output += val_loss[0].item()
                    # val_loss_pre_ref += val_loss0.item()

                    pred = val_out[0][:, 0, :, :]
                    pred = normPRED(pred)

                    save_output(val_img_name_list[val_i], pred, prediction_dir)

                    del val_out, val_inputs, val_labels, val_loss

            log_val = '[val: epoch: {:d}, ite: {:d}] loss_output: {:.6f}\n'.format(
                epoch, i_num_tot, val_loss_output / val_i_num_epoch)
            val_log_file.write(log_val + '\n')
            val_log_file.close()

    _t = 'Training time: ' + str(time.time() - _s) + '\n'
    print(_t)
    log_file = open('logs/log.txt', 'a+')
    log_file.write(_t)
    log_file.close()
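muti_bce_loss_fusion is not defined in this example. Below is a hedged sketch assuming a U^2-Net-style fusion loss (one BCE term per side output, summed); the signature and return order are guessed from the call sites above, which backpropagate and log loss[0].

import torch.nn as nn

bce_loss = nn.BCELoss(reduction='mean')

def muti_bce_loss_fusion(outputs, labels):
    # One BCE term per side output of the network.
    losses = [bce_loss(d, labels) for d in outputs]
    total = sum(losses)
    # The call sites use loss[0] for backprop/logging, so return the summed loss first.
    return total, losses[0]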
Example #7
    args = parser.parse_args()

    if args.gpu and torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = torch.device('cpu')

    lr = args.learning_rate * 0.00005

    results_path = path.join(args.results_path, args.name)
    if not path.exists(results_path):
        os.makedirs(results_path)

    composed = torchvision.transforms.Compose(
        [GaussianSmoothing(sigma=args.sigma),
         ToTensor(gpu=args.gpu)])

    if args.bids:
        trainset = BidsMriBrainDataset(args.train_path,
                                       args.caps_path,
                                       classes=args.n_classes,
                                       transform=composed,
                                       rescale=args.rescale)
    else:
        trainset = MriBrainDataset(args.train_path,
                                   args.caps_path,
                                   classes=args.n_classes,
                                   transform=composed,
                                   on_cluster=args.on_cluster)

    if args.classifier == 'basic':
Example #8
import torch
import torch.nn as nn
from torchvision import transforms
import numpy as np
from torchvision.utils import save_image

OUTPUT_SIZE = 8
EPOCH = 20
# KL = 1.0
# KL = 100.0
KL = 0.0

model = VAE()
model.load_state_dict(torch.load(f"model/model_epoch_{EPOCH}_KL_{KL}.pt"))

face_dataset = AnimationFaceDataset('data',
                                    transform=transforms.Compose(
                                        [Normalize(), ToTensor()]))


def use_upsampling():
    data_loader = torch.utils.data.DataLoader(face_dataset, batch_size=2)
    # TODO: currently failed
    for data in data_loader:
        _, mu, logvar = model(data['image'])
        upsampled_recon_x = nn.Upsample(size=[8, 20],
                                        mode='linear')(mu.unsqueeze(0))
        break


def manual():
    # for i in range(0, len(face_dataset), 2):
    for i in range(0, 10, 2):
Example #9
def train(config, save_dir, device='cpu'):
    noisy_phrases = AmericanNationalCorpusDataset(
        config,
        transform_raw_phrase=ObliterateLetters(
            obliterate_ratio=config['replace_with_noise_probability']),
        transform_sample_dict=ToTensor())
    real_phrases = AmericanNationalCorpusDataset(
        config, transform_raw_phrase=None, transform_sample_dict=ToTensor())

    logging.info(f'Dataset_size: {len(noisy_phrases)}')

    noisy_data_loader = DataLoader(
        noisy_phrases,
        batch_size=config['batch_size'],
        num_workers=config['num_workers'],
        shuffle=True,
        drop_last=True,
    )
    real_data_loader = DataLoader(
        real_phrases,
        batch_size=config['batch_size'],
        num_workers=config['num_workers'],
        shuffle=True,
        drop_last=True,
    )

    test_noisy_data_loader = DataLoader(noisy_phrases,
                                        batch_size=config['batch_size'],
                                        num_workers=config['num_workers'],
                                        shuffle=False,
                                        drop_last=True)

    test_real_data_loader = DataLoader(real_phrases,
                                       batch_size=config['batch_size'],
                                       num_workers=config['num_workers'],
                                       shuffle=False,
                                       drop_last=True)

    generator = GeneratorNet(config).to(device)
    discriminator = DiscriminatorNet(config).to(device)

    def show_params(model: GeneratorNet):
        for k, v in model.state_dict().items():
            print(
                f'{k:<30} {str(tuple(v.shape)):<16} {v.mean().item():.6f} {v.std().item():.6f}'
            )

    print('Generator')
    show_params(generator)
    print('Discriminator')
    show_params(discriminator)
    # exit(0)
    optimizer_gen = optim.Adam(
        generator.parameters(),
        lr=config['gen_learning_rate'],
        betas=(0.5, 0.9),
    )
    optimizer_dis = optim.Adam(
        discriminator.parameters(),
        lr=config['dis_learning_rate'],
        betas=(0.5, 0.9),
    )

    if save_dir is not None:
        writer = SummaryWriter(save_dir)
        copyfile('config.yaml', os.path.join(save_dir, 'config.yaml'))

    noisy_sampler = BatchSampler(noisy_data_loader)
    real_sampler = BatchSampler(real_data_loader)

    logs = collections.defaultdict(list)
    for step in range(1, config['steps'] + 1):
        for _ in range(config['dis_iter']):
            # Discriminator training
            discriminator.zero_grad()
            noisy_batch = noisy_sampler.sample_batch()
            real_batch = real_sampler.sample_batch()

            gen_input = noisy_batch['concat_phrase'].to(device)
            real_sample = real_batch['raw_phrase'].float().to(device)
            fake_sample = generator.forward(gen_input, temperature=0.9)

            fake_sample_pred = discriminator.forward(fake_sample)
            real_sample_pred = discriminator.forward(real_sample)

            gradient_penalty = compute_gradient_penalty(
                discriminator, real_sample, fake_sample)

            fake_score = fake_sample_pred.mean()
            real_score = real_sample_pred.mean()

            dis_loss = (fake_score - real_score +
                        config['gradient_penalty_ratio'] * gradient_penalty)

            dis_loss.backward()
            optimizer_dis.step()

            logs['gradient_penalty'].append(gradient_penalty.item())
            logs['real_score'].append(real_score.item())
            logs['fake_score'].append(fake_score.item())
            logs['dis_loss'].append(dis_loss.item())

        for _ in range(config['gen_iter']):
            # Generator training
            generator.zero_grad()
            noisy_batch = noisy_sampler.sample_batch()
            # real_batch = real_sampler.sample_batch()
            gen_input = noisy_batch['concat_phrase'].to(device)
            # real_sample = real_batch['raw_phrase'].float().to(device)
            # real_sample_pred = discriminator.forward(real_sample)
            # real_score = real_sample_pred.mean()

            fake_sample = generator.forward(gen_input, temperature=0.9)
            fake_sample_pred = discriminator.forward(fake_sample)
            fake_score = fake_sample_pred.mean()
            gen_loss = -fake_score
            gen_loss.backward()
            optimizer_gen.step()

            logs['real_score'].append(real_score.item())
            logs['fake_score'].append(fake_score.item())
            logs['gen_loss'].append(gen_loss.item())

        # Training supervision: saving scalars for TensorBoard
        if step % config['print_step'] == 0:
            ###################################
            # Injected printing
            ###################################
            print('#' * 80)
            p_fake = fake_sample.argmax(axis=-1)
            p_real = noisy_batch['raw_phrase'].argmax(axis=-1)

            def decode(xs):
                return ''.join(
                    "_" if x == ord('\0') or x <= 31 or x > 126 else chr(x)
                    for x in xs)

            for (rl, fk) in zip(p_real[:5], p_fake[:5]):
                print(decode(rl))
                print(decode(fk))
            print('#' * 80)
            step_fer = (np.array(p_fake.cpu()) == np.array(p_real)).mean()
            print(f'Acc: {step_fer:.2}')
            ###################################
            # Injected printing
            ###################################
            for k in logs.keys():
                logs[k] = np.array(logs[k]).mean()
            logging.info(f'Step:{step:5d} gen_loss:{logs["gen_loss"]:.3f} '
                         f'dis_loss:{logs["dis_loss"]:.3f}')

            def assign_label(x):
                if 'score' in x:
                    return f'scores/{x}'
                if 'loss' in x:
                    return f'loss/{x}'
                return x

            if save_dir is not None:
                for k, v in logs.items():
                    writer.add_scalar(
                        assign_label(k),
                        v,
                        global_step=step,
                    )
            logs.clear()
        if (step % config['eval_step'] == 0 or step == config['steps']):
            acc = measure_accuracy(generator, test_real_data_loader,
                                   test_noisy_data_loader, device)
            logging.info(f'EvalStep:{step:5d} Accuracy: {acc:.2f}')
            if save_dir is not None:
                writer.add_scalar('accuracy/accuracy', acc, global_step=step)
                # Model saving
                torch.save(
                    generator.state_dict(),
                    os.path.join(save_dir, f'epoch_{step}_generator.pt'))
                torch.save(
                    discriminator.state_dict(),
                    os.path.join(save_dir, f'epoch_{step}_discriminator.pt'))
    if save_dir is not None:
        writer.close()
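compute_gradient_penalty is not shown either; here is a minimal sketch assuming the standard WGAN-GP formulation (Gulrajani et al.), which the gradient_penalty_ratio term in dis_loss suggests. The author's version may differ in details such as the interpolation shape.

import torch

def compute_gradient_penalty(discriminator, real_sample, fake_sample):
    # Random per-example interpolation between real and fake samples.
    batch_size = real_sample.size(0)
    alpha_shape = [batch_size] + [1] * (real_sample.dim() - 1)
    alpha = torch.rand(alpha_shape, device=real_sample.device)
    interpolated = (alpha * real_sample + (1 - alpha) * fake_sample).requires_grad_(True)

    pred = discriminator(interpolated)
    gradients = torch.autograd.grad(outputs=pred,
                                    inputs=interpolated,
                                    grad_outputs=torch.ones_like(pred),
                                    create_graph=True,
                                    retain_graph=True,
                                    only_inputs=True)[0]
    gradients = gradients.view(batch_size, -1)
    # Penalize deviation of the gradient norm from 1.
    return ((gradients.norm(2, dim=1) - 1) ** 2).mean()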
Example #10
from model import U2NETP  # small version of u2net (4.7 MB)
from model import U2NET
# normalize the predicted SOD probability map
def normPRED(d):
    ma = torch.max(d)
    mi = torch.min(d)

    dn = (d - mi) / (ma - mi)

    return dn

model_name = 'u2net'  # or 'u2netp'
model_dir = './saved_models/' + model_name + '/' + model_name + '.pth'
# --------- 2. dataloader ---------
# eval_transforms = transforms.Compose([RescaleT(320), ToTensor()])
eval_transforms = transforms.Compose([ToTensor()])
# --------- 3. model define ---------
if model_name == 'u2net':
    print("...load U2NET---173.6 MB")
    net = U2NET(3, 1)
elif model_name == 'u2netp':
    print("...load U2NETP---4.7 MB")
    net = U2NETP(3, 1)
model_load_start_time = time.time()
net.load_state_dict(torch.load(model_dir, map_location='cpu'))
# net.load_state_dict(torch.load(model_dir))
print("Model Load Time: %s seconds ---" % (time.time() - model_load_start_time))
net.eval()

def remove_background(input_image):
    func_start_time = time.time()