Code example #1
class Trainer:
    def __init__(self, config, dataloader):
        self.batch_size = config.batch_size
        self.config = config
        self.lr = config.lr
        self.epoch = config.epoch
        self.num_epoch = config.num_epoch
        self.checkpoint_dir = config.checkpoint_dir
        self.model_path = config.checkpoint_dir
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        self.data_loader = dataloader
        self.image_len = len(dataloader)
        self.num_classes = config.num_classes
        self.eps = config.eps
        self.rho = config.rho
        self.decay = config.decay
        self.sample_step = config.sample_step
        self.sample_dir = config.sample_dir
        self.gradient_loss_weight = config.gradient_loss_weight
        self.decay_batch_size = config.decay_batch_size

        self.build_model()
        self.optimizer = Adadelta(self.net.parameters(),
                                  lr=self.lr,
                                  eps=self.eps,
                                  rho=self.rho,
                                  weight_decay=self.decay)
        self.lr_scheduler_discriminator = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            LambdaLR(self.num_epoch, self.epoch, len(self.data_loader),
                     self.decay_batch_size).step)
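Note that two different LambdaLR names are in play here: torch.optim.lr_scheduler.LambdaLR, and a project-local helper class whose step method is passed in as the lr_lambda function. That helper is not shown in this snippet; a minimal hypothetical sketch of the shape such a class needs (the decay rule itself is an assumption, only the step(iteration) -> float contract is required):

class LambdaLR:
    """Hypothetical decay rule for the scheduler above (assumed, not from the original)."""

    def __init__(self, num_epoch, start_epoch, batches_per_epoch, decay_batch_size):
        self.offset = start_epoch * batches_per_epoch
        self.decay_batch_size = decay_batch_size

    def step(self, iteration):
        # Halve the base lr every decay_batch_size optimizer steps (assumed schedule).
        return 0.5 ** ((self.offset + iteration) // self.decay_batch_size)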
Code example #2
    def build_model(self):
        self.net = HairMatteNet()
        self.net.to(self.device)
        self.optimizer = Adadelta(self.net.parameters(),
                                  lr=self.lr,
                                  eps=self.eps,
                                  rho=self.rho,
                                  weight_decay=self.decay)
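For reference, PyTorch's Adadelta defaults are lr=1.0, rho=0.9, eps=1e-6 and weight_decay=0, so the explicit keywords above only have an effect when the config deviates from them:

# Equivalent to the call above when the config matches PyTorch's defaults
optimizer = Adadelta(net.parameters())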
Code example #3
File: olivetti.py Project: ahx-code/Olivetti-CNN
def main():
    args = arguments()
    manual_seed(seed=args.seed)
    use_cuda = not args.no_cuda and is_available()
    dev = device("cuda" if use_cuda else "cpu")
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    data = split_data(test_size=0.35, generate_data=args.generate_data,
                      write_to_file=args.write_to_file)
    train_dataset = data['train_dataset']
    test_dataset = data['test_dataset']
    train_loader = DataLoader(dataset=train_dataset,
                              batch_size=args.train_batch_size,
                              shuffle=True, **kwargs)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=args.test_batch_size,
                             shuffle=True, **kwargs)
    model = Net().to(device=dev)
    optimizer = Adadelta(params=model.parameters(), lr=args.lr, rho=0.9,
                         eps=1e-6, weight_decay=0)
    scheduler = StepLR(optimizer=optimizer, step_size=1, gamma=args.gamma)
    train_los, test_los = [], []
    for epoch in range(1, args.epochs + 1):
        tr_los = train(argument_object=args, model=model, dev=dev,
                       train_loader=train_loader, optimizer=optimizer,
                       epoch=epoch)
        te_los = test(model=model, dev=dev, test_loader=test_loader)
        scheduler.step(epoch=epoch)
        train_los.append(tr_los/len(train_loader))
        test_los.append(te_los)
    if args.save_model:
        save(obj=model.state_dict(), f="olivetti_cnn.h5")
    if args.epochs > 1:
        plot_loss(train_loss=train_los, test_loss=test_los)
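Two details worth flagging in this example: passing epoch to scheduler.step() is deprecated in current PyTorch (the scheduler keeps its own counter), and torch.save writes a pickle, so the .h5 extension is cosmetic rather than an actual HDF5 file. Under the current convention the per-epoch call reduces to:

        scheduler.step()  # no epoch argument; the scheduler tracks epochs itself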
Code example #4
def create_optimizer(name, parameters, lr):
    if name == 'Adadelta':
        return Adadelta(parameters, lr=lr)
    elif name == 'Adam':
        return Adam(parameters, lr=lr)
    elif name == 'SGD':
        return SGD(parameters, lr=lr)
    else:
        raise KeyError(
            'Unknown optimizer type {!r}. Choose from [Adadelta | Adam | SGD]'
            .format(name))
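A hedged usage sketch (the model here is a placeholder, not from the original):

import torch.nn as nn

model = nn.Linear(10, 2)  # stand-in model
optimizer = create_optimizer('Adam', model.parameters(), lr=1e-3)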
Code example #5
File: optimizer.py Project: Ba1Jun/MPCE
    def set_parameters(self, params):
        self.params = list(params)  # careful: params may be a generator
        if self.method == 'sgd':
            self.optimizer = SGD(self.params, lr=self.lr)
        elif self.method == 'adagrad':
            self.optimizer = Adagrad(self.params, lr=self.lr)
        elif self.method == 'adadelta':
            self.optimizer = Adadelta(self.params, lr=self.lr)
        elif self.method == 'adam':
            self.optimizer = Adam(self.params, lr=self.lr)
        else:
            raise RuntimeError("Invalid optim method: " + self.method)
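A table-driven variant of the same dispatch is more compact and keeps the supported optimizers in one place; a sketch under the same imports:

    OPTIMIZERS = {'sgd': SGD, 'adagrad': Adagrad, 'adadelta': Adadelta, 'adam': Adam}

    def set_parameters(self, params):
        self.params = list(params)  # careful: params may be a generator
        opt_cls = self.OPTIMIZERS.get(self.method)
        if opt_cls is None:
            raise RuntimeError("Invalid optim method: " + self.method)
        self.optimizer = opt_cls(self.params, lr=self.lr)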
Code example #6
def main():
    use_cuda = torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    train_loader = DataLoader(torchvision.datasets.ImageFolder(
        root=CHARS_PATH,
        transform=transforms.Compose([
            transforms.Grayscale(num_output_channels=1),
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5])
        ])),
                              batch_size=BATCH_SIZE,
                              shuffle=True,
                              num_workers=12,
                              pin_memory=True)

    test_loader = DataLoader(torchvision.datasets.ImageFolder(
        root=CHARS_TEST_PATH,
        transform=transforms.Compose([
            transforms.Grayscale(num_output_channels=1),
            transforms.ToTensor(),
            transforms.Normalize([0.5], [0.5])
        ])),
                             batch_size=BATCH_SIZE,
                             shuffle=True,
                             num_workers=12,
                             pin_memory=True)

    model = Network().to(device)
    optimizer = Adadelta(model.parameters(), lr=LEARNING_RATE)
    scheduler = StepLR(optimizer, step_size=1)

    for epoch in range(1, EPOCHS + 1):
        train(model, device, train_loader, optimizer, epoch)
        test(model, device, test_loader)
        scheduler.step()

    torch.save(model.state_dict(), 'model.pt')
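Since the train and test loaders share an identical transform pipeline, factoring it out removes the duplication; a small refactoring sketch:

    char_transform = transforms.Compose([
        transforms.Grayscale(num_output_channels=1),
        transforms.ToTensor(),
        transforms.Normalize([0.5], [0.5]),
    ])
    train_set = torchvision.datasets.ImageFolder(root=CHARS_PATH, transform=char_transform)
    test_set = torchvision.datasets.ImageFolder(root=CHARS_TEST_PATH, transform=char_transform)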
Code example #7
def get_optimizer(params, settings):
    lrs = False
    if settings['optimizer'] == 'SGD':
        optimizer = torch.optim.SGD(params,
                                    lr=settings['lr'],
                                    momentum=settings['momentum'],
                                    weight_decay=settings['wd'])
        lrs = True
    elif settings['optimizer'] == 'Adagrad':
        optimizer = Adagrad(params,
                            lr=settings['lr'],
                            lr_decay=0,
                            weight_decay=settings['wd'],
                            initial_accumulator_value=0,
                            eps=1e-10)
    elif settings['optimizer'] == 'Adadelta':
        optimizer = Adadelta(params,
                             lr=1.0,
                             rho=0.9,
                             eps=1e-06,
                             weight_decay=settings['wd'])
    elif settings['optimizer'] == 'Adam':
        optimizer = Adam(params,
                         lr=settings['lr'],
                         betas=(0.9, 0.999),
                         eps=1e-08,
                         weight_decay=0,
                         amsgrad=False)
        lrs = True
    else:
        print('optimizer name invalid, using default SGD')
        optimizer = torch.optim.SGD(params,
                                    0.005,
                                    momentum=0.9,
                                    weight_decay=0.0005)
    return optimizer, lrs
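The returned lrs flag presumably signals whether the caller should attach a learning-rate scheduler (it is True only for SGD and Adam, whose lr comes from settings; the default SGD fallback leaves it False, which may be an oversight in the original). A hedged caller sketch:

optimizer, lrs = get_optimizer(model.parameters(), settings)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30) if lrs else None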
Code example #8
File: train.py Project: jjjkkkjjj/pytorch.dl
         ]
    )

    train_dataset = datasets.SynthTextRecognitionDataset(transform=transform, target_transform=target_transform, augmentation=augmentation)
    train_loader = DataLoader(train_dataset,
                              batch_size=64,
                              shuffle=True,
                              collate_fn=batch_ind_fn,
                              num_workers=4,
                              pin_memory=True)
    """
    img, text = train_dataset[0]
    import cv2
    cv2.imshow(''.join(text), img)
    cv2.waitKey()
    """
    model = CRNN(class_labels=datasets.Alphanumeric_with_blank_labels, input_shape=(32, None, 1)).cuda()
    model.train()
    print(model)
    """
    img, text = train_dataset[0]
    text = [t.cuda() for t in text]
    p = model(img.unsqueeze(0).cuda(), text)
    """
    optimizer = Adadelta(model.parameters())

    save_manager = SaveManager(modelname='test', interval=1, max_checkpoints=3, plot_interval=1)

    trainer = TrainObjectRecognitionConsoleLogger(CTCLoss(blank=model.blankIndex, zero_infinity=True, reduction='mean'), model, optimizer)
    trainer.train_epoch(save_manager, 1000, train_loader)  # , evaluator=VOC2007Evaluator(val_dataset, iteration_interval=10))
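CTCLoss(blank=model.blankIndex, ...) ties the loss's blank symbol to the model's alphabet. For reference, nn.CTCLoss expects per-timestep log-probabilities of shape (T, N, C) together with per-sample input and target lengths; a self-contained shape check:

import torch
import torch.nn as nn

ctc = nn.CTCLoss(blank=0, zero_infinity=True, reduction='mean')
log_probs = torch.randn(50, 4, 37).log_softmax(2)          # (T, N, C)
targets = torch.randint(1, 37, (4, 10), dtype=torch.long)  # labels exclude the blank index
input_lengths = torch.full((4,), 50, dtype=torch.long)
target_lengths = torch.full((4,), 10, dtype=torch.long)
loss = ctc(log_probs, targets, input_lengths, target_lengths)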
Code example #9
class Trainer:
    def __init__(self, config, dataloader):
        self.batch_size = config.batch_size
        self.config = config
        self.lr = config.lr
        self.epoch = config.epoch
        self.num_epoch = config.num_epoch
        self.checkpoint_dir = config.checkpoint_dir
        self.model_path = config.checkpoint_dir
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        self.data_loader = dataloader
        self.image_len = len(dataloader)
        self.num_classes = config.num_classes
        self.eps = config.eps
        self.rho = config.rho
        self.decay = config.decay
        self.sample_step = config.sample_step
        self.sample_dir = config.sample_dir
        self.gradient_loss_weight = config.gradient_loss_weight
        self.decay_batch_size = config.decay_batch_size

        self.build_model()
        self.optimizer = Adadelta(self.net.parameters(),
                                  lr=self.lr,
                                  eps=self.eps,
                                  rho=self.rho,
                                  weight_decay=self.decay)
        self.lr_scheduler_discriminator = torch.optim.lr_scheduler.LambdaLR(
            self.optimizer,
            LambdaLR(self.num_epoch, self.epoch, len(self.data_loader),
                     self.decay_batch_size).step)

    def build_model(self):
        self.net = MobileHairNet().to(self.device)
        self.load_model()

    def load_model(self):
        print("[*] Load checkpoint in ", str(self.model_path))
        if not os.path.exists(self.model_path):
            os.makedirs(self.model_path)

        if not os.listdir(self.model_path):
            print("[!] No checkpoint in ", str(self.model_path))
            return

        model_path = os.path.join(self.model_path,
                                  f"MobileHairNet_epoch-{self.epoch-1}.pth")
        model = glob(model_path)
        model.sort()
        if not model:
            print(f"[!] No Checkpoint in {model_path}")
            return

        self.net.load_state_dict(
            torch.load(model[-1], map_location=self.device))
        print(f"[*] Load Model from {model[-1]}: ")

    def train(self):
        bce_losses = AverageMeter()
        image_gradient_losses = AverageMeter()
        image_gradient_criterion = ImageGradientLoss().to(self.device)
        bce_criterion = nn.CrossEntropyLoss().to(self.device)

        for epoch in range(self.epoch, self.num_epoch):
            bce_losses.reset()
            image_gradient_losses.reset()
            for step, (image, gray_image, mask) in enumerate(self.data_loader):
                image = image.to(self.device)
                mask = mask.to(self.device)
                gray_image = gray_image.to(self.device)

                pred = self.net(image)

                pred_flat = pred.permute(0, 2, 3, 1).contiguous().view(
                    -1, self.num_classes)
                mask_flat = mask.squeeze(1).view(-1).long()

                # pred_flat.shape: (N*224*224, 2)
                # mask_flat.shape: (N*224*224,)
                image_gradient_loss = image_gradient_criterion(
                    pred, gray_image)
                bce_loss = bce_criterion(pred_flat, mask_flat)

                loss = bce_loss + self.gradient_loss_weight * image_gradient_loss

                self.optimizer.zero_grad()
                loss.backward()
                self.optimizer.step()

                bce_losses.update(bce_loss.item(), self.batch_size)
                image_gradient_losses.update(
                    (self.gradient_loss_weight * image_gradient_loss).item(),
                    self.batch_size)
                iou = iou_loss(pred, mask)

                # save sample images
                if step % 10 == 0:
                    print(
                        f"Epoch: [{epoch}/{self.num_epoch}] | Step: [{step}/{self.image_len}] | "
                        f"Bce Loss: {bce_losses.avg:.4f} | Image Gradient Loss: {image_gradient_losses.avg:.4f} | "
                        f"IOU: {iou:.4f}")
                if step % self.sample_step == 0:
                    self.save_sample_imgs(image[0], mask[0],
                                          torch.argmax(pred[0], 0),
                                          self.sample_dir, epoch, step)
                    print('[*] Saved sample images')

            torch.save(
                self.net.state_dict(),
                f'{self.checkpoint_dir}/MobileHairNet_epoch-{epoch}.pth')

    def save_sample_imgs(self, real_img, real_mask, prediction, save_dir,
                         epoch, step):
        data = [real_img, real_mask, prediction]
        names = ["Image", "Mask", "Prediction"]

        fig = plt.figure()
        for i, d in enumerate(data):
            d = d.squeeze()
            im = d.data.cpu().numpy()

            if i > 0:
                im = np.expand_dims(im, axis=0)
                im = np.concatenate((im, im, im), axis=0)

            im = (im.transpose(1, 2, 0) + 1) / 2

            f = fig.add_subplot(1, 3, i + 1)
            f.imshow(im)
            f.set_title(names[i])
            f.set_xticks([])
            f.set_yticks([])

        p = os.path.join(save_dir, "epoch-%s_step-%s.png" % (epoch, step))
        plt.savefig(p)
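The permute/view pair in train() flattens the (N, C, H, W) prediction into one row per pixel so that nn.CrossEntropyLoss (despite the bce_criterion name) scores each pixel as a two-class sample. A quick shape check:

import torch

pred = torch.randn(8, 2, 224, 224)  # (N, C, H, W)
pred_flat = pred.permute(0, 2, 3, 1).contiguous().view(-1, 2)
print(pred_flat.shape)              # torch.Size([401408, 2]) == (8*224*224, 2)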
Code example #10
def main():
    args = get_arguments()

    # experiment name
    if not args.exp_name:
        args.exp_name = '_'.join([args.dataset, args.model])
    print("# Experiment: ", args.exp_name)

    # output folder
    output_folder = os.path.join(args.output_root, args.dataset, args.exp_name)
    os.makedirs(output_folder, exist_ok=True)
    print("# Output path: ", output_folder)

    # visdom
    global plotter
    if args.use_visdom:
        logging_folder = os.path.join(args.logging_root, args.dataset, args.exp_name)
        os.makedirs(logging_folder, exist_ok=True)
        plotter = utils.VisdomLinePlotter(env_name=args.exp_name, logging_path=os.path.join(logging_folder, 'vis.log'))
        print("# Visdom path: ", logging_folder)

    # dataset
    print("# Load datasets")
    train_datasets, val_datasets, test_datasets = get_datasets(args.dataset, args.dataset_folder, args.batch_size)
    num_classes = train_datasets[0].num_classes
    vocab = set(train_datasets[0].vocab)
    vocab = vocab.union(set(val_datasets[0].vocab))
    vocab = vocab.union(set(test_datasets[0].vocab))

    # pre-trained word2vec
    print("# Load pre-trained word2vec")
    pretrained_word2vec_cache = os.path.join(os.path.dirname(args.w2v_file), args.dataset + '_w2v.pkl')
    if os.path.isfile(pretrained_word2vec_cache):
        with open(pretrained_word2vec_cache, 'rb') as f:
            pretrained_word2vec = pickle.load(f)
    else:
        pretrained_word2vec = PretrainedWord2Vec(vocab, args.w2v_file)
        with open(pretrained_word2vec_cache, 'wb') as f:
            pickle.dump(pretrained_word2vec, f)

    # train
    print("# Start training")
    for cv, (train_dataset, val_dataset, test_dataset) in enumerate(zip(train_datasets, val_datasets, test_datasets)):
        # fix random seed
        utils.fix_random_seed(seed=const.RANDOM_SEED)

        # model
        cnn = get_model(args.model, num_classes, pretrained_word2vec)
        if torch.cuda.is_available():
            cnn.cuda()

        # dataloader
        train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True, collate_fn=sentence_collate_fn)
        val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False, collate_fn=sentence_collate_fn)
        test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False, collate_fn=sentence_collate_fn)

        # optimizer
        optim = Adadelta(cnn.parameters(), rho=0.95, eps=1e-6)

        # criterion
        criterion = CrossEntropyLoss()

        # training
        if plotter:
            plotter.set_cv(cv)
        output_path = os.path.join(output_folder, 'cv_%d_best.pkl' % cv)
        train(args.num_epochs, cnn, train_loader, optim, criterion, val_loader, output_path)

        # evaluation
        utils.load_model(output_path, cnn)
        find_most_similar_words(cnn)
        accuracy = eval(cnn, test_loader)
        print('cross_val:', cv, '\taccuracy:', accuracy)
Code example #11

def weight_init(module):
    class_name = module.__class__.__name__
    if class_name.find('Conv') != -1:
        module.weight.data.normal_(0, 0.02)
    if class_name.find('BatchNorm') != -1:
        module.weight.data.normal_(1, 0.02)
        module.bias.data.fill_(0)


crnn.apply(weight_init)

loss_function = CTCLoss(zero_infinity=True)
loss_function = loss_function.cuda()
optimizer = Adadelta(crnn.parameters())
converter = Converter(option.alphabet)
print_every = 100
total_loss = 0.0


def validation():
    print('start validation...')
    crnn.eval()
    total_loss = 0.0
    n_correct = 0
    for i, (input, label) in enumerate(validationset_dataloader):
        if i == len(validationset_dataloader) - 1:
            continue
        if i == 9:
            break
Code example #12
from torchvision.transforms import *
from torch.optim.adadelta import Adadelta

if __name__ == '__main__':
    transform = Compose([Resize((360, 640)), ToTensor()])
    target_transform = Compose([
        GaussianHeatMap((360, 640), sigma2=10, threshold=128),
        LossHeatMap(256)
    ])

    train_dataset = AllTrackNetTennis(seq_num=3,
                                      transform=transform,
                                      target_transform=target_transform)
    train_loader = DataLoader(train_dataset, batch_size=2, shuffle=True)

    model = TrackNet(image_shape=(360, 640, 3), seq_num=3, batch_norm=True)
    print(model)
    #import torch
    #torch.save(model.state_dict(), './weights/tnet_init.pth')

    loss_func = CrossEntropy()
    optimizer = Adadelta(model.parameters(), lr=1.0)
    trainer = Trainer(model,
                      loss_func=loss_func,
                      optimizer=optimizer,
                      scheduler=None)

    trainer.train(1000,
                  train_loader,
                  savemodelname='tracknet',
                  checkpoints_epoch_interval=50)
Code example #13
class HairSegmentation(object):
    def __init__(self, training_data_path, valid_data_path, test_data_path,
                 resolution, num_classes, decay_epoch, lr, rho, eps, decay,
                 gradient_loss_weight, resume_epochs, log_step, sample_step,
                 num_epochs, batch_size, train_results_dir, valid_results_dir,
                 test_results_dir, model_save_dir, log_dir):

        self.training_data_path = training_data_path
        self.valid_data_path = valid_data_path
        self.test_data_path = test_data_path
        self.resolution = resolution
        self.num_classes = num_classes

        self.decay_epoch = decay_epoch
        self.lr = lr
        self.rho = rho
        self.eps = eps
        self.decay = decay
        self.gradient_loss_weight = gradient_loss_weight

        self.resume_epochs = resume_epochs
        self.log_step = log_step
        self.sample_step = sample_step
        self.num_epochs = num_epochs
        self.batch_size = batch_size

        self.train_results_dir = train_results_dir
        self.valid_results_dir = valid_results_dir
        self.test_results_dir = test_results_dir
        self.model_save_dir = model_save_dir
        self.log_dir = log_dir

        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.colors = [(0, 0, 255), (255, 0, 0), (255, 0, 255), (255, 166, 0),
                       (255, 255, 0), (0, 255, 0), (0, 191, 255),
                       (255, 192, 203)]

        self.create_generator()
        self.build_model()
        self.writer = tensorboardX.SummaryWriter(self.log_dir)

    def create_generator(self):
        self.transform = transforms.Compose([
            transforms.Resize((self.resolution, self.resolution)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
        train_data = Generator(self.training_data_path, 'train',
                               self.resolution)
        self.train_dataloader = DataLoader(train_data,
                                           shuffle=True,
                                           batch_size=self.batch_size,
                                           num_workers=4,
                                           drop_last=True)

        valid_data = Generator(self.valid_data_path, 'valid', self.resolution)
        self.valid_dataloader = DataLoader(valid_data,
                                           shuffle=True,
                                           batch_size=self.batch_size,
                                           num_workers=4,
                                           drop_last=True)

        test_data = Generator(self.test_data_path, 'test', self.resolution)
        self.test_dataloader = DataLoader(test_data,
                                          shuffle=True,
                                          batch_size=self.batch_size,
                                          num_workers=4,
                                          drop_last=True)

    def build_model(self):
        self.net = HairMatteNet()
        self.net.to(self.device)
        self.optimizer = Adadelta(self.net.parameters(),
                                  lr=self.lr,
                                  eps=self.eps,
                                  rho=self.rho,
                                  weight_decay=self.decay)

    def restore_model(self, resume_epochs):
        print('Loading the trained models from epoch {}...'.format(
            resume_epochs))
        net_path = os.path.join(
            self.model_save_dir,
            '{}_epoch-HairMatteNet.ckpt'.format(resume_epochs))
        self.net.load_state_dict(
            torch.load(net_path, map_location=lambda storage, loc: storage))

    def train_epoch(self, epoch, start_time):
        self.net.train()
        for i, data in enumerate(self.train_dataloader, 0):
            image = data[0].to(self.device)
            gray_image = data[1].to(self.device)
            mask = data[2].to(self.device)

            pred = self.net(image)

            pred_flat = pred.permute(0, 2, 3, 1).contiguous().view(
                -1, self.num_classes)
            mask_flat = mask.squeeze(1).view(-1).long()

            image_gradient_loss = self.image_gradient_criterion(
                pred, gray_image)
            bce_loss = self.bce_criterion(pred_flat, mask_flat)
            loss = bce_loss + self.gradient_loss_weight * image_gradient_loss

            iou = iou_metric(pred, mask)

            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()

            losses = {}
            losses['train_bce_loss'] = bce_loss.item()
            losses['train_image_gradient_loss'] = (
                self.gradient_loss_weight * image_gradient_loss).item()
            losses['train_loss'] = loss.item()
            losses['train_iou'] = iou

            if (i + 1) % self.log_step == 0:
                et = time.time() - start_time
                et = str(datetime.timedelta(seconds=et))[:-7]
                log = "Elapsed [{}], Iteration [{}/{}], Epoch [{}/{}]".format(
                    et, i + 1, len(self.train_dataloader), epoch,
                    self.num_epochs)
                for tag, value in losses.items():
                    log += ", {}: {:.4f}".format(tag, value)
                    self.writer.add_scalar(
                        tag, value,
                        epoch * len(self.train_dataloader) + i + 1)
                print(log)

            if (i + 1) % self.sample_step == 0:
                with torch.no_grad():
                    out_results = []
                    for j in range(10):
                        out_results.append(denorm(image[j:j + 1]).data.cpu())
                        out_results.append(
                            mask.expand(-1, 3, -1, -1)[j:j + 1].data.cpu())
                        out_results.append(
                            torch.argmax(pred[j:j + 1],
                                         1).unsqueeze(0).expand(-1, 3, -1,
                                                                -1).data.cpu())

                        color = random.choice(self.colors)

                        result = dye_hair(denorm(image[j:j + 1]),
                                          mask[j:j + 1], color)
                        result = self.transform(
                            Image.fromarray(result)).unsqueeze(0)
                        out_results.append(denorm(result))

                        result = dye_hair(
                            denorm(image[j:j + 1]),
                            torch.argmax(pred[j:j + 1], 1).unsqueeze(0), color)
                        result = self.transform(
                            Image.fromarray(result)).unsqueeze(0)
                        out_results.append(denorm(result))

                    results_concat = torch.cat(out_results)
                    results_path = os.path.join(
                        self.train_results_dir,
                        '{}_epoch_train_results.jpg'.format(epoch))
                    save_image(results_concat, results_path, nrow=5, padding=0)
                    print('Saved real and fake images into {}...'.format(
                        results_path))

        if (epoch + 1) % 2 == 0:
            net_path = os.path.join(self.model_save_dir,
                                    '{}_epoch-HairMatteNet.ckpt'.format(epoch))
            torch.save(self.net.state_dict(), net_path)
            print('Saved model checkpoints into {}...'.format(
                self.model_save_dir))

    def valid_epoch(self, epoch):
        self.net.eval()
        losses = {
            'valid_bce_loss': 0,
            'valid_image_gradient_loss': 0,
            'valid_loss': 0,
            'valid_iou': 0
        }
        for i, data in enumerate(self.valid_dataloader, 0):
            image = data[0].to(self.device)
            gray_image = data[1].to(self.device)
            mask = data[2].to(self.device)

            with torch.no_grad():
                pred = self.net(image)

            pred_flat = pred.permute(0, 2, 3, 1).contiguous().view(
                -1, self.num_classes)
            mask_flat = mask.squeeze(1).view(-1).long()

            image_gradient_loss = self.image_gradient_criterion(
                pred, gray_image)
            bce_loss = self.bce_criterion(pred_flat, mask_flat)
            loss = bce_loss + self.gradient_loss_weight * image_gradient_loss

            iou = iou_metric(pred, mask)

            losses['valid_bce_loss'] += bce_loss.item()
            losses['valid_image_gradient_loss'] += (
                self.gradient_loss_weight * image_gradient_loss).item()
            losses['valid_loss'] += loss.item()
            losses['valid_iou'] += iou

            if i == 0:
                with torch.no_grad():
                    out_results = []
                    for j in range(10):
                        out_results.append(denorm(image[j:j + 1]).data.cpu())
                        out_results.append(
                            mask.expand(-1, 3, -1, -1)[j:j + 1].data.cpu())
                        out_results.append(
                            torch.argmax(pred[j:j + 1],
                                         1).unsqueeze(0).expand(-1, 3, -1,
                                                                -1).data.cpu())

                        color = random.choice(self.colors)

                        result = dye_hair(denorm(image[j:j + 1]),
                                          mask[j:j + 1], color)
                        result = self.transform(
                            Image.fromarray(result)).unsqueeze(0)
                        out_results.append(denorm(result))

                        result = dye_hair(
                            denorm(image[j:j + 1]),
                            torch.argmax(pred[j:j + 1], 1).unsqueeze(0), color)
                        result = self.transform(
                            Image.fromarray(result)).unsqueeze(0)
                        out_results.append(denorm(result))

                    results_concat = torch.cat(out_results)
                    results_path = os.path.join(
                        self.valid_results_dir,
                        '{}_epoch_valid_results.jpg'.format(epoch))
                    save_image(results_concat, results_path, nrow=5, padding=0)
                    print('Saved real and fake images into {}...'.format(
                        results_path))

        # i ends at the last batch index, so i + 1 is the batch count
        losses['valid_bce_loss'] /= (i + 1)
        losses['valid_image_gradient_loss'] /= (i + 1)
        losses['valid_loss'] /= (i + 1)
        losses['valid_iou'] /= (i + 1)

        log = "Eval ========================= Epoch [{}/{}]".format(
            epoch, self.num_epochs)
        for tag, value in losses.items():
            log += ", {}: {:.4f}".format(tag, value)
            self.writer.add_scalar(
                tag, value,
                epoch * len(self.train_dataloader) +
                len(self.train_dataloader) + 1)
        print(log)

    def train(self):
        if self.resume_epochs != 0:
            self.restore_model(self.resume_epochs)
            self.resume_epochs += 1

        self.image_gradient_criterion = ImageGradientLoss().to(self.device)
        self.bce_criterion = nn.CrossEntropyLoss().to(self.device)

        start_time = time.time()
        for epoch in range(self.resume_epochs, self.num_epochs, 1):
            self.train_epoch(epoch, start_time)
            self.valid_epoch(epoch)

        self.writer.close()

    def test(self):
        self.restore_model(self.resume_epochs)
        self.net.eval()

        metrics = {'iou': 0, 'f1_score': 0, 'acc': 0}
        for i, data in enumerate(self.test_dataloader, 0):
            image = data[0].to(self.device)
            mask = data[2].to(self.device)

            with torch.no_grad():
                pred = self.net(image)

            metrics['iou'] += iou_metric(pred, mask)
            metrics['f1_score'] += F1_metric(pred, mask)
            metrics['acc'] += acc_metric(pred, mask)

            if i == 0:
                with torch.no_grad():
                    out_results = []
                    for j in range(10):
                        out_results.append(denorm(image[j:j + 1]).data.cpu())
                        out_results.append(
                            mask.expand(-1, 3, -1, -1)[j:j + 1].data.cpu())
                        out_results.append(
                            torch.argmax(pred[j:j + 1],
                                         1).unsqueeze(0).expand(-1, 3, -1,
                                                                -1).data.cpu())

                        color = random.choice(self.colors)

                        result = dye_hair(denorm(image[j:j + 1]),
                                          mask[j:j + 1], color)
                        result = self.transform(
                            Image.fromarray(result)).unsqueeze(0)
                        out_results.append(denorm(result))

                        result = dye_hair(
                            denorm(image[j:j + 1]),
                            torch.argmax(pred[j:j + 1], 1).unsqueeze(0), color)
                        result = self.transform(
                            Image.fromarray(result)).unsqueeze(0)
                        out_results.append(denorm(result))

                    results_concat = torch.cat(out_results)
                    results_path = os.path.join(
                        self.test_results_dir,
                        '{}_epoch_test_results.jpg'.format(self.resume_epochs))
                    save_image(results_concat, results_path, nrow=5, padding=0)
                    print('Saved real and fake images into {}...'.format(
                        results_path))

        # i ends at the last batch index, so i + 1 is the batch count
        metrics['iou'] /= (i + 1)
        metrics['f1_score'] /= (i + 1)
        metrics['acc'] /= (i + 1)

        log = "Average metrics, Epoch {}".format(self.resume_epochs)
        for tag, value in metrics.items():
            log += ", {}: {:.4f}".format(tag, value)
        print(log)
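The denorm helper used throughout is not shown; given the Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) transform above, it presumably inverts that mapping from [-1, 1] back to [0, 1], roughly:

def denorm(x):
    # Assumed inverse of Normalize(mean=0.5, std=0.5): [-1, 1] -> [0, 1]
    return ((x + 1) / 2).clamp(0, 1)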
Code example #14
def evaluate(individual):
    torch.cuda.empty_cache()
    decoded_chromosome = individual.decode_chromosome()
    try:
        model = ConvNet(decoded_chromosome[1:])
        summary(model, input_size=(3, 64, 64), device="cpu")
    except ValueError as e:
        if str(e) == "Bad Network":
            return None, None

    transformations = {
        'train':
        transforms.Compose([
            transforms.RandomHorizontalFlip(),
            # transforms.RandomCrop(32, padding=4),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]),
        'val':
        transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ]),
        'test':
        transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])
    }

    data_dir = "data"

    image_datasets = {
        x: datasets.ImageFolder(os.path.join(data_dir, x), transformations[x])
        for x in ['train', 'val', 'test']
    }
    dataloaders = {
        x: DataLoader(image_datasets[x], batch_size=32, shuffle=True)
        for x in ['train', 'val', 'test']
    }
    dataset_sizes = {
        x: len(image_datasets[x])
        for x in ['train', 'val', 'test']
    }

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    optimizer_name = decoded_chromosome[0]

    optimizer = None
    if optimizer_name == "adam":
        optimizer = optim.Adam(model.parameters())
    elif optimizer_name == "rmsprop":
        optimizer = RMSprop(model.parameters())
    elif optimizer_name == "adagrad":
        optimizer = Adagrad(model.parameters())
    elif optimizer_name == "adadelta":
        optimizer = Adadelta(model.parameters())

    criterion = nn.CrossEntropyLoss()

    now = datetime.now()
    model_name = now.strftime("%d%m%Y%H%M%S")

    # hl.build_graph(model, torch.zeros([1, 3, 64, 64]).to(device))

    return model_name, 1 / train_model(model_name,
                                       model,
                                       dataloaders,
                                       dataset_sizes,
                                       criterion,
                                       optimizer,
                                       num_epochs=10)
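If decoded_chromosome[0] ever holds a name outside the four handled cases, optimizer stays None and train_model fails much later with an opaque error; a defensive guard placed right after the if/elif chain (an addition, not in the original) would surface it at the source:

    if optimizer is None:
        raise ValueError("unknown optimizer gene: {!r}".format(optimizer_name))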
Code example #15
    valid_loader, train_loader_ = load_dataset(data_files[1:2], batch_size,
                                               0.2)

    train_loader = data.DataLoader(data.ConcatDataset(
        [train_loader.dataset, train_loader_.dataset]),
                                   batch_size,
                                   shuffle=True)
    dataloader = {'train': train_loader, 'valid': valid_loader}
    del train_loader_

    # MODEL.
    graph_args = {'strategy': 'spatial'}
    model = TwoStreamSpatialTemporalGraph(graph_args, num_class).to(device)

    #optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    optimizer = Adadelta(model.parameters())

    losser = torch.nn.BCELoss()

    # TRAINING.
    loss_list = {'train': [], 'valid': []}
    accu_list = {'train': [], 'valid': []}
    for e in range(epochs):
        print('Epoch {}/{}'.format(e, epochs - 1))
        for phase in ['train', 'valid']:
            if phase == 'train':
                model = set_training(model, True)
            else:
                model = set_training(model, False)

            run_loss = 0.0
Code example #16
File: train.py Project: mengxiangke/SiaStegNet
else:
    raise NotImplementedError
if args.finetune is not None:
    net.load_state_dict(torch.load(args.finetune)['state_dict'], strict=False)

criterion_1 = nn.CrossEntropyLoss()
criterion_2 = src.models.ContrastiveLoss(margin=args.margin)

if args.cuda:
    net.cuda()
    criterion_1.cuda()
    criterion_2.cuda()

optimizer = Adamax(net.parameters(), lr=args.lr, eps=args.eps, weight_decay=args.wd)
if args.model == 'sid':
    optimizer = Adadelta(net.parameters(), lr=args.lr, eps=args.eps, weight_decay=args.wd)

lr_str = args.lr_str
if lr_str == 1:
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=200, gamma=0.1)
elif lr_str == 2:
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[300, 400],
                                                     gamma=0.1)  # milestones=[900,975]
elif lr_str == 3:
    scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.9)
elif lr_str == 4:
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=100)
elif lr_str == 5:
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', factor=0.3,
                                                           patience=10, verbose=True, min_lr=0,
                                                           eps=1e-08)
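Note that ReduceLROnPlateau (lr_str == 5) is stepped differently from the other schedulers: it must be passed the monitored metric, here maximized ('max'). A hedged sketch of the per-epoch call, assuming a val_accuracy variable:

if isinstance(scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):
    scheduler.step(val_accuracy)  # metric-driven
else:
    scheduler.step()              # counter-driven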