Example #1
    def run(self, dataloader):

        self.on_epoch_start()

        logs = {}
        loss_meter = AverageValueMeter()
        metrics_meters = {metric.__name__: AverageValueMeter() for metric in self.metrics}

        with tqdm(dataloader, desc=self.stage_name, file=sys.stdout, disable=not (self.verbose)) as iterator:
            for x, y in iterator:
                x, y = x.to(self.device), y.to(self.device)
                loss, y_pred = self.batch_update(x, y)

                # update loss logs
                loss_value = loss.cpu().detach().numpy()
                loss_meter.add(loss_value)
                loss_logs = {self.loss.__name__: loss_meter.mean}
                logs.update(loss_logs)

                # update metrics logs
                for metric_fn in self.metrics:
                    metric_value = metric_fn(y_pred, y).cpu().detach().numpy()
                    metrics_meters[metric_fn.__name__].add(metric_value)
                metrics_logs = {k: v.mean for k, v in metrics_meters.items()}
                logs.update(metrics_logs)

                if self.verbose:
                    s = self._format_logs(logs)
                    iterator.set_postfix_str(s)

        return logs
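All of these examples share the same torchnet meter pattern: construct an AverageValueMeter, reset() it at the start of an epoch, add() one scalar per batch, and read the running average back through value()[0] (or the .mean attribute, as this runner does). Below is a minimal, self-contained sketch of that pattern, assuming only that torchnet is installed; the per-batch loss values are invented purely for illustration.

# Minimal sketch of the AverageValueMeter pattern used throughout these examples.
from torchnet.meter import AverageValueMeter

loss_meter = AverageValueMeter()
loss_meter.reset()                       # clear the running statistics at epoch start

for fake_loss in [0.9, 0.7, 0.5, 0.4]:   # stand-ins for per-batch loss.item() values
    loss_meter.add(fake_loss)            # accumulate one scalar per batch

mean, std = loss_meter.value()           # running mean and standard deviation
print('mean loss: %.4f (std %.4f)' % (mean, std))
print('same mean via the attribute: %.4f' % loss_meter.mean)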
Example #2
    def train_per_loader(self, trainloader):
        self.network.train()
        loss_meter = AverageValueMeter()
        for idx, (img, label) in enumerate(trainloader):
            baselr, finetunelr = self.adjust_learning_rate()
            img = img.to(self.device)
            label = label.to(self.device)
            if len(label.shape) == 5:
                label = label.view(-1, *(label.shape[2:]))
            output = self.network(img)
            loss = self.criterion(output, label)
            self.optim.zero_grad()
            loss.backward()
            self.optim.step()
            loss_meter.add(float(loss))

            if self.engine.local_rank == 0:
                if self.sche.cur_batches % config.log_inteval == 0:
                    self.logger.info(
                        "%s-epoch:%d/%d batch:%d/%d loss:%.4f base_lr:%e finetune_lr:%e"
                        % (self.tag_dir.split("/")[-1], self.sche.cur_epoch,
                           self.sche.total_epoch,
                           self.sche.cur_batches, self.sche.batches_per_epoch,
                           loss_meter.value()[0], baselr, finetunelr))
            self.sche.update()

        return loss_meter.value()[0]
Example #3
def val(dataloader, net):
    avg_acc=AverageValueMeter()
    avg_acc.reset()
    y_true =[]
    y_predict=[]
    y_predict_proba=[]
    net.eval()
    with t.no_grad():
        for i,(data,target) in enumerate(dataloader):
            data=data.type(t.FloatTensor)
            data = data.cuda()
            target = target.cuda()
            output = net(data)
            decision = output.max(1)[1]
            y_predict.extend(decision.cpu().numpy().tolist())
            proba = F.softmax(output,dim=1)[:,1]
            y_predict_proba.extend(proba.cpu().numpy().tolist())
            y_true.extend(target.cpu().numpy().tolist())
            acc = (decision == target).sum().item() / float(len(target))
            avg_acc.add(acc)
    avg_auc = roc_auc_score(y_true,y_predict_proba)

    cnf_matrix = confusion_matrix(y_true, y_predict)
    np.set_printoptions(precision=2)
    # print(avg_auc)
    net.train()
    return avg_acc.value()[0],avg_auc
Example #4
def train():
    vis = Visualizer(server='http://turing.livia.etsmtl.ca', env='EEG')
    data_root = '/home/AN96120/python_project/Seizure Prediction/processed_data/fft_meanlog_std_lowcut0.1highcut180nfreq_bands12win_length_sec60stride_sec60/Dog_1'
    dataloader_train = get_dataloader(data_root, training=True)
    dataloader_test = get_dataloader(data_root, training=False)
    # No interaction has been found in the training and testing dataset.
    weights = t.Tensor([1 / (np.array(dataloader_train.dataset.targets) == 0).mean(),
                        1 / (np.array(dataloader_train.dataset.targets) == 1).mean()])
    criterion = nn.CrossEntropyLoss(weight=weights.cuda())

    net = convNet ()
    net.cuda()

    optimiser = t.optim.Adam(net.parameters(),lr= 1e-4,weight_decay=1e-4)
    loss_avg = AverageValueMeter()
    epochs = 10000
    for epoch in range(epochs):
        loss_avg.reset()
        for ii, (data, targets) in enumerate(dataloader_train):
            data, targets= data.type(t.FloatTensor), targets.type(t.LongTensor)
            data = data.cuda()
            targets = targets.cuda()
            optimiser.zero_grad()
            output = net(data)
            loss = criterion(output,targets)
            loss_avg.add(loss.item())
            loss.backward()
            optimiser.step()
        vis.plot('loss',loss_avg.value()[0])

        _,auc_train=val(dataloader_train,net)
        _, auc_test =val(dataloader_test,net)
        print(auc_train,auc_test)
Example #5
def val(val_dataloader, network, save=False):
    # network.eval()
    dice_meter_b = AverageValueMeter()
    dice_meter_f = AverageValueMeter()

    dice_meter_b.reset()
    dice_meter_f.reset()

    images = []
    with torch.no_grad():
        for i, (image, mask, _, _) in enumerate(val_dataloader):
            if mask.sum() == 0:
                continue
            image, mask = image.to(device), mask.to(device)

            proba = F.softmax(network(image), dim=1)
            predicted_mask = proba.max(1)[1]
            iou = dice_loss(predicted_mask, mask)

            dice_meter_f.add(iou[1])
            dice_meter_b.add(iou[0])

            if save:
                images = save_images(images, image, mask, proba[:, 1],
                                     predicted_mask)
    if save:
        grid = make_grid(images, nrow=4)
        return [[dice_meter_b.value()[0], dice_meter_f.value()[0]], grid]
    else:
        return [[dice_meter_b.value()[0], dice_meter_f.value()[0]], None]
Example #6
def val():
    global highest_dice_loss
    dice_loss_meter = AverageValueMeter()
    dice_loss_meter.reset()
    for i, (img, mask, weak_mask, _) in enumerate(val_loader):
        if (weak_mask.sum() <= 3) or (mask.sum() <= 10):
            # print('No mask has been found')
            continue
        if not ((list(img.shape[-2:]) == list(mask.shape[-2:])) and (
                list(img.shape[-2:]) == list(weak_mask.shape[-2:]))):
            continue
        img, mask, weak_mask = img.cuda(), mask.cuda(), weak_mask.cuda()

        predict_ = F.softmax(net(img), dim=1)
        segm = pred2segmentation(predict_)
        diceloss_F = dice_loss(segm, mask)
        diceloss_B = dice_loss(1 - segm, 1 - mask)
        dice_loss_meter.add((diceloss_F + diceloss_B).item() / 2)

        if i % 100 == 0:
            board_val_image.image(img[0], 'medical image')
            board_val_image.image(color_transform(weak_mask[0]), 'weak_mask')
            board_val_image.image(color_transform(segm[0]), 'prediction')
    board_loss.plot('dice_loss for validationset', dice_loss_meter.value()[0])

    if dice_loss_meter.value()[0] > highest_dice_loss:
        highest_dice_loss = dice_loss_meter.value()[0]
        torch.save(net.state_dict(), 'Enet_Square_barrier.pth')
        print('saved with dice:%f' % highest_dice_loss)
Example #7
def pretrain(dataloader, network, path=None):
    class config:
        lr = 1e-3
        epochs = 100
        path = '../checkpoint/pretrained_net.pth'

    pretrain_config = config()
    if path:
        pretrain_config.path = path
    network.to(device)
    criterion_ = CrossEntropyLoss2d()
    optimiser_ = torch.optim.Adam(network.parameters(), pretrain_config.lr)
    loss_meter = AverageValueMeter()
    for epoch in range(pretrain_config.epochs):
        loss_meter.reset()

        for i, (img, mask, weak_mask, _) in tqdm(enumerate(dataloader)):
            img, mask = img.to(device), mask.to(device)
            optimiser_.zero_grad()
            output = network(img)
            loss = criterion_(output, mask.squeeze(1))
            loss.backward()
            optimiser_.step()
            loss_meter.add(loss.item())

        # import ipdb
        # ipdb.set_trace()
        print(loss_meter.value()[0])
        torch.save(network.state_dict(), pretrain_config.path)
        # torch.save(network.parameters(),path)
        print('pretrained model saved.')
Example #8
def evaluate_iou(val_dataloader, network, save=False):
    network.eval()
    b_dice_meter = AverageValueMeter()
    f_dice_meter = AverageValueMeter()
    with torch.no_grad():
        images = []
        for i, (image, mask, weak_mask, pathname) in enumerate(val_dataloader):
            if mask.sum() == 0 or weak_mask.sum() == 0:
                continue
            image, mask, weak_mask = image.to(device), mask.to(
                device), weak_mask.to(device)
            proba = F.softmax(network(image), dim=1)
            predicted_mask = proba.max(1)[1]
            [b_iou, f_iou] = dice_loss(predicted_mask, mask)
            b_dice_meter.add(b_iou)
            f_dice_meter.add(f_iou)
            if save:
                images = save_images(images, image, proba, mask, weak_mask)

    network.train()
    if save:
        grid = make_grid(images, nrow=4)
        return [[b_dice_meter.value()[0], f_dice_meter.value()[0]], grid]
    else:
        return [[b_dice_meter.value()[0], f_dice_meter.value()[0]], None]
Example #9
def val(net, dataloader_):
    global highest_iou
    net.eval()
    iou_meter_val = AverageValueMeter()
    loss_meter_val = AverageValueMeter()
    iou_meter_val.reset()
    for i, (img, mask, _) in tqdm(enumerate(dataloader_)):
        (img, mask) = (img.cuda(), mask.cuda()) if (torch.cuda.is_available()
                                                    and use_cuda) else (img,
                                                                        mask)
        pred_val = net(img)
        loss_val = criterion(pred_val, mask.squeeze(1))
        loss_meter_val.add(loss_val.item())
        iou_val = iou_loss(pred2segmentation(pred_val),
                           mask.squeeze(1).float(), class_number)[1]
        iou_meter_val.add(iou_val)
        if i % val_print_frequncy == 0:
            showImages(board_val_image, img, mask, pred2segmentation(pred_val))

    board_loss.plot('val_iou_per_epoch', iou_meter_val.value()[0])
    board_loss.plot('val_loss_per_epoch', loss_meter_val.value()[0])
    net.train()
    if highest_iou < iou_meter_val.value()[0]:
        highest_iou = iou_meter_val.value()[0]
        torch.save(
            net.state_dict(), 'checkpoint/modified_ENet_%.3f_%s.pth' %
            (iou_meter_val.value()[0], 'equal_' + str(Equalize)))
        print('The highest IOU is:%.3f' % iou_meter_val.value()[0],
              'Model saved.')
Example #10
def train():
    totalloss_meter = AverageValueMeter()
    sizeloss_meter = AverageValueMeter()
    celoss_meter = AverageValueMeter()

    for epoch in range(max_epoch):
        totalloss_meter.reset()
        celoss_meter.reset()
        sizeloss_meter.reset()
        if epoch % 5 == 0:
            for param_group in optimiser.param_groups:
                param_group['lr'] = lr * (0.9 ** (epoch // 3))
                print('learning rate:', param_group['lr'])
            print('save model:')
            # torch.save(net.state_dict(), 'U_net_2Class.pth')

        for i, (img, mask, weak_mask, _) in tqdm(enumerate(train_loader)):
            if (weak_mask.sum() == 0) or (mask.sum() == 0):
                # print('No mask has been found')
                continue
            if not ((list(img.shape[-2:]) == list(mask.shape[-2:])) and (
                    list(img.shape[-2:]) == list(weak_mask.shape[-2:]))):
                continue
            img, mask, weak_mask = img.cuda(), mask.cuda(), weak_mask.cuda()
            optimiser.zero_grad()
            predict = net(img)
            loss_ce = partialCECriterion(predict, weak_mask.squeeze(1))
            # loss_ce = torch.Tensor([0]).cuda()
            # celoss_meter.add(loss_ce.item())
            loss_size = sizeCriterion(predict)
            # loss_size = torch.Tensor([0]).cuda()
            sizeloss_meter.add(loss_size.item())
            loss = loss_ce + loss_size
            totalloss_meter.add(loss.item())
            loss.backward()
            torch.nn.utils.clip_grad_norm_(net.parameters(), 1e-4)
            optimiser.step()
            if i % 50 == 0:
                predict_ = F.softmax(predict, dim=1)
                segm = pred2segmentation(predict)
                print("ce_loss:%.4f,  size_loss:%.4f, FB percentage:%.2f" % (
                    loss_ce.item(), loss_size.item(),
                    ((predict_[:, 1, :, :] * weak_mask.data.float()).sum()
                     / weak_mask.data.float().sum()).item()))
                board_train_image.image(img[0], 'medical image')
                board_train_image.image(color_transform(mask[0]), 'mask')
                board_train_image.image(color_transform(weak_mask[0]), 'weak_mask')
                board_train_image.image(color_transform(segm[0]), 'prediction')
                if totalloss_meter.value()[0] < 1:
                    board_loss.plot('ce_loss', -np.log(loss_ce.item() + 1e-6))
                    board_loss.plot('size_loss', -np.log(loss_size.item() + 1e-6))
                    # board_loss.plot('size_loss', -np.log(sizeloss_meter.value()[0]))
        # print('train loss:%.5f'%celoss_meter.value()[0])
        val()
Example #11
    def run(self, dataloader):
        global exp_writer

        self.on_epoch_start()

        logs = {}
        loss_meter = AverageValueMeter()
        metrics_meters = {
            metric.__name__: AverageValueMeter()
            for metric in self.metrics
        }

        with tqdm(dataloader,
                  desc=self.stage_name,
                  file=sys.stdout,
                  disable=not (self.verbose)) as iterator:
            for x, y in iterator:
                x, y = x.to(self.device), y.to(self.device)
                loss, y_pred = self.batch_update(x, y)

                if self.cnt % 30 == 0:
                    exp_writer.write_image_to_tensorboard(
                        x.detach().cpu(),
                        y.detach().cpu().numpy(),
                        y_pred.detach().cpu())

                # update loss logs
                loss_value = loss.cpu().detach().numpy()
                loss_meter.add(loss_value)
                loss_logs = {self.loss.__name__: loss_meter.mean}
                logs.update(loss_logs)

                exp_writer.sw.add_scalar(
                    self.stage_name + '/loss_' + self.loss.__name__,
                    loss_value, self.cnt)

                # update metrics logs
                for metric_fn in self.metrics:
                    metric_value = metric_fn(y_pred, y).cpu().detach().numpy()

                    exp_writer.sw.add_scalar(
                        self.stage_name + '/metric_' + metric_fn.__name__,
                        metric_value, self.cnt)

                    metrics_meters[metric_fn.__name__].add(metric_value)
                metrics_logs = {k: v.mean for k, v in metrics_meters.items()}

                logs.update(metrics_logs)

                if self.verbose:
                    s = self._format_logs(logs)
                    iterator.set_postfix_str(s)

                self.cnt += 1

        return logs
Example #12
def validate(val_loader, model, criterion, opt):
    data_time = TimeMeter(unit=1)
    losses = AverageValueMeter()
    errors = ClassErrorMeter(topk=[1])
    # switch to evaluate mode
    if isinstance(model, list):
        for m in model:
            m.eval()
    else:
        model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (win_past, inp_pred, labels, min_values) in enumerate(val_loader):
            win_past = win_past.cuda(opt['g'], non_blocking=True)
            inp_pred = inp_pred.cuda(opt['g'], non_blocking=True)
            labels = labels.cuda(opt['g'], non_blocking=True)
            min_values = min_values.cuda(opt['g'], non_blocking=True)

            # compute output
            yh_cl, yh_reg = model(win_past, inp_pred)

            # classification loss plus weighted regression loss
            loss = criterion[0](yh_cl, labels)
            loss = loss + opt['lambda'] * criterion[1](yh_reg, min_values)

            # measure accuracy and record loss
            errors.add(yh_cl, labels)
            losses.add(loss.item())

            loss = losses.value()[0]
            top1 = errors.value()[0]

            # if i % opt['print_freq'] == 0:
            #     print('[{0}/{1}]\t'
            #           'Time {time:.3f}\t'
            #           'Loss {loss:.4f}\t'
            #           'Err@1 {top1:.3f}\t'
            #           'Err@5 {top5:.3f}'.format(
            #            i, 
            #            len(val_loader),
            #            time=data_time.value(), loss=loss, 
            #            top1=top1, top5=top5))

        print('Loss {loss:.4f}'
              ' * Err@1 {top1:.3f}\t'
              .format(loss=loss, top1=top1))
    stats = {'loss': loss, 'top1': top1}
    ctx.metrics = stats
    return stats
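Example #12 pairs AverageValueMeter with torchnet's ClassErrorMeter, which keeps a running top-k error instead of a running mean. Below is a small hedged sketch of that pairing with made-up logits and labels; it assumes the standard torchnet API in which ClassErrorMeter.add takes an N x K score tensor plus an N-vector of target indices, and value()[0] returns the top-1 error in percent.

# Sketch: running loss (AverageValueMeter) next to running top-1 error (ClassErrorMeter).
import torch
from torchnet.meter import AverageValueMeter, ClassErrorMeter

losses = AverageValueMeter()
errors = ClassErrorMeter(topk=[1])

scores = torch.tensor([[2.0, 0.1], [0.3, 1.5], [0.2, 0.9]])  # invented N x K logits
labels = torch.tensor([0, 1, 0])                             # invented targets
loss = torch.nn.functional.cross_entropy(scores, labels)

losses.add(loss.item())
errors.add(scores, labels)

print('loss %.4f  top-1 error %.2f%%' % (losses.value()[0], errors.value()[0]))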
Example #13
def val(val_dataloader, network):
    network.eval()
    dice_meter = AverageValueMeter()
    dice_meter.reset()
    for i, (image, mask, _, _) in enumerate(val_dataloader):
        image, mask = image.to(device), mask.to(device)
        proba = F.softmax(network(image), dim=1)
        predicted_mask = proba.max(1)[1]
        iou = dice_loss(predicted_mask, mask).item()
        dice_meter.add(iou)
    print('val iou:  %.6f' % dice_meter.value()[0])
    return dice_meter.value()[0]
Example #14
class CalculateLossCallback(TrainingCallback):
    def __init__(self, key):
        self.key = key
        self.average_value_meter = AverageValueMeter()

    def on_mode_begin(self, mode, log):
        self.average_value_meter.reset()
        log[self.key] = float('NaN')

    def on_batch_end(self, batch, log):
        batch_size = log['batch_size']
        self.average_value_meter.add(log['loss'] * batch_size, batch_size)
        log[self.key] = self.average_value_meter.value()[0]
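The callback above calls add(log['loss'] * batch_size, batch_size), so the logged value is an average over samples rather than over batches, which matters when the last batch is smaller than the rest. Below is a short sketch of that weighting with invented (loss, batch_size) pairs, assuming torchnet's add(value, n) semantics of accumulating value into the running sum and n into the count.

# Sketch: weighting add() by batch size yields a per-sample mean.
from torchnet.meter import AverageValueMeter

meter = AverageValueMeter()
for batch_loss, batch_size in [(0.8, 32), (0.6, 32), (0.9, 7)]:  # last batch is smaller
    meter.add(batch_loss * batch_size, batch_size)

# value()[0] == sum(loss_i * n_i) / sum(n_i): the average over the 71 samples,
# not the plain average of the three batch losses.
print('per-sample mean loss: %.4f' % meter.value()[0])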
Example #15
def evaluate(net, dataloader, device):
    net.eval()
    dice_meter = AverageValueMeter()
    dice_meter.reset()
    with torch.no_grad():
        for i, (img, mask, path) in enumerate(dataloader):
            img, mask = img.to(device), mask.to(device)
            pred = net(img)
            pred_mask = pred2segmentation(pred)
            dice_meter.add(dice_loss(pred_mask, mask))

    net.train()
    return dice_meter.value()[0]
Example #16
def train(mode='CL'):
    model = dccrn(mode)
    model.to(opt.device)

    train_data = THCHS30(phase='train')
    train_loader = DataLoader(train_data,
                              batch_size=opt.batch_size,
                              num_workers=opt.num_workers,
                              shuffle=True)

    optimizer = Adam(model.parameters(), lr=opt.lr)
    scheduler = MultiStepLR(optimizer,
                            milestones=[
                                int(opt.max_epoch * 0.5),
                                int(opt.max_epoch * 0.7),
                                int(opt.max_epoch * 0.9)
                            ],
                            gamma=opt.lr_decay)
    criterion = SISNRLoss()

    loss_meter = AverageValueMeter()

    for epoch in range(0, opt.max_epoch):
        loss_meter.reset()
        for i, (data, label) in enumerate(train_loader):
            data = data.to(opt.device)
            label = label.to(opt.device)

            spec, wav = model(data)

            optimizer.zero_grad()
            loss = criterion(wav, label)
            loss.backward()
            optimizer.step()

            loss_meter.add(loss.item())

            if (i + 1) % opt.verbose_inter == 0:
                print('epoch', epoch + 1, 'batch', i + 1, 'SI-SNR',
                      -loss_meter.value()[0])
        if (epoch + 1) % opt.save_inter == 0:
            print('save model at epoch {0} ...'.format(epoch + 1))
            save_path = os.path.join(
                opt.checkpoint_root,
                'DCCRN_{0}_{1}.pth'.format(mode, epoch + 1))
            torch.save(model.state_dict(), save_path)

        scheduler.step()

    save_path = os.path.join(opt.checkpoint_root, 'DCCRN_{0}.pth'.format(mode))
    torch.save(model.state_dict(), save_path)
Example #17
def val(dataloader,net):
    net.eval()
    acc = AverageValueMeter()
    acc.reset()
    for i, (img, label) in enumerate(dataloader):
        batch_size = len(label)
        images = Variable(img).cuda()
        labels = Variable(label.squeeze()).cuda()
        output = net(images)
        predictedLabel = torch.max(output,1)[1]
        acc_ = (predictedLabel==labels).sum().type(torch.FloatTensor)/batch_size
        acc.add(acc_.item())
    net.train()
    return acc.value()[0]
Example #18
def pretrain(train_dataloader, val_dataloader_, network, path=None, split_ratio=0.1):
    highest_iou = -1
    class config:
        lr = 1e-3
        epochs = 100
        path = 'checkpoint'


    pretrain_config = config()
    if path :
        pretrain_config.path = path
    network.to(device)
    criterion_ = CrossEntropyLoss2d()
    optimiser_ = torch.optim.Adam(network.parameters(),pretrain_config.lr)
    loss_meter = AverageValueMeter()
    fiou_tables = []

    for iteration in range(pretrain_config.epochs):
        loss_meter.reset()

        for i, (img,mask,weak_mask,_) in tqdm(enumerate(train_dataloader)):
            img,mask = img.to(device), mask.to(device)
            optimiser_.zero_grad()
            output = network(img)
            loss = criterion_(output,mask.squeeze(1))
            loss.backward()
            optimiser_.step()
            loss_meter.add(loss.item())
        print('train_loss: %.6f'%loss_meter.value()[0])

        if (iteration+1) %50 ==0:
            for param_group in optimiser_.param_groups:
                param_group['lr'] = param_group['lr'] * 0.5
                print('learning rate:', param_group['lr'])

        val_iou = val(val_dataloader_,network)
        fiou_tables.append(val_iou)
        if val_iou > highest_iou:
            highest_iou = val_iou
            torch.save(network.state_dict(),
                       os.path.join(pretrain_config.path, 'model_%.4f_split_%.3f.pth' % (val_iou, split_ratio)))
            print('pretrained model saved with %.4f.'%highest_iou)
    return fiou_tables
Example #19
def _main_loop(dataloader, mode='train'):
    acc_meter = AverageValueMeter()
    dataloader = tqdm(dataloader)
    for i, [signal, gt, _] in enumerate(dataloader):

        signal, gt = signal.float().to(device), gt.long().to(device)
        if mode == "train":
            cnn.train()
        else:
            cnn.eval()
        pred = cnn(signal)
        if mode == "train":
            loss = citerion(pred, gt)
            optimiser.zero_grad()
            loss.backward()
            optimiser.step()
        acc_meter.add(compute_acc(pred, gt))
        dataloader.set_postfix({"acc": float(acc_meter.value()[0])})

    return acc_meter.value()[0]
Example #20
def test(nets_, test_loader_, device, **kwargs):
    """
    This function performs the evaluation with the test set containing labeled images.
    """
    class_number = 2

    map_(lambda x: x.eval(), nets_)

    dice_meters_test = [
        AverageValueMeter(),
        AverageValueMeter(),
        AverageValueMeter()
    ]
    mv_dice_score_meter = AverageValueMeter()

    with torch.no_grad():
        for i, (img, mask, _) in enumerate(test_loader_):

            (img, mask) = img.to(device), mask.to(device)
            distributions = torch.zeros(
                [img.shape[0], class_number, img.shape[2],
                 img.shape[3]]).to(device)

            for idx, net_i in enumerate(nets_):
                pred_test = nets_[idx](img)
                # plt.imshow(pred_test[0, 1].cpu().numpy())

                distributions += F.softmax(pred_test, 1)
                dice_test = dice_loss(pred2segmentation(pred_test),
                                      mask.squeeze(1))
                dice_meters_test[idx].add(dice_test)

            distributions /= 3
            mv_dice_score = dice_loss(pred2segmentation(distributions),
                                      mask.squeeze(1))
            mv_dice_score_meter.add(mv_dice_score.item())

    map_(lambda x: x.train(), nets_)

    return [dice_meters_test[idx] for idx in range(3)], mv_dice_score_meter
Example #21
File: main.py  Project: MCD-50/X-Scan
def run(mode):
    total_loss = AverageValueMeter()
    if mode == 'train':
        model.train()
    else:
        model.eval()
    bar = progressbar.ProgressBar()
    for batch_idx, data in bar(enumerate(iterators[mode]())):
        input = data['input']
        target = data['target']
        # print(target.max(), target.min())
        output = model(Variable(input.cuda(), volatile=mode != 'train'))
        loss = class_crit(output, Variable(target.cuda()))
        total_loss.add(loss[0].data[0])
        if mode == 'train':
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        gc.collect()
    print("Total loss {}: {}".format(mode, total_loss.value()))
    return total_loss.value()[0]
Example #22
def val(model, dataloader, criterion):

    model.eval()
    device = t.device('cuda') if opt.use_gpu else t.device('cpu')
    ncorrect = 0
    nsample = 0
    loss_meter = AverageValueMeter()
    loss_meter.reset()
    for ii, (data, label) in enumerate(dataloader):
        nsample += data.size()[0]
        feature = data.to(device)
        target = label.to(device)
        prob = model(feature)
        loss = criterion(prob, target)
        score = t.nn.functional.softmax(prob, dim=1)
        index = score.topk(1)[1].view(-1)
        loss_meter.add(loss.item())
        ncorrect += (index == target).cpu().sum().item()

    accu = float(ncorrect) / nsample * 100
    loss = loss_meter.value()[0]
    return accu, loss
Example #23
    def run(self, num_episodes=1000000):
        from torchnet.meter import AverageValueMeter
        from pytorch_rl.utils.progress_bar import json_progress_bar

        reward_meter = AverageValueMeter()
        progress_bar = json_progress_bar(range(num_episodes), prefix='training')
        for i_episode in progress_bar:
            # Initialize the environment and state
            obs = torch.from_numpy(env.reset()).float()
            episode_rewards = 0
            for t in count():
                # Select and perform an action
                action = self.act(obs)[0]
                env.render()

                next_obs, reward, done, _ = env.step(action[0].item())
                next_obs = torch.from_numpy(next_obs).float()

                episode_rewards += reward
                # Store the transition in memory
                self.memory.push(obs, action, next_obs, done, torch.FloatTensor([reward]))

                # Move to the next state
                obs = next_obs

                self.optimize_model()
                if done:
                    break

            reward_meter.add(episode_rewards)
            from collections import OrderedDict
            stats = OrderedDict(episode=i_episode,
                                reward=reward_meter)
            progress_bar.log(stats)
            if i_episode % 10 == 0:
                progress_bar.print(stats)
Example #24
def train():
    net.train()
    iou_meter = AverageValueMeter()
    loss_meter = AverageValueMeter()
    for epoch in range(max_epoch):
        iou_meter.reset()
        loss_meter.reset()
        if epoch % 5 == 0:
            for param_group in optimiser.param_groups:
                param_group['lr'] = param_group['lr'] * (0.95**(epoch // 10))
                print('learning rate:', param_group['lr'])

        for i, (img, mask, _) in tqdm(enumerate(train_loader)):
            (img, mask) = (img.cuda(),
                           mask.cuda()) if (torch.cuda.is_available()
                                            and use_cuda) else (img, mask)
            optimiser.zero_grad()
            pred = net(img)
            loss = criterion(pred, mask.squeeze(1))
            loss.backward()
            # torch.nn.utils.clip_grad_norm_(net.parameters(), 1e-3)
            optimiser.step()
            loss_meter.add(loss.item())
            iou = iou_loss(pred2segmentation(pred),
                           mask.squeeze(1).float(), class_number)[1]
            iou_meter.add(iou)

            if i % train_print_frequncy == 0:
                showImages(board_train_image, img, mask,
                           pred2segmentation(pred))

        board_loss.plot('train_iou_per_epoch', iou_meter.value()[0])
        board_loss.plot('train_loss_per_epoch', loss_meter.value()[0])

        val(net, val_loader)
Example #25
class Yolov3COCOTrainer:
    def __init__(self):
        self.darknet53 = DarkNet53().to(opt.device)
        self.optimizer = self.init_optimizer()
        self.anchors = parse_anchors(opt.anchors_path)
        self.loss_layer = LossLayer(self.anchors)
        self.meter = AverageValueMeter()
        self.loss_dict = defaultdict(dict)
        self.img_size = opt.img_size
        self.scheduler = CosineAnnealingLR(self.optimizer, T_max=5, eta_min=0.)

    def use_pretrain(self, model_path, load_optimizer=True):
        if not os.path.exists(model_path):
            raise OSError(2, 'No such file or directory', model_path)
        else:
            print(f'use pretrained model: {model_path}')
            state_dict = torch.load(model_path)
            self.last_loss = state_dict['loss']
            if 'epoch' in state_dict.keys():
                self.epoch_num = state_dict['epoch']
            else:
                self.epoch_num = 0
            if 'total_steps' in state_dict.keys():
                self.total_steps = state_dict['total_steps']
            else:
                self.total_steps = 0
            if 'model' in state_dict.keys():
                print('loading pretrained model ...')
                self.darknet53.load_state_dict(state_dict['model'])
            if load_optimizer and 'optimizer' in state_dict.keys():
                print('loading pretrained optimizer ...')
                self.optimizer.load_state_dict((state_dict['optimizer']))

    def init_optimizer(self):
        if opt.optimizer == 'SGD':
            optimizer = optim.SGD(params=self.darknet53.parameters(),
                                  momentum=opt.optimizer_momentum,
                                  weight_decay=opt.optimizer_weight_decay,
                                  lr=opt.lr,
                                  nesterov=True)
            return optimizer
        elif opt.optimizer == 'Adam':
            optimizer = optim.Adam(params=self.darknet53.parameters(),
                                   weight_decay=opt.optimizer_weight_decay,
                                   lr=opt.lr)
            return optimizer
        else:
            raise ValueError(f'unsupported optimizer: {opt.optimizer}')

    def save_model(self, epoch, steps, loss, save_path):
        model_state = self.darknet53.state_dict()
        optim_state = self.optimizer.state_dict()
        state_dict = {'model': model_state,
                      'optimizer': optim_state,
                      'epoch': epoch,
                      'total_steps': steps,
                      'loss': loss}
        diret = os.path.dirname(save_path)
        if not os.path.exists(diret):
            os.makedirs(diret)
        torch.save(state_dict, save_path)
        print('model saved...')

    def adjust_lr(self, epoch):
        if opt.optimizer == 'SGD':
            if epoch < 5:
                for group in self.optimizer.param_groups:
                    group['lr'] = opt.lr * 0.1
            elif 5 <= epoch < 50:
                for group in self.optimizer.param_groups:
                    group['lr'] = opt.lr * 0.1
            else:
                for group in self.optimizer.param_groups:
                    group['lr'] = opt.lr * (0.1 ** (2 + epoch // 50))

    def train_step(self, imgs, targets, epoch):
        self.darknet53.train()
        assert len(targets) == 3
        preds = self.darknet53(imgs)
        # compute loss in 3 scale
        # pred: [N, 13, 13, 255]
        # target: [N, 13, 13, 3, 85]
        loss_list_13 = self.loss_layer(preds[0], targets[0])
        # self.print_fun(loss_list_13, 'fm_13')
        loss_list_26 = self.loss_layer(preds[1], targets[1])
        # self.print_fun(loss_list_26, 'fm_26')
        loss_list_52 = self.loss_layer(preds[2], targets[2])
        # self.print_fun(loss_list_52, 'fm_52')

        total_loss = loss_list_13[-1] + loss_list_26[-1] + loss_list_52[-1]
        txty_loss = loss_list_13[0] + loss_list_26[0] + loss_list_52[0]
        twth_loss = loss_list_13[1] + loss_list_26[1] + loss_list_52[1]
        noobj_conf_loss = loss_list_13[2] + loss_list_26[2] + loss_list_52[2]
        obj_conf_loss = loss_list_13[3] + loss_list_26[3] + loss_list_52[3]
        class_loss = loss_list_13[4] + loss_list_26[4] + loss_list_52[4]
        self.loss_dict = {'xy_loss': txty_loss.detach().cpu().item(),
                          'wh_loss': twth_loss.detach().cpu().item(),
                          'obj_conf_loss': obj_conf_loss.detach().cpu().item(),
                          'noobj_conf_loss': noobj_conf_loss.detach().cpu().item(),
                          'class_loss': class_loss.detach().cpu().item(),
                          'total_loss': total_loss.detach().cpu().item()}

        self.meter.add(total_loss.detach().cpu().item())
        self.optimizer.zero_grad()
        total_loss.backward()
        self.optimizer.step()

        # tune learning rate
        if self.scheduler is not None:
            self.scheduler.step(epoch)
        else:
            self.adjust_lr(epoch)

    def reorg_layer(self, preds, anchors):
        grid_size = preds.size(1)
        # ratio format is [h,w]
        ratio = self.img_size / grid_size
        # rescaled_anchors format is [w, h] / put the anchors on the same scale as the predictions
        rescaled_anchors = torch.from_numpy(anchors / ratio).float().to(opt.device)

        # reshape preds to [N, 13, 13, 3, 85]
        preds = preds.contiguous().view(-1, grid_size, grid_size, 3, 5 + opt.class_num)
        # box_xy: [N, 13, 13, 3, 2] / format [x, y]
        # box_wh: [N, 13, 13, 3, 2] / format [w, h]
        # confs: [N, 13, 13, 3, 1]
        # classes: [N, 13, 13, 3, 80]
        box_xy, box_wh, confs_logit, classes_logit = preds.split([2, 2, 1, opt.class_num], dim=-1)
        box_xy = box_xy.sigmoid()
        grid_x = np.arange(grid_size, dtype=np.float32)
        grid_y = np.arange(grid_size, dtype=np.float32)
        grid_x, grid_y = np.meshgrid(grid_x, grid_y)

        xy_offset = np.concatenate([grid_x.reshape(-1, 1), grid_y.reshape(-1, 1)], axis=-1)
        # xy_offset: [13, 13, 1, 2]
        xy_offset = torch.from_numpy(xy_offset).float().to(opt.device)
        xy_offset = xy_offset.contiguous().view(grid_size, grid_size, 1, 2)

        # rescale to input_image scale
        box_xy = (box_xy + xy_offset) * ratio
        # compute in the scale 13
        box_wh = torch.exp(box_wh) * rescaled_anchors
        # rescale to input_image scale
        box_wh = box_wh * ratio

        # reset scaled pred_box to bounding box format [x, y, w, h]
        # bboxes: [N, 13, 13, 3, 4]
        bboxes = torch.cat([box_xy, box_wh], dim=-1)

        return xy_offset, bboxes, confs_logit, classes_logit

    def predict(self, img):
        self.darknet53.eval()
        preds = self.darknet53(img)
        # import pickle
        # pickle.dump(preds, open('/home/dk/Desktop/mykite.pkl', 'wb'))
        result_13 = self.reorg_layer(preds[0], self.anchors['large'])
        result_26 = self.reorg_layer(preds[1], self.anchors['mid'])
        result_52 = self.reorg_layer(preds[2], self.anchors['small'])

        def _reshape(result):
            xy_offset, bbox, conf, prob = result
            grid_size = xy_offset.size(0)
            bbox = bbox.reshape(-1, grid_size * grid_size * opt.anchor_num, 4)
            conf = conf.reshape(-1, grid_size * grid_size * opt.anchor_num, 1)
            prob = prob.reshape(-1, grid_size * grid_size * opt.anchor_num, opt.class_num)
            # bbox: [N, 13*13*3, 4]
            # conf: [N, 13*13*3, 1]
            # prob: [N, 13*13*3, 80]
            return bbox, conf, prob

        bbox_out, conf_out, prob_out = [], [], []
        for result in [result_13, result_26, result_52]:
            bbox, conf, prob = _reshape(result)
            bbox_out.append(bbox)
            conf_out.append(conf.sigmoid())
            prob_out.append(prob.sigmoid())

        # boxes: [N, (13*13+26*26+52*52)*3, 4] / (center_x, center_y, width, height)
        # confs: [N, (13*13+26*26+52*52)*3, 1]
        # probs: [N, (13*13+26*26+52*52)*3, 80]
        boxes = torch.cat(bbox_out, dim=1)
        confs = torch.cat(conf_out, dim=1)
        probs = torch.cat(prob_out, dim=1)

        # [N, (13*13+26*26+52*52)*3, 1]
        xmin = boxes[..., [0]] - boxes[..., [2]] / 2
        ymin = boxes[..., [1]] - boxes[..., [3]] / 2
        xmax = boxes[..., [0]] + boxes[..., [2]] / 2
        ymax = boxes[..., [1]] + boxes[..., [3]] / 2
        # [N, (13*13+26*26+52*52)*3, 4] / [xmin, ymin, xmax, ymax]
        boxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1)
        return boxes, confs, probs

    def print_fun(self, loss_list, name):
        print(name, ':')
        loss_name = ['txty_loss', 'twth_loss', 'noobj_conf_loss', 'obj_conf_loss', 'class_loss', 'total_loss']
        for n, loss in zip(loss_name, loss_list):
            print(n, ':', loss.detach().cpu().item())
Example #26
            img_st = net_st_fusion.forward(img_3d, need_result=True)

            #--update_netd--    Update D network: Ladv = |f(real) - f(fake)|_2
            #self.pred_real, self.feat_real = self.netd(self.input)
            #self.pred_fake, self.feat_fake = self.netd(self.fake.detach())
            netd.zero_grad()
            fake, latent_i, latent_o = netg(img_st)
            out_d_real, feat_true = netd(img_st)
            out_d_fake, feat_fake = netd(fake.detach())
            err_d = .5 * criterion_BCE(
                out_d_real, y_real_) + .5 * criterion_BCE(
                    out_d_fake, y_fake_)  #+ criterion_L2(feat_real, feat_fake)
            err_d.backward(retain_graph=True)
            optimizer_d.step()
            optimizer_f.step()
            errord_meter.add(err_d.data.cpu().numpy())
            vis.plot('errord', errord_meter.value()[0])
            # If D loss is zero, then re-initialize netD
            if err_d.item() < 1e-5:
                netd.apply(weights_init)

            #--update_netg--    Update G network: log(D(G(x)))  + ||G(x) - x||
            netg.zero_grad()
            #out_g, _ = netd(fake)
            err_g_bce = criterion_L2(feat_true, feat_fake)  # l_adv
            err_g_l1l = criterion_L1(fake, img_st)  # l_con
            err_g_enc = criterion_L2(latent_i, latent_o)  # l_enc
            err_g = err_g_bce * config.w_bce + err_g_l1l * config.w_rec + err_g_enc * config.w_enc
            err_g.backward()
            optimizer_g.step()
            optimizer_f.step()
Example #27
class AlphaGAN(object):
    def __init__(self, args):
        self.epoch = args.epoch
        self.batch_size = args.batch_size
        self.save_dir = args.save_dir
        self.gpu_mode = args.gpu_mode
        self.device = args.device
        self.lrG = args.lrG
        self.lrD = args.lrD
        self.com_loss = args.com_loss
        self.fine_tune = args.fine_tune
        self.visual = args.visual
        self.env = args.env
        self.d_every = args.d_every
        self.g_every = args.g_every

        if self.fine_tune:
            self.model_G = args.model
            self.model_D = args.model.replace('netG', 'netD')

        # network init
        self.G = NetG()
        if self.com_loss:
            self.D = NLayerDiscriminator(input_nc=4)
        else:
            self.D = NLayerDiscriminator(input_nc=2)

        print(self.G)
        print(self.D)

        if self.fine_tune:
            self.G.load_state_dict(t.load(self.model_G))
            self.D.load_state_dict(t.load(self.model_D))

        self.G_optimizer = t.optim.Adam(self.G.parameters(), lr=self.lrG)
        self.D_optimizer = t.optim.Adam(self.D.parameters(), lr=self.lrD)
        if self.gpu_mode:
            self.G.to(self.device)
            self.D.to(self.device)
            self.G_criterion = t.nn.SmoothL1Loss().to(self.device)
            self.D_criterion = t.nn.MSELoss().to(self.device)

        self.G_error_meter = AverageValueMeter()
        self.Alpha_loss_meter = AverageValueMeter()
        self.Com_loss_meter = AverageValueMeter()
        self.Adv_loss_meter = AverageValueMeter()
        self.D_error_meter = AverageValueMeter()

    def train(self, dataset):
        if self.visual:
            vis = Visualizer(self.env)

        for epoch in range(self.epoch):
            for ii, data in tqdm.tqdm(enumerate(dataset)):
                real_img = data['I']
                tri_img = data['T']

                if self.com_loss:
                    bg_img = data['B'].to(self.device)
                    fg_img = data['F'].to(self.device)

                # input to the G
                input_img = t.tensor(np.append(real_img.numpy(), tri_img.numpy(), axis=1)).to(self.device)

                # real_alpha
                real_alpha = data['A'].to(self.device)

                # vis.images(real_img.numpy()*0.5 + 0.5, win='input_real_img')
                # vis.images(real_alpha.cpu().numpy()*0.5 + 0.5, win='real_alpha')
                # vis.images(tri_img.numpy()*0.5 + 0.5, win='tri_map')

                # train D
                if ii % self.d_every == 0:
                    self.D_optimizer.zero_grad()

                    # real_img_d = input_img[:, 0:3, :, :]
                    tri_img_d = input_img[:, 3:4, :, :]

                    # feed the real alpha to the discriminator
                    if self.com_loss:
                        real_d = self.D(input_img)
                    else:
                        real_d = self.D(t.cat([real_alpha, tri_img_d], dim=1))

                    target_real_label = t.tensor(1.0)
                    target_real = target_real_label.expand_as(real_d).to(self.device)

                    loss_d_real = self.D_criterion(real_d, target_real)
                    #loss_d_real.backward()

                    # the generator produces fake_alpha, which is fed to the discriminator
                    fake_alpha = self.G(input_img)
                    if self.com_loss:
                        fake_img = fake_alpha*fg_img + (1 - fake_alpha) * bg_img
                        fake_d = self.D(t.cat([fake_img, tri_img_d], dim=1))
                    else:
                        fake_d = self.D(t.cat([fake_alpha, tri_img_d], dim=1))
                    target_fake_label = t.tensor(0.0)

                    target_fake = target_fake_label.expand_as(fake_d).to(self.device)

                    loss_d_fake = self.D_criterion(fake_d, target_fake)

                    loss_D = loss_d_real + loss_d_fake
                    loss_D.backward()
                    self.D_optimizer.step()
                    self.D_error_meter.add(loss_D.item())

                # train G
                if ii % self.g_every == 0:
                    self.G_optimizer.zero_grad()

                    real_img_g = input_img[:, 0:3, :, :]
                    tri_img_g = input_img[:, 3:4, :, :]

                    fake_alpha = self.G(input_img)
                    # L1 loss between fake_alpha and real_alpha
                    loss_g_alpha = self.G_criterion(fake_alpha, real_alpha)
                    loss_G = loss_g_alpha
                    self.Alpha_loss_meter.add(loss_g_alpha.item())

                    if self.com_loss:
                        fake_img = fake_alpha * fg_img + (1 - fake_alpha) * bg_img
                        loss_g_cmp = self.G_criterion(fake_img, real_img_g)

                        # fool the discriminator
                        fake_d = self.D(t.cat([fake_img, tri_img_g], dim=1))
                        self.Com_loss_meter.add(loss_g_cmp.item())
                        loss_G = loss_G + loss_g_cmp

                    else:
                        fake_d = self.D(t.cat([fake_alpha, tri_img_g], dim=1))
                    target_fake = t.tensor(1.0).expand_as(fake_d).to(self.device)
                    loss_g_d = self.D_criterion(fake_d, target_fake)

                    self.Adv_loss_meter.add(loss_g_d.item())

                    loss_G = loss_G + loss_g_d

                    loss_G.backward()
                    self.G_optimizer.step()
                    self.G_error_meter.add(loss_G.item())

                if self.visual and ii % 20 == 0:
                    vis.plot('errord', self.D_error_meter.value()[0])
                    #vis.plot('errorg', self.G_error_meter.value()[0])
                    vis.plot('errorg', np.array([self.Adv_loss_meter.value()[0], self.Alpha_loss_meter.value()[0],
                                                 self.Com_loss_meter.value()[0]]), legend=['adv_loss', 'alpha_loss',
                                                                                           'com_loss'])

                    vis.images(tri_img.numpy()*0.5 + 0.5, win='tri_map')
                    vis.images(real_img.cpu().numpy() * 0.5 + 0.5, win='relate_real_input')
                    vis.images(real_alpha.cpu().numpy() * 0.5 + 0.5, win='relate_real_alpha')
                    vis.images(fake_alpha.detach().cpu().numpy(), win='fake_alpha')
                    if self.com_loss:
                        vis.images(fake_img.detach().cpu().numpy()*0.5 + 0.5, win='fake_img')
            self.G_error_meter.reset()
            self.D_error_meter.reset()

            self.Alpha_loss_meter.reset()
            self.Com_loss_meter.reset()
            self.Adv_loss_meter.reset()
            if epoch % 5 == 0:
                t.save(self.D.state_dict(), self.save_dir + '/netD' + '/netD_%s.pth' % epoch)
                t.save(self.G.state_dict(), self.save_dir + '/netG' + '/netG_%s.pth' % epoch)

        return
Example #28
    def train(self):

        if self.net == 'vgg16':
            photo_net = DataParallel(self._get_vgg16()).cuda()
            sketch_net = DataParallel(self._get_vgg16()).cuda()
        elif self.net == 'resnet34':
            photo_net = DataParallel(self._get_resnet34()).cuda()
            sketch_net = DataParallel(self._get_resnet34()).cuda()
        elif self.net == 'resnet50':
            photo_net = DataParallel(self._get_resnet50()).cuda()
            sketch_net = DataParallel(self._get_resnet50()).cuda()

        if self.fine_tune:
            photo_net_root = self.model_root
            sketch_net_root = self.model_root.replace('photo', 'sketch')

            photo_net.load_state_dict(
                t.load(photo_net_root, map_location=t.device('cpu')))
            sketch_net.load_state_dict(
                t.load(sketch_net_root, map_location=t.device('cpu')))

        print('net')
        print(photo_net)

        # triplet_loss = nn.TripletMarginLoss(margin=self.margin, p=self.p).cuda()
        photo_cat_loss = nn.CrossEntropyLoss().cuda()
        sketch_cat_loss = nn.CrossEntropyLoss().cuda()

        my_triplet_loss = TripletLoss().cuda()

        # optimizer
        photo_optimizer = t.optim.Adam(photo_net.parameters(), lr=self.lr)
        sketch_optimizer = t.optim.Adam(sketch_net.parameters(), lr=self.lr)

        if self.vis:
            vis = Visualizer(self.env)

        triplet_loss_meter = AverageValueMeter()
        sketch_cat_loss_meter = AverageValueMeter()
        photo_cat_loss_meter = AverageValueMeter()

        data_loader = TripleDataLoader(self.dataloader_opt)
        dataset = data_loader.load_data()

        for epoch in range(self.epochs):

            print('---------------{0}---------------'.format(epoch))

            if self.test and epoch % self.test_f == 0:

                tester_config = Config()
                tester_config.test_bs = 128
                tester_config.photo_net = photo_net
                tester_config.sketch_net = sketch_net

                tester_config.photo_test = self.photo_test
                tester_config.sketch_test = self.sketch_test

                tester = Tester(tester_config)
                test_result = tester.test_instance_recall()

                result_key = list(test_result.keys())
                vis.plot('recall',
                         np.array([
                             test_result[result_key[0]],
                             test_result[result_key[1]]
                         ]),
                         legend=[result_key[0], result_key[1]])
                if self.save_model:
                    t.save(
                        photo_net.state_dict(), self.save_dir + '/photo' +
                        '/photo_' + self.net + '_%s.pth' % epoch)
                    t.save(
                        sketch_net.state_dict(), self.save_dir + '/sketch' +
                        '/sketch_' + self.net + '_%s.pth' % epoch)

            photo_net.train()
            sketch_net.train()

            for ii, data in enumerate(dataset):

                photo_optimizer.zero_grad()
                sketch_optimizer.zero_grad()

                photo = data['P'].cuda()
                sketch = data['S'].cuda()
                label = data['L'].cuda()

                p_cat, p_feature = photo_net(photo)
                s_cat, s_feature = sketch_net(sketch)

                # category loss
                p_cat_loss = photo_cat_loss(p_cat, label)
                s_cat_loss = sketch_cat_loss(s_cat, label)

                photo_cat_loss_meter.add(p_cat_loss.item())
                sketch_cat_loss_meter.add(s_cat_loss.item())

                # triplet loss
                loss = p_cat_loss + s_cat_loss

                # tri_record = 0.
                '''
                for i in range(self.batch_size):
                    # negative
                    negative_feature = t.cat([p_feature[0:i, :], p_feature[i + 1:, :]], dim=0)
                    # print('negative_feature.size :', negative_feature.size())
                    # photo_feature
                    anchor_feature = s_feature[i, :]
                    anchor_feature = anchor_feature.expand_as(negative_feature)
                    # print('anchor_feature.size :', anchor_feature.size())

                    # positive
                    positive_feature = p_feature[i, :]
                    positive_feature = positive_feature.expand_as(negative_feature)
                    # print('positive_feature.size :', positive_feature.size())

                    tri_loss = triplet_loss(anchor_feature, positive_feature, negative_feature)

                    tri_record = tri_record + tri_loss

                    # print('tri_loss :', tri_loss)
                    loss = loss + tri_loss
                '''
                # print('tri_record : ', tri_record)

                my_tri_loss = my_triplet_loss(
                    s_feature, p_feature) / (self.batch_size - 1)
                triplet_loss_meter.add(my_tri_loss.item())
                # print('my_tri_loss : ', my_tri_loss)

                # print(tri_record - my_tri_loss)
                loss = loss + my_tri_loss
                # print('loss :', loss)
                # loss = loss / opt.batch_size

                loss.backward()

                photo_optimizer.step()
                sketch_optimizer.step()

                if self.vis:
                    vis.plot('triplet_loss',
                             np.array([
                                 triplet_loss_meter.value()[0],
                                 photo_cat_loss_meter.value()[0],
                                 sketch_cat_loss_meter.value()[0]
                             ]),
                             legend=[
                                 'triplet_loss', 'photo_cat_loss',
                                 'sketch_cat_loss'
                             ])

                triplet_loss_meter.reset()
                photo_cat_loss_meter.reset()
                sketch_cat_loss_meter.reset()
Example #29
def train(**kwargs):
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)

    device = t.device('cuda') if opt.gpu else t.device('cpu')
    # if opt.vis:
    #     from visualize import Visualizer
    #     vis = Visualizer(opt.env)

    # data
    transforms = tv.transforms.Compose([
        tv.transforms.Resize(opt.image_size),
        tv.transforms.CenterCrop(opt.image_size),
        tv.transforms.ToTensor(),
        tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = tv.datasets.ImageFolder(root=opt.data_path, transform=transforms)
    dataloader = t.utils.data.DataLoader(dataset,
                                         batch_size=opt.batch_size,
                                         shuffle=True,
                                         num_workers=opt.num_workers,
                                         drop_last=True)

    # networks
    netg, netd = NetG(opt), NetD(opt)
    map_location = lambda storage, loc: storage
    if opt.netd_path:
        netd.load_state_dict(t.load(opt.netd_path, map_location=map_location))
    if opt.netg_path:
        netg.load_state_dict(t.load(opt.netg_path, map_location=map_location))
    netd.to(device)
    netg.to(device)

    # define the optimizers and the loss
    optimizer_g = t.optim.Adam(netg.parameters(),
                               opt.lr1,
                               betas=(opt.beta1, 0.999))
    optimizer_d = t.optim.Adam(netd.parameters(),
                               opt.lr2,
                               betas=(opt.beta1, 0.999))
    criterion = t.nn.BCELoss().to(device)

    # real images are labeled 1, fake images are labeled 0
    # noises is the input to the generator
    true_labels = t.ones(opt.batch_size).to(device)
    fake_labels = t.zeros(opt.batch_size).to(device)
    fix_noises = t.randn(opt.batch_size, opt.nz, 1, 1).to(device)
    noises = t.randn(opt.batch_size, opt.nz, 1, 1).to(device)

    errord_meter = AverageValueMeter()
    errorg_meter = AverageValueMeter()

    epochs = range(opt.max_epoch)
    for epoch in iter(epochs):
        for ii, (img, _) in tqdm.tqdm(enumerate(dataloader)):
            real_img = img.to(device)

            if ii % opt.d_every == 0:
                # train the discriminator
                optimizer_d.zero_grad()
                ## try to classify real images as real
                output = netd(real_img)
                error_d_real = criterion(output, true_labels)
                error_d_real.backward()

                ## try to classify fake images as fake
                noises.data.copy_(t.randn(opt.batch_size, opt.nz, 1, 1))
                fake_img = netg(noises).detach()  # generate fake images from the noise
                output = netd(fake_img)
                error_d_fake = criterion(output, fake_labels)
                error_d_fake.backward()
                optimizer_d.step()

                error_d = error_d_fake + error_d_real

                errord_meter.add(error_d.item())

            if ii % opt.g_every == 0:
                # train the generator
                optimizer_g.zero_grad()
                noises.data.copy_(t.randn(opt.batch_size, opt.nz, 1, 1))
                fake_img = netg(noises)
                output = netd(fake_img)
                error_g = criterion(output, true_labels)
                error_g.backward()
                optimizer_g.step()
                errorg_meter.add(error_g.item())

            if opt.vis and ii % opt.plot_every == opt.plot_every - 1:
                ## visualization
                if os.path.exists(opt.debug_file):
                    ipdb.set_trace()
                fix_fake_imgs = netg(fix_noises)
                vis.images(fix_fake_imgs.detach().cpu().numpy()[:64] * 0.5 +
                           0.5,
                           win='fixfake')
                vis.images(real_img.data.cpu().numpy()[:64] * 0.5 + 0.5,
                           win='real')
                vis.plot('errord', errord_meter.value()[0])
                vis.plot('errorg', errorg_meter.value()[0])

        if (epoch + 1) % opt.save_every == 0:
            # save model checkpoints and sample images
            fix_fake_imgs = netg(fix_noises)
            tv.utils.save_image(fix_fake_imgs.data[:64],
                                '%s/%s.png' % (opt.save_path, epoch),
                                normalize=True,
                                range=(-1, 1))
            t.save(netd.state_dict(), 'checkpoints/netd_%s.pth' % epoch)
            t.save(netg.state_dict(), 'checkpoints/netg_%s.pth' % epoch)
            errord_meter.reset()
            errorg_meter.reset()
示例#30
0
def train(**kwargs):
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)

    # choosing device for training
    if opt.gpu:
        device = torch.device("cuda")
        print('using GPU')
    else:
        device = torch.device('cpu')
        print('using CPU')

    # data preprocessing
    transforms = tv.transforms.Compose([
        # 3*96*96
        tv.transforms.Resize(opt.img_size),  # resize images to img_size x img_size
        tv.transforms.CenterCrop(opt.img_size),
        tv.transforms.ToTensor(),
        tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = tv.datasets.ImageFolder(root=opt.data_path, transform=transforms)

    dataloader = DataLoader(
        dataset,                      # dataset to load from
        batch_size=opt.batch_size,    # batch size
        shuffle=True,                 # reshuffle the data every epoch
        num_workers=opt.num_workers,  # number of worker processes for loading
        drop_last=True                # drop the last batch if it is smaller than batch_size
    )

    # initialize network
    netg, netd = NetG(opt), NetD(opt)
    map_location = lambda storage, loc: storage

    # torch.load for loading models
    if opt.netg_path:
        netg.load_state_dict(
            torch.load(f=opt.netg_path, map_location=map_location))
    if opt.netd_path:
        netd.load_state_dict(
            torch.load(f=opt.netd_path, map_location=map_location))

    # move models to device
    netd.to(device)
    netg.to(device)

    # Adam optimizer
    optimize_g = torch.optim.Adam(netg.parameters(),
                                  lr=opt.lr1,
                                  betas=(opt.beta1, 0.999))
    optimize_d = torch.optim.Adam(netd.parameters(),
                                  lr=opt.lr2,
                                  betas=(opt.beta1, 0.999))

    # BCELoss: -w * (y * log(x) + (1 - y) * log(1 - x))
    # y: real/fake label, x: discriminator score after sigmoid (1: real, 0: fake)
    # (a short numerical sanity check of this formula follows this example)
    criterions = nn.BCELoss().to(device)

    # define labels
    true_labels = torch.ones(opt.batch_size).to(device)
    fake_labels = torch.zeros(opt.batch_size).to(device)

    # sample noise from the standard normal distribution N(0, 1), dim = opt.nz, size = opt.batch_size
    noises = torch.randn(opt.batch_size, opt.nz, 1, 1).to(device)

    # for generating images when saving models
    fix_noises = torch.randn(opt.batch_size, opt.nz, 1, 1).to(device)

    errord_meter = AverageValueMeter()
    errorg_meter = AverageValueMeter()
    write = SummaryWriter(log_dir=opt.virs, comment='loss')

    # training
    for epoch in range(opt.max_epoch):
        for ii_, (img, _) in tqdm(enumerate(dataloader)):
            real_img = img.to(device)

            # begin training
            # train the discriminator once every d_every batches
            if ii_ % opt.d_every == 0:
                # clear optimizer gradient
                optimize_d.zero_grad()

                output = netd(real_img)
                error_d_real = criterions(output, true_labels)
                error_d_real.backward()

                # sample a fresh batch of noise and generate fake images
                noises.data.copy_(torch.randn(opt.batch_size, opt.nz, 1, 1))
                # detach so no gradients flow back into the generator on this step
                fake_image = netg(noises).detach()
                # the discriminator scores the fake images
                output = netd(fake_image)
                error_d_fake = criterions(output, fake_labels)
                error_d_fake.backward()

                optimize_d.step()

                error_d = error_d_fake + error_d_real
                errord_meter.add(error_d.item())

            # train the generator once every g_every batches
            if ii_ % opt.g_every == 0:
                optimize_g.zero_grad()
                noises.data.copy_(torch.randn(opt.batch_size, opt.nz, 1, 1))
                fake_image = netg(noises)
                output = netd(fake_image)
                error_g = criterions(output, true_labels)
                error_g.backward()

                optimize_g.step()

                errorg_meter.add(error_g.item())

        # log the loss curves once per epoch
        write.add_scalar("Discriminator_loss", errord_meter.value()[0], epoch)
        write.add_scalar("Generator_loss", errorg_meter.value()[0], epoch)

        # save models and sample images every save_every epochs
        if (epoch + 1) % opt.save_every == 0:
            fix_fake_image = netg(fix_noises)
            tv.utils.save_image(fix_fake_image.data[:64],
                                "%s/%s.png" % (opt.save_path, epoch),
                                normalize=True)

            torch.save(netd.state_dict(),
                       'imgs3/' + 'netd_{0}.pth'.format(epoch))
            torch.save(netg.state_dict(),
                       'imgs3/' + 'netg_{0}.pth'.format(epoch))
            errord_meter.reset()
            errorg_meter.reset()

    write.close()
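The BCELoss comment in the example above can be checked numerically. The short, self-contained sketch below (the probabilities and labels are made up for illustration) compares nn.BCELoss against the hand-written -(y * log(x) + (1 - y) * log(1 - x)):

import torch
import torch.nn as nn

# made-up "discriminator scores" already passed through a sigmoid, plus real/fake labels
x = torch.tensor([0.9, 0.2, 0.7])   # predicted probability of being real
y = torch.tensor([1.0, 0.0, 1.0])   # 1 = real image, 0 = fake image

# hand-written binary cross entropy, averaged over the batch
manual = -(y * torch.log(x) + (1 - y) * torch.log(1 - x)).mean()

# PyTorch's built-in version, as used for error_d_real, error_d_fake and error_g above
criterion = nn.BCELoss()
builtin = criterion(x, y)

print(manual.item(), builtin.item())  # both are approximately 0.2284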
示例#31
0
def train(**kwargs):
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)

    device = t.device('cuda') if opt.gpu else t.device('cpu')
    if opt.vis:
        from visualize import Visualizer
        vis = Visualizer(opt.env)

    # data
    transforms = tv.transforms.Compose([
        tv.transforms.Resize(opt.image_size),
        tv.transforms.CenterCrop(opt.image_size),
        tv.transforms.ToTensor(),
        tv.transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    dataset = tv.datasets.ImageFolder(opt.data_path, transform=transforms)
    dataloader = t.utils.data.DataLoader(dataset,
                                         batch_size=opt.batch_size,
                                         shuffle=True,
                                         num_workers=opt.num_workers,
                                         drop_last=True
                                         )

    # networks
    netg, netd = NetG(opt), NetD(opt)
    map_location = lambda storage, loc: storage
    if opt.netd_path:
        netd.load_state_dict(t.load(opt.netd_path, map_location=map_location))
    if opt.netg_path:
        netg.load_state_dict(t.load(opt.netg_path, map_location=map_location))
    netd.to(device)
    netg.to(device)


    # define optimizers and loss
    optimizer_g = t.optim.Adam(netg.parameters(), opt.lr1, betas=(opt.beta1, 0.999))
    optimizer_d = t.optim.Adam(netd.parameters(), opt.lr2, betas=(opt.beta1, 0.999))
    criterion = t.nn.BCELoss().to(device)

    # real images are labeled 1, fake images are labeled 0
    # noises is the input to the generator
    true_labels = t.ones(opt.batch_size).to(device)
    fake_labels = t.zeros(opt.batch_size).to(device)
    fix_noises = t.randn(opt.batch_size, opt.nz, 1, 1).to(device)
    noises = t.randn(opt.batch_size, opt.nz, 1, 1).to(device)

    errord_meter = AverageValueMeter()
    errorg_meter = AverageValueMeter()


    for epoch in range(opt.max_epoch):
        for ii, (img, _) in tqdm.tqdm(enumerate(dataloader)):
            real_img = img.to(device)

            if ii % opt.d_every == 0:
                # train the discriminator
                optimizer_d.zero_grad()
                ## classify real images as real as much as possible
                output = netd(real_img)
                error_d_real = criterion(output, true_labels)
                error_d_real.backward()

                ## classify fake images as fake as much as possible
                noises.data.copy_(t.randn(opt.batch_size, opt.nz, 1, 1))
                fake_img = netg(noises).detach()  # generate fake images from the noise
                output = netd(fake_img)
                error_d_fake = criterion(output, fake_labels)
                error_d_fake.backward()
                optimizer_d.step()

                error_d = error_d_fake + error_d_real

                errord_meter.add(error_d.item())

            if ii % opt.g_every == 0:
                # train the generator
                optimizer_g.zero_grad()
                noises.data.copy_(t.randn(opt.batch_size, opt.nz, 1, 1))
                fake_img = netg(noises)
                output = netd(fake_img)
                error_g = criterion(output, true_labels)
                error_g.backward()
                optimizer_g.step()
                errorg_meter.add(error_g.item())

            if opt.vis and ii % opt.plot_every == opt.plot_every - 1:
                ## visualization
                if os.path.exists(opt.debug_file):
                    ipdb.set_trace()
                fix_fake_imgs = netg(fix_noises)
                vis.images(fix_fake_imgs.detach().cpu().numpy()[:64] * 0.5 + 0.5, win='fixfake')
                vis.images(real_img.data.cpu().numpy()[:64] * 0.5 + 0.5, win='real')
                vis.plot('errord', errord_meter.value()[0])
                vis.plot('errorg', errorg_meter.value()[0])

        if (epoch + 1) % opt.save_every == 0:
            # save model checkpoints and sample images
            fix_fake_imgs = netg(fix_noises)
            tv.utils.save_image(fix_fake_imgs.data[:64], '%s/%s.png' % (opt.save_path, epoch),
                                normalize=True, range=(-1, 1))
            t.save(netd.state_dict(), 'checkpoints/netd_%s.pth' % epoch)
            t.save(netg.state_dict(), 'checkpoints/netg_%s.pth' % epoch)
            errord_meter.reset()
            errorg_meter.reset()
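All three training loops rely on AverageValueMeter only for its add / value / reset interface, plotting value()[0] (the running mean of the loss) and resetting it at each checkpoint. A minimal stand-in with that interface, sketched here rather than torchnet's actual implementation, could look like:

class SimpleAverageValueMeter:
    """Running mean/std meter exposing the add / value / reset interface used above."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.n = 0
        self.sum = 0.0
        self.sum_sq = 0.0

    def add(self, value):
        self.n += 1
        self.sum += value
        self.sum_sq += value * value

    def value(self):
        # returns (mean, std), matching how the loops read value()[0] for the mean
        if self.n == 0:
            return float('nan'), float('nan')
        mean = self.sum / self.n
        # population variance here; torchnet uses an unbiased estimate, which differs slightly
        var = max(self.sum_sq / self.n - mean * mean, 0.0)
        return mean, var ** 0.5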