Example 1: evaluation on the validation split
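All three examples rely on project helpers that are not shown here (load_net, load_net_opt, adjust_lr, statistics, the dataset module and AverageMeter). A minimal sketch of the imports they assume and of a typical AverageMeter, assuming it only tracks a running mean, might look like this:

import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data as data
from tensorboardX import SummaryWriter  # or torch.utils.tensorboard

import dataset  # project-specific module providing FashionDataset and the transforms


class AverageMeter:
    """Running average of a scalar (assumed implementation)."""

    def __init__(self):
        self.sum = 0.0
        self.count = 0
        self.avg = 0.0

    def update(self, val, n=1):
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count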
def main():
    net = load_net()
    net = net.cuda()
    net.eval()

    dt = dataset.FashionDataset('validation', transform=dataset.val_transform)
    loader = data.DataLoader(dt,
                             batch_size=32,
                             shuffle=False,
                             num_workers=8,
                             pin_memory=True,
                             drop_last=False)

    prec_meter = AverageMeter()
    reca_meter = AverageMeter()
    avF1_meter = AverageMeter()

    for i, (image, label) in enumerate(loader):
        image = image.cuda()
        label = label.cuda()
        with torch.no_grad():
            output = net(image)

        prec, reca, avF1 = statistics(output, label)
        prec_meter.update(prec)
        reca_meter.update(reca)
        avF1_meter.update(avF1)

        if i % 20 == 0:
            print('Iter: %d, precision: %.4f, recall: %.4f, F1: %.4f' %
                  (i, prec_meter.avg, reca_meter.avg, avF1_meter.avg))
    print('precision: %.4f, recall: %.4f, F1: %.4f' %
          (prec_meter.avg, reca_meter.avg, avF1_meter.avg))
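The statistics(output, label) helper is not part of the listing either. A hypothetical sketch, assuming sigmoid logits thresholded at 0.5 and micro-averaged precision, recall and F1 over the batch (the real project may instead average per attribute group):

import torch


def statistics(output, label, threshold=0.5, eps=1e-8):
    # Threshold the predicted probabilities and compare with the multi-hot labels.
    pred = (torch.sigmoid(output) > threshold).float()
    tp = (pred * label).sum()
    precision = tp / (pred.sum() + eps)
    recall = tp / (label.sum() + eps)
    f1 = 2 * precision * recall / (precision + recall + eps)
    return precision.item(), recall.item(), f1.item()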
Example 2: training with nn.BCEWithLogitsLoss
def main():
    base_lr = 1e-1
    net, opt = load_net_opt(base_lr=base_lr)
    crit = nn.BCEWithLogitsLoss().cuda()
    net = net.cuda()

    dt = dataset.FashionDataset('train', transform=dataset.train_transform)
    loader = data.DataLoader(dt, batch_size=32, shuffle=True, num_workers=8, pin_memory=True)
    dt_iter = iter(loader)

    writer = SummaryWriter('./tf_record')
    loss_meter = AverageMeter()
    prec_meter = AverageMeter()
    reca_meter = AverageMeter()
    avF1_meter = AverageMeter()

    for step in range(1, 100001):
        adjust_lr(step, base_lr, opt)

        # re-create the iterator once the loader is exhausted (end of epoch)
        try:
            image, label = next(dt_iter)
        except StopIteration:
            dt_iter = iter(loader)
            image, label = next(dt_iter)

        image = image.cuda()
        label = label.cuda()
        output = net(image)
        loss = crit(output, label)
        loss_meter.update(loss.item())  # store a Python float, not the graph-carrying tensor

        opt.zero_grad()
        loss.backward()
        opt.step()

        prec, reca, avF1 = statistics(output, label)
        prec_meter.update(prec)
        reca_meter.update(reca)
        avF1_meter.update(avF1)

        if step % 200 == 0:
            print('Iter: %d, loss: %f, precision: %.4f, recall: %.4f, F1: %.4f' %
                    (step, loss_meter.avg, prec_meter.avg, reca_meter.avg, avF1_meter.avg))
            writer.add_scalar('loss', loss_meter.avg, step)
            writer.add_scalar('prec', prec_meter.avg, step)
            writer.add_scalar('reca', reca_meter.avg, step)
            writer.add_scalar('F1', avF1_meter.avg, step)

        if step % 500 == 0:
            torch.save(net.state_dict(), 'models/step_%d.pth' % step)  # numbered snapshot
        if step % 2000 == 0:
            torch.save(net.state_dict(), 'models/latest.pth')  # rolling copy overwritten in place
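adjust_lr(step, base_lr, opt) is also not defined in the listing. A minimal sketch assuming a simple step-decay schedule (the milestones and decay factor below are placeholders, not the project's actual values):

def adjust_lr(step, base_lr, opt, milestones=(60000, 80000), gamma=0.1):
    # Decay the learning rate by gamma at each milestone and write it
    # into every parameter group of the optimizer.
    lr = base_lr
    for m in milestones:
        if step >= m:
            lr *= gamma
    for group in opt.param_groups:
        group['lr'] = lr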
Example 3: training with per-task losses (manual BCE and softmax cross-entropy)
def main():
    base_lr = 1e-2
    net, opt = load_net_opt(base_lr=base_lr)
    net = net.cuda()

    dt = dataset.FashionDataset('train', transform=dataset.train_transform)
    loader = data.DataLoader(dt, batch_size=32, shuffle=True, num_workers=8, pin_memory=True)
    dt_iter = iter(loader)

    writer = SummaryWriter('./tf_record')
    loss_meters = {i: AverageMeter() for i in [1, 2, 3, 4, 5, 6, 8, 9]}
    loss_meter = AverageMeter()
    prec_meter = AverageMeter()
    reca_meter = AverageMeter()
    avF1_meter = AverageMeter()

    for step in range(1, 160001):
        adjust_lr(step, base_lr, opt)

        try:
            image, label = next(dt_iter)
        except StopIteration:
            dt_iter = iter(loader)
            image, label = next(dt_iter)

        image = image.cuda()
        label = label.cuda()
        output = net(image)

        eps = 1e-6
        losses = []
        for task_id in [1, 2, 3, 4, 5, 6, 8, 9]:
            start_idx = start_idxs[task_id]
            end_idx = start_idxs[task_id+1]
            if task_id in [2, 4, 6]:  # groups trained with elementwise sigmoid BCE
                prob = torch.sigmoid(output[:, start_idx:end_idx])
                prob = torch.clamp(prob, eps, 1-eps)
                log_prob = -torch.log(prob)
                logprob_ = -torch.log(1 - prob)
                target = label[:, start_idx:end_idx]
                loss = (log_prob * target + logprob_ * (1 - target)).mean()
            else:  # groups trained with softmax cross-entropy over the group
                prob = F.softmax(output[:, start_idx:end_idx], dim=1)
                prob = torch.clamp(prob, eps, 1-eps)
                log_prob = -torch.log(prob)
                target = label[:, start_idx:end_idx]
                loss = (log_prob * target).sum(dim=1).mean()
            losses.append(loss)
            loss_meters[task_id].update(loss.item())

        loss = sum(losses)  # optionally divide by 8 to average over the tasks
        loss_meter.update(loss.item())

        opt.zero_grad()
        loss.backward()
        opt.step()

        prec, reca, avF1 = statistics(output, label)
        prec_meter.update(prec)
        reca_meter.update(reca)
        avF1_meter.update(avF1)

        if step % 1000 == 0:
            print('Iter: %d, loss: %f, precision: %.4f, recall: %.4f, F1: %.4f' %
                    (step, loss_meter.avg, prec_meter.avg, reca_meter.avg, avF1_meter.avg))
            print(', '.join(['loss%d: %.4f' % (task_id, meter.avg) for task_id, meter in loss_meters.items()]))
            writer.add_scalar('loss', loss_meter.avg, step)
            writer.add_scalar('prec', prec_meter.avg, step)
            writer.add_scalar('reca', reca_meter.avg, step)
            writer.add_scalar('F1', avF1_meter.avg, step)
            for task_id, meter in loss_meters.items():
                writer.add_scalar('loss%d' % task_id, meter.avg, step)

        if step % 2000 == 0:
            torch.save(net.state_dict(), 'models/latest.pth')
        if step % 10000 == 0:
            torch.save(net.state_dict(), 'models/step_%d.pth' % step)
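The per-task losses in Example 3 clamp the probabilities before taking logs for numerical stability. The same quantities can be computed directly from the logits with PyTorch's built-in functions, which handle stability internally; a sketch under the same column-slicing assumptions (task_loss is an illustrative name, not part of the project):

import torch.nn.functional as F


def task_loss(output_slice, target_slice, elementwise):
    if elementwise:
        # Equivalent to sigmoid + clamped binary cross-entropy above.
        return F.binary_cross_entropy_with_logits(output_slice, target_slice)
    # Equivalent to softmax + clamped cross-entropy with (possibly soft) targets.
    return -(F.log_softmax(output_slice, dim=1) * target_slice).sum(dim=1).mean()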