Example #1
File: train.py Project: Lxtjx/cv
def train(model):
    best = [0]  # track the best mIoU so far (decides when weights are saved)
    net = model.train()
    # training epochs
    for epoch in range(cfg.epoch_number):
        print(f"Epoch is [{epoch + 1}/{cfg.epoch_number}]")
        # halve the learning rate every 50 epochs
        if epoch % 50 == 0 and epoch != 0:
            for group in optimizer.param_groups:
                group['lr'] *= 0.5

        train_loss = 0
        train_acc = 0
        train_miou = 0
        train_class_acc = 0  # per-class accuracy
        for i, sample in enumerate(train_data):
            img_data = Variable(sample['img'].to(device))
            img_label = Variable(sample['label'].to(device))

            out = net(img_data)
            out = F.log_softmax(out, dim=1)  # log_softmax converts logits to log-probabilities (pairs with NLLLoss)
            loss = criterion(out, img_label)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            train_loss += loss.item()

            # evaluation metrics for this batch
            pre_label = out.max(dim=1)[1].data.cpu().numpy()
            pre_label = list(pre_label)

            true_label = img_label.data.cpu().numpy()
            true_label = list(true_label)

            # segmentation metrics (computed from the confusion matrix)
            eval_metric = eval_semantic_segmentation(pre_label, true_label)
            train_acc += eval_metric['mean_class_accuracy']
            train_miou += eval_metric['miou']
            train_class_acc += eval_metric['class_accuracy']
            print('|batch[{}/{}]|batch_loss {: .8f}|'.format(
                i + 1, len(train_data), loss.item()))

        metric_description = '|Train Acc|: {:.5f}|Train Mean IU|: {:.5f}\n|Train_class_acc|:{:}'.format(
            train_acc / len(train_data),
            train_miou / len(train_data),
            train_class_acc / len(train_data),
        )
        print(metric_description)

        evaluate(net)

        # save a checkpoint whenever the epoch mIoU matches or beats the best so far
        if max(best) <= train_miou / len(train_data):
            best.append(train_miou / len(train_data))
            torch.save(net.state_dict(), '{}.pth'.format(epoch))
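All five examples assume the same surrounding module: a cfg object holding the epoch count, a device, a DataLoader named train_data that yields {'img', 'label'} dicts, a criterion, an optimizer, and an eval_semantic_segmentation helper. None of that setup appears on this page, so the following is only a minimal sketch of what it could look like; the stand-in dataset, the 1x1-convolution "model", and the hyperparameters are assumptions, not code from Lxtjx/cv.

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

class RandomSegDataset(Dataset):
    """Stand-in dataset yielding the {'img', 'label'} dicts the loops above index into."""
    def __init__(self, length=8, num_classes=12, size=(64, 64)):
        self.length, self.num_classes, self.size = length, num_classes, size

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        img = torch.randn(3, *self.size)                        # fake RGB image
        label = torch.randint(0, self.num_classes, self.size)   # fake per-pixel class labels
        return {'img': img, 'label': label}

train_data = DataLoader(RandomSegDataset(), batch_size=2, shuffle=True)

# A 1x1 convolution is the simplest per-pixel classifier; the real projects plug an FCN in here.
model = nn.Conv2d(3, 12, kernel_size=1).to(device)

# log_softmax outputs pair with NLLLoss (CrossEntropyLoss would expect raw logits instead).
criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)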
Example #2
def train(model):
    best = [0]
    net = model.train()
    # training epochs
    for epoch in range(cfg.EPOCH_NUMBER):
        print('Epoch is [{}/{}]'.format(epoch + 1, cfg.EPOCH_NUMBER))
        if epoch % 50 == 0 and epoch != 0:
            for group in optimizer.param_groups:
                group['lr'] *= 0.5

        train_loss = 0
        train_acc = 0
        train_miou = 0
        train_class_acc = 0
        # training batches
        for i, sample in enumerate(train_data):
            # load the batch
            img_data = Variable(sample['img'].to(device))
            img_label = Variable(sample['label'].to(device))
            # forward and backward pass
            out = net(img_data)
            out = F.log_softmax(out, dim=1)
            loss = criterion(out, img_label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            train_loss += loss.item()

            # evaluation metrics
            pre_label = out.max(dim=1)[1].data.cpu().numpy()
            pre_label = list(pre_label)

            true_label = img_label.data.cpu().numpy()
            true_label = list(true_label)

            eval_metrix = eval_semantic_segmentation(pre_label, true_label)
            train_acc += eval_metrix['mean_class_accuracy']
            train_miou += eval_metrix['miou']
            train_class_acc += eval_metrix['class_accuracy']

            print('|batch[{}/{}]|batch_loss {: .8f}|'.format(i + 1, len(train_data), loss.item()))

        metric_description = '|Train Acc|: {:.5f}|Train Mean IU|: {:.5f}\n|Train_class_acc|:{:}'.format(
            train_acc / len(train_data),
            train_miou / len(train_data),
            train_class_acc / len(train_data),
        )

        print(metric_description)
        if max(best) <= train_miou / len(train_data):
            best.append(train_miou / len(train_data))
            t.save(net.state_dict(), './Results/weights/FCN_weight/{}.pth'.format(epoch))
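The manual loop over optimizer.param_groups that halves the learning rate every 50 epochs can equivalently be expressed with torch.optim.lr_scheduler.StepLR, which produces the same decay schedule. A sketch of that pattern, not code from the project:

from torch.optim.lr_scheduler import StepLR

# gamma=0.5 every 50 epochs reproduces group['lr'] *= 0.5 from the examples above
scheduler = StepLR(optimizer, step_size=50, gamma=0.5)

for epoch in range(cfg.EPOCH_NUMBER):
    # ... run the training batches exactly as in the examples above ...
    scheduler.step()  # advance the schedule once per epoch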
Example #3
def train(model):
    best = [0]
    net = model.train()
    # training epochs
    for epoch in range(cfg.EPOCH_NUMBER):
        print("Eopch is [{}/{}]".format(epoch + 1, cfg.EPOCH_NUMBER))
        if epoch % 50 == 0 and epoch != 0:
            for group in optimizer.param_groups:
                group["lr"] *= 0.5

        train_loss = 0
        train_acc = 0
        train_miou = 0
        train_class_acc = 0

        for i, sample in enumerate(train_data):
            img_data = Variable(sample["img"].to(device))
            img_label = Variable(sample["label"].to(device))

            out = net(img_data)
            out = F.log_softmax(out, dim=1)
            loss = criterion(out, img_label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            train_loss += loss.item()

            pre_label = out.max(dim=1)[1].data.cpu().numpy()
            pre_label = list(pre_label)

            true_label = img_label.data.cpu().numpy()
            true_label = list(true_label)

            eval_metric = eval_semantic_segmentation(pre_label, true_label)
            train_acc += eval_metric["mean_class_accuracy"]
            train_miou += eval_metric["miou"]
            train_class_acc += eval_metric["class_accuracy"]

            print('|batch[{}/{}]|batch_loss {:.8f}|'.format(
                i + 1, len(train_data), loss.item()))

    t.save(net.state_dict(), 'xxx.pth')
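Every example calls eval_semantic_segmentation the same way: with a list of predicted label maps and a list of ground-truth label maps, one (H, W) integer array per image, and it returns a dict of metrics ('miou', 'mean_class_accuracy', 'class_accuracy', 'pixel_accuracy'). The key names match the chainercv evaluator, but the import is never shown on this page, so the source of the function in the sketch below is an assumption.

import numpy as np
# assumed origin of the evaluator; the projects may instead ship their own copy
from chainercv.evaluations import eval_semantic_segmentation

# two tiny 2x2 label maps with 3 classes, just to show the expected call shape
pred = [np.array([[0, 1], [1, 2]]), np.array([[2, 2], [0, 0]])]
gt   = [np.array([[0, 1], [1, 1]]), np.array([[2, 2], [0, 1]])]

metrics = eval_semantic_segmentation(pred, gt)
print(metrics['miou'])                  # scalar: IoU averaged over classes
print(metrics['mean_class_accuracy'])   # scalar: accuracy averaged over classes
print(metrics['class_accuracy'])        # array: one accuracy value per class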
Example #4
File: train.py Project: liu8526/FCN
def evaluate(model):
    net = model.eval()
    eval_loss = 0
    eval_acc = 0
    eval_miou = 0
    eval_class_acc = 0

    prec_time = datetime.now()
    for j, sample in enumerate(val_data):
        valImg = Variable(sample['img'].to(device))
        valLabel = Variable(sample['label'].long().to(device))

        out = net(valImg)
        out = F.log_softmax(out, dim=1)
        loss = criterion(out, valLabel)
        eval_loss = loss.item() + eval_loss
        pre_label = out.max(dim=1)[1].data.cpu().numpy()
        pre_label = list(pre_label)

        true_label = valLabel.data.cpu().numpy()
        true_label = list(true_label)

        eval_metrics = eval_semantic_segmentation(pre_label, true_label)
        eval_acc = eval_metrics['mean_class_accuracy'] + eval_acc
        eval_miou = eval_metrics['miou'] + eval_miou
    # eval_class_acc = eval_metrix['class_accuracy'] + eval_class_acc

    cur_time = datetime.now()
    h, remainder = divmod((cur_time - prec_time).seconds, 3600)
    m, s = divmod(remainder, 60)
    time_str = 'Time: {:.0f}:{:.0f}:{:.0f}'.format(h, m, s)

    val_str = ('|Valid Loss|: {:.5f} \n|Valid Acc|: {:.5f} \n|Valid Mean IU|: {:.5f} \n|Valid Class Acc|:{:}'.format(
        eval_loss / len(val_data),
        eval_acc / len(val_data),
        eval_miou / len(val_data),
        eval_class_acc / len(val_data)))
    print(val_str)
    print(time_str)
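evaluate only runs forward passes, so no gradients are needed; in current PyTorch the validation loop is usually wrapped in torch.no_grad() to avoid building an autograd graph. A minimal sketch of that pattern, reusing the val_data, criterion, and device names from Example #4 (this is not the project's code):

def evaluate(model):
    net = model.eval()                 # disable dropout, use running BatchNorm statistics
    eval_loss = 0.0
    with torch.no_grad():              # no autograd graph is built during validation
        for sample in val_data:
            img = sample['img'].to(device)
            label = sample['label'].long().to(device)
            out = F.log_softmax(net(img), dim=1)
            eval_loss += criterion(out, label).item()
    print('|Valid Loss|: {:.5f}'.format(eval_loss / len(val_data)))

Because Example #1 calls evaluate(net) inside its epoch loop, switching back with net.train() after validation keeps dropout and BatchNorm in training mode for the following epochs.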
Example #5
# running totals over the test set
train_acc = 0
train_miou = 0
train_class_acc = 0
train_mpa = 0
error = 0

for i, sample in enumerate(test_data):
    data = Variable(sample['img']).to(device)
    label = Variable(sample['label']).to(device)
    out = net(data)
    out = F.log_softmax(out, dim=1)

    pre_label = out.max(dim=1)[1].data.cpu().numpy()
    pre_label = list(pre_label)

    true_label = label.data.cpu().numpy()
    true_label = list(true_label)

    eval_metrix = eval_semantic_segmentation(pre_label, true_label)
    train_acc = eval_metrix['mean_class_accuracy'] + train_acc
    train_miou = eval_metrix['miou'] + train_miou
    train_mpa = eval_metrix['pixel_accuracy'] + train_mpa
    # skip the per-class accuracy of batches that do not contain all 12 classes
    if len(eval_metrix['class_accuracy']) < 12:
        eval_metrix['class_accuracy'] = 0
        train_class_acc = train_class_acc + eval_metrix['class_accuracy']
        error += 1
    else:
        train_class_acc = train_class_acc + eval_metrix['class_accuracy']

    print(eval_metrix['class_accuracy'], '================', i)

epoch_str = (
    'test_acc :{:.5f} ,test_miou:{:.5f}, test_mpa:{:.5f}, test_class_acc :{:}'.
    format(train_acc / (len(test_data) - error),