示例#1
0
def main(class_num, pre_trained, train_data, batch_size, momentum, lr, cate_weight, epoch, weights):
    """Train a PSPNet segmentation model on *train_data* and save its weights.

    Args:
        class_num: number of segmentation classes.
        pre_trained: whether to load pretrained backbone weights.
        train_data: Dataset yielding (image, label) pairs.
        batch_size: mini-batch size for the DataLoader.
        momentum: SGD momentum.
        lr: SGD learning rate.
        cate_weight: per-class weights for the cross-entropy loss.
        epoch: number of training epochs.
        weights: path prefix the state-dict file is saved under.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Fix: honor the pre_trained argument instead of hard-coding pretrained=True,
    # which silently ignored the caller's choice.
    model = PSPNet(num_classes=class_num, downsample_factor=16, pretrained=pre_trained, aux_branch=False)
    model = model.to(device)

    train_loader = Data.DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)

    optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)

    # Fix: move the loss module with .to(device) instead of .cuda(), which
    # crashed on CPU-only machines even though `device` falls back to "cpu".
    loss_func = nn.CrossEntropyLoss(weight=torch.from_numpy(np.array(cate_weight)).float()).to(device)

    model.train()
    for i in range(epoch):
        for step, (b_x, b_y) in enumerate(train_loader):
            b_x = b_x.to(device)
            b_y = b_y.to(device)
            # assumes labels are 473x473 maps (PSPNet's canonical crop) — TODO confirm
            b_y = b_y.view(-1, 473, 473)
            output = model(b_x)
            loss = loss_func(output, b_y.long())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Original guard was `step % 1 == 0`, i.e. always true — log every step.
            print("Epoch:{0} || Step:{1} || Loss:{2}".format(i, step, format(loss, ".4f")))

    torch.save(model.state_dict(), weights + "PSPNet_weights" + ".pth")
示例#2
0
    # colorized_mask.save(os.path.join(output_path, image_file+'.png'))


if __name__ == '__main__':
    # Load a serialized checkpoint for inference over a directory of masks.
    # NOTE(review): `model` and `device` are defined earlier in the file,
    # outside this excerpt.
    path = './keyboard.pth'  # the model file must sit in the same directory as this script
    # path='./seg_hand.pth'
    checkpoint = torch.load(path)

    # Some checkpoints wrap the weights inside a {'state_dict': ...} dict.
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint.keys():
        checkpoint = checkpoint['state_dict']
    # Keys containing 'module' mean the weights were saved from a
    # DataParallel wrapper; wrap `model` the same way so the key names match.
    if 'module' in list(checkpoint.keys())[0] and not isinstance(
            model, torch.nn.DataParallel):
        model = torch.nn.DataParallel(model)

    model.load_state_dict(checkpoint)
    model.to(device)
    model.eval()  # inference only — disable dropout/batch-norm updates
    # Total number of scalar parameters; reported in millions below.
    paramerters = sum(x.numel() for x in model.parameters())
    #---model->51M
    print("models have {} M paramerters in total".format(paramerters / 1e6))

    # Collect every .png image under ./mask_imgs.
    img_paths = './mask_imgs'
    img_paths = [
        os.path.join(img_paths, x) for x in os.listdir(img_paths)
        if x.endswith('.png')
    ]

    # for img_path in img_paths:
    #     if not os.path.basename(img_path)=='0000.png':continue
    #     img=cv2.imread('./keyboard_images/0015.jpg')
    #     print(img_path)
示例#3
0
def train(epo_num=50, show_vgg_params=False):
    """Train a PSPNet for binary segmentation, visualizing progress in visdom.

    Runs `epo_num` epochs over the module-level `train_dataloader`, evaluates
    on `test_dataloader` each epoch, and saves a full-model checkpoint every
    5 epochs to ./checkpoints/.

    Args:
        epo_num: number of training epochs.
        show_vgg_params: forwarded to VGGNet to print its parameter summary.
    """
    vis = visdom.Visdom()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # NOTE(review): vgg_model is constructed but never used below — confirm
    # whether it is needed (e.g. as a backbone) or is dead code.
    vgg_model = VGGNet(requires_grad=True, show_params=show_vgg_params)
    fcn_model = PSPNet(n_classes=2)
    fcn_model = fcn_model.to(device)
    # BCELoss expects probabilities, hence the sigmoid applied to the raw
    # network output below.
    criterion = nn.BCELoss().to(device)
    optimizer = optim.SGD(fcn_model.parameters(), lr=1e-2, momentum=0.7)

    # Per-iteration loss histories for the visdom line plots.
    all_train_iter_loss = []
    all_test_iter_loss = []

    # start timing
    prev_time = datetime.now()
    for epo in range(epo_num):

        train_loss = 0
        fcn_model.train()
        # NOTE(review): lidar and color_label are loaded and moved to device
        # but not fed to the model here — confirm they are intentionally unused.
        for index, (img, lidar, label,
                    color_label) in enumerate(train_dataloader):
            img = img.to(device)
            lidar = lidar.to(device)
            label = label.to(device)
            optimizer.zero_grad()
            output, out_cls = fcn_model(img)
            output = torch.sigmoid(
                output)  # output.shape is torch.Size([4, 2, 160, 160])
            loss = criterion(output, label)
            loss.backward()
            iter_loss = loss.item()
            all_train_iter_loss.append(iter_loss)
            train_loss += iter_loss
            optimizer.step()

            # Convert predictions/labels to class-index maps for visualization.
            # NOTE(review): argmin (not argmax) over the channel axis — this
            # inverts the usual convention; confirm the intended class mapping.
            output_np = output.cpu().detach().numpy().copy()
            output_np = np.argmin(output_np, axis=1)
            bag_msk_np = label.cpu().detach().numpy().copy()
            bag_msk_np = np.argmin(bag_msk_np, axis=1)

            # Push images and the loss curve to visdom every 15 iterations.
            if np.mod(index, 15) == 0:
                print('epoch {}, {}/{},train loss is {}'.format(
                    epo, index, len(train_dataloader), iter_loss))
                vis.images(output_np[:, None, :, :],
                           win='train_pred',
                           opts=dict(title='train prediction'))
                vis.images(bag_msk_np[:, None, :, :],
                           win='train_label',
                           opts=dict(title='label'))
                vis.line(all_train_iter_loss,
                         win='train_iter_loss',
                         opts=dict(title='train iter loss'))

        # ---- evaluation pass over the test set (no gradient updates) ----
        test_loss = 0
        fcn_model.eval()
        for index, (img, lidar, label,
                    color_label) in enumerate(test_dataloader):

            img = img.to(device)
            lidar = lidar.to(device)
            label = label.to(device)
            with torch.no_grad():
                optimizer.zero_grad()
                output, out_cls = fcn_model(img)
                output = torch.sigmoid(output)
                loss = criterion(output, label)
                iter_loss = loss.item()
                all_test_iter_loss.append(iter_loss)
                test_loss += iter_loss

                output_np = output.cpu().detach().numpy().copy()
                output_np = np.argmin(output_np, axis=1)
                bag_msk_np = label.cpu().detach().numpy().copy()
                bag_msk_np = np.argmin(bag_msk_np, axis=1)

                if np.mod(index, 15) == 0:
                    print(
                        r'Testing... Open http://localhost:8097/ to see test result.'
                    )
                    vis.images(output_np[:, None, :, :],
                               win='test_pred',
                               opts=dict(title='test prediction'))
                    vis.images(bag_msk_np[:, None, :, :],
                               win='test_label',
                               opts=dict(title='label'))
                    vis.line(all_test_iter_loss,
                             win='test_iter_loss',
                             opts=dict(title='test iter loss'))

        # Report wall-clock time spent on this epoch.
        cur_time = datetime.now()
        h, remainder = divmod((cur_time - prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = "Time %02d:%02d:%02d" % (h, m, s)
        prev_time = cur_time

        print('epoch train loss = %f, epoch test loss = %f, %s' %
              (train_loss / len(train_dataloader),
               test_loss / len(test_dataloader), time_str))

        # Checkpoint the whole model object every 5 epochs.
        if np.mod(epo, 5) == 0:
            torch.save(fcn_model, 'checkpoints/fcn_model_{}.pt'.format(epo))
            print('saveing checkpoints/fcn_model_{}.pt'.format(epo))
                   deep_features_size=256,
                   backend='resnet18')
    model = nn.DataParallel(model)
    #adjest for the a2d2 class
    model.module.classifier[2] = nn.Linear(in_features=256,
                                           out_features=cls_num,
                                           bias=True)
    model.module.final[0] = nn.Conv2d(64,
                                      cls_num,
                                      kernel_size=(1, 1),
                                      stride=(1, 1))

print(model_name)
# Move the model to GPU 0 for inference.
# NOTE(review): .to(0) targets cuda:0 unconditionally and will fail on a
# CPU-only machine — consider a torch.cuda.is_available() fallback.
cuda = 0
model.to(cuda)
# Inference only — freeze dropout/batch-norm behavior.
model.eval()
# Create the train/val file path lists for the A2D2 dataset.
train_list, val_list = make_datapath_list_a2d2(rootpath)
# Create Dataset
train_dataset = A2D2Dataset(file_list=train_list,
                            transform=transform_dict['train'],
                            phase='train',
                            seg_label=SEG_COLOR_DICT_A2D2)
# Create Dataloader
train_dataloader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=batch_size,
                                               shuffle=True)

# criterion