# Example no. 1
def graph_seg_bbox():
    """Iterate the CUB validation split and run segmentation on each batch.

    For every batch, collects the image paths, raw images and ids into a
    dict and hands it to ``get_seg_by_path``. Configuration comes from the
    module-level ``args``.
    """
    val_set = CUB_Loader(args=args, mode='val')
    loader = DataLoader(val_set, batch_size=args.batch_size, shuffle=False)
    for batch_idx, (img_id, img, label, bbox) in enumerate(tqdm(loader)):
        batch_info = {
            'img_path': get_img_path_by_id(args, img_id, val_set),
            'raw_imgs': get_raw_imgs_by_id(args, img_id, val_set),
            'img_id': img_id,
        }
        get_seg_by_path(args=args, data_dict=batch_info)
# Example no. 2
def base_vgg_cls():
    """Train the base VGG classifier on the CUB dataset and save its weights.

    Configuration is read from the module-level ``args`` (gpu flag, batch
    size, epoch count, checkpoint/save paths, target class id). Supports
    resuming from a saved checkpoint via ``args.continue_train``.

    Returns:
        tuple: ``(model, dataset)`` — the trained model and the dataset,
        so callers can reuse them (e.g. for evaluation).
    """
    # Free any cached GPU memory before training starts.
    if args.gpu:
        torch.cuda.empty_cache()

    epoch = 0
    train_acc_arr = []
    val_acc_arr = []

    # Load the training data.
    dataset = CUB_Loader(args=args)
    dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)

    # Build the model.
    model = get_base_vgg_model(args=args)

    loss_func = torch.nn.CrossEntropyLoss()

    # Optionally resume model weights and training history from a checkpoint.
    if args.continue_train:
        model.load_state_dict(torch.load(args.train_model))
        check_dict = load_check_point(args=args)
        epoch = check_dict['epoch']
        train_acc_arr = check_dict['train_acc_arr']
        val_acc_arr = check_dict['val_acc_arr']

    while epoch < args.epoch:
        # NOTE(review): the optimizer is rebuilt every epoch, which resets
        # any momentum/state buffers. Presumably intentional so the learning
        # rate can depend on the epoch — confirm against
        # get_finetune_optimizer before changing.
        opt = get_finetune_optimizer(args, model)

        train_result = []
        train_label = []

        for step, (img_id, img, label, bbox) in enumerate(dataloader):
            if args.gpu:
                img = img.cuda()
                label = label.cuda()

            # Call the module directly (not .forward) so registered hooks run.
            logits, cam = model(img)
            loss = loss_func(logits, label)
            acc = cal_acc(logits, label)

            opt.zero_grad()
            loss.backward()
            opt.step()

            # .item() extracts the Python scalar instead of the tensor repr.
            print('epoch:{} train loss:{} train acc:{}'.format(
                epoch, loss.item(), acc))

            train_result.extend(
                torch.argmax(logits, dim=-1).cpu().data.numpy())
            train_label.extend(label.cpu().data.numpy())

        # Epoch-level training accuracy over all seen samples.
        train_acc_arr.append(
            np.mean(np.array(train_result) == np.array(train_label)))

        epoch += 1

        # Ensure the dataset is back in training mode for the next epoch
        # (a no-op unless something switched it to validation).
        dataset.to_train()

    # Save final weights, splicing the class id in before the 3-char file
    # extension (e.g. 'model.pt' -> 'model_12.pt').
    torch.save(
        model.state_dict(), args.save_model_path[:-3] + '_' + str(args.cls) +
        args.save_model_path[-3:])
    return model, dataset