Code example #1
0
    # Inference loop: run each test batch through the RGB, depth and fusion
    # networks, then save the predicted saliency map of the (single) sample.
    # Renamed loop variable `id` -> `batch_idx` (shadowed the builtin `id`).
    for batch_idx, (data, depth, img_name, img_size) in enumerate(test_loader):
        print('testing batch %d' % batch_idx)  # fixed log typo: 'bach' -> 'batch'

        inputs = Variable(data).cuda()
        inputs_depth = Variable(depth).cuda()

        n, c, h, w = inputs.size()
        # Expand the single-channel depth map to c channels and move the
        # channel axis to position 1:
        # (n, h, w, 1) -> repeat -> (n, h, w, c) -> transposes -> (n, c, h, w).
        depth = inputs_depth.view(n, h, w, 1).repeat(1, 1, 1, c)
        depth = depth.transpose(3, 1)
        depth = depth.transpose(3, 2)

        h1, h2, h3, h4, h5 = model_rgb(inputs)  # RGBNet's output
        d1, d2, d3, d4, d5 = model_depth(depth)  # DepthNet's output
        outputs_all = model_fusion(h1, h2, h3, h4, h5, d1, d2, d3, d4,
                                   d5)  # Final output

        # Foreground (class-1) probability of the first sample in the batch.
        outputs_all = F.softmax(outputs_all, dim=1)
        outputs = outputs_all[0][1]

        # NOTE(review): Tensor.resize_ reinterprets storage; presumably the
        # network output is already h x w so this acts as a reshape -- confirm.
        outputs = outputs.cpu().data.resize_(h, w)
        imsave(os.path.join(MapRoot, img_name[0] + '.png'), outputs, img_size)

    # -------------------------- validation --------------------------- #
    torch.cuda.empty_cache()  # release cached GPU memory before evaluation

    print("\nevaluating mae....")
    F_measure, mae = get_FM(salpath=MapRoot + '/',
                            gtpath=test_dataRoot + '/test_masks/')
    print('F_measure:', F_measure)
    print('MAE:', mae)
Code example #2
0
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torchvision
import torch.nn.functional as F
import torch.optim as optim
from dataset_loader import MyData, MyTestData, DTestData
from model import FocalNet, FocalNet_sub
from conv_lstm import ConvLSTM
from functions import imsave
import argparse
from Trainer_Teacher import Trainer
import os

if __name__ == '__main__':
    # Solver configuration presets for the teacher network, keyed by integer.
    configurations = {
        1: dict(
            max_iteration=500000,  # total number of training iterations
            lr=1.0e-10,  # base learning rate
            momentum=0.99,  # SGD momentum
            weight_decay=0.0005,  # L2 regularization strength
            spshot=10000,  # presumably: snapshot/checkpoint interval -- confirm
            nclass=2,  # number of output classes
            sshow=10,  # presumably: logging interval (iterations) -- confirm
            focal_num=12,  # presumably: number of focal slices per sample -- confirm
        )
    }
    parser=argparse.ArgumentParser()
    parser.add_argument('--phase', type=str, default='test', help='train or test')
    # NOTE(review): type=str applies only to command-line values, so the
    # default here stays the bool True, not a string -- confirm intended.
    parser.add_argument('--param', type=str, default=True, help='path to pre-trained parameters')
Code example #3
0
File: demo.py  Project: OIPLab-DUT/DMRA_RGBD-SOD
"""
Title: Depth-induced Multi-scale Recurrent Attention Network for Saliency Detection
Author: Wei Ji, Jingjing Li
E-mail: [email protected]
"""
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torchvision
import torch.nn.functional as F
import torch.optim as optim
from dataset_loader import MyData, MyTestData
from model import RGBNet,DepthNet
from fusion import ConvLSTM
from functions import imsave
import argparse
from trainer import Trainer
import os

# Solver configuration presets, keyed by integer (excerpt is cut off here).
configurations = {
    # same configuration as original work
    # https://github.com/shelhamer/fcn.berkeleyvision.org
    1: dict(
        max_iteration=1000000,  # total number of training iterations
        lr=1.0e-10,  # base learning rate
        momentum=0.99,  # SGD momentum
        weight_decay=0.0005,  # L2 regularization strength
        spshot=20000,  # presumably: snapshot/checkpoint interval -- confirm
        nclass=2,  # number of output classes
        sshow=10,  # presumably: logging interval (iterations) -- confirm
Code example #4
0
        # Forward pass: RGB backbone -> integration module -> attention head.
        inputs = Variable(data).cuda()
        n, c, h, w = inputs.size()
        begin_time = time.time()  # time the forward pass + post-processing only

        low_1, low_2, high_1, high_2, high_3 = model_rgb(inputs)
        Features, _, _, Edge, _, _, Depth, Sal = model_intergration(
            low_1, low_2, high_1, high_2, high_3)
        outputs = model_att(Features, Edge, Sal, Depth)
        # Class-1 (foreground) probability of the first sample in the batch.
        outputs = F.softmax(outputs, dim=1)
        outputs = outputs[0][1]
        # NOTE(review): Tensor.resize_ reinterprets storage; presumably the
        # output is already h x w so this acts as a reshape -- confirm.
        outputs = outputs.cpu().data.resize_(h, w)
        end_time = time.time()
        run_time = end_time - begin_time
        res.append(run_time)  # accumulate per-image runtimes for averaging
        imsave(os.path.join(MapRoot, img_name[0] + '.png'), outputs, img_size)

        # ---------------- Visual Results ------------------ #
        # Edge map: softmax over edge logits, save channel 1.
        out1 = F.softmax(Edge, dim=1)
        out1 = out1[0][1]
        out1 = out1.cpu().data.resize_(h, w)
        imsave(os.path.join(args.out1, img_name[0] + '.png'), out1, img_size)
        # Depth prediction: single channel, saved without softmax.
        out2 = Depth[0][0]
        out2 = out2.cpu().data.resize_(h, w)
        imsave(os.path.join(args.out2, img_name[0] + '.png'), out2, img_size)
        # Sal-Att: saliency attention map, channel 1.
        out3 = Sal[0][1]
        out3 = out3.cpu().data.resize_(h, w)
        imsave(os.path.join(args.out3, img_name[0] + '.png'), out3, img_size)
Code example #5
0
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torchvision
import torch.nn.functional as F
import torch.optim as optim
from dataset_loader import MyData, MyTestData
from model import FocalNet, FocalNet_sub
from conv_lstm import ConvLSTM
from functions import imsave
import argparse
from Trainer_Student import Trainer
from resnet_18 import Resnet_18
import os
import imageio

if __name__ == '__main__':
    # Solver configuration presets for the student network, keyed by integer.
    configurations = {
        1: dict(
            max_iteration=300000,  # total number of training iterations
            lr=1.0e-10,  # base learning rate
            momentum=0.99,  # SGD momentum
            weight_decay=0.0005,  # L2 regularization strength
            spshot=10000,  # presumably: snapshot/checkpoint interval -- confirm
            nclass=2,  # number of output classes
            sshow=10,  # presumably: logging interval (iterations) -- confirm
            focal_num=12,  # presumably: number of focal slices per sample -- confirm
        )
    }
    parser=argparse.ArgumentParser()