Code Example #1
from dataloaders.saliency_detection.DUTS import DUTS
from models.PFAN.model import PFAN as Model
from models.PFAN.loss import EdgeSaliencyLoss
from torch.utils.data import DataLoader
from constant import DEVICE, LEARNING_RATE, ITERATION_SIZE, WEIGHT_DECAY, TMP_ROOT
from utils.StatisticalValue import StatisticalValue
from utils.functions.status import print_training_status
from env import iteration_writer
from torchvision import transforms
from os.path import join
import torch
import torchvision
import numpy as np
import os

trainloader = DataLoader(
    DUTS(train=True),
    batch_size=6,
    shuffle=False,
    num_workers=8
)

model = Model()
model.to(device=DEVICE)

criterion = EdgeSaliencyLoss(device=DEVICE)
mae = torch.nn.L1Loss()

optimizer = torch.optim.Adam(model.parameters(), lr=0.0004, weight_decay=0)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)


def run(epoch):
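
The excerpt above stops at the start of run(). As a rough sketch only, a standard PyTorch training epoch over the objects defined above could look like the following; the (image, saliency mask) batch layout of DUTS and the model returning a single saliency map are assumptions, and this is not the project's actual run() body.

def train_one_epoch(epoch):
    model.train()
    for images, targets in trainloader:  # assumes DUTS yields (image, saliency mask) batches
        images, targets = images.to(DEVICE), targets.to(DEVICE)

        optimizer.zero_grad()
        predictions = model(images)             # assumes a single saliency map is returned
        loss = criterion(predictions, targets)  # edge-aware saliency loss
        loss.backward()
        optimizer.step()

        with torch.no_grad():
            batch_mae = mae(predictions, targets)  # MAE tracked for monitoring only
            print(f'epoch {epoch}: loss {loss.item():.4f}, mae {batch_mae.item():.4f}')

    scheduler.step()  # StepLR: decay the learning rate once per epoch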
Code Example #2
from dataloaders.saliency_detection.DUTS import DUTS  # not shown in this excerpt; path taken from Example #1
from models.RACNN.loss import saliency_loss  # not shown in this excerpt; path taken from Example #3
from torch.utils.data import DataLoader
from constant import DEVICE, LEARNING_RATE, ITERATION_SIZE, WEIGHT_DECAY, TMP_ROOT
from utils.StatisticalValue import StatisticalValue
from utils.functions.status import print_training_status
from env import iteration_writer
from torchvision import transforms
from os.path import join
import time
import torch
import torchvision
import numpy as np
import os

trainloader = DataLoader(
    DUTS(
        train=False,
        augment=False,
        coordinate=False,
    ),
    batch_size=10,
    shuffle=False,
    num_workers=8
)

model = Model()
model.to(device=DEVICE)

criterion = saliency_loss
mae = torch.nn.L1Loss()

def run():
    statistical_losses = StatisticalValue()
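
This excerpt also stops at the top of run(). Since the loader wraps the DUTS test split (train=False) and the L1 loss serves as an MAE metric, a plain evaluation pass is the natural shape of the missing body. The sketch below is a generic, hypothetical version using only standard PyTorch calls, not the project's StatisticalValue-based implementation; the (image, mask) batch layout is an assumption.

def evaluate():
    model.eval()
    total_mae, num_batches = 0.0, 0
    with torch.no_grad():                    # no gradients needed for evaluation
        for images, targets in trainloader:  # assumes DUTS yields (image, saliency mask) batches
            images, targets = images.to(DEVICE), targets.to(DEVICE)
            predictions = model(images)
            total_mae += mae(predictions, targets).item()
            num_batches += 1
    return total_mae / max(num_batches, 1)   # mean MAE over the test split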
Code Example #3
File: train.py | Project: YiKeYaTu/Experiments
from dataloaders.saliency_detection.DUTS import DUTS  # not shown in this excerpt; path taken from Example #1
from models.RACNN.loss import saliency_loss
from models.PFAN.loss import EdgeSaliencyLoss
from torch.utils.data import DataLoader
from constant import DEVICE, LEARNING_RATE, ITERATION_SIZE, WEIGHT_DECAY, TMP_ROOT
from utils.StatisticalValue import StatisticalValue
from utils.functions.status import print_training_status
from env import iteration_writer
from torchvision import transforms
from os.path import join
import torch
import torchvision
import numpy as np
import os

trainloader = DataLoader(
    DUTS(
        train=True,
        coordinate=False,
    ),
    batch_size=10,
    shuffle=False,
    num_workers=8
)

model = Model()
model.to(device=DEVICE)

# state_dict = torch.load(
#     '/home/ncrc-super/data/Liangchen/Experiments/tasks/pretrain_unet_saliency_detection/__tmp__/2020-12-21-21-34-50/checkpoints/checkpoint_153_1.pth',
#     map_location=DEVICE
# )['state_dict']
# new_state_dict = {}
# for key in state_dict:
#     new_state_dict[key.replace('unet.', '')] = state_dict[key]
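
The commented-out block above is cut off before the remapped weights are applied. As a generic sketch of the full pattern (placeholder checkpoint_path, not the project's exact code), stripping the 'unet.' prefix from the keys and loading the result looks like this:

checkpoint_path = 'path/to/checkpoint.pth'  # placeholder; the original path is in the comment above
state_dict = torch.load(checkpoint_path, map_location=DEVICE)['state_dict']
new_state_dict = {key.replace('unet.', ''): value for key, value in state_dict.items()}
model.load_state_dict(new_state_dict)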
Code Example #4
File: test.py | Project: YiKeYaTu/Experiments
from dataloaders.saliency_detection.DUTS import DUTS  # not shown in this excerpt; path taken from Example #1
from torch.utils.data import DataLoader
from constant import DEVICE, LEARNING_RATE, WEIGHT_DECAY, TMP_ROOT
from utils.StatisticalValue import StatisticalValue
from utils.functions.status import print_training_status
from env import iteration_writer
from torchvision import transforms
from os.path import join
import torch
import torchvision
import numpy as np
import os

trainloader = DataLoader(
    DUTS(
        train=True,
        augment=True,
        coordinate=False
    ),
    batch_size=5,
    shuffle=False,
    num_workers=8
)
testloader = DataLoader(
    DUTS(
        train=False,
        augment=False,
        coordinate=False,
    ),
    batch_size=10,
    shuffle=False,
    num_workers=8
)
Code Example #5
File: test.py | Project: YiKeYaTu/Experiments
from dataloaders.saliency_detection.DUTS import DUTS  # not shown in this excerpt; path taken from Example #1
from models.RACNN.loss import saliency_loss  # not shown in this excerpt; path taken from Example #3
from torch.utils.data import DataLoader
from constant import DEVICE, LEARNING_RATE, ITERATION_SIZE, WEIGHT_DECAY, TMP_ROOT
from utils.StatisticalValue import StatisticalValue
from utils.functions.status import print_training_status
from env import iteration_writer
from torchvision import transforms
from os.path import join
import time
import torch
import torchvision
import numpy as np
import os

trainloader = DataLoader(
    DUTS(
        train=False,
        augment=False,
        coordinate=False,
    ),
    batch_size=10,
    shuffle=False,
    num_workers=8
)

model = Model()
model.to(device=DEVICE)

state_dict = torch.load(
    '/home/ncrc-super/data/Liangchen/Experiments/tasks/pretrain_detection/__tmp__/2021-01-11-14-35-41/checkpoints/checkpoint_17_1.pth',
    map_location=DEVICE)['state_dict']
model.rcnn.rcnn.load_state_dict(state_dict)

criterion = saliency_loss
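
The checkpoint loaded above is a dict with a 'state_dict' key. For reference, a hypothetical sketch of how a checkpoint with that structure would be written (the saving code is not part of this excerpt, and the filename below is illustrative only):

torch.save(
    {'state_dict': model.state_dict()},
    join(TMP_ROOT, 'checkpoints', 'checkpoint_example.pth'),  # directory is assumed to exist
)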
Code Example #6
File: train.py | Project: YiKeYaTu/Experiments
from dataloaders.saliency_detection.DUTS import DUTS  # not shown in this excerpt; path taken from Example #1
from models.SOD.PFAN_OD.model import PFAN_OD
from models.PFAN.loss import EdgeSaliencyLoss
from torch.utils.data import DataLoader
from constant import DEVICE, LEARNING_RATE, WEIGHT_DECAY, TMP_ROOT
from utils.StatisticalValue import StatisticalValue
from utils.functions.status import print_training_status
from env import iteration_writer
from torchvision import transforms
from os.path import join
import torch
import torchvision
import numpy as np
import os
import time

trainloader = DataLoader(
    DUTS(train=True, augment=True, coordinate=False),
    batch_size=5,
    shuffle=False,
    num_workers=8
)
testloader = DataLoader(
    DUTS(
        train=False,
        augment=False,
        coordinate=False,
    ),
    batch_size=10,
    shuffle=False,
    num_workers=8
)

model = PFAN_OD(mode='train_local')
model.to(device=DEVICE)
Code Example #7
File: test.py | Project: YiKeYaTu/Experiments
from dataloaders.saliency_detection.DUTS import DUTS  # not shown in this excerpt; path taken from Example #1
from models.PFAN.loss import EdgeSaliencyLoss
from torch.utils.data import DataLoader
from constant import DEVICE, LEARNING_RATE, ITERATION_SIZE, WEIGHT_DECAY, TMP_ROOT
from utils.StatisticalValue import StatisticalValue
from utils.functions.status import print_training_status
from env import iteration_writer
from torchvision import transforms
from os.path import join
import time
import torch
import torchvision
import numpy as np
import os

trainloader = DataLoader(
    DUTS(
        train=False,
        augment=False,
    ),
    batch_size=1,
    shuffle=False,
    num_workers=8
)

model = Model()
model.to(device=DEVICE)

criterion = EdgeSaliencyLoss(device=DEVICE)
mae = torch.nn.L1Loss()


def run():
    statistical_losses = StatisticalValue()
    statistical_mae_errors = StatisticalValue()