Example #1
class MySalEncoder(nn.Module):
    def __init__(self):
        super(MySalEncoder, self).__init__()
        # the image is downsampled 4x by resnet, so feat_stride is set to 4 here.
        self.feat_stride = 4
        model_path = './models/'
        resnet = resnet34(pretrained=True,
                          modelpath=model_path,
                          num_classes=1000)
        # the extractor (resnet) does not need to be updated, so only the roi_head in the encoder is updated.
        # in the demo, "encoder_optimizer" only passes the parameters of self.roi_head to the optimizer:
        # encoder_optimizer = optim.Adam(EncoderModel.roi_head.parameters(), lr=0.001, ...
        # ...betas=(0.9, 0.999), eps=1e-08, weight_decay=0)

        self.extractor = nn.Sequential(resnet)
        self.roi_head = ROIHead(roi_size=(2, 2),
                                feat_stride=self.feat_stride).cuda()
        self.extractor.cuda()
        self.roi_head.cuda()
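
A minimal usage sketch of the optimizer setup described in the comments above; the EncoderModel variable name and the optim import are assumptions, not part of the original snippet:

import torch.optim as optim

# hypothetical usage: only the ROI head parameters are handed to the optimizer,
# so the resnet extractor is never updated during training
EncoderModel = MySalEncoder()
encoder_optimizer = optim.Adam(EncoderModel.roi_head.parameters(),
                               lr=0.001, betas=(0.9, 0.999),
                               eps=1e-08, weight_decay=0)
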
Example #2
from torch import nn
from utils import Tester
from network import resnet34, resnet101

# Set Test parameters
params = Tester.TestParams()
params.gpus = [0]  # set 'params.gpus = []' to use CPU mode; if len(params.gpus) > 1, params.gpus[0] is used for testing by default
params.ckpt = './models/ckpt_epoch_60.pth'  #'./models/ckpt_epoch_400_res34.pth'
params.testdata_dir = './testimg/3/'

# models
model = resnet34(pretrained=True,
                 num_classes=1000)  # batch_size=120, 1GPU Memory < 7000M
model.fc = nn.Linear(512, 10)
#model = resnet101(pretrained=False,num_classes=1000)  # batch_size=60, 1GPU Memory > 9000M
#model.fc = nn.Linear(512*4, 6)
# true result  000 111 222 333 444 555
#
# Test
tester = Tester(model, params)
#tester.test_line()
tester.test_ros()
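
A minimal sketch of the GPU-selection convention described in the params.gpus comment above; select_device is a hypothetical helper for illustration, not part of the Tester API:

import torch

def select_device(gpus):
    # hypothetical helper: an empty list means CPU mode,
    # otherwise the first listed GPU id is used for testing
    if len(gpus) == 0:
        return torch.device('cpu')
    return torch.device('cuda:{}'.format(gpus[0]))

device = select_device(params.gpus)
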
Example #3
    trainset = ImageFolder(path_ + '/4and5_256_ZJ_really/', imgsize, transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=16, shuffle=True, num_workers=1)
    testset = ImageFolder(r'D:\1WXJ\DATA\CLASS_Japan\devide_3\test\2017-2019\Delete_5_zong212_typhoon\A_2',imgsize, transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=1, num_workers=1)
    pathoutput = r"D:\1WXJ\Estimate\Model_2\Res_A_170_2_ZJ_0.0005"#
    pathlosssave = os.path.join(r'D:\1WXJ\Estimate\plot_2\Res_A_170_2_ZJ_0.0005')#
    tys_time = {}  # map typhoon-time to wind
    totalloss = []
    test_allloss = []
    max_RMSE = 0
    if not os.path.exists(pathlosssave):
        os.makedirs(pathlosssave)
    if not os.path.exists(pathoutput):
        os.makedirs(pathoutput)
    model_path = r"D:\1WXJ\Estimate\Model\MODEL_49946_Res34/"
    net = resnet34(pretrained=False, modelpath=model_path, num_classes=1000)  # batch_size=120, 1GPU Memory < 7000M
    # resnet34's final feature dimension is 512 (2048 would be the resnet50/101 size)
    net.fc = nn.Sequential(nn.Linear(512, 512),
                           nn.ReLU(),
                           nn.Dropout(p=0.2),
                           nn.Linear(512, 64),
                           nn.ReLU(),
                           nn.Linear(64, 1))
    net.cuda()


    # net.load_state_dict(torch.load(path_+'net_relu.pth'))
    print(net)

    # define the loss function and classifier
    criterion = nn.SmoothL1Loss()
    # criterion2 = nn.MSELoss()
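
A hedged sketch of one training step with the SmoothL1Loss criterion defined above; the Adam optimizer, the learning rate, and the (images, winds) batch layout of trainloader are assumptions, not the original training loop:

import torch.optim as optim

optimizer = optim.Adam(net.parameters(), lr=0.0005)  # assumed optimizer and learning rate
for images, winds in trainloader:                    # assumed batch layout
    images, winds = images.cuda(), winds.cuda().float()
    optimizer.zero_grad()
    preds = net(images).squeeze(1)   # the fc head above outputs one regression value per image
    loss = criterion(preds, winds)   # SmoothL1Loss between predicted and target wind value
    loss.backward()
    optimizer.step()
    totalloss.append(loss.item())
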
Example #4
print('num of val images:', len(val_data.images_path))
#print('num of val labels:', len(val_data.images_labels))


train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers)

# batch_size=120, 1GPU Memory < 7000M
#print('train dataset len: {}'.format(len(train_dataloader.dataset)))

val_dataloader = DataLoader(val_data, batch_size=batch_size, shuffle=False, num_workers=num_workers)
#print('val dataset len: {}'.format(len(val_dataloader.dataset)))

# models
#model = resnet18(pretrained=False, modelpath=model_path)  # batch_size=120, 1GPU Memory < 7000M

model = resnet34(pretrained=False, modelpath=model_path)  # batch_size=120, 1GPU Memory < 7000M

#model = resnet50(pretrained=False, modelpath=model_path)  # batch_size=120, 1GPU Memory < 7000M

print(model)

# optimizer
print("Training with sgd")
params.optimizer = torch.optim.SGD(model.parameters(), lr=init_lr,
                                   momentum=momentum,
                                   weight_decay=weight_decay,
                                   nesterov=nesterov)

# Train
params.lr_scheduler = ReduceLROnPlateau(params.optimizer, 'min', factor=lr_decay, patience=10, cooldown=10, verbose=True)
trainer = Trainer(model, params, train_dataloader, val_dataloader)
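
A hedged sketch of how the ReduceLROnPlateau scheduler configured above is typically stepped once per epoch; the Trainer methods shown here (train_one_epoch, validate) and num_epochs are assumptions, not the actual Trainer API:

num_epochs = 60  # assumed
for epoch in range(num_epochs):
    train_loss = trainer.train_one_epoch()  # hypothetical Trainer method
    val_loss = trainer.validate()           # hypothetical Trainer method
    params.lr_scheduler.step(val_loss)      # 'min' mode lowers the lr when val_loss stops improving
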
Example #5
    cam_img = np.uint8(255 * cam_img)  # scale the CAM to the 0-255 range
    output_cam.append(cv2.resize(cam_img, size_upsample))  # upsample to the output size
    return output_cam


def hook_feature(module, input, output):
    # forward hook: store the feature maps of the hooked layer for CAM computation
    features_blobs.append(output.data.cpu().numpy())


#Parameters:
model_path = './models/model_34/ckpt_epoch_60.pth'
finalconv_name = 'layer4'
features_blobs = []
image_path = 'test_cloth.jpg'
# load model:
model = resnet34(pretrained=False)
#model=torch.nn.DataParallel(model)
model_state = torch.load(model_path)
# the checkpoint was saved from a DataParallel-wrapped model, so strip the 'module.' prefix from every key
t_state = {}
for k, v in model_state.items():
    t_state[k[7:]] = v

model.load_state_dict(t_state)
print('Load ckpt from', model_path, '\n')
model = model.cuda()
model.eval()

# hook: register on 'layer4' so its feature maps are captured on each forward pass
handle = model._modules.get(finalconv_name).register_forward_hook(hook_feature)

# feed the image and prediction:
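
The snippet is cut off here. What follows is a hedged sketch of what this step typically looks like in the standard CAM recipe; the preprocessing transform, the returnCAM name for the truncated helper whose tail appears at the top of this example, and the output filename are all assumptions, not the original continuation:

import cv2
import numpy as np
import torch
from PIL import Image
from torchvision import transforms

preprocess = transforms.Compose([            # assumed preprocessing
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
img = Image.open(image_path).convert('RGB')
img_tensor = preprocess(img).unsqueeze(0).cuda()

logit = model(img_tensor)                    # the forward pass fills features_blobs via the hook
probs = torch.softmax(logit, dim=1).squeeze(0)
class_idx = int(probs.argmax().item())

# fc-layer weights for the predicted class are used to weight the hooked feature maps
weight_softmax = model.fc.weight.detach().cpu().numpy()
cams = returnCAM(features_blobs[0], weight_softmax, [class_idx])  # assumed helper name
heatmap = cv2.applyColorMap(cams[0], cv2.COLORMAP_JET)
cv2.imwrite('cam_result.jpg', heatmap)       # assumed output path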