generator = netG(  # assumed constructor name; the start of this call is truncated in the original snippet
    1,
    64,
    'unet_128',
    norm='instance',
)
discriminator = netD()
unet = Unet()
unet.load_state_dict(torch.load("./weight/unet_pretrained.pth"))

optimizer_g = torch.optim.Adam(generator.parameters(), lr=0.0002)
optimizer_d = torch.optim.Adam(discriminator.parameters(), lr=0.0002)
optimizer_s = torch.optim.Adam(unet.parameters(), lr=0.0002)

generator.cuda()
discriminator.cuda()
unet.cuda()
EPOCH = 100
num_iter = len(train_loader)
D_LOSS = []
G_LOSS = []
# S_LOSS=[]
f = open("./loss_gan.txt", 'a')
print(time.strftime('|---------%Y-%m-%d   %H:%M:%S---------|',
                    time.localtime(time.time())),
      file=f)
discriminator.train()
unet.train()
for epoch in range(EPOCH):
    if epoch == 30:
        update_lr(optimizer_g, 0.0001)
        update_lr(optimizer_d, 0.0001)
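
The update_lr helper used at epoch 30 is not part of this snippet; a minimal sketch, assuming it simply overwrites the learning rate of every parameter group:

def update_lr(optimizer, new_lr):
    # set the same learning rate on every parameter group of the optimizer
    for param_group in optimizer.param_groups:
        param_group['lr'] = new_lr
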
Example #2
 batch_size = 8
 is_gpu = torch.cuda.is_available()
 device = torch.device('cuda' if is_gpu else 'cpu')
 # config
 print('args:' + str(args))
 print('isgpu?:' + str(is_gpu))
 # print config
 r = Readdata(path)
 test_set = Cloudset(r.sub, 'test', r.test_ids, r.test_fold,
                     validation_augmentation_kaggle())
 test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)
 print('testing data loaded')
 net = Unet(3, 4).float()
 if is_gpu:
     net.cuda()
 net.load_state_dict(torch.load(model_id, map_location=device))
 print('model loaded')
 # prepare
 ans_dict = global_dict()
 image_uid = 0
 answer_label = []
 answer_index = ['Image_Label', 'EncodedPixels']
 # calculation
 t_bar = tq(test_loader)
 net.eval()
 with torch.no_grad():
     for img, masks in t_bar:
         if is_gpu:
             img = img.cuda()
         masks_pr = net(img).cpu().detach().numpy()
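
The snippet stops before the predictions are written out. Given the 'Image_Label'/'EncodedPixels' columns prepared above, a plausible continuation is to threshold each predicted channel and run-length encode it; mask2rle below is a sketch of the usual column-major RLE helper, not code from the original example:

import numpy as np

def mask2rle(mask):
    # mask: 2-D binary array; returns a run-length encoding string
    # (column-major order, 1-indexed), the format used by 'EncodedPixels'
    pixels = mask.T.flatten()
    pixels = np.concatenate([[0], pixels, [0]])
    runs = np.where(pixels[1:] != pixels[:-1])[0] + 1
    runs[1::2] -= runs[::2]
    return ' '.join(str(x) for x in runs)

# illustrative use on the raw network output (assumes positive logits mean "cloud"):
# binary = (masks_pr[0, 0] > 0).astype(np.uint8)
# answer_label.append(mask2rle(binary))
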
Example #3
model = Unet(3, 1)
"""
加载模型
"""
try:
    checkpoint = torch.load('/weights/face_weights.pth', map_location='cpu')
    model.load_state_dict(checkpoint)
    #start_epoch = checkpoint['epoch']
    #print("start_epoch:",start_epoch)
    print('===> Load last checkpoint data')
except FileNotFoundError:
    print("Can't find face_weights.pth")

cuda = torch.cuda.is_available()
if cuda:
    model.cuda()


def clean(img):
    # binarize the raw network output: negative values -> 0, positive values -> 1
    img[img < 0] = 0
    img[img > 0] = 1
    return img


model.eval()

path = "1.jpg"

img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.imshow(img)
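
The example ends before the image is actually pushed through the network. A minimal sketch of the missing inference step, assuming a 256x256 input size and pixel values scaled to [0, 1] (neither is stated in the original code):

import numpy as np
import torch

inp = cv2.resize(img, (256, 256)).astype(np.float32) / 255.0   # assumed input size
inp = torch.from_numpy(inp).permute(2, 0, 1).unsqueeze(0)      # HWC image -> NCHW tensor
if cuda:
    inp = inp.cuda()
with torch.no_grad():
    pred = model(inp).cpu().squeeze().numpy()
mask = clean(pred)          # binarize with the helper defined above
plt.imshow(mask, cmap='gray')
plt.show()
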
Example #4
def train():
    model = Unet(input_channel=opt.input_channel, cls_num=opt.cls_num)
    model_name = 'Unet_bn'
    train_logger = LogWay(
        datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S') + '.txt')
    train_data = My_Dataset(opt.train_images, opt.train_masks)
    train_dataloader = DataLoader(train_data,
                                  batch_size=opt.batch_size,
                                  shuffle=True,
                                  num_workers=0)

    if opt.cls_num == 1:
        criterion = torch.nn.BCELoss()
    else:
        criterion = torch.nn.NLLLoss()
    if use_gpu:
        model.cuda()
        criterion = criterion.cuda()

    optimizer = torch.optim.SGD(model.parameters(),
                                lr=opt.learning_rate,
                                momentum=opt.momentum,
                                weight_decay=opt.weight_decay)

    for epoch in range(opt.epoch):
        loss_sum = 0
        for i, (data, target) in enumerate(train_dataloader):
            # Variable wrappers are unnecessary in modern PyTorch
            if use_gpu:
                data = data.cuda()
                target = target.cuda()
            outputs = model(data)

            if opt.cls_num == 1:
                # BCELoss expects probabilities, so apply a sigmoid first
                outputs = torch.sigmoid(outputs).view(-1)
                mask_true = target.view(-1)
                loss = criterion(outputs, mask_true)
            else:
                # NLLLoss expects log-probabilities; F.LogSoftmax is not a function,
                # so use F.log_softmax here
                outputs = F.log_softmax(outputs, dim=1)
                loss = criterion(outputs, target)

            loss_sum = loss_sum + loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            print("epoch:{} batch:{} loss:{}".format(epoch + 1, i,
                                                     loss.item()))
        info = 'Time:{}    Epoch:{}    Loss_avg:{}\n'.format(
            str(datetime.datetime.now()), epoch + 1, loss_sum / (i + 1))
        train_logger.add(info)
        adjusting_rate(optimizer, opt.learning_rate, epoch + 1)
        realepoch = epoch + 1
        if realepoch % 10 == 0:
            save_name = datetime.datetime.now().strftime(
                '%Y-%m-%d %H-%M-%S') + ' ' + model_name + str(
                    realepoch) + '.pt'
            torch.save(model.state_dict(), save_name)
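
The adjusting_rate helper called at the end of each epoch is not defined in this snippet. A hypothetical sketch, assuming a standard step schedule that divides the base learning rate by 10 every 30 epochs:

def adjusting_rate(optimizer, base_lr, epoch):
    # step decay: lr = base_lr * 0.1^(epoch // 30); the factor and step size are assumptions
    lr = base_lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
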