Example #1
def train(args):
    model = Unet(3, 1).to(device)
    batch_size = args.batch_size
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("data/train",transform=x_transforms,target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
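
Most of the examples on this page call a train_model helper and reference module-level x_transforms, y_transforms, and device without defining them. The following is a minimal sketch of what those pieces typically look like in these liver-segmentation repos; the transforms mirror Example #2 below, but the loop body and defaults are assumptions, not any project's actual code.

import torch
import torch.nn as nn
from torchvision import transforms

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Assumed transforms, mirroring Example #2 below.
x_transforms = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])
y_transforms = transforms.ToTensor()

def train_model(model, criterion, optimizer, dataloaders, num_epochs=20):
    # Hypothetical training loop; the real helper varies by project.
    for epoch in range(num_epochs):
        epoch_loss, step = 0.0, 0
        for x, y in dataloaders:
            step += 1
            inputs, labels = x.to(device), y.to(device)
            optimizer.zero_grad()
            loss = criterion(model(inputs), labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
        print("epoch %d loss: %.4f" % (epoch, epoch_loss / max(step, 1)))
    torch.save(model.state_dict(), "weights_final.pth")
    return model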
Example #2
def train():
    x_transforms = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
    ])
    y_transforms = transforms.ToTensor()

    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(r"./results/weights.pth"))
    batch_size = 1
    num_epochs = 2
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset(r'D:\project\data_sets\liver\train',
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    data_loaders = DataLoader(liver_dataset,
                              batch_size=batch_size,
                              shuffle=True,
                              num_workers=0)
    print("Start training at ", strftime("%Y-%m-%d %H:%M:%S", localtime()))
    for epoch in range(num_epochs):
        prev_time = datetime.now()
        print('Epoch {}/{}'.format(epoch, num_epochs))
        print('-' * 10)
        dt_size = len(data_loaders.dataset)
        epoch_loss = 0
        step = 0
        for x, y in data_loaders:
            step += 1
            inputs = x.to(device)
            labels = y.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            if (step % 10) == 0:
                print("%d/%d, train_loss:%0.3f" %
                      (step, (dt_size - 1) // data_loaders.batch_size + 1,
                       loss.item()))
        # print the results of the current training
        cur_time = datetime.now()
        h, remainder = divmod(int((cur_time - prev_time).total_seconds()), 3600)  # total_seconds(), not .seconds, which wraps at 24 h
        m, s = divmod(remainder, 60)
        time_str = 'Time:{:.0f}:{:.0f}:{:.0f}'.format(h, m, s)
        epoch_str = "epoch {} loss:{:.4f} ".format(epoch, epoch_loss / step)  # average over the actual batch count, not a hard-coded 400
        print(epoch_str + time_str)
        res_record("Time:" + strftime("%Y-%m-%d %H:%M:%S  ", localtime()))
        res_record(epoch_str + '\n')
    print("End training at ", strftime("%Y-%m-%d %H:%M:%S", localtime()))
    # record the trained weights
    torch.save(
        model.state_dict(),
        './results/weights{}_{}_{}.pth'.format(localtime().tm_mday,
                                               localtime().tm_hour,
                                               localtime().tm_sec))
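
Note that, unlike Example #1, this example pairs torch.nn.BCELoss with the raw Unet output. BCELoss expects probabilities in [0, 1], so it is only correct if the network ends in a sigmoid; if it does not, one of the following two patterns is needed (a sketch, not this project's code):

# Either keep raw logits and use the numerically stabler fused loss ...
criterion = torch.nn.BCEWithLogitsLoss()
loss = criterion(model(inputs), labels)

# ... or apply the sigmoid explicitly before plain BCELoss.
criterion = torch.nn.BCELoss()
loss = criterion(torch.sigmoid(model(inputs)), labels)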
Example #3
def train(self):
    model = Unet(in_ch=2, out_ch=2).to(device)
    # batch_size = 1
    # criterion = nn.BCEWithLogitsLoss()
    criterion = nn.BCELoss()
    # criterion = nn.CrossEntropyLoss()
    # optimizer = optim.Adam(model.parameters(), lr=0.01)
    optimizer = optim.SGD(model.parameters(), lr=0.005, momentum=0.9)
    # data_set = Train_Data(data_root='./train', mask_root='./Train_GT')
    self.train_model(model, criterion, optimizer)
Example #4
def train():
    model = Unet(5, 2).to(device)
    model.train()
    batch_size = args.batch_size
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    PAVE_dataset = SSFPDataset("train", transform=1, target_transform=1)
    dataloaders = DataLoader(PAVE_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=0)
    train_model(model, criterion, optimizer, dataloaders)
Example #5
File: main.py  Project: nlroel/unet
def train(args):
    model = Unet(1, 1).to(device)
    batch_size = args.batch_size
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.01)
    liver_dataset = LiverDataset("/gs/home/majg/liupeng/code",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=10)
    train_model(model, criterion, optimizer, dataloaders)
Example #6
def train():
    model = Unet(3, 1).to(device)
    batch_size = 8
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("/home/xm/Program/ALL-Data/unetdata/test",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #7
def train():
    model = Unet(3, 1).to(device)
    model.load_state_dict(torch.load(r"./results/weights4_13_40.pth"))
    batch_size = 5
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset(r"D:\project\data_sets\data_sci\train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=0)
    train_model(model, criterion, optimizer, dataloaders)
Example #8
def train():
    model = Unet(3, 1).to(device)
    batch_size = args.batch_size
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset(
        r"H:\BaiduNetdisk\BaiduDownload\u_net_liver-master\data\train",
        transform=x_transforms,
        target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=0)
    train_model(model, criterion, optimizer, dataloaders)
Example #9
def train():
    #logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
    model = Unet(3, 1).to(device)
    batch_size = 1
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #10
def train():
    model = Unet(1, 1).to(device)
    batch_size = 1
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters())
    train_dataset = TrainDataset("dataset/train/image",
                                 "dataset/train/label",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(train_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #11
def train():
    model = Unet(3, 3).to(device)
    batch_size = 3
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-5)
    liver_dataset = LiverDataset("data/train_xin/liver_bmp",
                                 "data/train_xin/mask_bw",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=0)
    train_model(model, criterion, optimizer, dataloaders)
Example #12
def train():
    model = Unet(3, 1).to(device)
    #model.load_state_dict(torch.load('./checkpoints/weights_39.pth'))

    batch_size = args.batch_size
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #13
def train():
    model = Unet(3, 1).to(device)
    #summary(model,(3,512,512))
    batch_size = 1
    criterion = torch.nn.BCELoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("data/image",
                                 "data/mask",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #14
def train(args):
    model = Unet(3, 1).to(device)
    #begin add
    # checkpoint = torch.load("./weights_19.pth",map_location=device)
    # model.load_state_dict(checkpoint['model_state_dict'])
    #end add
    batch_size = args.batch_size
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("./data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
Example #15
def train(args):
    model = Unet(3, 1).to(device)  # 3 input channels, 1 output channel
    batch_size = args.batch_size
    criterion = nn.BCEWithLogitsLoss()  # loss function
    optimizer = optim.Adam(model.parameters())  # optimizer over the model's parameters
    liver_dataset = LiverDataset("data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    #  Load the dataset: each item is an (image, mask) pair, and len() gives the number of images.
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    #  DataLoader wraps a custom dataset's read interface and packs samples into Tensors of batch_size
    #  batch_size=4, epoch=10, 100 minibatches in total
    #  shuffle: reshuffle the data every epoch
    #  num_workers: load data with multiple worker processes to speed up loading
    train_model(model, criterion, optimizer, dataloaders)  # train
Example #16
def train(args):
    model = Unet(3, 1).to(device)
    batch_size = args.batch_size
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters())
    liver_dataset = LiverDataset("data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=1)
    # shuffle=True  # randomize order
    # num_workers=2  # multiple worker processes
    # DataLoader: packs the output of a custom data-reading interface (or a built-in PyTorch dataset) into Tensors of batch_size
    # batch_size: how many samples per minibatch to load; here 4, with a dataset of 400 images, giving 100 minibatches
    # shuffle: reshuffle the data every epoch (here epoch=10); generally used for training data
    # num_workers: load data with multiple processes to speed up loading
    train_model(model, criterion, optimizer, dataloaders)
Example #17
def train(args):
    model = Unet(3, 1).to(device)
    batch_size = args.batch_size

    LR = 0.005
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=0.0005)
    criterion = nn.BCEWithLogitsLoss()
    lr_list = []

    liver_dataset = LiverDataset("data/train",
                                 transform=x_transforms,
                                 target_transform=y_transforms)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)
    train_model(model, criterion, optimizer, dataloaders)
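
The unused LR and lr_list variables above suggest a learning-rate schedule was planned. A self-contained sketch of that pattern with a StepLR schedule (the schedule parameters and the stand-in module are assumptions, not this project's code):

import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 1)  # stand-in module, just to demonstrate the schedule
optimizer = optim.SGD(model.parameters(), lr=0.005, momentum=0.9,
                      weight_decay=0.0005)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.5)
lr_list = []
for epoch in range(50):
    # ... one training pass over the dataloader would go here ...
    scheduler.step()
    lr_list.append(optimizer.param_groups[0]["lr"])  # record the decayed LR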
Example #18
def train(args):
    model = Unet(3, 1).to(device)
    batch_size = args.batch_size
    # Loss function: creates a criterion measuring the binary cross entropy between the target and the output.
    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters())

    # initialize the dataset
    liver_dataset = LiverDataSet(
        "/home/ming/code/u-net-liver-pytorch/data/train",
        transform=x_transforms,
        target_transform=y_transforms)

    # Read the dataset.
    # Common DataLoader options: batch_size (size of each batch),
    # shuffle (whether to reshuffle the data each epoch),
    # num_workers (number of subprocesses used to load the data)
    dataloaders = DataLoader(liver_dataset,
                             batch_size=batch_size,
                             shuffle=True,
                             num_workers=4)

    train_model(model, criterion, optimizer, dataloaders)
Example #19
def dnp(run_name,
        noisy_file,
        samples_dir,
        LR=0.001,
        num_iter=5000,
        save_every=50):

    # initiate model
    nlayers = 6
    model = Unet(nlayers=nlayers, nefilters=60).cuda()
    samples_dir = os.path.join(samples_dir, run_name)
    utils.makedirs(samples_dir)
    # load data
    target, sr = utils.load_wav_to_torch(noisy_file)
    target = target[:(len(target) // 2**nlayers) * 2**nlayers]
    target = target / utils.MAX_WAV_VALUE
    net_input = torch.rand_like(target)  # renamed from `input` to avoid shadowing the builtin
    net_input = (net_input - 0.5) * 2
    target, net_input = target.cuda(), net_input.cuda()
    criterion = torch.nn.MSELoss()

    optimize(model.parameters(), model, criterion, net_input, target,
             samples_dir, LR, num_iter, sr, save_every)
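
The optimize helper that dnp delegates to is not shown. A minimal sketch of what such a fit loop typically does in deep-noise-prior code; the signature mirrors the call above, but the body is an assumption:

import torch

def optimize(params, model, criterion, net_input, target, samples_dir,
             LR, num_iter, sr, save_every):
    optimizer = torch.optim.Adam(params, lr=LR)
    for i in range(num_iter):
        optimizer.zero_grad()
        out = model(net_input)
        loss = criterion(out, target)
        loss.backward()
        optimizer.step()
        if i % save_every == 0:
            # a real implementation would also write a sample to samples_dir
            print("iter %d loss %.6f" % (i, loss.item()))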
Example #20
reuse_weights = True
if reuse_weights:
    net.load_state_dict(torch.load('./models/model_{}.pth'.format(name)))
    try:
        best_val_loss = np.load('./models/best_val_loss_{}.npy'.format(name))
    except FileNotFoundError:  # no saved loss yet
        best_val_loss = np.finfo(np.float64).max
    print("Model reloaded. Previous lowest validation loss =",
          str(best_val_loss))
else:
    best_val_loss = np.finfo(np.float64).max

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
net.to(device)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(net.parameters(), lr=5e-4, weight_decay=1e-4)

best_weights = net.state_dict()
num_epochs = 5
train_loss = np.zeros(num_epochs)
validation_loss = np.zeros(num_epochs)

print('\nStart training')
np.savetxt('epochs_completed.txt', np.zeros(1), fmt='%d')
for epoch in range(num_epochs):  #TODO decide epochs
    print('-----------------Epoch = %d-----------------' % (epoch + 1))
    train_loss[epoch], _ = train(train_loader, net, criterion, optimizer,
                                 device, epoch + 1)

    # TODO create your evaluation set, load the evaluation set and test on evaluation set
    val_loss = test(eval_loader, net, criterion, device)
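
The snippet ends before the best-model bookkeeping it sets up (best_val_loss, best_weights). A sketch of the usual continuation inside the epoch loop, reusing the names already in scope; the file layout is an assumption:

    # Hypothetical continuation: keep the weights with the lowest validation loss.
    validation_loss[epoch] = val_loss
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_weights = copy.deepcopy(net.state_dict())  # needs `import copy`
        torch.save(best_weights, './models/model_{}.pth'.format(name))
        np.save('./models/best_val_loss_{}.npy'.format(name), best_val_loss)
    np.savetxt('epochs_completed.txt', np.array([epoch + 1]), fmt='%d')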
Example #21
    experiment2(
        exp2,
        optimizer,
        scheduler,
        dataloaders,
        dataset_sizes,
        num_epochs=25
    )


if exp == 'exp3':
    enhanced_vgg = EnhancedVGGNet(freeze_max=False)
    exp3 = Unet('exp3', enhanced_vgg)

    all_trainable_layers = [param for param in exp3.parameters() if param.requires_grad]
    optimizer = optim.Adam(all_trainable_layers, lr=lr, weight_decay=w_decay)
    scheduler = lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=gamma)  # decay LR by a factor of gamma every step_size epochs

    print('Configs:')
    print('Exp3-  Learning Rate %f, Number of epochs: %d, Batch size: %d' %
          (lr, epochs, batch_size)
    )

    experiment3(
        exp3,
        optimizer,
        scheduler,
        dataloaders,
        dataset_sizes,
        num_epochs=25
    )
Example #22
        print(labels.shape, labels.min(), labels.max(), labels.dtype)

    ########################################################################
    # Model
    if args.model == 'resnet34' or args.model == 'se_resnet50':
        net = Unet(args.model,
                   encoder_weights="imagenet",
                   classes=4,
                   activation=None,
                   args=args).to(
                       device)  # pass the model specification to the encoder (resnet34 / se_resnet50)

    ########################################################################
    # optimizer
    # if args.optim == 'adam':
    optimizer = optim.Adam(net.parameters(), lr=0.001)

    ########################################################################
    # Train the network
    seed_everything(seed=args.seed)
    if args.load_mod:
        history = {
            'Train_loss': [],
            'Train_dice': [],
            'Valid_loss': [],
            'Valid_dice': []
        }
        net.load_state_dict(torch.load(MODEL_FILE))
    else:
        net_swa, history = train_net(net, optimizer, device, args, LOG_FILE,
                                     MODEL_FILE)
Example #23
from utils import plot_fig, get_mnist_loaders
from torchvision.utils import save_image

tp = 'ddpm'

if tp == 'ddpm':
    epochs = 10
    ema_decay = 0.9999
    update_ema_every = 10

    model = Unet(dim=16, channels=1, dim_mults=(1, 2, 4)).cuda()
    # betas = cosine_beta_schedule(1000, )
    betas = np.linspace(1e-4, 1e-2, 1000)
    diffusion = gaussian_ddpm(model, loss_type='l2', betas=betas).cuda()
    ema_model = copy.deepcopy(model)
    optim = torch.optim.Adam(model.parameters(), lr=1e-3)
    train_loader, test_loader, val_loader = get_mnist_loaders()
    ema = EMA(ema_decay)

    num = 0
    for epoch in range(epochs):
        with tqdm(train_loader) as it:
            for x, label in it:
                num += 1
                optim.zero_grad()
                loss = diffusion(x.cuda())
                loss.backward()
                optim.step()
                it.set_postfix(ordered_dict={
                    'train loss': loss.item(),
                    'epoch': epoch
                })
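
The loop is truncated before the EMA step that ema = EMA(ema_decay) sets up. In lucidrains-style DDPM code the periodic update usually looks like the following; the update_model_average method is an assumption about this EMA helper's API:

                # Hypothetical continuation: periodic EMA weight update.
                if num % update_ema_every == 0:
                    ema.update_model_average(ema_model, model)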
Example #24
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True

train_data_dir = 'data/train/'
batch_size = 16
print("load training images")
data = utils.np_from_imgs(train_data_dir, batch_size)
batch_num = len(data)

device = torch.device('cuda')
dtype = torch.cuda.FloatTensor
print("build unet")
net = Unet().to(device)
# net.apply(utils.weights_init)
optimizer = torch.optim.Adam(net.parameters())
criterion = nn.MSELoss()


def train(epoch):
    epoch_loss = 0.
    for j in range(1, batch_num + 1):
        clean_data = data[j - 1]
        sigma = np.random.uniform(0, 51)
        input_data = np.clip(
            clean_data + np.random.normal(scale=sigma, size=clean_data.shape),
            0, 255)
        input_tensor = torch.from_numpy(input_data).type(dtype).to(device)
        optimizer.zero_grad()
        output_tensor = net(input_tensor)
        target_tensor = torch.from_numpy(clean_data).type(dtype).to(device)
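
The step is truncated right before the loss computation. The usual denoising continuation, using only the names already in scope (a sketch, not the project's code):

        loss = criterion(output_tensor, target_tensor)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    print("epoch %d avg loss: %.4f" % (epoch, epoch_loss / batch_num))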
Example #25
                         num_workers=10,
                         worker_init_fn=worker_init_fn)
loadtest = data.DataLoader(test_set, batch_size=1, num_workers=10)
loadval = data.DataLoader(validation_set,
                          batch_size=6,
                          num_workers=10,
                          worker_init_fn=worker_init_fn)
# In[6]:

#model = Unet(skipDim, quantization_channels, residualDim,device)
model = Unet()
#model = nn.DataParallel(model)
model = model.cuda()
criterion = nn.MSELoss()
# The WaveNet paper reports that cross-entropy loss works far better than MSELoss.
optimizer = optim.Adam(model.parameters(),
                       lr=1e-4,
                       weight_decay=1e-6,
                       betas=(0.5, 0.999))
# use adam to train

maxloss = np.zeros(50) + 100
# In[7]:
iteration = 0
start_epoch = 0
if continueTrain:  # if continueTrain, the program will find the checkpoints
    if os.path.isfile(resumefile):
        print("=> loading checkpoint '{}'".format(resumefile))
        checkpoint = torch.load(resumefile)
        start_epoch = checkpoint['epoch']
        iteration = checkpoint['iteration']
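
The checkpoint restore is truncated after iteration. A sketch of the usual remainder; the checkpoint keys below are assumptions:

        model.load_state_dict(checkpoint['state_dict'])      # assumed key
        optimizer.load_state_dict(checkpoint['optimizer'])   # assumed key
        print("=> loaded checkpoint '{}' (epoch {})".format(resumefile, start_epoch))
    else:
        print("=> no checkpoint found at '{}'".format(resumefile))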
Example #26
                        path_to_data=path_to_data_test,
                        paired=True)
    testloader = data.DataLoader(loader,
                                 batch_size=1,
                                 num_workers=1,
                                 shuffle=False,
                                 drop_last=False,
                                 pin_memory=False)

    unet_qpi2dapi = Unet(feature_scale=8).cuda()
    unet_dapi2qpi = Unet(feature_scale=8).cuda()

    D_dapi = ResnetLike128(K=16).cuda()
    D_qpi = ResnetLike128(K=16).cuda()

    optimizer_unet_qpi2dapi = optim.Adam(unet_qpi2dapi.parameters(),
                                         lr=init_lr,
                                         betas=(0.5, 0.9))
    optimizer_unet_dapi2qpi = optim.Adam(unet_dapi2qpi.parameters(),
                                         lr=init_lr,
                                         betas=(0.5, 0.9))

    optimizer_D_dapi = optim.Adam(D_dapi.parameters(),
                                  lr=init_lr,
                                  betas=(0.5, 0.9))
    optimizer_D_qpi = optim.Adam(D_qpi.parameters(),
                                 lr=init_lr,
                                 betas=(0.5, 0.9))

    scheduler_unet_qpi2dapi = optim.lr_scheduler.StepLR(
        optimizer_unet_qpi2dapi, 2000, gamma=0.1, last_epoch=-1)
Example #27
                    help='Run model fp16 mode.')
args = parser.parse_args()

train_dataset = dataset.MarsDataset()
val_dataset = dataset.MarsDataset(val=True)

train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
model = Unet(1, 1)
model.cuda()
lr = 0.01
momentum = 0.9

if args.fp16:
    assert torch.backends.cudnn.enabled, "fp16 mode requires cudnn backend to be enabled."
    model = network_to_half(model)
    param_copy = [param.clone().type(torch.cuda.FloatTensor).detach() for param in model.parameters()]
    for param in param_copy:
        param.requires_grad = True
else:
    param_copy = list(model.parameters())
optimizer = torch.optim.SGD(param_copy, lr, momentum=momentum)

if args.fp16:
    model.zero_grad()
#optimizer = optim.Adagrad(model.parameters(), lr=0.01)

def train(epoch):
    start = time.time()
    if args.fp16:
        loss_fn = nn.MSELoss().cuda().half()
    else:
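
The else branch is cut off; by symmetry with the fp16 branch above, it presumably builds the full-precision loss (an assumption):

        loss_fn = nn.MSELoss().cuda()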
Example #28
        loader = DataLoader(split='test',
                            path_to_data=r'D:\jakubicek\spectral_CT_data',  # raw string for the Windows path
                            ids=test_ids)
        testloader = data.DataLoader(loader,
                                     batch_size=1,
                                     num_workers=0,
                                     shuffle=False,
                                     drop_last=False,
                                     pin_memory=False)

        unet = Unet(filters=[8, 16, 32, 32]).cuda()

        D = ResnetLike128(K=16).cuda()

        optimizer_unet = optim.Adam(unet.parameters(),
                                    lr=init_lr,
                                    betas=(0.5, 0.9))

        optimizer_D = optim.Adam(D.parameters(), lr=init_lr, betas=(0.5, 0.9))

        scheduler1 = optim.lr_scheduler.StepLR(optimizer_unet,
                                               6000,
                                               gamma=0.1,
                                               last_epoch=-1)
        scheduler2 = optim.lr_scheduler.StepLR(optimizer_D,
                                               6000,
                                               gamma=0.1,
                                               last_epoch=-1)

        #    l2_loss = nn.MSELoss()
Example #29
    train_dataloader = DataLoader(dataset=SunnybrookDataset(
        TRAIN_IMG_PATH, TRAIN_CONTOUR_PATH, contour_type, crop_size),
                                  batch_size=2,
                                  shuffle=True,
                                  num_workers=4)
    validate_dataloader = DataLoader(dataset=SunnybrookDataset(
        VALID_IMG_PATH, VALID_CONTOUR_PATH, contour_type, crop_size),
                                     batch_size=2,
                                     shuffle=False,
                                     num_workers=4)

    unet = Unet(1, num_class, crop_size).to(device)

    criterion = MultiLoss()
    optimizer = optim.Adam(unet.parameters())

    epochs = 100

    for epoch in range(epochs):

        print("train epoch ", epoch + 1)
        epoch_loss = 0

        for step, (img_batch, mask_batch) in enumerate(train_dataloader):

            img_batch = img_batch.float().to(
                device)  # batch_size*channel*height*width
            mask_batch = mask_batch.long().to(
                device)  # batch_size*height*width
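
The inner loop is truncated after moving the batch to the device. A sketch of the usual continuation, assuming MultiLoss is called as criterion(outputs, target):

            optimizer.zero_grad()
            outputs = unet(img_batch)
            loss = criterion(outputs, mask_batch)  # MultiLoss signature assumed
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()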
Example #30
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from unet import Unet, Layer
import time
import cv2

MODEL_NAME = f"model-{int(time.time())}"  # gives a dynamic model name, to just help with things getting messy over time.
learning_rate = 0.001
epochs = 4
validation_percentage = 0.1

u_net = Unet()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
u_net.to(device)
optimizer = optim.Adam(u_net.parameters(), lr=learning_rate)
loss_func = nn.MSELoss()


def filter_img(img00, img01):
    kernel = np.ones((4, 4), np.uint8)

    subtract = cv2.subtract((img00 + 15), img01)

    kernel2 = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
    img03 = cv2.filter2D(subtract, -1, kernel2)
    img03 = cv2.GaussianBlur(img03, (5, 5), 0)
    img03 = cv2.Canny(img03, 85, 255)
    img03 = cv2.morphologyEx(img03, cv2.MORPH_CLOSE, kernel, iterations=1)
    img03 = cv2.bitwise_not(img03)
    img03 = img03 & img00