Example #1
 def __init__(self, lr, beta1, model_path, data_path, result_path, net_G_type="unet"):
     self.device = torch.device('cuda:0')
     # datasets (train/val splits of the same myDataset)
     self.train_dataset = mydataset.myDataset(data_path)
     # self.test_dataset = mydataset.myDataset(data_path)
     self.val_dataset = mydataset.myDataset(data_path, val_set=True)
     self.result_path = result_path
     self.model_path = model_path
     self.data_path = data_path
     # generator: plain U-Net or a global generator, selected by net_G_type
     if net_G_type == "unet":
         self.net_G = unet.Unet(3, 3, 8).to(self.device)
     else:
         self.net_G = unet.GlobalGenerator(3, 3, 8).to(self.device)
     # discriminator: multiscale variant (the plain net_D is left commented out)
     # self.net_D = net_D.net_D(6).to(self.device)
     self.net_D = net_D.MultiscaleDiscriminator(6, 64, 3, nn.BatchNorm2d, False, 1, True).to(self.device)
     self.init_weights()
     # resume from checkpoints if they exist
     if os.path.exists(model_path + "/net_G.pth"):
         self.net_G.load_state_dict(torch.load(model_path + "/net_G.pth"))
     if os.path.exists(model_path + "/net_D.pth"):
         self.net_D.load_state_dict(torch.load(model_path + "/net_D.pth"))
     self.optimizer_G = torch.optim.Adam(self.net_G.parameters(), lr=lr, betas=(beta1, 0.999))
     self.optimizer_D = torch.optim.Adam(self.net_D.parameters(), lr=lr, betas=(beta1, 0.999))
     self.Tensor = torch.cuda.FloatTensor
     # losses: LSGAN adversarial, L1 (feature-matching) and VGG perceptual
     self.criterionGAN = GANLoss(use_lsgan=True, tensor=self.Tensor).to(self.device)
     self.criterionFeat = torch.nn.L1Loss().to(self.device)
     self.real_label = torch.tensor(1.0)
     self.fake_label = torch.tensor(0.0)
     self.fake_pool = ImagePool(0)
     self.criterionVGG = VGGLoss(0).to(self.device)
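This constructor wires up the pix2pixHD-style loss trio: an LSGAN adversarial loss (GANLoss with use_lsgan=True), an L1 feature-matching loss and a VGG perceptual loss. Purely as an illustration of how these objects usually fit together, a generator step might look like the sketch below; the method name step_G, the inputs pred_fake, fake_image, real_image and loss_feat, and the 10.0 weights are assumptions for the sketch, not code from this example.

 def step_G(self, pred_fake, fake_image, real_image, loss_feat):
     # sketch only: adversarial term plus weighted feature-matching and perceptual terms
     loss_G = (self.criterionGAN(pred_fake, True)
               + 10.0 * loss_feat
               + 10.0 * self.criterionVGG(fake_image, real_image))
     self.optimizer_G.zero_grad()
     loss_G.backward()
     self.optimizer_G.step()
     return loss_G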
Example #2
 def __init__(self, lr, beta1, model_path, data_path, result_path):
     self.device = torch.device('cuda:0')
     self.train_dataset = mydataset.myDataset(data_path)
     # self.test_dataset=mydataset.myDataset(data_path)
     self.val_dataset = mydataset.myDataset(data_path, val_set=True)
     self.result_path = result_path
     self.model_path = model_path
     self.data_path = data_path
     self.net_G = unet.Unet(3, 3, 8).to(self.device)
     self.net_D = net_D.net_D(6).to(self.device)
     self.init_weights()
     if os.path.exists(model_path + "/net_G.pth"):
         self.net_G.load_state_dict(torch.load(model_path + "/net_G.pth"))
     if os.path.exists(model_path + "/net_D.pth"):
         self.net_D.load_state_dict(torch.load(model_path + "/net_D.pth"))
     self.optimizer_G = torch.optim.Adam(self.net_G.parameters(),
                                         lr=lr,
                                         betas=(beta1, 0.999))
     self.optimizer_D = torch.optim.Adam(self.net_D.parameters(),
                                         lr=lr,
                                         betas=(beta1, 0.999))
     self.gan_loss = nn.MSELoss().to(self.device)
     self.l1_loss = nn.L1Loss().to(self.device)
     self.real_label = torch.tensor(1.0)
     self.fake_label = torch.tensor(0.0)
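With nn.MSELoss as the adversarial criterion, this is the least-squares GAN (LSGAN) objective. A minimal sketch of how the pieces set up here are typically combined in a discriminator update, assuming pred_real and pred_fake are the discriminator outputs on real and generated pairs (step_D and both input names are assumptions, not from the example):

 def step_D(self, pred_real, pred_fake):
     # sketch only: push outputs on real pairs toward 1 and on fake pairs toward 0;
     # pred_fake should come from a detached generator output so no gradient reaches net_G here
     real_target = self.real_label.to(self.device).expand_as(pred_real)
     fake_target = self.fake_label.to(self.device).expand_as(pred_fake)
     loss_D = 0.5 * (self.gan_loss(pred_real, real_target) +
                     self.gan_loss(pred_fake, fake_target))
     self.optimizer_D.zero_grad()
     loss_D.backward()
     self.optimizer_D.step()
     return loss_D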
Example #3
 def __init__(self, cfg, root):
     self.dataPath = os.path.join("data_maskrcnn", root)
     self.frames = os.path.join(self.dataPath, 'Frames')
     self.bboxes = os.path.join(self.dataPath, 'Bboxes')
     self.optFlow = os.path.join(self.dataPath, 'OptFlow')
     self.transforms = build_transforms(cfg, True)
     self.dataloader = myDataset(self.dataPath, transforms=self.transforms)
     self.cfg = cfg
Example #4
 def __init__(self, cfg, mode, opt_flow_on=False):
     self.dataPath = "Movie_Frames_{}".format(mode)
     self.imgPath = os.path.join(self.dataPath, 'Frames')
     self.csvPath = os.path.join(self.dataPath, 'Bboxes_mydataset')
     self.npyPath = os.path.join(self.dataPath, 'opt_flow_np')
     self.transforms = build_transforms(cfg, True)
     self.dataloader = myDataset(self.csvPath,
                                 self.imgPath,
                                 transforms=self.transforms)
     self.mode = mode
     self.cfg = cfg
Example #5
config = Config()

# load the data for experiment 1 in the manuscript

data = scipy.io.loadmat('train.mat')
inp  = data['inp']
lab  = data['lab']

traininp = np.reshape(np.transpose(inp, (2, 0, 1)), (270, 512, 512, 1))
trainlab = np.reshape(np.transpose(lab, (2, 0, 1)), (270, 512, 512, 1))

transform = transforms.Compose([transforms.ToTensor()])

# make the data iterator for training data
train_data = myDataset(traininp,trainlab, transform)
trainloader = torch.utils.data.DataLoader(train_data, batch_size=config.batchsize, shuffle=True, num_workers=2)

# class-balance statistics over the training labels
labels = trainlab.flatten()
class_count = np.bincount(labels, minlength=3)
propensity_score = class_count / labels.size  # per-class frequency
class_weights = 1 / propensity_score          # inverse-frequency class weights
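
# A minimal sketch (an assumption, not part of this snippet): inverse-frequency
# weights like these are typically handed to a weighted cross-entropy criterion
# so rare classes are not drowned out. Assumes torch.nn is imported as nn.
weight = torch.tensor(class_weights, dtype=torch.float32)
criterion = nn.CrossEntropyLoss(weight=weight)  # one weight per class (3 here, from minlength=3)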



print('----------------------------------------------------------')
#%%
# Create the object for the network

if config.gpu == True:    
Example #6
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
import numpy as np
import tensorflow as tf
slim = tf.contrib.slim

from mydataset import myDataset
from mynet import myNet
import configs as cfgs 

# datasets
train_datasets = myDataset(cfgs.TRAIN_IMGPATH)
val_datasets = myDataset(cfgs.VAL_IMGPATH)

# network, loss, acc
net = myNet()
output, logits = net.backbone()
loss = net.compute_loss(net.labels, output)
acc = net.compute_acc(net.labels, logits)

# hyperparameters: piecewise-constant learning-rate schedule
global_step = tf.compat.v1.train.get_or_create_global_step()
lr = tf.compat.v1.train.piecewise_constant(global_step,
                                 boundaries=[200000],
                                 values=[cfgs.BASE_lr, cfgs.BASE_lr / 10.0])

# optimizer, train_op
optimizer = tf.compat.v1.train.AdamOptimizer(lr)
update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
with tf.compat.v1.control_dependencies(update_ops):
    weight_decay_loss = tf.compat.v1.add_n(tf.compat.v1.losses.get_regularization_losses())
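    # Sketch of a typical continuation (an assumption, not part of the original
    # example): add the regularization term to the task loss and build the train
    # op inside the control_dependencies block, so the batch-norm updates
    # collected in UPDATE_OPS run on every training step.
    total_loss = loss + weight_decay_loss
    train_op = optimizer.minimize(total_loss, global_step=global_step)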