Example #1
    def init_model(self):
        print("initilizing network\n")

        if torch.cuda.device_count() > 1:
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
            self.model = createDeepLabv3()
            self.model = nn.DataParallel(self.model,
                                         device_ids=self.device_ids).to(
                                             self.device)
        else:
            self.model = createDeepLabv3().to(self.device)

        # self.optim = torch.optim.Adam(self.model.parameters(), lr=self.lr, betas=(self.beta_1, self.beta_2))
        self.optim = torch.optim.SGD(self.model.parameters(), lr=self.lr)

        self.criterion = torch.nn.MSELoss(reduction='mean')

        self.transform = transforms.Compose([
            # transforms.RandomResizedCrop(128, scale=(0.08, 1.0), ratio=(0.75, 1.3333333333333333), interpolation=2),
            # transforms.RandomRotation((-90,90)),
            # transforms.ColorJitter(brightness=0, contrast=0, saturation=0, hue=0),
            # transforms.RandomHorizontalFlip(p=0.8),
            # transforms.RandomVerticalFlip(p=0.8),
            # transforms.RandomAffine((-5, 5)),
            # # transforms.GaussianBlur(kernel_size, sigma=(0.1, 2.0)),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])

        # Create the Dataset
        # Mean and standard deviation of the (RGB) color channels
        color_mean = (0.485, 0.456, 0.406)
        color_std = (0.229, 0.224, 0.225)

        self.train_ = DataSet(img_dir=self.img_dir,
                              mask_dir=self.mask_dir,
                              size=self.im_size,
                              data_type="train")
        #                                transform=DataTransform(input_size=1024, color_mean=color_mean, color_std=color_std))
        self.valid_ = DataSet(img_dir=self.img_dir,
                              mask_dir=self.mask_dir,
                              size=self.im_size,
                              data_type="validation")
        #                                transform=DataTransform(input_size=1024, color_mean=color_mean, color_std=color_std))

        self.dataloader_train = DataLoader(self.train_,
                                           batch_size=self.batch_size,
                                           num_workers=4,
                                           shuffle=True)
        self.dataloader_valid = DataLoader(self.valid_,
                                           batch_size=self.batch_size,
                                           num_workers=4,
                                           shuffle=False)

        print("initilization done\n")
Example #2
def main(data_directory, exp_directory, epochs, batch_size):
    # Create the deeplabv3 resnet101 model which is pretrained on a subset
    # of COCO train2017, on the 20 categories that are present in the Pascal VOC dataset.
    model = createDeepLabv3()
    model.train()
    data_directory = Path(data_directory)
    # Create the experiment directory if not present
    exp_directory = Path(exp_directory)
    if not exp_directory.exists():
        exp_directory.mkdir()

    # Specify the loss function
    criterion = torch.nn.MSELoss(reduction='mean')
    # Specify the optimizer with a lower learning rate
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

    # Specify the evaluation metrics
    metrics = {'f1_score': f1_score, 'auroc': roc_auc_score}

    # Create the dataloader
    dataloaders = datahandler.get_dataloader_single_folder(
        data_directory, batch_size=batch_size)
    _ = train_model(model,
                    criterion,
                    dataloaders,
                    optimizer,
                    bpath=exp_directory,
                    metrics=metrics,
                    num_epochs=epochs)

    # Save the trained model
    torch.save(model, exp_directory / 'weights.pt')
Example #3
def create_masks(data_dir, num_classes, weights_filename, using_unet=False):
    results = []

    import datahandler
    import model as model_module  # alias so the local `model` below does not shadow the module

    ####
    other_than_five_classes = num_classes != 5
    ####

    dataloaders = datahandler.get_dataloader_sep_folder(
        data_dir,
        batch_size=1,
        other_than_5_classes=other_than_five_classes,
        num_classes=num_classes,
        with_aug=True)
    k = 0
    train_acc, test_acc = 0, 0

    model = model_module.createDeepLabv3(using_unet=using_unet)
    # Load the trained model
    weights_filepath = "./weights/" + weights_filename + ".pt"
    model.load_state_dict(torch.load(weights_filepath, map_location=torch.device('cpu')))

    for phase in ['Train', 'Test']:
        i = 0
        model.eval()  # Set model to evaluate mode
        # Iterate over data.
        for sample in tqdm(iter(dataloaders[phase])):
            i += 1

            inputs = sample['image']
            mask = sample['mask']
            outputs = model(inputs)
            if using_unet:
                outputs = {'out': outputs}
            output = outputs['out'].cpu().detach().numpy()[0][0]
            quantized_mask = quantization(output, num_classes)

            #####
            if phase == 'Train':
                train_acc += accuracy_per_sample(quantized_mask, mask[0][0].numpy(), num_classes)
                print(train_acc)
            else:
                test_acc += accuracy_per_sample(quantized_mask, mask[0][0].numpy(), num_classes)
                print(test_acc)
            #####

            sample_results = [np.array(inputs[0]).transpose(1, 2, 0), mask[0][0], output, quantized_mask]
            results.append(sample_results)
            k += 1
        #####
        if phase == 'Train':
            train_acc = train_acc / i
        else:
            test_acc = test_acc / i
        #####
    print(f'For architecture {weights_filename}: Training accuracy={train_acc}, Test accuracy={test_acc}')
    pickle.dump(results, open("seg_results.p", "wb"))
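quantization() and accuracy_per_sample() are project helpers not shown in this excerpt. A plausible sketch of the quantization step, assuming it bins the continuous output map into num_classes integer levels:

import numpy as np

def quantization(output, num_classes):
    # Normalize to [0, 1], then truncate into num_classes discrete bins
    # (hypothetical reconstruction; the repo's helper may differ).
    out = (output - output.min()) / (output.max() - output.min() + 1e-8)
    return np.clip((out * num_classes).astype(int), 0, num_classes - 1)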
Example #4
######
num_classes = args.num_classes
using_unet = args.using_unet == 1
train_all = args.train_all == 1

other_than_five_classes = num_classes != 5
######

bpath = args.exp_directory
data_dir = args.data_directory
epochs = args.epochs
batchsize = args.batchsize

# Create the deeplabv3 resnet101 model which is pretrained on a subset of COCO train2017, on the 20 categories that are present in the Pascal VOC dataset.
model = createDeepLabv3(using_unet=using_unet, train_all=train_all)
model.train()
# Create the experiment directory if not present
if not os.path.isdir(bpath):
    os.mkdir(bpath)

# Specify the loss function
criterion = torch.nn.MSELoss(reduction='mean')
# Specify the optimizer with a lower learning rate
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

# Specify the evaluation metrics
metrics = {
    'f1_score': f1_score,
    'auroc': roc_auc_score,
    'accuracy': accuracy_score
}
Example #5
parser.add_argument(
    "exp_directory",
    help='Specify the experiment directory where metrics and model weights shall be stored.'
)
parser.add_argument("--epochs", default=25, type=int)
parser.add_argument("--batchsize", default=4, type=int)

args = parser.parse_args()

bpath = args.exp_directory
data_dir = args.data_directory
epochs = args.epochs
batchsize = args.batchsize
# Create the deeplabv3 resnet101 model which is pretrained on a subset of COCO train2017, on the 20 categories that are present in the Pascal VOC dataset.
model = createDeepLabv3()
model.train()
# Create the experiment directory if not present
if not os.path.isdir(bpath):
    os.mkdir(bpath)

# Specify the loss function
criterion = torch.nn.CrossEntropyLoss()
#criterion = torch.nn.BCEWithLogitsLoss()
#criterion = torch.nn.functional.cross_entropy()
# Specify the optimizer with a lower learning rate
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

# Specify the evaluation metrics
metrics = {'f1_score': f1_score}
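train_model itself is not shown in these excerpts. Purely as an illustration of how a training loop could consume a metrics dict like the ones above, assuming flattened ground-truth masks and model outputs:

import numpy as np
from sklearn.metrics import f1_score, roc_auc_score

y_true = np.array([0, 1, 1, 0, 1])             # flattened ground-truth mask
y_score = np.array([0.1, 0.9, 0.8, 0.3, 0.4])  # flattened model output

metrics = {'f1_score': f1_score, 'auroc': roc_auc_score}
for name, metric in metrics.items():
    if name == 'f1_score':
        # f1_score expects hard labels, so threshold the scores first.
        print(name, metric(y_true, (y_score > 0.5).astype(int)))
    else:
        # roc_auc_score consumes the raw scores directly.
        print(name, metric(y_true, y_score))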
Example #6
    "exp_directory", help='Specify the experiment directory where metrics and model weights shall be stored.')
parser.add_argument("--epochs", default=25, type=int)
parser.add_argument("--batchsize", default=4, type=int)
parser.add_argument("--outputConfig", default=3, type=int)

args = parser.parse_args()


bpath = args.exp_directory
data_dir = args.data_directory
epochs = args.epochs
batchsize = args.batchsize
outputConfig = args.outputConfig

# Create the deeplabv3 resnet101 model which is pretrained on a subset of COCO train2017, on the 20 categories that are present in the Pascal VOC dataset.
model = createDeepLabv3(outputConfig)
model.train()
# Create the experiment directory if not present
if not os.path.isdir(bpath):
    os.mkdir(bpath)


# Specify the loss function
criterion = torch.nn.MSELoss(reduction='mean')
# Specify the optimizer with a lower learning rate
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

# Specify the evaluation metrics
metrics = {'f1_score': f1_score, 'auroc': roc_auc_score}

Example #7
  # Per-channel color buffers, filled class by class below.
  r = np.zeros_like(image).astype(np.uint8)
  g = np.zeros_like(image).astype(np.uint8)
  b = np.zeros_like(image).astype(np.uint8)

  for l in range(0, nc):
    idx = image == l
    r[idx] = label_colors[l, 0]
    g[idx] = label_colors[l, 1]
    b[idx] = label_colors[l, 2]

    
  rgbp = np.stack([r, g, b], axis=2)
  return rgbp
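As a side note, the per-class loop above can be collapsed into a single NumPy fancy-indexing operation; a self-contained sketch with hypothetical shapes:

import numpy as np

nc = 5
label_colors = np.random.randint(0, 255, size=(nc, 3), dtype=np.uint8)
image = np.random.randint(0, nc, size=(64, 64))  # per-pixel class ids
rgbp = label_colors[image]  # (64, 64, 3); same result as the loop above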
################################################
# Load the trained model 
import model as myModel

model = myModel.createDeepLabv3(outputchannels=5) # give nclasses

checkpoint = torch.load(exp_dir + 'kasumi_line_ep8000.pth.tar')
model.load_state_dict(checkpoint['model_state_dict'])

model.eval()
device = torch.device('cpu')


preprocess = torchvision.transforms.Compose([
                    torchvision.transforms.ToTensor(),
                    torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
                ])
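The inference loop below is truncated. A minimal sketch of how the loaded model and the preprocess pipeline above would typically be combined (the file name and class-map extraction are illustrative assumptions):

from PIL import Image

img = Image.open('example.png').convert('RGB')  # hypothetical input file
input_batch = preprocess(img).unsqueeze(0)      # add the batch dimension
with torch.no_grad():
    out = model(input_batch)['out'][0]          # (nclasses, H, W) logits
pred = out.argmax(0).numpy()                    # per-pixel class ids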
####################################################
for file in filist:
    imfile = dir + file
Example #8
import argparse

from model import createDeepLabv3
from utils import masks_to_segmentation, overaly_segmentation, load_segmentation_definition, copy_state_dict

# Command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--model_path", default='./out/weights.pt', type=str)
parser.add_argument("--data_directory", default='./data/original/', type=str)
parser.add_argument("--result_directory", default='./result/', type=str)

args = parser.parse_args()
if not os.path.isdir(args.result_directory):
    os.mkdir(args.result_directory)

model = createDeepLabv3(outputchannels=12, pretrain=True)
# model = torch.nn.DataParallel(model)
train_model = torch.load(args.model_path)
trained_state_dict = copy_state_dict(train_model.state_dict())
model.load_state_dict(trained_state_dict)
model.eval()
model.cuda()

definitions = load_segmentation_definition(
    'data/json/annotation_definitions.json')
s0 = time.time()
with torch.no_grad():
    for step, path in enumerate(glob.glob(args.data_directory + '*.png')):
        im = cv2.imread(path)
        im = cv2.resize(im, (480, 320), interpolation=cv2.INTER_NEAREST)
        input_tensor = torch.from_numpy(im)
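The excerpt stops after torch.from_numpy. A model like the ones above expects a float NCHW batch, so the next steps would plausibly look like this (illustrative only; note that cv2 loads BGR, which may also need converting to RGB):

        # HWC uint8 -> NCHW float in [0, 1] (hypothetical continuation)
        input_tensor = input_tensor.permute(2, 0, 1).float().div(255).unsqueeze(0)
        output = model(input_tensor.cuda())['out']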
Example #9
parser.add_argument(
    "exp_directory",
    help='Specify the experiment directory where metrics and model weights shall be stored.'
)
parser.add_argument("--epochs", default=21, type=int)
parser.add_argument("--batchsize", default=2, type=int)

args = parser.parse_args()

bpath = args.exp_directory
data_dir = args.data_directory
val_dir = args.data_directory[:-5] + 'val'  # assumes data_directory ends with 'train'
epochs = args.epochs
batchsize = args.batchsize
print(args)

# Create the deeplabv3 resnet101 model which is pretrained on a subset of COCO train2017, on the 20 categories that are present in the Pascal VOC dataset.
model = createDeepLabv3(outputchannels=5, backboneFreez=False)  # give nclasses
model.train()

# Make a larger image, copy the original into it, and fill the remaining area with black.
if batchsize == 1:

    def set_bn_to_eval(m):
        classname = m.__class__.__name__
        if classname.find('BatchNorm') != -1:
            m.eval()
            print(classname)

    model.apply(set_bn_to_eval)
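The freeze above works around a PyTorch limitation: train-mode BatchNorm cannot compute batch statistics from a single value per channel. A small self-contained demonstration:

import torch

bn = torch.nn.BatchNorm2d(3)
x = torch.randn(1, 3, 1, 1)  # batch of one with 1x1 spatial extent
bn.train()
try:
    bn(x)  # needs more than one value per channel to compute batch stats
except ValueError as e:
    print('train mode failed:', e)
bn.eval()
print('eval mode works:', bn(x).shape)  # falls back to running statistics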

# Create the experiment directory if not present
Example #10
# Command line arguments
parser = argparse.ArgumentParser()
parser.add_argument("--data_directory", default='./data/', type=str)
parser.add_argument("--exp_directory", default='./out/', type=str)
parser.add_argument("--epochs", default=25, type=int)
parser.add_argument("--batchsize", default=24, type=int)
parser.add_argument("--num_workers", default=20, type=int)

args = parser.parse_args()

bpath = args.exp_directory
data_dir = args.data_directory
epochs = args.epochs
batchsize = args.batchsize
# Create the deeplabv3 resnet101 model which is pretrained on a subset of COCO train2017, on the 20 categories that are present in the Pascal VOC dataset.
model = createDeepLabv3(outputchannels=12)
model = torch.nn.DataParallel(model)
model.train()
model.cuda()

# Create the experiment directory if not present
if not os.path.isdir(bpath):
    os.mkdir(bpath)

# Specify the loss function
# criterion = torch.nn.CrossEntropyLoss(reduction='mean')
criterion = FocalLoss(size_average=True)

# Specify the optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=2e-2)
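FocalLoss is imported from project code that is not part of this excerpt. A minimal sketch of a multi-class focal loss consistent with the size_average flag used above, following the usual formulation from Lin et al. (2017); gamma=2.0 is an assumed default:

import torch
import torch.nn.functional as F

class FocalLoss(torch.nn.Module):
    def __init__(self, gamma=2.0, size_average=True):
        super().__init__()
        self.gamma = gamma
        self.size_average = size_average

    def forward(self, logits, target):
        # Unreduced per-pixel cross entropy so each term can be re-weighted.
        ce = F.cross_entropy(logits, target, reduction='none')
        pt = torch.exp(-ce)  # probability assigned to the true class
        loss = (1 - pt) ** self.gamma * ce
        return loss.mean() if self.size_average else loss.sum()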
Example #11
        for x in ['Train', 'Test']
    }
    return dataloaders


data_path = './Data'
masks = os.listdir(data_path)[2:]  # skip the first two entries; note os.listdir order is not guaranteed
dataloaders = get_dataloader_single_folder(data_path, 'Image', masks)

exp_directory = './Model'

exp_directory = Path(exp_directory)
if not exp_directory.exists():
    exp_directory.mkdir()

model = m.createDeepLabv3()
# Specify the loss function
criterion = torch.nn.MSELoss(reduction='mean')
# Specify the optimizer with a lower learning rate
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

# Specify the evaluation metrics
metrics = {'f1_score': f1_score, 'auroc': roc_auc_score}

# Create the dataloader
dataloaders = get_dataloader_single_folder(
    './Data',
    'Image', [
        'Eyes_dorsal', 'Outline_dorsal', 'Outline_lateral', 'Ov_lateral',
        'Heart_lateral', 'Yolk_lateral'
    ],
Example #12
  a1 = sorted(os.listdir(checkpoint_inpainter_path), key=toInt3, reverse=True)
  if len(a1) > 0:
    pretrained_inpainter = a1[0]

if not os.path.exists(checkpoint_dynamic_path):
  os.makedirs(checkpoint_dynamic_path)
else:
  a1 = sorted(os.listdir(checkpoint_dynamic_path), key=toInt3, reverse=True)
  if len(a1) > 0:
    pretrained_dynamic = a1[0]
# flow_dataset = FlowDataset(transform = transforms.Compose([ToTensor(),Rescale((cnvrt_size,cnvrt_size))]))
flow_dataset = FlowDataset(transform=transforms.Compose([ToTensor()]))

dataloader = DataLoader(flow_dataset, batch_size=batch_size, shuffle=True, num_workers=workers)

net_dynamic = createDeepLabv3().to(device)
net_dynamic.apply(weights_init)

net_impainter = Inpainter(ngpu=1).to(device) 
# net_impainter.apply(weights_init)
optimizerD = optim.Adam(net_dynamic.parameters(), lr=lr, betas=(beta1, beta2))
optimizerI = optim.Adam(net_impainter.parameters(), lr=lr, betas=(beta1, beta2))

if pretrained_dynamic is not None:
  net_dynamic, optimizerD, start_epoch = load_ckp(checkpoint_dynamic_path + pretrained_dynamic, net_dynamic, optimizerD)
  print("Loaded pretrained: " + pretrained_dynamic)

if pretrained_inpainter is not None:
  net_impainter, optimizerI, start_epoch = load_ckp(checkpoint_inpainter_path + pretrained_inpainter, net_impainter, optimizerI)
  print("Loaded pretrained: " + pretrained_inpainter)