Code Example #1
File: model.py  Project: YJingyu/Face-Recognition
 def __init__(self, path=None):
     import tensorflow as tf
     self.net = segnet.segnet()
     self.learning_rate = tf.placeholder(dtype=tf.float32)
     self.labels = tf.placeholder(dtype=tf.int32)
     self.label_boolean = tf.reshape(tf.cast(self.labels / 255,
                                             dtype=tf.bool),
                                     shape=[-1])
     self.loss = tf.reduce_mean(
         tf.nn.sigmoid_cross_entropy_with_logits(logits=self.net.output,
                                                 labels=tf.cast(
                                                     self.labels / 255,
                                                     dtype=tf.float32)))
     self.trainer = tf.train.AdamOptimizer(self.learning_rate).minimize(
         self.loss)
     self.session = tf.Session()
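     # note: the 0.5 threshold below is applied to net.output, the same tensor the loss above treats as raw logits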
     self.output_boolean = tf.reshape((self.net.output >= 0.5), shape=[-1])
     # IoU between label_boolean and output_boolean
     self.intersection = tf.reduce_sum(
         tf.cast(
             tf.math.logical_and(self.output_boolean, self.label_boolean),
             tf.float32))
     self.union = tf.reduce_sum(
         tf.cast(
             tf.math.logical_or(self.output_boolean, self.label_boolean),
             tf.float32))
     self.IOU_metric = self.intersection / (self.union + 1e-9)
     if path is None:
         self.session.run(tf.global_variables_initializer())
     else:
         saver = tf.train.Saver()
         saver.restore(self.session, path)
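The intersection-over-union bookkeeping above boils down to a ratio with a small epsilon guard; a minimal NumPy sketch of the same computation (illustrative only, not part of the project):

import numpy as np

def iou(pred_mask, label_mask, eps=1e-9):
    # pred_mask and label_mask are boolean arrays of equal shape
    intersection = np.logical_and(pred_mask, label_mask).sum()
    union = np.logical_or(pred_mask, label_mask).sum()
    return intersection / (union + eps)

# the two masks overlap on 2 pixels out of a union of 4 -> IoU = 2 / 4 = 0.5
print(iou(np.array([True, True, True, False]),
          np.array([False, True, True, True])))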
Code Example #2
 def __init__(self, _train_list, _val_list, _inf_list, _dag_it = 0, _input_shape = (256, 1024, 3),
              _train_steps = 500, _val_steps = 200, _num_epochs = 15, _batch_size = 4, _gpu_num = '0, 1',
              _no_inidices = True, _segnet = False):
     self.dag_it = _dag_it
     self.train_list = _train_list
     self.val_list = _val_list
     self.inf_list = _inf_list
     self.base_dir = '/media/localadmin/Test/11Nils/kitti/dataset/sequences/Data/'
     self.img_dir = 'images/'
     self.label_dir = 'labels/'
     self.inf_dir = 'inf/'
     self.dag_dir = 'dagger/'
     self.log_dir = 'log/'
     self.optimizer = 'adagrad'
     self.gpu_num = _gpu_num  # '1'
     os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
     os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu_num
     self.untrained = 'store_true'
     self.loss = 'categorical_crossentropy'
     self.output_mode = 'softmax'
     self.pool_size = (2, 2)
     self.kernel = 3
     self.input_shape = _input_shape  # (128, 512, 3)
     self.n_labels = 3  # num classes
     self.val_steps = _val_steps
     self.epoch_steps = _train_steps
     self.n_epochs = _num_epochs
     self.batch_size = _batch_size
     self.filters = 8
     self.b_pool_indices = _no_inidices
     self.b_use_segnet = _segnet
     if not self.b_pool_indices and not self.b_use_segnet:
         self.model = unet_wIndices(self.input_shape, self.n_labels, self.filters, self.kernel, self.pool_size,
                                    self.output_mode)
     elif not self.b_use_segnet:
         self.model = unet(self.input_shape, self.n_labels, self.filters, self.kernel, self.pool_size,
                           self.output_mode)
     else:
         self.model = segnet(self.input_shape, self.n_labels, self.filters, self.kernel, self.pool_size,
                             self.output_mode)
     print(self.model.summary())
     list_gpus_trained = [int(x) for x in self.gpu_num.split(',')]
     self.num_gpus = len(list_gpus_trained)
     if self.num_gpus > 1:
         trained_gpu_str = ', '.join(str(e) for e in list_gpus_trained)
         print('Training on GPUs: ' + trained_gpu_str)
         self.multi_model = multi_gpu_model(self.model, gpus = self.num_gpus)
     else:
         self.multi_model = self.model
     self.multi_model.compile(loss = self.loss, optimizer = self.optimizer, metrics = ['accuracy'])
     plot_model(model = self.multi_model, to_file = self.base_dir + 'model.png')
     print(print_summary(self.multi_model))
     self.std = [0.32636853, 0.31895106, 0.30716496]
     self.mean = [0.39061851, 0.38151629, 0.3547171]
     self.es_cb = []
     self.tb_cb = []
     self.cp_cb = []
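The empty es_cb / tb_cb / cp_cb attributes are presumably filled with Keras callbacks later in the class; a minimal sketch of what those names usually stand for (EarlyStopping, TensorBoard, ModelCheckpoint), with purely illustrative paths and settings:

from keras.callbacks import EarlyStopping, TensorBoard, ModelCheckpoint

es_cb = EarlyStopping(monitor='val_loss', patience=5, verbose=1)
tb_cb = TensorBoard(log_dir='log/')
cp_cb = ModelCheckpoint(filepath='weights.hdf5', monitor='val_loss',
                        save_best_only=True, verbose=1)
callbacks = [es_cb, tb_cb, cp_cb]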
Code Example #3
def main():
    print('===> Loading datasets')
    test_set = get_eval_set(opt.data_dir, opt.test_dir, opt.sr_upscale_factor,
                            opt.num_classes)
    test_loader = DataLoader(dataset=test_set,
                             num_workers=opt.threads,
                             batch_size=1,
                             shuffle=False)

    print('Building SR model ', opt.sr_model_name)
    if opt.sr_model_name == 'DBPN':
        sr_model = DBPN(num_channels=3,
                        base_filter=64,
                        feat=256,
                        num_stages=7,
                        scale_factor=opt.sr_upscale_factor)
        sr_model = torch.nn.DataParallel(sr_model, device_ids=gpus_list)
        model_name = os.path.join(opt.models_dir, exp_name, opt.sr_model)
        print(model_name)
        sr_model.load_state_dict(
            torch.load(model_name, map_location=lambda storage, loc: storage))
        print('Pre-trained SR model is loaded.')
    else:
        sys.exit('Invalid SR network')

    print('Building SemSeg model', opt.seg_model_name)
    if opt.seg_model_name == 'segnet':
        seg_model = segnet(num_classes=opt.num_classes, in_channels=3)
        seg_model = torch.nn.DataParallel(seg_model, device_ids=gpus_list)
        model_name = os.path.join(opt.models_dir, exp_name, opt.seg_model)
        print(model_name)
        seg_model.load_state_dict(torch.load(model_name))
        print('Pre-trained SemSeg model is loaded.')
    else:
        sys.exit('Invalid Semantic segmentation network')

    if cuda:
        sr_model = sr_model.cuda(gpus_list[0])
        seg_model = seg_model.cuda(gpus_list[0])

    check_mkdir('Results')
    check_mkdir(os.path.join('Results', exp_name))
    check_mkdir(os.path.join('Results', exp_name, 'segmentation'))
    check_mkdir(os.path.join('Results', exp_name, 'super-resolution'))
    check_mkdir('heat_maps')
    check_mkdir(os.path.join('heat_maps', exp_name))

    test(test_loader, sr_model, seg_model)
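check_mkdir() is used above but not defined in this excerpt; it is typically a one-liner along these lines (an assumption, not the project's actual helper):

import os

def check_mkdir(dir_name):
    # create the directory if it does not exist yet
    if not os.path.exists(dir_name):
        os.makedirs(dir_name)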
Code Example #4
import pdb
import cv2
import sys, os
import numpy as np
import tensorflow as tf
import tensorflow.contrib.slim as slim
from keras import backend as K  # assumed: K is the Keras backend (used by set_session / function below)
from PIL import Image
from vis.visualization import visualize_activation_tf
from vis.visualization import visualize_activation
from vis.input_modifiers import Jitter
from vis.utils import utils
# segnet() is assumed to be imported from the project's model definition (not shown here)

# define input shape
input_shape = (360, 480)

#construct graph
inputs = tf.placeholder("float", shape=(1, input_shape[0], input_shape[1], 3))
logits, end_points = segnet(inputs)
sess = tf.Session()
saver = tf.train.Saver()
saver.restore(sess, 'weights/segnet_tf.ckpt')
K.set_session(sess)
K.manual_variable_initialization(True)
nimage = np.asarray([Image.open('ouzel.jpg').resize(input_shape).getdata()
                     ]).astype(np.float32)
nimage = np.reshape(nimage, (input_shape[0], input_shape[1], 3))
nimage = np.expand_dims(nimage, 0)
fn = K.function([inputs], [logits])
a = fn([nimage])

#visualize filters
name = 'conv1_1_D'
name = 'conv4_2_bn'
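The script stops after fetching the logits; a short illustrative continuation that turns them into a per-pixel class map and writes it out (assuming a[0] holds logits of shape (1, height, width, n_classes)):

# purely illustrative follow-up, not part of the original script
pred = np.argmax(a[0][0], axis=-1).astype(np.uint8)  # (height, width) class indices
Image.fromarray(pred).save('ouzel_segmentation.png')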
Code Example #5
def main():
    print('===> Loading datasets')
    train_set = get_training_set(opt.data_dir, opt.train_dir, opt.patch_size,
                                 opt.sr_patch_size, opt.sr_upscale_factor,
                                 opt.num_classes, opt.sr_data_augmentation)

    if opt.val_dir is not None:
        val_set = get_eval_set(opt.data_dir, opt.val_dir,
                               opt.sr_upscale_factor, opt.num_classes)
        train_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batch_size)
        val_loader = DataLoader(dataset=val_set,
                                num_workers=opt.threads,
                                batch_size=1)
    else:
        # Creating data indices for training and validation splits:
        validation_split = .2
        dataset_size = len(train_set)
        indices = list(range(dataset_size))
        split = int(np.floor(validation_split * dataset_size))
        np.random.seed(opt.seed)
        np.random.shuffle(indices)
        train_indices, val_indices = indices[split:], indices[:split]
        train_sampler = SubsetRandomSampler(train_indices)
        val_sampler = SubsetRandomSampler(val_indices)

        train_loader = DataLoader(dataset=train_set,
                                  num_workers=opt.threads,
                                  batch_size=opt.batch_size,
                                  sampler=train_sampler)
        val_loader = DataLoader(dataset=train_set,
                                num_workers=opt.threads,
                                batch_size=1,
                                sampler=val_sampler)

    print('Building SR model ', opt.sr_model_name)
    if opt.sr_model_name == 'DBPN':
        sr_model = DBPN(num_channels=3,
                        base_filter=64,
                        feat=256,
                        num_stages=7,
                        scale_factor=opt.sr_upscale_factor)
        sr_model = torch.nn.DataParallel(sr_model, device_ids=gpus_list)
        if opt.sr_pretrained:
            model_name = os.path.join(opt.save_folder +
                                      opt.sr_pretrained_model)
            print(model_name)
            sr_model.load_state_dict(
                torch.load(model_name,
                           map_location=lambda storage, loc: storage))
            print('Pre-trained SR model is loaded.')
    else:
        sys.exit('Invalid SR network')

    print('Building SemSeg model', opt.seg_model_name)

    if opt.seg_model_name == 'segnet':
        seg_model = segnet(num_classes=opt.num_classes, in_channels=3)
        if not opt.seg_pretrained:
            seg_model.init_vgg16_params()
            print('segnet params initialized')
            seg_model = torch.nn.DataParallel(seg_model, device_ids=gpus_list)
        if opt.seg_pretrained:
            model_name = os.path.join(opt.save_folder +
                                      opt.seg_pretrained_model)
            print(model_name)
            seg_model.load_state_dict(torch.load(model_name))
            print('Pre-trained SemSeg model is loaded.')
            seg_model = torch.nn.DataParallel(seg_model, device_ids=gpus_list)

    sr_criterion = nn.L1Loss()
    psnr_criterion = nn.MSELoss()
    if cuda:
        sr_model = sr_model.cuda(gpus_list[0])
        seg_model = seg_model.cuda(gpus_list[0])
        sr_criterion = sr_criterion.cuda(gpus_list[0])
        psnr_criterion = psnr_criterion.cuda(gpus_list[0])
    if 'grss' in opt.data_dir:
        seg_criterion = CrossEntropyLoss2d(ignore_index=-1).cuda()
    else:
        seg_criterion = CrossEntropyLoss2d().cuda()

    sr_optimizer = optim.Adam(sr_model.parameters(),
                              lr=opt.sr_lr,
                              betas=(0.9, 0.999),
                              eps=1e-8)
    seg_optimizer = optim.Adam(seg_model.parameters(),
                               lr=opt.seg_lr,
                               weight_decay=opt.seg_weight_decay,
                               betas=(opt.seg_momentum, 0.99))

    scheduler = ReduceLROnPlateau(seg_optimizer,
                                  'min',
                                  factor=0.5,
                                  patience=opt.seg_lr_patience,
                                  min_lr=2.5e-5,
                                  verbose=True)

    check_mkdir(os.path.join('outputs', exp_name))
    check_mkdir(os.path.join('outputs', exp_name, 'segmentation'))
    check_mkdir(os.path.join('outputs', exp_name, 'super-resolution'))
    check_mkdir(os.path.join(opt.save_folder, exp_name))

    #best_iou = 0
    best_iou = val_results = validate(0, val_loader, sr_model, seg_model,
                                      sr_criterion, psnr_criterion,
                                      seg_criterion, sr_optimizer,
                                      seg_optimizer)
    #sys.exit()
    #best_epoch = -1
    best_epoch = 0
    best_model = (sr_model, seg_model)
    since_last_best = 0

    for epoch in range(opt.start_iter, opt.epoch_num + 1):
        train(epoch, train_loader, sr_model, seg_model, sr_criterion,
              psnr_criterion, seg_criterion, sr_optimizer, seg_optimizer)

        val_results = validate(epoch, val_loader, sr_model, seg_model,
                               sr_criterion, psnr_criterion, seg_criterion,
                               sr_optimizer, seg_optimizer)

        if val_results > best_iou:
            best_iou = val_results
            best_epoch = epoch
            print('New best iou ', best_iou)
            best_model = (copy.deepcopy(sr_model), copy.deepcopy(seg_model))
            since_last_best = 0
            checkpoint(epoch, sr_model, seg_model, 'tmp_best')
        else:
            print('Best iou epoch: ', best_epoch, ':', best_iou)

        scheduler.step(val_results)

        if (epoch) % (opt.epoch_num / 2) == 0:
            for param_group in sr_optimizer.param_groups:
                param_group['lr'] /= 10.0
            print('SR Learning rate decay: lr={}'.format(
                sr_optimizer.param_groups[0]['lr']))

        if (epoch) % (opt.snapshots) == 0:
            checkpoint(epoch, sr_model, seg_model)

        #since_last_best += 1
        #if since_last_best == 20:
        #    checkpoint(epoch, best_model[0], best_model[1], 'tmp_best')

    print('Saving final best model')
    checkpoint(epoch, best_model[0], best_model[1], 'best')
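checkpoint() is referenced throughout main() but not shown; a plausible minimal implementation, relying on the same opt / exp_name globals the excerpt already uses (an assumption, not the project's code):

def checkpoint(epoch, sr_model, seg_model, tag=''):
    # save both networks' weights under the experiment folder (illustrative file names)
    out_dir = os.path.join(opt.save_folder, exp_name)
    torch.save(sr_model.state_dict(),
               os.path.join(out_dir, 'sr_epoch_{}{}.pth'.format(epoch, tag)))
    torch.save(seg_model.state_dict(),
               os.path.join(out_dir, 'seg_epoch_{}{}.pth'.format(epoch, tag)))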
Code Example #6
name = 'conv3_3_D'
name = 'conv4_2'
name = 'conv5_3'
names = ['conv5_3', 'conv4_3']
obj = 9
pixelwise_weight_dict = {}
grad_for_weighting_dict = {}
# Create tensorflow graph for evaluation
eval_graph = tf.Graph()
with eval_graph.as_default():
    with eval_graph.gradient_override_map({'Relu': 'GuidedRelu'}):
    
        images = tf.placeholder("float", [batch_size, input_shape[0], input_shape[1], 3])
        masks = tf.placeholder("float", [batch_size, input_shape[0], input_shape[1],12])

        logits, end_points = segnet(images)
        prob = tf.nn.softmax(logits)
        argmax = tf.argmax(logits, axis=3)

        #cost = tf.reduce_sum(tf.abs(tf.multiply(logits, masks)))
        cost = tf.reduce_mean(tf.nn.relu(tf.multiply(logits, np.ones(logits.get_shape().as_list()))))
        #cost = tf.reduce_mean(logits)

        for name in names:
            grad_for_weighting_dict[name] = tf.gradients(cost, end_points[name])[0]
            pixelwise_weight_dict[name] = tf.nn.relu(tf.multiply(grad_for_weighting_dict[name], end_points[name]))
        test_grad_for_weighting = tf.gradients(end_points['conv1_1_D'], end_points[name])
        test_pixelwise_weight = tf.abs(tf.multiply(test_grad_for_weighting, end_points[name]))
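
The 'GuidedRelu' gradient that gradient_override_map points at has to be registered elsewhere in the project; the standard TF1-style registration looks roughly like this (a sketch of the usual guided-backprop pattern, not the project's own code):

import tensorflow as tf

@tf.RegisterGradient("GuidedRelu")
def _guided_relu_grad(op, grad):
    # propagate gradient only where both the incoming gradient and the ReLU output are positive
    gate_grad = tf.cast(grad > 0., tf.float32)
    gate_out = tf.cast(op.outputs[0] > 0., tf.float32)
    return gate_grad * gate_out * grad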


        
Code Example #7
import os
import sys

import tensorflow as tf

# segnet() is assumed to be imported from the project's model code (not shown here)

ix = int(sys.argv[1])

# tensorflow courtesy options: use GPU1, don't take up all the memory
tf.config.experimental.set_visible_devices([], 'GPU')
os.environ["CUDA_VISIBLE_DEVICES"]="1"
for d in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(d, True)


# hyperparameters
n_classes=8
width=[2,4,6,8,16,24,32,48]
n_filters=[4,6,8,12,16,32,64]
#pool_size=[2,3,4,6,8,16]
pool_size=[2]
n_blocks=[2,3,4,5,6,7,8]
pparams=[(a,b,c,d) for a in width for b in n_filters for c in pool_size for d in n_blocks]

# open log file
with open('temp/hparam_to_nparam.{}.tsv'.format(ix), 'w') as i:
    # header
    i.write('\t'.join(['width','n_filter','maxpool','depth','n_par\n']))
    # iterate through parameters of interest
    for j,(w,nf,ps,nb) in enumerate(pparams):
        if j==ix:
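            # keep nv a multiple of pool_size**n_blocks so every pooling stage divides the input length evenly (assumed intent)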
            nv=(2**20)-((2**20) % (ps**nb))
            model=segnet(input_shape=(nv,2), n_classes=n_classes, width=w, n_filters=nf, pool_size=ps, n_blocks=nb)
            n_par=model.count_params()
            i.write('\t'.join(map(str, [w, nf, ps, nb, n_par]))+'\n')

Code Example #8
    def __init__(self, num_epochs=301, l_r=1e-10, size=256, batch_size=32, n_workers=8, thresh=0.5, num_classes=1,
                 write_flag=False, net_name=None, ckpt_name=''):
        # Hyper-parameters
        self.num_epochs = num_epochs
        self.learning_rate = l_r
        self.size = size
        self.batch_size = batch_size
        self.n_workers = n_workers
        self.thresh = thresh
        self.write_flag = write_flag
        self.num_classes = num_classes
        self.net_name = net_name

        self.ckpt_name = ckpt_name

        self.outf = os.path.join(pgsegm_root, opt.network, opt.split, opt.outf)
        self.outm = os.path.join(pgsegm_root, opt.network, opt.split, opt.outm)

        if opt.network == 'Unet':
            self.n = vanillaUnet(num_classes)
        elif opt.network == 'Unet_noskips':
            self.n = Unet_noskips(num_classes)
        elif opt.network == 'segnet':
            self.n = segnet(num_classes)
        elif opt.network == 'true_segnet':
            self.n = true_segnet(num_classes)
        elif opt.network == 'tiramisu':
            self.n = tiramisu(num_classes)
        elif opt.network == 'DeepLab':
            self.n = DeepLab(num_classes)
        else:
            print("ERROR in network name")

        # allow data parallel
        if torch.cuda.device_count() > 1:
            self.n = nn.DataParallel(self.n)
        # put the model on DEVICE AFTER allowing data parallel
        self.n.to(DEVICE)

        # Loss and optimizer
        self.criterion = nn.BCEWithLogitsLoss()
        self.optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad, self.n.parameters()), lr=self.learning_rate)

        if opt.dataset == 'isic':
            self.dataset = isic.ISIC(root=opt.data,  # split_list=training_set,
                                     split_name=opt.split,
                                     load=opt.SRV, size=(self.size, self.size),
                                     segmentation_transform=transforms.Compose([
                                         transforms.Resize((self.size, self.size)),
                                         transforms.RandomHorizontalFlip(),
                                         transforms.RandomVerticalFlip(),
                                         transforms.RandomApply(
                                             [transforms.Resize((self.size, self.size)),
                                              transforms.ColorJitter(0.3, 0.3, 0.2, 0.01),
                                              transforms.RandomAffine(degrees=0, shear=5),
                                              transforms.RandomRotation(180),
                                              transforms.RandomAffine(degrees=0, translate=(0.05, 0.05)),
                                              transforms.RandomAffine(degrees=0, scale=(0.95, 1.25))],
                                             p=0.4),
                                         transforms.ToTensor(),
                                         transforms.Normalize((0.3359, 0.1133, 0.0276), (0.3324, 0.3247, 0.3351)),
                                     ]),
                                     )

            self.val_dataset = isic.ISIC(root=opt.data,  # split_list=training_set,
                                         split_name='validation_2017',
                                         load=opt.SRV, size=(self.size, self.size),
                                         transform=standard_transforms.Compose([
                                             standard_transforms.Resize((self.size, self.size)),
                                             standard_transforms.ToTensor(),
                                             standard_transforms.Normalize((0.3359, 0.1133, 0.0276),
                                                                           (0.3324, 0.3247, 0.3351)),
                                         ]),
                                         target_transform=standard_transforms.Compose([
                                             standard_transforms.Resize((self.size, self.size)),
                                             standard_transforms.ToTensor()
                                         ])
                                         )

            self.test_dataset = isic.ISIC(root=opt.data,  # split_list=training_set,
                                          split_name='test_2017',
                                          load=False,
                                          transform=standard_transforms.Compose([
                                              standard_transforms.Resize((self.size, self.size)),
                                              standard_transforms.ToTensor(),
                                              standard_transforms.Normalize((0.3359, 0.1133, 0.0276),
                                                                            (0.3324, 0.3247, 0.3351)),
                                          ]),
                                          target_transform=standard_transforms.Compose([
                                              standard_transforms.ToTensor()
                                          ])
                                          )


        elif opt.dataset == 'fake_isic':
            self.dataset = fake_isic.Fake(ckpt_name='/homes/my_d/ppgan/4chs/Models/Gs_nch-16_epoch-390_p-6.pth',
                                          size=(self.size, self.size),
                                          transform=standard_transforms.Normalize((0.3359, 0.1133, 0.0276),
                                                                           (0.3324, 0.3247, 0.3351)))

            self.val_dataset = isic.ISIC(root=opt.data,  # split_list=training_set,
                                         split_name='validation_2017',
                                         load=opt.SRV, size=(self.size, self.size),
                                         transform=standard_transforms.Compose([
                                             standard_transforms.Resize((self.size, self.size)),
                                             standard_transforms.ToTensor(),
                                             standard_transforms.Normalize((0.3359, 0.1133, 0.0276),
                                                                           (0.3324, 0.3247, 0.3351)),
                                         ]),
                                         target_transform=standard_transforms.Compose([
                                             standard_transforms.Resize((self.size, self.size)),
                                             standard_transforms.ToTensor()
                                         ])
                                         )

            self.test_dataset = isic.ISIC(root=opt.data,  # split_list=training_set,
                                          split_name='test_2017',
                                          load=False,
                                          transform=standard_transforms.Compose([
                                              standard_transforms.Resize((self.size, self.size)),
                                              standard_transforms.ToTensor(),
                                              standard_transforms.Normalize((0.3359, 0.1133, 0.0276),
                                                                            (0.3324, 0.3247, 0.3351)),
                                          ]),
                                          target_transform=standard_transforms.Compose([
                                              standard_transforms.ToTensor()
                                          ])
                                          )

        elif opt.dataset == 'fake_isic_waugm':
            self.dataset = fake_isic.Fake(ckpt_name='/homes/my_d/ppgan/4chs/Models/Gs_nch-16_epoch-390_p-6.pth',
                                          size=(self.size, self.size),
                                          segmentation_transform=transforms.Compose([
                                              transforms.ToPILImage(),
                                              transforms.Resize((self.size, self.size)),
                                              transforms.RandomHorizontalFlip(),
                                              transforms.RandomVerticalFlip(),
                                              transforms.RandomApply(
                                                  [transforms.Resize((self.size, self.size)),
                                                   transforms.ColorJitter(0.3, 0.3, 0.2, 0.01),
                                                   transforms.RandomAffine(degrees=0, shear=5),
                                                   transforms.RandomRotation(180),
                                                   transforms.RandomAffine(degrees=0, translate=(0.05, 0.05)),
                                                   transforms.RandomAffine(degrees=0, scale=(0.95, 1.25))],
                                                  p=0.4),
                                              transforms.ToTensor(),
                                              transforms.Normalize((0.3359, 0.1133, 0.0276), (0.3324, 0.3247, 0.3351)),
                                          ]),
                                          )

            self.val_dataset = isic.ISIC(root=opt.data,  # split_list=training_set,
                                         split_name='validation_2017',
                                         load=opt.SRV, size=(self.size, self.size),
                                         transform=standard_transforms.Compose([
                                             standard_transforms.Resize((self.size, self.size)),
                                             standard_transforms.ToTensor(),
                                             standard_transforms.Normalize((0.3359, 0.1133, 0.0276),
                                                                           (0.3324, 0.3247, 0.3351)),
                                         ]),
                                         target_transform=standard_transforms.Compose([
                                             standard_transforms.Resize((self.size, self.size)),
                                             standard_transforms.ToTensor()
                                         ])
                                         )

            self.test_dataset = isic.ISIC(root=opt.data,  # split_list=training_set,
                                          split_name='test_2017',
                                          load=False,
                                          transform=standard_transforms.Compose([
                                              standard_transforms.Resize((self.size, self.size)),
                                              standard_transforms.ToTensor(),
                                              standard_transforms.Normalize((0.3359, 0.1133, 0.0276),
                                                                            (0.3324, 0.3247, 0.3351)),
                                          ]),
                                          target_transform=standard_transforms.Compose([
                                              standard_transforms.ToTensor()
                                          ])
                                          )


        elif opt.dataset == 'voc':
            self.dataset = isic.ISIC(root=opt.data,  # split_list=training_set,
                                     split_name=opt.split,
                                     load=opt.SRV, size=(self.size, self.size),
                                     segmentation_transform=transforms.Compose([
                                         transforms.Resize((self.size, self.size)),
                                         transforms.RandomHorizontalFlip(),
                                         transforms.RandomVerticalFlip(),
                                         transforms.RandomApply(
                                             [transforms.RandomRotation(180), transforms.ColorJitter()]),
                                         transforms.ToTensor(),
                                         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                                     ]),
                                     )

            self.val_dataset = isic.ISIC(root=opt.data,  # split_list=training_set,
                                         split_name='validation_2017',
                                         load=opt.SRV, size=(self.size, self.size),
                                         transform=standard_transforms.Compose([
                                             standard_transforms.Resize((self.size, self.size)),
                                             standard_transforms.ToTensor(),
                                             standard_transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                                         ]),
                                         target_transform=standard_transforms.Compose([
                                             standard_transforms.Resize((self.size, self.size)),
                                             standard_transforms.ToTensor()
                                         ])
                                         )

            self.test_dataset = isic.ISIC(root=opt.data,  # split_list=training_set,
                                          split_name='test_2017',
                                          load=False,
                                          transform=standard_transforms.Compose([
                                              standard_transforms.Resize((self.size, self.size)),
                                              standard_transforms.ToTensor(),
                                              standard_transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
                                          ]),
                                          target_transform=standard_transforms.Compose([
                                              standard_transforms.ToTensor()
                                          ])
                                          )

        self.data_loader = DataLoader(self.dataset,
                                      batch_size=self.batch_size,
                                      shuffle=True,
                                      num_workers=self.n_workers,
                                      drop_last=True,
                                      pin_memory=True)

        self.eval_data_loader = DataLoader(self.val_dataset,
                                           batch_size=self.batch_size,
                                           shuffle=False,
                                           num_workers=self.n_workers,
                                           drop_last=False,
                                           pin_memory=True)

        self.test_data_loader = DataLoader(self.test_dataset,
                                           batch_size=1,
                                           shuffle=False,
                                           num_workers=self.n_workers,
                                           drop_last=False,
                                           pin_memory=True)

        if not os.path.exists(self.outf):
            os.makedirs(self.outf)
        if not os.path.exists(self.outm):
            os.makedirs(self.outm)

        self.total = len(self.data_loader)
        print(len(self.data_loader))
        print(len(self.eval_data_loader))
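For orientation, a minimal sketch of the training step this configuration (BCEWithLogitsLoss, Adam, the data_loader above) would drive; every name below is illustrative rather than taken from the project:

def train_one_epoch(model, loader, criterion, optimizer, device):
    # one pass over a binary-mask segmentation loader, mirroring the setup above
    model.train()
    running_loss = 0.0
    for images, masks in loader:
        images, masks = images.to(device), masks.to(device)
        optimizer.zero_grad()
        logits = model(images)           # raw logits, shape (N, 1, H, W)
        loss = criterion(logits, masks)  # BCEWithLogitsLoss expects raw logits
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    return running_loss / max(len(loader), 1)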
Code Example #9
import sys
import numpy as np
import tensorflow as tf
# segnet() is assumed to come from the project's model definition (not shown here)

# specify model, datasets
prefix = 'weights/chr20.full.conv3.168'
if len(sys.argv) > 1:
    dataset = sys.argv[1]
else:
    dataset = "test_10gen.no_OCE_WAS"
in_x="/scratch/users/magu/deepmix/data/simulated_chr20/numpy/"+dataset+".query.npz"
in_y="/scratch/users/magu/deepmix/data/simulated_chr20/label/"+dataset+".result.npz"
print(in_x)
print(in_y)

# consider proper variants
v = np.loadtxt(prefix+'.var_index.txt', dtype=int)

# declare model, compile, load weights -- perhaps make this automated with the file?
model=segnet(input_shape=(v.shape[0], 2), n_classes=5, n_blocks=4, n_filters=16, width=16)
model.compile(tf.keras.optimizers.Adam(lr=1e-4), loss='categorical_crossentropy', metrics=['accuracy'])
model.load_weights(prefix+'.h5')


# load data
anc=np.array(['AFR','EAS','EUR','NAT','SAS']) # fixed for all test datasets
x=np.load(in_x)
y=np.load(in_y)
X=x['G'][:,v,:]
S=x['S']
V=x['V'][v]
Y=y['L'][np.ix_(np.array([np.where(y['S']==s)[0][0] for s in S]),v)]-1
print(X.shape, Y.shape)
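The snippet ends after loading the tensors; an illustrative continuation that runs the network and scores accuracy against the labels (assumptions: model.predict returns class probabilities of shape (samples, variants, 5) and Y holds the matching integer labels):

# illustrative evaluation, not part of the original script
P = model.predict(X, batch_size=4)           # (samples, variants, 5)
acc = np.mean(np.argmax(P, axis=-1) == Y)    # fraction of correctly labelled variants
print('overall accuracy: {:.4f}'.format(acc))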

Code Example #10
batch_size = 32
optim_type = 'adam'
learning_rate = 0.001
sum_time = 0
if (args.net == 'unet') and (args.block == '5'):
    model = unet_model.ZF_UNET_224(subseq=subseq, filters=32, INPUT_CHANNELS=N_FEATURES, OUTPUT_MASK_CHANNELS=act_classes)
elif (args.net == 'unet') and (args.block == '4'):
    model = unet_model.ZF_UNET_224_4(subseq=subseq, filters=32, INPUT_CHANNELS=N_FEATURES, OUTPUT_MASK_CHANNELS=act_classes)
elif (args.net == 'unet') and (args.block == '3'):
    model = unet_model.ZF_UNET_224_3(subseq=subseq, filters=32, INPUT_CHANNELS=N_FEATURES,
                                     OUTPUT_MASK_CHANNELS=act_classes)
elif (args.net == 'unet') and (args.block == '2'):
    model = unet_model.ZF_UNET_224_2(subseq=subseq, filters=32, INPUT_CHANNELS=N_FEATURES,
                                     OUTPUT_MASK_CHANNELS=act_classes)
elif (args.net == 'segnet') and (args.block == '5'):
    model = segnet.segnet(subseq=subseq, INPUT_CHANNELS=N_FEATURES, filters=64, n_labels=act_classes, kernel=3,
                          pool_size=(1, 2))
elif args.net == 'fcn':
    model = unet_model.FCN(inputsize=subseq, deconv_output_size=subseq, INPUT_CHANNELS=N_FEATURES,
                           num_classes=act_classes)
elif args.net == 'maskrcnn':
    model = maskrcnn.Mask(subseq=28, INPUT_CHANNELS=N_FEATURES, filters=32, n_labels=act_classes, kernel=3)

# model = segnet.segnet(subseq=subseq, INPUT_CHANNELS=N_FEATURES,filters=64, n_labels = act_classes, kernel=3, pool_size=(1, 2))
# model = segnet.segnet4(subseq=subseq, INPUT_CHANNELS=N_FEATURES,filters=64, n_labels = act_classes, kernel=3, pool_size=(1, 2))
# model = segnet.segnet3(subseq=subseq, INPUT_CHANNELS=N_FEATURES,filters=64, n_labels = act_classes, kernel=3, pool_size=(1, 2))
# model = segnet.segnet2(subseq=subseq, INPUT_CHANNELS=N_FEATURES,filters=64, n_labels = act_classes, kernel=3, pool_size=(1, 2))

# model = maskrcnn.Mask(subseq=28, INPUT_CHANNELS=N_FEATURES, filters=32, n_labels=act_classes,kernel=3)

# model = unet_model.FCN(inputsize=subseq,deconv_output_size=subseq,INPUT_CHANNELS=N_FEATURES,num_classes=act_classes)
# model = unet_model.ZF_UNET_224_3(subseq=subseq,filters=32, INPUT_CHANNELS=N_FEATURES, OUTPUT_MASK_CHANNELS=act_classes)
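The batch_size, optim_type and learning_rate defined at the top are presumably used to compile and fit the selected Keras model; a minimal illustrative continuation under that assumption (the loss choice and the Keras 2-style Adam(lr=...) call are guesses, not taken from the file):

# illustrative compile step, not part of the original file
from keras.optimizers import Adam
optimizer = Adam(lr=learning_rate) if optim_type == 'adam' else optim_type
model.compile(optimizer=optimizer, loss='categorical_crossentropy', metrics=['accuracy'])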
Code Example #11
File: train.py  Project: ynulonger/Segnet-pytorch
def main():
    opt.manualSeed = random.randint(1, 10000)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)
    # dataset = SegDataset(opt.dataset_root, '../datasets/ycb/dataset_config/train_data_list_debug.txt', True, 30)

    dataset = SegDataset(opt.dataset_root,
                         '../datasets/ycb/dataset_config/train_data_list.txt',
                         True, 5000)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=opt.batch_size,
                                             shuffle=True,
                                             num_workers=int(opt.workers))
    # test_dataset = SegDataset(opt.dataset_root, '../datasets/ycb/dataset_config/train_data_list_debug.txt', False, 30)

    test_dataset = SegDataset(
        opt.dataset_root, '../datasets/ycb/dataset_config/test_data_list.txt',
        False, 1000)
    test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size=1,
                                                  shuffle=True,
                                                  num_workers=int(opt.workers))

    print(len(dataset), len(test_dataset))  # 5000 1000

    model = segnet()
    model = model.cuda()
    print("device count:", torch.cuda.device_count())
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUS!")
        device_ids = [0, 1, 2, 3]
        model = nn.DataParallel(model, device_ids=device_ids)
    else:
        model = nn.DataParallel(
            model, device_ids=[0])  # adjust the device ids to your setup

    if opt.resume_model != '':
        print('resume train model')
        checkpoint = torch.load('{0}/{1}'.format(opt.model_save_path,
                                                 opt.resume_model))
        model.load_state_dict(checkpoint)
        for log in os.listdir(opt.log_dir):
            os.remove(os.path.join(opt.log_dir, log))

    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    criterion = Loss()
    best_val_cost = np.Inf
    st_time = time.time()

    for epoch in range(1, opt.n_epochs):
        model.train()
        train_all_cost = 0.0
        train_time = 0
        logger = setup_logger(
            'epoch%d' % epoch,
            os.path.join(opt.log_dir, 'epoch_%d_log.txt' % epoch))
        logger.info('Train time {0}'.format(
            time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) +
            ', ' + 'Training started'))

        for i, data in enumerate(dataloader, 0):
            rgb, target = data
            rgb, target = Variable(rgb).cuda(), Variable(target).cuda()
            semantic = model(rgb)
            optimizer.zero_grad()
            semantic_loss = criterion(semantic, target)
            train_all_cost += semantic_loss.item()
            semantic_loss.backward()
            optimizer.step()

            # print('rgb.shape', rgb.shape)     # [1, 3, 480, 640]
            # print('target.shape', target.shape)       # [1, 480, 640]
            # print('semantic.shape', semantic.shape)       # [1, 22, 480, 640]
            logger.info('Train time {0} Batch {1} CEloss {2}'.format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(time.time() - st_time)), train_time,
                semantic_loss.item()))
            if train_time != 0 and train_time % 1000 == 0:
                torch.save(
                    model.state_dict(),
                    os.path.join(opt.model_save_path, 'model_current.pth'))
            train_time += 1

        train_all_cost = train_all_cost / train_time
        logger.info('Train Finish Avg CEloss: {0}'.format(train_all_cost))
        logger.info('epoch:{0}'.format(epoch))
        # writer.add_image('rgb', rgb.reshape([3, 480, 640]), epoch)
        # writer.add_image('target', target, epoch)
        # writer.add_image('semantic', semantic.reshape([22,480,640]), epoch)
        writer.add_scalar('train_loss_paral', semantic_loss, epoch)

        torch.cuda.empty_cache()

        model.eval()
        test_all_cost = 0.0
        test_time = 0
        logger = setup_logger(
            'epoch%d_test' % epoch,
            os.path.join(opt.log_dir, 'epoch_%d_test_log.txt' % epoch))
        logger.info('Test time {0}'.format(
            time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - st_time)) +
            ', ' + 'Testing started'))
        for j, data in enumerate(test_dataloader, 0):
            rgb, target = data
            rgb, target = Variable(rgb).cuda(), Variable(target).cuda()
            semantic = model(rgb)
            semantic_loss = criterion(semantic, target)
            test_all_cost += semantic_loss.item()
            test_time += 1
            logger.info('Test time {0} Batch {1} CEloss {2}'.format(
                time.strftime("%Hh %Mm %Ss",
                              time.gmtime(time.time() - st_time)), test_time,
                semantic_loss.item()))

        test_all_cost = test_all_cost / test_time
        logger.info('Test Finish Avg CEloss: {0}'.format(test_all_cost))
        writer.add_scalar('test_semantic_paral', semantic_loss, epoch)

        torch.cuda.empty_cache()

        # TODO: save model for multi- or single gpu!!!
        if test_all_cost <= best_val_cost:
            best_val_cost = test_all_cost
            torch.save(
                model.state_dict(),
                os.path.join(opt.model_save_path,
                             'model_{}_{}.pth'.format(epoch, test_all_cost)))
            print('----------->BEST SAVED<-----------')
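setup_logger() is called above but not shown; a typical minimal implementation of such a helper (an assumption, given only so the excerpt reads standalone):

import logging

def setup_logger(logger_name, log_file, level=logging.INFO):
    # create (or fetch) a named logger that appends to the given file
    logger = logging.getLogger(logger_name)
    file_handler = logging.FileHandler(log_file, mode='a')
    file_handler.setFormatter(logging.Formatter('%(asctime)s : %(message)s'))
    logger.setLevel(level)
    logger.addHandler(file_handler)
    return logger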
Code Example #12
 def __init__(self, path='./face_detection/saved_model/model'):
     import tensorflow as tf
     self.net = segnet.segnet()
     self.output = self.net.output
     self.path = path
Code Example #13
# save_file = 'weights_fcn.pth'
# model = AutoEncoder(labels).to(device)
# model = nn.DataParallel(model)
# if(resume):
#     model.load_state_dict(torch.load(save_file))
#     f = open(log, "a")
# else:
#     f = open(log, "w")




############ Segnet ####################
save_file = 'weights_segnet.pth'
vgg16 = models.vgg16_bn(pretrained=True)
model = segnet(num_classes)
model.init_vgg16_params(vgg16)
model = model.to(device)
if resume:
    model.load_state_dict(torch.load(save_file))
    print('yes')
    f = open(log, "a")
else:
    f = open(log, "w")

########### Transforms ###########
mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
input_transforms = transforms.Compose([
        transforms.Resize(img_size, interpolation = 1),
        transforms.ToTensor(),
Code Example #14
def train(chrom=20,
          out='segnet_weights',
          no_generator=False,
          batch_size=4,
          num_epochs=100,
          dropout_rate=0.01,
          input_dropout_rate=0.01,
          batch_norm=False,
          filter_size=8,
          pool_size=4,
          num_blocks=5,
          num_filters=8,
          var_start=0,
          num_var=int(1e9),
          bp_start=0,
          bp_end=int(1e9),
          array_only=False,
          continue_train=True,
          ivw=False,
          random_batch=False,
          admix=False):
    ## Load data
    X, Y, S, V, train_ix, v1, v2 = load_train_set(chm=chrom,
                                                  ix=var_start,
                                                  count=num_var,
                                                  bp1=bp_start,
                                                  bp2=bp_end)
    X_dev, Y_dev, S_dev = load_dev_set(chm=chrom,
                                       ix=var_start,
                                       count=num_var,
                                       bp1=bp_start,
                                       bp2=bp_end)
    # filter variants, get counts of variants, alleles, ancestries
    vs = filter_ac(X[:, v1:v2, :], ac=1)
    nv = np.sum(vs) - (np.sum(vs) % (pool_size**num_blocks))
    na = X.shape[-1]
    vs = np.array([False for _ in range(v1 - 1)] +
                  [i and s <= nv for i, s in zip(vs, np.cumsum(vs))] +
                  [False for _ in range(v2, X.shape[1])])  # update truncation
    if os.path.exists(out + '.var_index.txt'):
        vs = np.genfromtxt(out + '.var_index.txt', dtype=int)
        nv = vs.shape[0]
    else:
        np.savetxt(out + '.var_index.txt', np.arange(len(vs))[vs], fmt='%i')

    # subset
    anc = np.array([0, 1, 2, 3, 5])  # ancestry indexes -- 4 is OCE, 6 is WAS
    X = X[np.ix_(train_ix, vs, np.arange(na))]
    Y = Y[np.ix_(train_ix, vs, anc)]
    X_dev = X_dev[:, vs, :na]
    Y_dev = Y_dev[np.ix_(np.arange(Y_dev.shape[0]), vs,
                         np.arange(anc.shape[0]))]
    nc = Y.shape[-1]

    ## Create model, declare optimizer
    os.system('echo "pre-model"; nvidia-smi')
    model = segnet(input_shape=(nv, na),
                   n_classes=nc,
                   width=filter_size,
                   n_filters=num_filters,
                   pool_size=pool_size,
                   n_blocks=num_blocks,
                   dropout_rate=dropout_rate,
                   input_dropout_rate=input_dropout_rate,
                   l2_lambda=1e-30,
                   batch_normalization=batch_norm)
    adam = optimizers.Adam(lr=1e-4)
    os.system('echo "post-compile"; nvidia-smi')

    ## Compile model and summarize
    model.compile(optimizer=adam,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    print(model.summary())

    if continue_train and os.path.exists(out +
                                         '.h5') and os.path.exists(out +
                                                                   '.log.csv'):
        model.load_weights(out + '.h5')
        bb = np.genfromtxt(out + '.log.csv',
                           delimiter=',')[-1, 0]  # last logged epoch, to subtract off previous batches
        print("continuing training from batch {}...".format(bb))
    else:
        bb = 0  # previous batches is zero

    ## Train model
    es = callbacks.EarlyStopping(monitor='val_loss',
                                 mode='min',
                                 verbose=1,
                                 patience=10)
    wt = callbacks.ModelCheckpoint(out + ".h5",
                                   monitor='val_loss',
                                   mode='min',
                                   verbose=1,
                                   save_best_only=True)
    lg = callbacks.CSVLogger(out + '.log.csv',
                             separator=",",
                             append=continue_train)
    cw = np.sqrt(1 / Y.sum(axis=0).sum(axis=0)) if ivw else np.ones(
        (Y.shape[-1], ))
    if no_generator:
        history = model.fit(X,
                            Y,
                            validation_data=(X_dev, Y_dev),
                            batch_size=batch_size,
                            epochs=num_epochs - int(bb),
                            callbacks=[es, wt, lg],
                            class_weight=cw)
    else:
        params = {
            'X': X,
            'Y': Y,
            'dim': nv,
            'batch_size': batch_size,
            'n_classes': nc,
            'n_alleles': anc.shape[0],
            'train_ix': np.arange(X.shape[0])
        }
        param2 = {
            'X': X_dev,
            'Y': Y_dev,
            'dim': nv,
            'batch_size': batch_size,
            'n_classes': nc,
            'n_alleles': anc.shape[0],
            'train_ix': np.arange(X_dev.shape[0])
        }
        anc_fq = Y[:, 0, :].sum(axis=0)
        anc_wt = ((1 / anc_fq) /
                  ((1 / anc_fq).sum())).flatten() if random_batch else np.ones(
                      (Y.shape[-1], ))
        history = model.fit_generator(
            generator=DataGenerator(**params,
                                    sample=random_batch,
                                    anc_wts=anc_wt,
                                    admix=admix),
            validation_data=DataGenerator(**param2),
            epochs=num_epochs - int(bb),
            callbacks=[es, wt, lg],
            class_weight=cw)
    ## Save model weights and return
    model.save_weights(out + '.h5')
    return history
Code Example #15
File: train.py  Project: khmariem/DenseFusion
                         True, 5000)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=opt.batch_size,
                                             shuffle=True,
                                             num_workers=int(opt.workers))
    test_dataset = SegDataset(
        opt.dataset_root, '../datasets/wrs/dataset_config/test_data_list.txt',
        False, 1000)
    test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size=1,
                                                  shuffle=True,
                                                  num_workers=int(opt.workers))

    print(len(dataset), len(test_dataset))

    model = segnet()
    model = model.cuda()

    if opt.resume_model != '':
        checkpoint = torch.load('{0}/{1}'.format(opt.model_save_path,
                                                 opt.resume_model))
        model.load_state_dict(checkpoint)
        for log in os.listdir(opt.log_dir):
            os.remove(os.path.join(opt.log_dir, log))

    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    criterion = Loss()
    best_val_cost = np.Inf
    st_time = time.time()

    for epoch in range(1, opt.n_epochs):
Code Example #16
File: segnet_trainbatch.py  Project: ShashKash/SemSeg
num_batches = 636
num_channels = 3

X = tf.placeholder(tf.float32,
                   shape=(batch_size, 256, 256, num_channels),
                   name="myInput")
sparse_label = tf.placeholder(tf.int32,
                              shape=(batch_size, 256, 256),
                              name="myOutput")
class_weights = tf.placeholder(tf.float32,
                               shape=(num_classes,),
                               name="class_weights")

global_step = tf.Variable(0, name='global_step', trainable=False)

logits = segnet(X, num_classes)
# unweighted_cost = sparse_unweighted_cost(logits, sparse_label, num_classes)
# weighted_cost = sparse_weighted_cost(logits, sparse_label, class_weights, num_classes)

summ1 = tf.summary.scalar(
    'unweighted_cost', sparse_unweighted_cost(logits, sparse_label,
                                              num_classes))
summ2 = tf.summary.scalar(
    'weighted_cost',
    sparse_weighted_cost(logits, sparse_label, class_weights, num_classes))

learning_rate_node = tf.train.exponential_decay(learning_rate=learning_rate,
                                                global_step=global_step,
                                                decay_steps=1000,
                                                decay_rate=0.95,
                                                staircase=True)
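The excerpt stops before a training op is built from the decayed learning rate; a plausible continuation in the same TF1 style (assuming sparse_weighted_cost is the project's own loss helper referenced in the summaries above):

# illustrative continuation, not part of the original file
cost = sparse_weighted_cost(logits, sparse_label, class_weights, num_classes)
train_op = tf.train.AdamOptimizer(learning_rate_node).minimize(cost, global_step=global_step)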
Code Example #17
File: seg_train.py  Project: lfs119/my_paddle
    opt.manualSeed = random.randint(1, 10000)
    random.seed(opt.manualSeed)
    torch.manual_seed(opt.manualSeed)

    dataset = SegDataset('train', opt.dataset_root, True)
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=opt.batch_size,
                                             shuffle=True,
                                             num_workers=int(opt.workers))
    test_dataset = SegDataset('test', opt.dataset_root, False)
    test_dataloader = torch.utils.data.DataLoader(test_dataset,
                                                  batch_size=1,
                                                  shuffle=True,
                                                  num_workers=int(opt.workers))

    model = segnet(input_nbr=1, label_nbr=2)
    model = model.cuda()
    if not os.path.exists(opt.log_dir): os.mkdir(opt.log_dir)
    if opt.resume_model != '':
        checkpoint = torch.load('{0}/{1}'.format(opt.model_save_path,
                                                 opt.resume_model))
        model.load_state_dict(checkpoint)
        for log in os.listdir(opt.log_dir):
            os.remove(os.path.join(opt.log_dir, log))
        print("load model!")
    optimizer = optim.Adam(model.parameters(), lr=opt.lr)
    criterion = Loss()
    best_val_cost = np.Inf
    st_time = time.time()

    for epoch in range(1, opt.n_epochs):