Example #1
from mxnet import gluon, initializer


def nn1(n):
    """Build an MLP with n extra 192-unit hidden layers, Xavier-initialized."""
    net = gluon.nn.Sequential()
    net.add(gluon.nn.Dense(192, activation='relu'))
    for _ in range(n):
        net.add(gluon.nn.Dense(192, activation='relu'))
    net.add(gluon.nn.Dense(96, activation='relu'))
    # Xavier (Glorot) init keeps activation variance roughly constant across layers
    net.initialize(init=initializer.Xavier())
    return net
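A minimal usage sketch (assuming MXNet 1.x and the imports above; the batch and feature sizes are illustrative). Gluon defers parameter allocation, so weight shapes are inferred on the first forward pass:

from mxnet import nd

net = nn1(n=2)               # three 192-unit hidden layers plus the 96-unit layer
out = net(nd.ones((4, 32)))  # first call triggers shape inference and Xavier init
print(out.shape)             # (4, 96)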
Example #2
 def __init__(self, c2, kernel_size=3, strides=1, padding=1,
              activation='relu', bias=False):
     super(BasicConv, self).__init__()
     # pass the activation argument through instead of hardcoding 'relu'
     self.c1 = nn.Conv3D(c2, kernel_size=kernel_size,
                         strides=strides, padding=padding,
                         activation=activation,
                         weight_initializer=init.Xavier(),
                         use_bias=bias)
     self.bn = nn.BatchNorm()
Example #3
from mxnet import initializer


def build_initializer(type, kerasDefaults, constant=0.):
    
    if type == 'constant':
        return initializer.Constant(constant)
    
    elif type == 'uniform':
        return initializer.Uniform(scale=kerasDefaults['maxval_uniform'])

    elif type == 'normal':
        return initializer.Normal(sigma=kerasDefaults['stddev_normal'])

    elif type == 'glorot_uniform':
        return initializer.Xavier(rnd_type='uniform', factor_type='avg', magnitude=3.)

    elif type == 'lecun_uniform':
        return initializer.Xavier(rnd_type='uniform', factor_type='in', magnitude=3.)

    elif type == 'he_normal':
        return initializer.Xavier(rnd_type='gaussian', factor_type='in', magnitude=2.)
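A usage sketch under stated assumptions (the kerasDefaults keys follow the snippet above; the values are illustrative):

from mxnet import gluon

kerasDefaults = {'maxval_uniform': 0.05, 'stddev_normal': 0.05}
w_init = build_initializer('glorot_uniform', kerasDefaults)
gluon.nn.Dense(10).initialize(init=w_init)

Note that 'he_normal' is expressed as Xavier with rnd_type='gaussian', factor_type='in' and magnitude=2, which yields the He-initialization standard deviation sqrt(2/fan_in).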
Example #4
 def load_params(self,
                 inference,
                 init=initializer.Uniform(),
                 postfix='epoch'):
     """
     load the parameters
     :param inference: network
     :param init: initializer function
     :param postfix: postfix
     :return:
     """
     if self.args.training:
         if self.args.pretrained:
             # print('load the weights from path: %s' % self.args.model_path)
             print('load the weights for features from path: %s' %
                   self.args.model_path)
             inference.features.load_parameters(self.args.model_path,
                                                self.args.ctx,
                                                ignore_extra=True)
             print('initialize the weights for embeds and output')
             inference.embeds.initialize(
                 init=initializer.Xavier(magnitude=2.24), ctx=self.args.ctx)
             inference.output.initialize(
                 init=initializer.Xavier(magnitude=2.24), ctx=self.args.ctx)
         elif self.args.model_path.endswith('.params'):
             print('load the weights from path: %s' % self.args.model_path)
             inference.load_parameters(self.args.model_path, self.args.ctx)
         elif self.args.start_epoch > 0:
             print('load the weights from path: %s' % os.path.join(
                 self.args.ckpt, '%s-%s-%04d.params' %
                 (self.args.bb, postfix, 0)))
             inference.load_parameters(
                 os.path.join(
                     self.args.ckpt,
                     '%s-%s-%04d.params' % (self.args.bb, postfix, 0)),
                 self.args.ctx)
         else:
             print('Initialize the weights')
             inference.initialize(init, ctx=self.args.ctx)
     else:
         print('load the weights from path: %s' % self.args.model_path)
         inference.load_parameters(self.args.model_path, self.args.ctx)
Example #5
def review_network(net,
                   use_symbol=False,
                   timing=True,
                   num_rep=1,
                   dir_out='',
                   print_model_size=False):
    """inspect the network architecture & input - output
    use_symbol: set True to inspect the network in details
    timing: set True to estimate inference time of the network
    num_rep: number of inference"""
    # from my_func import get_model_size

    shape = (6, 4, 16, 160, 160)
    if use_symbol:
        x1 = symbol.Variable('x1')
        x2 = symbol.Variable('x2')
        y = net(x1, x2)
        if print_model_size:
            get_model_size(y, to_print=False)
        viz.plot_network(y,
                         shape={
                             'x1': shape,
                             'x2': shape
                         },
                         node_attrs={
                             "fixedsize": "false"
                         }).view('%sDenseMultipathNet' % dir_out)
    else:
        x1 = nd.random_normal(0.1, 0.02, shape=shape, ctx=ctx)
        x2 = nd.random_normal(0.1, 0.02, shape=shape, ctx=ctx)
        net.collect_params().initialize(initializer.Xavier(magnitude=2),
                                        ctx=ctx)
        net.hybridize(static_alloc=True, static_shape=True)

        if timing:
            s1 = time.time()
            y = net(x1, x2)
            y.wait_to_read()
            print("First run: %.5f" % (time.time() - s1))

            import numpy as np
            times = np.zeros(num_rep)
            for t in range(num_rep):
                # draw fresh inputs each repetition so graph caching cannot hide cost
                x1 = nd.random_normal(0.1, 0.02, shape=shape, ctx=ctx)
                x2 = nd.random_normal(0.1, 0.02, shape=shape, ctx=ctx)
                s2 = time.time()
                y = net(x1, x2)
                y.wait_to_read()
                times[t] = time.time() - s2
            print("Run with hybrid network: %.5f" % times.mean())
        else:
            y = net(x1, x2)
        print("Input size: ", x1.shape)
        print("Output size: ", y.shape)
Example #6
 def initialize(
     self,
     init=initializer.Uniform(),
     ctx=None,
     verbose=False,
     force_reinit=False,
 ):
     super(Model, self).initialize(
         init=initializer.Xavier(rnd_type="uniform"),
         ctx=ctx,
         verbose=verbose,
         force_reinit=force_reinit,
     )
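Design note: this override silently discards whatever initializer the caller passes (including the Uniform default in its own signature) and always applies Xavier with uniform sampling. If the caller's choice should be honored, pass init through to the parent instead of replacing it.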
Example #7
from mxnet import init


def generate_initializer(init_dict):
    if init_dict is None:
        return init.Normal()
    init_type = init_dict['type']
    init_param = init_dict['init_config']

    # currently Uniform, Normal, Xavier, MSRAPrelu are supported
    if init_type == 'Uniform':
        scale = float(init_param['scale'])
        return init.Uniform(scale)
    if init_type == 'Normal':
        sigma = float(init_param['sigma'])
        return init.Normal(sigma)
    # Xavier
    if init_type == 'Xavier':
        magnitude = float(init_param['magnitude'])
        return init.Xavier(magnitude=magnitude)
    # PReLU
    if init_type == 'MSRAPrelu':
        slope = float(init_param['slope'])
        return init.MSRAPrelu(factor_type='avg', slope=slope)
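A usage sketch with an illustrative config dict (the 'type' and 'init_config' keys follow the snippet above):

init_dict = {'type': 'Xavier', 'init_config': {'magnitude': 2.0}}
w_init = generate_initializer(init_dict)  # equivalent to init.Xavier(magnitude=2.0)

Note that the function falls through and implicitly returns None for unrecognized types; raising a ValueError in a final else branch would fail faster.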
Example #8
    arr = np.random.rand(0,2)
    arr_grad = np.empty_like(arr)

    autograd.mark_variables([arr], [arr_grad])
    with autograd.record():
        res = npx.leaky_relu(arr)
    res.backward()

@use_np
@pytest.mark.parametrize('initializer',[
    'zeros', 'ones', initializer.Constant(3),
    initializer.Uniform(),
    initializer.Normal(),
    initializer.Orthogonal(),
    initializer.Orthogonal(rand_type='normal'),
    initializer.Xavier(),
    initializer.Xavier(rnd_type='gaussian'),
    initializer.MSRAPrelu(),
    initializer.MSRAPrelu(factor_type='in'),
    initializer.MSRAPrelu(factor_type='out'),
    initializer.LSTMBias(),
])
@pytest.mark.parametrize('dtype', [
    'float32', 'float64'
])
def test_19118(initializer, dtype):
    net = gluon.nn.Dense(16, in_units=16)
    net.cast(dtype)
    net.initialize(initializer)
    net.hybridize()
    net(np.zeros((16, 16), dtype=dtype))
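Going by its name, this appears to be a regression test for an MXNet GitHub issue (presumably #19118): the parametrization runs a forward pass through a hybridized Dense layer with a broad set of built-in initializers at both float32 and float64, so any initializer/dtype combination that breaks after cast() fails loudly.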
Example #9
    def __init__(self,
                 basenetwork='resnet50_v2',
                 pretrained=True,
                 feature_channels=512,
                 classes=751,
                 laststride=2,
                 withpcb=True,
                 partnum=6,
                 feature_weight_share=False,
                 withrpp=True,
                 **kwargs):

        super(PCBRPPNet, self).__init__(**kwargs)
        basenetwork = eval(basenetwork)  # resolve the constructor named by the string
        self.withpcb = withpcb
        self.withrpp = withrpp
        if self.withrpp and not self.withpcb:
            raise "If withrpp is True, with pcb must be True."
        self.feature_weight_share = feature_weight_share
        self.partnum = partnum

        self.conv = basenetwork(pretrained=pretrained,
                                laststride=laststride,
                                ctx=cpu())
        if not pretrained:
            self.conv.collect_params().initialize(init=init.Xavier(),
                                                  ctx=cpu())

        self.pool = nn.GlobalAvgPool2D()
        self.dropout = nn.Dropout(rate=0.5)

        if not self.withpcb or self.feature_weight_share:
            self.feature = nn.HybridSequential(prefix='')
            with self.feature.name_scope():
                self.feature.add(
                    nn.Dense(feature_channels,
                             activation=None,
                             use_bias=False,
                             flatten=True))
                self.feature.add(nn.BatchNorm())
                self.feature.add(nn.LeakyReLU(alpha=0.1))
            self.feature.hybridize()
            self.classifier = nn.Dense(classes, use_bias=False)
            self.feature.collect_params().initialize(init=init.Xavier(),
                                                     ctx=cpu())
            self.classifier.collect_params().initialize(
                init=init.Normal(0.001), ctx=cpu())
        else:
            for pn in range(self.partnum):
                tmp_feature = nn.Dense(feature_channels,
                                       activation=None,
                                       use_bias=False,
                                       flatten=True)
                tmp_classifier = nn.Dense(classes, use_bias=False)
                tmp_feature.collect_params().initialize(init=init.Xavier(),
                                                        ctx=cpu())
                tmp_classifier.collect_params().initialize(
                    init=init.Normal(0.001), ctx=cpu())
                setattr(self, 'feature%d' % (pn + 1), tmp_feature)
                setattr(self, 'classifier%d' % (pn + 1), tmp_classifier)

        if self.withrpp:
            # from ..init.rppinit import RPP_Init
            # rpp_init = RPP_Init(mean=0.0, sigma=0.001)
            self.rppscore = nn.Conv2D(self.partnum,
                                      kernel_size=1,
                                      use_bias=False)
            self.rppscore.collect_params().initialize(init=init.One(),
                                                      ctx=cpu())
Example #10
 def init_params(self):
     """initialize network parameters"""
     self.net.collect_params().initialize(initializer.Xavier(magnitude=2.2), ctx=self.ctx)
Example #11
def train(args):
    np.random.seed(args.seed)
    if args.gpu:
        ctx = [mx.gpu(0)]
    else:
        ctx = [mx.cpu(0)]
    if args.dataset == "Sony":
        out_channels = 12
        scale = 2
    else:
        out_channels = 27
        scale = 3

    # load data
    train_transform = utils.Compose([
        utils.RandomCrop(args.patch_size, scale),
        utils.RandomFlipLeftRight(),
        utils.RandomFlipTopBottom(),
        utils.RandomTranspose(),
        utils.ToTensor(),
    ])
    train_dataset = data.MyDataset(args.dataset,
                                   "train",
                                   transform=train_transform)
    val_transform = utils.Compose([utils.ToTensor()])
    val_dataset = data.MyDataset(args.dataset, "val", transform=val_transform)
    train_loader = gluon.data.DataLoader(train_dataset,
                                         shuffle=True,
                                         batch_size=args.batch_size,
                                         last_batch='rollover')
    val_loader = gluon.data.DataLoader(val_dataset,
                                       batch_size=1,
                                       last_batch='discard')
    unet = net.UNet(out_channels, scale)
    unet.initialize(init=initializer.Xavier(), ctx=ctx)

    # optimizer and loss
    trainer = gluon.Trainer(unet.collect_params(), 'adam',
                            {'learning_rate': args.lr})
    l1_loss = gluon.loss.L1Loss()

    print "Start training now.."
    for i in range(args.epochs):
        total_loss = 0
        count = 0
        for batch_id, (img, gt) in enumerate(train_loader):
            batch_size = img.shape[0]
            count += batch_size
            img_list = gluon.utils.split_and_load(img[0], ctx)
            gt_list = gluon.utils.split_and_load(gt[0], ctx)
            with autograd.record():
                preds = [unet(x) for x in img_list]
                losses = []
                for ii in range(len(preds)):
                    loss = l1_loss(gt_list[ii], preds[ii])
                    losses.append(loss)
            for loss in losses:
                loss.backward()
            total_loss += sum([l.sum().asscalar() for l in losses])
            avg_loss = total_loss / count
            trainer.step(batch_size)
            metric.update(gt_list, preds)
            F.waitall()
            # keep the latest batch so a sample image can be written at epoch end
            gt_save = gt_list[0]
            output_save = preds[0]

            if (batch_id + 1) % 100 == 0:
                message = "Epoch {}: [{}/{}]: l1_loss: {:.4f}".format(
                    i + 1, count, len(train_dataset), avg_loss)
                print(message)
        temp = F.concat(gt_save, output_save, dim=3)
        temp = temp.asnumpy().reshape(temp.shape[2], temp.shape[3], 3)
        scipy.misc.toimage(temp * 255,
                           high=255,
                           low=0,
                           cmin=0,
                           cmax=255,
                           mode='RGB').save(args.save_model_dir +
                                            '%04d_%05d_00_train.jpg' %
                                            (i + 1, count))

        # evaluate
        batches = 0
        avg_psnr = 0.
        for img, gt in val_loader:
            batches += 1
            imgs = gluon.utils.split_and_load(img[0], ctx)
            label = gluon.utils.split_and_load(gt[0], ctx)
            outputs = []
            for x in imgs:
                outputs.append(unet(x))
            metric.update(label, outputs)
            avg_psnr += 10 * math.log10(1 / metric.get()[1])
            metric.reset()
        avg_psnr /= batches
        print('Epoch {}: validation avg psnr: {:.3f}'.format(i + 1, avg_psnr))

        # save model
        if (i + 1) % args.save_freq == 0:
            save_model_filename = "Epoch_" + str(i + 1) + ".params"
            save_model_path = os.path.join(args.save_model_dir,
                                           save_model_filename)
            unet.save_params(save_model_path)
            print("\nCheckpoint, trained model saved at", save_model_path)

    # save model
    save_model_filename = "Final_Epoch_" + str(i + 1) + ".params"
    save_model_path = os.path.join(args.save_model_dir, save_model_filename)
    unet.save_params(save_model_path)
    print("\nCheckpoint, trained model saved at", save_model_path)
Example #12
 def init_params(self):
     """initialize network parameters"""
     self.net.collect_params().initialize(initializer.Xavier(),
                                          ctx=self.ctx)
     print(self.ctx)
Example #13
    print('Total training time: {:.2f} secs'.format(time.time() - tic))


if __name__ == "__main__":
    args = parse_args()
    ctx = set_context(args)
    net = Network(args)
    x = nd.random_normal(0.02,
                         0.2,
                         shape=(args.batch_size, 60, 20, 240, 240),
                         ctx=ctx)

    if args.resumed_epoch < 0:
        net.collect_params().initialize(initializer.Xavier(magnitude=2),
                                        ctx=ctx)
    else:
        net.load_params('%s/checkpoints/Epoch%03d.params' %
                        (args.dir_out, args.resumed_epoch),
                        ctx=ctx)
    net(x)
    net.hybridize()

    loss = SmoothL1Loss()
    sw = SummaryWriter(logdir="%slogs" % args.dir_out,
                       flush_secs=5,
                       filename_suffix='mra',
                       verbose=False)
    if args.validation_only > 0:
        validate_only(args)