Example #1
 def get_body(self, resnet_layer_id):
     '''
     Create the feature extraction network based on resnet50.
     The first convolution layer is converted to accept grayscale input by
     averaging the pretrained weights of its 3 input channels.
     
     Parameters
     ----------
     resnet_layer_id: int
         The resnet_layer_id specifies which layer to take from 
         the bottom of the network.

     Returns
     -------
     network: gluon.nn.HybridSequential
         The body network for feature extraction based on resnet
     '''
     
     pretrained = resnet50_v1(pretrained=True, ctx=self.ctx)
     # A second copy is loaded on CPU just to read out the first-layer weights.
     pretrained_2 = resnet50_v1(pretrained=True, ctx=mx.cpu(0))
     # Average the RGB filters into a single grayscale filter: (64, 3, 7, 7) -> (64, 1, 7, 7).
     first_weights = pretrained_2.features[0].weight.data().mean(axis=1).expand_dims(axis=1)
     # The averaged weights could instead be replaced with one of the individual channels.
     
     body = gluon.nn.HybridSequential()
     with body.name_scope():
         first_layer = gluon.nn.Conv2D(channels=64, kernel_size=(7, 7), padding=(3, 3), strides=(2, 2), in_channels=1, use_bias=False)
         first_layer.initialize(mx.init.Xavier(), ctx=self.ctx)
         first_layer.weight.set_data(first_weights)
         body.add(first_layer)
         body.add(*pretrained.features[1:-resnet_layer_id])
     return body
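A minimal usage sketch (hypothetical: `model` stands for an instance of the surrounding class, with `model.ctx` set):

import mxnet as mx

body = model.get_body(resnet_layer_id=3)  # drop the last 3 feature stages
x = mx.nd.random.uniform(shape=(1, 1, 224, 224), ctx=model.ctx)  # single-channel (grayscale) batch
print(body(x).shape)  # e.g. (1, 512, 28, 28) for resnet50 with resnet_layer_id=3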
Example #2
 def __init__(self, output_layers=7, pretrained=False, ctx=cpu(), **kwargs):
     super(ResNet50, self).__init__(**kwargs)
     if isinstance(output_layers, int):
         output_layers = [
             output_layers,
         ]
     self.output_layers = output_layers
     self.net = vision.resnet50_v1(pretrained=pretrained, ctx=ctx).features
Example #3
    def construct_baseline_model(self):

        out_path = 'TrainedModels/default_name'
        FC_len = 512

        split_pattern = "[ <>]"  # splits model internals for console output

        # Obtain feed-forward data
        self.train_iter.reset()
        for b, batch in enumerate(self.train_iter):

            data = batch.data[0].as_in_context(self.cpu_ctx)
            break

        # Transfer features from here
        resnet50 = vision.resnet50_v1(pretrained=True, ctx=self.cpu_ctx)  # Load pre-trained model
        resnet50.hybridize()

        # Transfer pre-trained features to a new net
        net = vision.resnet50_v1(classes=self.N, prefix='resnetv10_')
        with net.name_scope():
            net.output = nn.Dense(FC_len, flatten=True)
            net.output.collect_params().initialize(mx.init.Xavier(rnd_type='gaussian', factor_type='in', magnitude=2))
        net.features = resnet50.features
        net.hybridize()
        net(data)
        net.export('UntrainedModels/Res50_pretrained_model')

        # Load Resnet 50 Model
        sym50 = mx.sym.load(self.sym_file_bbone)  # Load symbol file
        feature_layer = 'FC1_output'  # Desired output layer
        sym_layer = mx.symbol.FullyConnected(name='FC1', num_hidden=self.N)  # Define dense layer
        composed = sym_layer(FC1_data=sym50, name='FC1')  # Combine with loaded symbol model
        internals = composed.get_internals()  # Fetch internals, for verification

        # Bring together network and feed forward
        net = gluon.nn.SymbolBlock(outputs=internals[feature_layer], inputs=mx.sym.var('data'))
        net.collect_params('^FC').initialize(mx.init.Constant(0.1))
        net.collect_params('^FC1_output').initialize(mx.init.Xavier(rnd_type='gaussian', factor_type='in', magnitude=2))
        net.collect_params('^(?!FC).*$').load(self.param_file_bbone, ignore_extra=True)
        result = net(data)

        # Export the modified baseline model: an FC_len-dim dense layer feeding an N-way FC1 output
        print("Exporting to: %s \n" % out_path)
        net.export(out_path)
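The collect_params calls above rely on gluon's regex selection: given a pattern, collect_params returns only the parameters whose names match it, so '^FC' picks out the new dense layer while '^(?!FC).*$' selects everything else for loading from the backbone file. A standalone sketch (not from this class):

import mxnet as mx
from mxnet import gluon

dense = gluon.nn.Dense(10, prefix='FC1_')
dense.collect_params('^FC').initialize(mx.init.Constant(0.1))  # matches FC1_weight and FC1_bias
print(list(dense.collect_params('^FC').keys()))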
Example #4
 def __init__(self, pretrained, ctx=cpu(), **kwargs):
     super(Resnet50Backbone, self).__init__(**kwargs)
     self.output_layers = {
         2: "c1",
         4: "c2",
         5: "c3",
         6: "c4",
         7: "c5",
     }
     self._net = vision.resnet50_v1(pretrained=pretrained, ctx=ctx).features
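A sketch (assumed, since the class's forward pass is not shown here) of how the output_layers mapping can tap the intermediate feature maps c1-c5 out of the backbone:

import mxnet as mx
from mxnet.gluon.model_zoo import vision

features = vision.resnet50_v1(pretrained=False).features
features.initialize()
output_layers = {2: "c1", 4: "c2", 5: "c3", 6: "c4", 7: "c5"}

x = mx.nd.zeros((1, 3, 256, 256))
taps = {}
for i, layer in enumerate(features):
    x = layer(x)
    if i in output_layers:
        taps[output_layers[i]] = x
print({k: v.shape for k, v in taps.items()})  # c1 is the conv-stem output; c2-c5 follow the four residual stages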
Example #5
    def __init__(self, ctx, config, lr_mult=None):
        # the lr_mult argument is ignored; the value is always taken from config
        lr_mult = config['optimizer']['lr_mult']
        lr_schedule = config['optimizer']['learning_rate']
        lr_schedule = [(s, lr * lr_mult) for s, lr in lr_schedule]
        optimizer_type = config['optimizer'].get('type', 'sgd')
        optimizer_params = {'learning_rate': 1e-3, 'wd': 2e-4}
        if optimizer_type == 'sgd':
            optimizer_params['momentum'] = 0.9
        optimizer_params.update(config['optimizer'].get('params', dict()))

        model = vision.resnet50_v1(
            root=r'\\msralab\ProjectData\ScratchSSD\Users\v-dinliu\.mxnet\models',
            pretrained=True,
            ctx=ctx)

        # HybridNetCoarse
        Network = eval(config['network']['class'])  # resolve the network class named in the config
        scale = get_param(config, 'network.scale', 20)
        network = Network(model.features, config)
        network.hybridize()
        for k, v in network.collect_params().items():
            if k.startswith(network.prefix):
                v.initialize(ctx=ctx)

        for k, v in config['optimizer'].get('lr_mult_layer', dict()).items():
            for _, param in getattr(network, k).collect_params().items():
                param.lr_mult = param.lr_mult * v

        trainer = gluon.trainer.Trainer(network.collect_params(),
                                        optimizer_type, optimizer_params)

        super().__init__(network, trainer, lr_schedule, ctx)
        self.epeloss = EpeLoss()
        self.epeloss.hybridize()
        self.color_mean = nd.reshape(nd.array([0.485, 0.456, 0.406]),
                                     [1, 3, 1, 1])
        self.color_std = nd.reshape(nd.array([0.229, 0.224, 0.225]),
                                    [1, 3, 1, 1])
        self.upsampler = Upsample(2, 32)
        self.upsampler.collect_params().initialize(ctx=ctx)
        self.scale = scale

        loss_scales = get_param(config, 'loss.scales', [32])
        loss_weights = get_param(config, 'loss.weights',
                                 [1 for _ in loss_scales])
        self.msloss = MultiscaleEpe(loss_scales,
                                    loss_weights,
                                    match=get_param(config, 'loss.match',
                                                    'downsampling'))
        self.msloss.hybridize()
        self.msloss.collect_params().initialize(ctx=self.ctx)
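Everything above is driven by config; a hypothetical example of the fields this constructor reads (key names inferred from the lookups above, all values placeholders):

config = {
    'optimizer': {
        'lr_mult': 1.0,
        'learning_rate': [(0, 1e-4), (300000, 1e-5)],  # (step, lr) pairs
        'type': 'sgd',
        'params': {'wd': 2e-4},               # merged into optimizer_params
        'lr_mult_layer': {'upsampler': 0.0},  # per-submodule lr multipliers (attribute name assumed)
    },
    'network': {'class': 'HybridNetCoarse', 'scale': 20},
    'loss': {'scales': [32], 'weights': [1], 'match': 'downsampling'},
}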
Example #6
    def __init__(self, pretrained, num_classes, ctx, **kwargs):
        super(ResNet50, self).__init__(**kwargs)
        with self.name_scope():
            model = models.resnet50_v1(pretrained=pretrained, ctx=ctx)
            self.num_classes = num_classes
            self.features = model.features

            # classification layer
            self.classifier = nn.Dense(self.num_classes)

            self.addition = nn.HybridSequential()
            self.addition.add(self.classifier)
            self.addition.initialize(ctx=ctx)
Example #7
    def __init__(self, num_bins, **kwargs):

        ctx = kwargs.pop('ctx')
        super(Gazenet_mxJiang_pitch_zoo, self).__init__(**kwargs)
        self.net = mx.gluon.nn.HybridSequential(prefix='')
        with self.net.name_scope():
            mx.random.seed(int(time.time()))
            self.model_resnet50 = vision.resnet50_v1(pretrained=True,
                                                     ctx=ctx,
                                                     root='./')
            #self.model_resnet50.features[1]._kwargs['use_global_stats'] = True
            freeze_bn(self.model_resnet50.features)
            #print('net features:', self.model_resnet50.features[1])
            self.net.add(self.model_resnet50)
            self.model_pitch = mx.gluon.nn.Dense(num_bins)
            self.model_pitch.collect_params().initialize(
                mx.initializer.Uniform(1 / math.sqrt(2048)),
                ctx=ctx,
                force_reinit=True)
            self.model_pitch.bias.set_data(
                mx.nd.random.uniform(-0.2, 0.2, shape=(num_bins, ), ctx=ctx))
            self.net.add(self.model_pitch)
Example #8
def train():
    # initialization
    ctx = try_gpu(2)
    net = models.resnet50_v1(classes=4)
    net.hybridize()
    net.initialize(ctx=ctx)
    # net.forward(nd.ones((1, 3, 227, 227)).as_in_context(ctx))

    sw = SummaryWriter(
        '/data1/lsp/lsp/pytorch_mnist/log/rotate/mxnet_resnet18')
    # sw.add_graph(net)

    print('initialize weights on', ctx)

    # load the data
    batch_size = 64
    epochs = 10

    train_data = custom_dataset(txt='/data2/dataset/image/train.txt',
                                data_shape=(224, 224),
                                channel=3)
    test_data = custom_dataset(txt='/data2/dataset/image/val.txt',
                               data_shape=(224, 224),
                               channel=3)
    transforms_train = transforms.ToTensor()
    # transforms_train = transforms.Compose([transforms.Resize(227), transforms.ToTensor()])
    train_data_loader = gluon.data.DataLoader(
        train_data.transform_first(transforms_train),
        batch_size=batch_size,
        shuffle=True,
        num_workers=12)

    test_data_loader = gluon.data.DataLoader(
        test_data.transform_first(transforms_train),
        batch_size=batch_size,
        shuffle=True,
        num_workers=12)
    # training
    criterion = gluon.loss.SoftmaxCrossEntropyLoss()

    steps = len(train_data) // batch_size  # batches per epoch

    # decay the learning rate by 10x every 3 epochs, with a floor of 1e-6
    schedule = mx.lr_scheduler.FactorScheduler(step=3 * steps,
                                               factor=0.1,
                                               stop_factor_lr=1e-6)
    sgd_optimizer = mx.optimizer.SGD(learning_rate=0.01, lr_scheduler=schedule)
    trainer = gluon.Trainer(net.collect_params(), optimizer=sgd_optimizer)

    for epoch in range(epochs):
        # test_data.reset()
        start = time.time()
        train_loss = 0.0
        train_acc = 0.0
        cur_step = 0
        n = len(train_data)
        for i, (data, label) in enumerate(train_data_loader):
            label = label.astype('float32').as_in_context(ctx)
            data = data.as_in_context(ctx)
            with autograd.record():
                outputs = net(data)
                loss = criterion(outputs, label)
            loss.backward()
            trainer.step(batch_size)

            cur_loss = loss.sum().asscalar()
            cur_acc = nd.sum(outputs.argmax(axis=1) == label).asscalar()
            train_acc += cur_acc
            train_loss += cur_loss
            if i % 100 == 0:
                batch_time = time.time() - start
                print(
                    'epoch [%d/%d], Iter: [%d/%d]. Loss: %.4f. Accuracy: %.4f, time:%0.4f, lr:%s'
                    % (epoch, epochs, i, steps, cur_loss, cur_acc / batch_size,
                       batch_time, trainer.learning_rate))
                start = time.time()
            cur_step = epoch * steps + i
            sw.add_scalar(tag='Train/loss',
                          value=cur_loss / label.shape[0],
                          global_step=cur_step)
            sw.add_scalar(tag='Train/acc',
                          value=cur_acc / label.shape[0],
                          global_step=cur_step)
            sw.add_scalar(tag='Train/lr',
                          value=trainer.learning_rate,
                          global_step=cur_step)

        val_acc = evaluate_accuracy(test_data_loader, net, ctx)
        sw.add_scalar(tag='Eval/acc', value=val_acc, global_step=cur_step)
        net.save_parameters("models/resnet501/{}_{}.params".format(
            epoch, val_acc))
        print(
            'epoch: %d, train_loss: %.4f, train_acc: %.4f, val_acc: %.4f, time: %.4f, lr=%s'
            % (epoch, train_loss / n, train_acc / n, val_acc,
               time.time() - start, str(trainer.learning_rate)))
    sw.close()
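evaluate_accuracy is referenced above but not defined in this snippet; a minimal sketch of what it presumably does (assumed, not the original helper):

def evaluate_accuracy(data_loader, net, ctx):
    correct, total = 0.0, 0
    for data, label in data_loader:
        data = data.as_in_context(ctx)
        label = label.astype('float32').as_in_context(ctx)
        pred = net(data).argmax(axis=1)
        correct += (pred == label).sum().asscalar()
        total += label.shape[0]
    return correct / total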
Example #9
batch_size = 64
learning_rate = .1
num_epochs = 500
randseed = 10
test_n = 'test15'
ctx = utils.try_all_gpus()
loss = gluon.loss.SoftmaxCrossEntropyLoss()
train_data, test_data, data_num = get_data(
    'C:/lagBear/S***N/finally_data/cnn_data.mat',
    randseed,
    batch_size=batch_size,
    train_augs=train_augs)

### resnet network ###
pre_net = models.resnet50_v1(pretrained=True, prefix='sperm_3class_')
pre_net.output  # inspect the pretrained classifier head (notebook-style; no effect in a script)
pre_net.features[0].weight.data()[0][0]  # peek at the first conv filter's weights

net = models.resnet50_v1(classes=3, prefix='sperm_3class_')
net.features = pre_net.features
net.output.initialize(init.Xavier())
net.hybridize()
sw = SummaryWriter(logdir='./logs/resnet50/randseed%s/%s' % (randseed, test_n),
                   flush_secs=5)

trainer = gluon.Trainer(net.collect_params(), 'sgd',
                        {'learning_rate': learning_rate})
#    utils.train(
#        train_data, test_data, net, loss, trainer, ctx, num_epochs)
#    #######
Example #10
FC_len = opts.dense_size[0]                      # Size Of Dense Layer
N = opts.N[0]
batch_size = opts.batch_size[0]

cpu_ctx = mx.cpu()                                      # CPU context
gpu_ctx = mx.gpu()                                      # GPU context
train_iter, val_iter, _ = get_iters(batch_size)         # Fetch Iterators


# Obtain feed-forward data
for b, batch in enumerate(train_iter):
    data = batch.data[0].as_in_context(cpu_ctx)
    break

# Transfer features from here
resnet50 = vision.resnet50_v1(pretrained=True, ctx=cpu_ctx)             # Load pre-trained model
resnet50.hybridize()

# Transfer pre-trained features to a new net
net = vision.resnet50_v1(classes=N, prefix='resnetv10_')
with net.name_scope():
    net.output = nn.Dense(FC_len, flatten=True)
    net.output.collect_params().initialize(mx.init.Xavier(rnd_type='gaussian', factor_type='in', magnitude=2))
net.features = resnet50.features
net.hybridize()
net(data)
net.export('UntrainedModels/Res50_pretrained_model')

# Load Resnet 50 Model
sym50 = mx.sym.load(sym_file)                                           # Load symbol file
feature_layer = 'FC1_output'                                            # Desired output layer
Example #11
def train(channel_input_dirs, hyperparameters, hosts, num_cpus, num_gpus,
          output_data_dir, model_dir, **kwargs):
    print(sys.version)
    print(sys.executable)
    print(sys.version_info)
    print(mx.__version__)
    '''
    # Load preprocessed data #
    Due to the memory limit of an m4 instance, we only use part of the dataset to train the model.
    '''
    trn_im, test_im, trn_output, test_output = load_data(
        os.path.join(channel_input_dirs['dataset']))
    '''
    # Additional Data Augmentation #
    '''
    # Mirror
    trn_im_mirror = trn_im[:, :, :, ::-1]
    trn_output_mirror = np.zeros(trn_output.shape)
    trn_output_mirror[:, 0] = trn_output[:, 0]
    trn_output_mirror[:, 1] = trn_output[:, 1] * -1
    trn_im = np.concatenate((trn_im, trn_im_mirror), axis=0)
    trn_output = np.concatenate((trn_output, trn_output_mirror), axis=0)

    # Color Shift
    for i0 in range(trn_im.shape[0]):
        im_temp = trn_im[i0, :, :, :]
        im_temp = np.transpose(
            im_temp, (1, 2, 0)) * 255  # CHW -> HWC, rescale to [0, 255]
        im_temp = shiftHSV(im_temp,
                           h_shift_lim=(-0.1, 0.1),
                           s_shift_lim=(-0.1, 0.1),
                           v_shift_lim=(-0.1, 0.1))
        im_temp = np.transpose(
            im_temp, (2, 0, 1)) / 255  # HWC -> CHW, rescale back to [0, 1]
        trn_im[i0, :, :, :] = im_temp
    '''
    # Head Pose Labeling #
    '''
    # angle class (3) i.e. headpose class (3 x 3)
    n_grid = 3
    # Thresholds are evenly spaced in sin(angle) and normalized by 90 degrees;
    # for n_grid = 3 this gives roughly [-0.2163, +0.2163].
    angles_thrshld = [
        np.arcsin(float(a) * 2 / n_grid - 1) / np.pi * 180 / 90
        for a in range(1, n_grid)
    ]

    # From (normalized) angle to angle class
    trn_tilt_cls = []
    trn_pan_cls = []
    for i0 in range(trn_output.shape[0]):
        trn_tilt_cls += [angles2Cat(angles_thrshld, trn_output[i0, 0])]
        trn_pan_cls += [angles2Cat(angles_thrshld, trn_output[i0, 1])]

    test_tilt_cls = []
    test_pan_cls = []
    for i0 in range(test_output.shape[0]):
        test_tilt_cls += [angles2Cat(angles_thrshld, test_output[i0, 0])]
        test_pan_cls += [angles2Cat(angles_thrshld, test_output[i0, 1])]

    np_trn_tilt_cls = np.asarray(trn_tilt_cls)
    np_test_tilt_cls = np.asarray(test_tilt_cls)
    np_trn_pan_cls = np.asarray(trn_pan_cls)
    np_test_pan_cls = np.asarray(test_pan_cls)

    # From angle class to head pose class
    np_trn_grid_cls = np_trn_pan_cls * n_grid + np_trn_tilt_cls
    np_test_grid_cls = np_test_pan_cls * n_grid + np_test_tilt_cls
    '''
    # Train the model #
    '''
    if len(hosts) == 1:
        kvstore = 'device' if num_gpus > 0 else 'local'
    else:
        kvstore = 'dist_device_sync'

    ctx = mx.gpu() if num_gpus > 0 else mx.cpu()

    batch_size = 64
    train_iter = mx.gluon.data.DataLoader(mx.gluon.data.ArrayDataset(
        (trn_im.astype(np.float32) - 0.5) * 2, np_trn_grid_cls),
                                          batch_size=batch_size,
                                          shuffle=True,
                                          last_batch='discard')
    test_iter = mx.gluon.data.DataLoader(mx.gluon.data.ArrayDataset(
        (test_im.astype(np.float32) - 0.5) * 2, np_test_grid_cls),
                                         batch_size=batch_size,
                                         shuffle=True,
                                         last_batch='discard')
    # Modify the number of output classes

    pretrained_net = resnet50_v1(pretrained=True, prefix='headpose_')
    net = resnet50_v1(classes=9, prefix='headpose_')
    net.collect_params().initialize()
    net.features = pretrained_net.features

    #net.output.initialize(init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2)) # MXNet 1.1.0
    net.initialize(
        init.Xavier(rnd_type='gaussian', factor_type="in",
                    magnitude=2))  # MXNet 0.12.1

    net.collect_params().reset_ctx(ctx)
    net.hybridize()

    loss = gluon.loss.SoftmaxCrossEntropyLoss()
    trainer = gluon.Trainer(
        net.collect_params(), 'adam',
        {'learning_rate': float(hyperparameters.get("learning_rate", 0.0005))})

    # Fine-tune the model
    logging.getLogger().setLevel(logging.DEBUG)

    num_epoch = 5

    print('training started')
    net, net_with_softmax = train_util(output_data_dir, net, train_iter,
                                       test_iter, loss, trainer, ctx,
                                       num_epoch, batch_size)
    print('training is done')

    ### Serial format vs. modular format
    # "net" is a serial (i.e. Gluon) network.
    # To save it in the modular format, "net" needs to be passed to the save function.

    return net
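A sketch of the modular-format save that the closing comment refers to (assumed, not part of the original function; a hybridized net needs at least one forward pass before export so shapes are known):

net.hybridize()
net(mx.nd.zeros((1, 3, 84, 84), ctx=ctx))  # placeholder input shape
net.export(os.path.join(model_dir, 'headpose-resnet50'))  # writes -symbol.json and -0000.params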
Example #12
# Imports needed by this snippet (Gluon_Model comes from the project's own
# code, which is not shown here):
import os
import time
from math import cos, fabs, radians, sin

import cv2
from mxnet.gluon.model_zoo import vision as models

def img_rotate(degree, img):
    height, width = img.shape[:2]
    heightNew = int(width * fabs(sin(radians(degree))) +
                    height * fabs(cos(radians(degree))))
    widthNew = int(height * fabs(sin(radians(degree))) +
                   width * fabs(cos(radians(degree))))
    matRotation = cv2.getRotationMatrix2D((width / 2, height / 2), degree, 1)
    matRotation[0, 2] += (widthNew - width) / 2
    matRotation[1, 2] += (heightNew - height) / 2
    imgRotation = cv2.warpAffine(img,
                                 matRotation, (widthNew, heightNew),
                                 borderValue=(255, 255, 255))
    return imgRotation


if __name__ == '__main__':
    img_path = '/data/datasets/mnist/train/0/0_1.png'
    model_path = 'models/resnet50/2_1.0.params'

    model1 = Gluon_Model(models.resnet50_v1(classes=4),
                         model_path,
                         gpu_id=0,
                         img_shape=[224, 224])

    for img in os.listdir('/data2/zj/pingan/t_xz/input1'):
        img_path = os.path.join('/data2/zj/pingan/t_xz/input1', img)
        start = time.time()
        result = model1.predict(img_path)