def get_model_resnet18_v2(classes_num, ctx):
    """Build a fine-tuning ResNet-18 v2: pretrained feature extractor plus a
    freshly Xavier-initialized output layer with ``classes_num`` classes,
    with all parameters moved to ``ctx``."""
    pretrained_net = models.resnet18_v2(pretrained=True)
    # print(pretrained_net)

    finetune_net = models.resnet18_v2(classes=classes_num)      # output layer sized for classes_num classes
    finetune_net.features = pretrained_net.features             # reuse the ImageNet-pretrained features
    finetune_net.output.initialize(init.Xavier(), ctx=ctx)      # initialize only the new output layer
    finetune_net.collect_params().reset_ctx(ctx)                # move every parameter to CPU/GPU context
    finetune_net.hybridize()                                    # switch to symbolic execution for speed
    return finetune_net
Exemplo n.º 2
0
def test_tensorrt_resnet18_feature_vect():
    """Compare ResNet-18 v2 outputs from the plain MXNet executor against the
    TensorRT backend in FP16 and FP32 modes (requires a GPU)."""
    print("downloading sample input")
    input_data = get_image(url)
    gluon_resnet18 = vision.resnet18_v2(pretrained=True)
    gluon_resnet18.hybridize()
    # One forward pass so the hybridized graph can be exported as a symbol.
    gluon_resnet18.forward(input_data)
    gluon_resnet18.export(model_file_name)
    sym, arg_params, aux_params = mx.model.load_checkpoint(model_file_name, 0)

    # Baseline: run the exported symbol without TensorRT.
    executor = sym.simple_bind(ctx=mx.gpu(), data=batch_shape,
                               grad_req='null', force_rebind=True)
    executor.copy_params_from(arg_params, aux_params)
    y = executor.forward(is_train=False, data=input_data)
    trt_sym = sym.get_backend_symbol('TensorRT')
    mx.contrib.tensorrt.init_tensorrt_params(trt_sym, arg_params, aux_params)
    # Remember the global FP16 flag so it can be restored afterwards.
    original_precision_value = mx.contrib.tensorrt.get_use_fp16()
    try:
        # TensorRT FP16 pass: compared with loose tolerances below.
        mx.contrib.tensorrt.set_use_fp16(True)
        executor = trt_sym.simple_bind(ctx=mx.gpu(), data=batch_shape,
                                       grad_req='null', force_rebind=True)
        executor.copy_params_from(arg_params, aux_params)
        y_trt = executor.forward(is_train=False, data=input_data)
        # TensorRT FP32 pass: compared with tight tolerances below.
        mx.contrib.tensorrt.set_use_fp16(False)
        executor = trt_sym.simple_bind(ctx=mx.gpu(), data=batch_shape,
                                       grad_req='null', force_rebind=True)
        executor.copy_params_from(arg_params, aux_params)
        y_trt_fp32 = executor.forward(is_train=False, data=input_data)
        no_trt_output = y[0].asnumpy()[0]
        trt_output = y_trt[0].asnumpy()[0]
        trt_fp32_output = y_trt_fp32[0].asnumpy()[0]
        assert_almost_equal(no_trt_output, trt_output, 1e-1, 1e-2)
        assert_almost_equal(no_trt_output, trt_fp32_output, 1e-4, 1e-4)
    finally:
        # Restore the process-wide FP16 flag regardless of test outcome.
        mx.contrib.tensorrt.set_use_fp16(original_precision_value)
Exemplo n.º 3
0
def test_tensorrt_resnet18_feature_vect():
    """Compare ResNet-18 v2 outputs from the plain MXNet executor against the
    TensorRT backend in FP16 and FP32 modes (requires a GPU).

    Unlike the older variant of this test, this one rebinds ``arg_params`` /
    ``aux_params`` to the converted parameters returned by
    ``init_tensorrt_params`` before copying them into the TRT executors.
    """
    print("downloading sample input")
    input_data = get_image(url)
    gluon_resnet18 = vision.resnet18_v2(pretrained=True)
    gluon_resnet18.hybridize()
    # One forward pass so the hybridized graph can be exported as a symbol.
    gluon_resnet18.forward(input_data)
    gluon_resnet18.export(model_file_name)
    sym, arg_params, aux_params = mx.model.load_checkpoint(model_file_name, 0)

    # Baseline: run the exported symbol without TensorRT.
    executor = sym.simple_bind(ctx=mx.gpu(), data=batch_shape,
                               grad_req='null', force_rebind=True)
    executor.copy_params_from(arg_params, aux_params)
    y = executor.forward(is_train=False, data=input_data)
    trt_sym = sym.get_backend_symbol('TensorRT')
    arg_params, aux_params = mx.contrib.tensorrt.init_tensorrt_params(trt_sym, arg_params, aux_params)
    # Remember the global FP16 flag so it can be restored afterwards.
    original_precision_value = mx.contrib.tensorrt.get_use_fp16()
    try:
        # TensorRT FP16 pass: compared with loose tolerances below.
        mx.contrib.tensorrt.set_use_fp16(True)
        executor = trt_sym.simple_bind(ctx=mx.gpu(), data=batch_shape,
                                       grad_req='null', force_rebind=True)
        executor.copy_params_from(arg_params, aux_params)
        y_trt = executor.forward(is_train=False, data=input_data)
        # TensorRT FP32 pass: compared with tight tolerances below.
        mx.contrib.tensorrt.set_use_fp16(False)
        executor = trt_sym.simple_bind(ctx=mx.gpu(), data=batch_shape,
                                       grad_req='null', force_rebind=True)
        executor.copy_params_from(arg_params, aux_params)
        y_trt_fp32 = executor.forward(is_train=False, data=input_data)
        no_trt_output = y[0].asnumpy()[0]
        trt_output = y_trt[0].asnumpy()[0]
        trt_fp32_output = y_trt_fp32[0].asnumpy()[0]
        assert_almost_equal(no_trt_output, trt_output, 1e-1, 1e-2)
        assert_almost_equal(no_trt_output, trt_fp32_output, 1e-4, 1e-4)
    finally:
        # Restore the process-wide FP16 flag regardless of test outcome.
        mx.contrib.tensorrt.set_use_fp16(original_precision_value)
def get_net(model_name, context):
    """Construct a network plus its input shape, label shape, and loss axis.

    ``'FCN'`` builds a fully-convolutional segmentation head (21 classes,
    VOC-style) on top of a pretrained ResNet-18 v2 backbone; any other name
    is fetched from the gluon model zoo as a 10-class classifier.

    Returns (net, input_shape, label_shape, loss_axis).
    """
    if model_name != 'FCN':
        # Plain model-zoo classifier, Xavier-initialized from scratch.
        net = vision.get_model(model_name, classes=10)
        net.initialize(mx.init.Xavier(), ctx=context)
        return net, (1, 1, 224, 224), 1, -1

    num_classes = 21
    backbone = vision.resnet18_v2(pretrained=True, ctx=context)
    net = gluon.nn.HybridSequential()
    for block in backbone.features[:-2]:
        net.add(block)
    # 1x1 conv maps features to per-class score maps; the transposed conv
    # upsamples 32x back to input resolution.
    net.add(gluon.nn.Conv2D(num_classes, kernel_size=1))
    net.add(gluon.nn.Conv2DTranspose(num_classes,
                                     kernel_size=64,
                                     padding=16,
                                     strides=32))
    # Fixed bilinear-interpolation weights for the upsampler.
    net[-1].initialize(init.Constant(
        bilinear_kernel(num_classes, num_classes, 64)),
                       ctx=context)
    net[-2].initialize(init=init.Xavier(), ctx=context)
    return net, (1, 3, 320, 480), (1, 320, 480), 1
def train_net(k):
    """Train ``k`` fine-tuned ResNet-18 v2 binary classifiers via k-fold
    cross validation over the module-level ``data``/``label`` arrays.

    Each fold's trained net is saved under ``path_net/<fold index>``.
    Returns (training-accuracy history, test-accuracy history), one entry
    per fold.
    """
    his_testacc=[]
    his_trainacc=[]
    for n in range(k):
        # Fresh fine-tune net per fold: pretrained features, new 2-class head.
        pretrained_net = models.resnet18_v2(pretrained=True)
        net = models.resnet18_v2(classes=2)
        net.features = pretrained_net.features
        net.output.initialize(init.Xavier())
        net.collect_params().reset_ctx(ctx)
#        resnet = utils.ResNet(2,1) 
#        net = utils.Perceptron(2)
#        net.initialize(ctx=ctx)

#use k cross validation
        # Fold n: hold out samples [n*1000/k, (n+1)*1000/k) for testing;
        # assumes the full dataset holds exactly 1000 samples.
        train_index = list(range(0,int(n*1000/k)))+list(range(int((n+1)*1000/k),1000))
        train_data_array = data[train_index]
        label_train_array = label[train_index]
        test_data_array = data[int(n*1000/k):int((n+1)*1000/k)]
        label_test_array = label[int(n*1000/k):int((n+1)*1000/k)]
#use data augmentation 
        # Augmentations run on HWC images scaled to [0,255], so convert from
        # CHW [0,1] before and back after the aug calls.
        train_data_array_ori = nd.transpose(train_data_array, (0,2,3,1))*255
        train_data_array = nd.stack(*[utils.apply_aug_list(d,utils.test_augs) for d in train_data_array_ori])
        train_data_array_aug = nd.stack(*[utils.apply_aug_list(d,utils.train_augs) for d in train_data_array_ori])
        # Concatenate the plain and augmented copies, doubling the train set.
        train_data_array = nd.stack(train_data_array,train_data_array_aug).reshape([int(2*(1000-1000/k)),utils.size,utils.size,3])
        train_data_array = nd.transpose(train_data_array, (0,3,1,2))/255
        label_train_array = nd.stack(label_train_array,label_train_array).reshape([int(2*(1000-1000/k))])


            
        # Test set gets only the deterministic test-time augmentations.
        test_data_array = nd.transpose(test_data_array, (0,2,3,1))*255
        test_data_array = nd.stack(*[utils.apply_aug_list(d,utils.test_augs) for d in test_data_array])
        test_data_array = nd.transpose(test_data_array, (0,3,1,2))/255
           

#load trainging data and test data  
        
        train_data = mx.io.NDArrayIter(data = train_data_array,label=label_train_array,batch_size=batch_size,shuffle=True)
        test_data = mx.io.NDArrayIter(data = test_data_array,label=label_test_array,batch_size=batch_size,shuffle=True)
        trainer = gluon.Trainer(net.collect_params(),
                                'sgd', {'learning_rate':.008 , 'wd':.3 })
        train_acc , acc, net_tem = utils.train(train_data, test_data,batch_size ,net, loss,
                    trainer, ctx, num_epochs=epochs)
        his_testacc.append(acc)
        his_trainacc.append(train_acc)
        net_tem.save_params(os.path.join(path_net,str(n)))#save net
        
    return his_trainacc ,his_testacc
Exemplo n.º 6
0
def build_dictionary():
    """Extract ResNet-18 v2 feature vectors for every product JPEG in the
    'hackthonns' blob container and pickle the features and SKUs to disk."""
    #model = keras.applications.vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3), pooling='avg')

    # Prefer GPU when one is visible, else fall back to CPU.
    ctx = mx.gpu() if len(mx.test_utils.list_gpus()) else mx.cpu()
    net = vision.resnet18_v2(pretrained=True, ctx=ctx).features

    net.hybridize()
    # One dummy forward pass so the hybridized graph can be exported.
    net(mx.nd.ones((1, 3, 224, 224), ctx=ctx))
    net.export(join('mms', 'visualsearch'))

    # SECURITY NOTE(review): the storage account key is hard-coded below; it
    # should be rotated and loaded from configuration / secret storage.
    block_blob_service = BlockBlobService(
        account_name='hackathonnetshoes',
        account_key=
        '1syRJGXjNT8s3eZR1lCLTuo7OCDm6LfMDAMNh1ej8Krs8mpwy3EeheTq1bnXCehOTIo0Glffu5MKZhskULys6A=='
    )
    generator = block_blob_service.list_blobs('hackthonns')

    idx = 0

    with open('/../../grupo2storage2/features/dnn/features_mx.pickle',
              'wb') as features_handle:
        with open('/../../grupo2storage2/features/dnn/skus_mx.pickle',
                  'wb') as sku_handle:

            for blob in generator:

                # Only product JPEGs are of interest.
                if (not blob.name.endswith('.jpg')):
                    continue

                if ('produtos' not in blob.name):
                    continue

                print('[{0}] processing {1}'.format(
                    idx, os.path.basename(blob.name)))
                blob = block_blob_service.get_blob_to_bytes(
                    'hackthonns', blob.name)

                image_file_in_mem = io.BytesIO(blob.content)
                img = load_img(image_file_in_mem)
                img = img_to_array(img)

                # Preprocess and add a batch dimension on the chosen context.
                img = transform(nd.array(img))
                feature = img.expand_dims(axis=0).as_in_context(ctx)

                print(feature)

                # SKU is the blob's file name without its extension.
                sku = os.path.basename(blob.name).split('.')[0]

                pickle.dump(feature,
                            features_handle,
                            protocol=pickle.HIGHEST_PROTOCOL)
                pickle.dump(sku, sku_handle, protocol=pickle.HIGHEST_PROTOCOL)

                idx += 1
Exemplo n.º 7
0
    def __init__(self, features_file, skus_file):
        """Set up the visual-search model: ResNet-18 v2 feature extractor on
        GPU when available, then load the precomputed feature/SKU files."""
        #self._model = keras.applications.vgg16.VGG16(weights='imagenet',
        #                                             include_top=False,
        #                                             input_shape=(224, 224, 3),
        #                                             pooling='avg')
        #self.graph = tf.get_default_graph()

        # Prefer GPU when one is visible, else fall back to CPU.
        self.ctx = mx.gpu() if len(mx.test_utils.list_gpus()) else mx.cpu()
        self.net = vision.resnet18_v2(pretrained=True, ctx=self.ctx).features

        print('finishing initialization')
        #      self._preprocess = keras.applications.vgg16.preprocess_input
        self._load_files(features_file, skus_file)
Exemplo n.º 8
0
    def __init__(self, is_color, img_sample_sz=[], size_mode='same'):
        """CNN feature extractor backed by a pretrained ResNet-18 v2 on CPU."""
        # NOTE(review): mutable default `img_sample_sz=[]` is shared across
        # calls — safe only if it is never mutated; verify _set_size.
        super().__init__(is_color)
        # Enable this feature per the color/gray config and the image type.
        use_for_color = settings.cnn_params.get('useForColor', True)
        use_for_gray = settings.cnn_params.get('useForGray', True)
        self.use_feature = (use_for_color and is_color) or (use_for_gray
                                                            and not is_color)

        self.net = vision.resnet18_v2(pretrained=True, ctx=mx.cpu(0))
        self.compressed_dim = settings.cnn_params['compressed_dim']
        # Presumably per-layer strides and channel counts of the two backbone
        # layers used as features — TODO confirm against the extractor code.
        self.cell_size = np.array([4, 16])
        self.penalty = np.zeros((2, 1))
        self.nDim = np.array([64, 256])
        self.img_sample_sz = self._set_size(img_sample_sz, size_mode)
        # Spatial size of each feature map for the chosen sample size.
        self.data_sz = np.ceil(self.img_sample_sz / self.cell_size[:, None])
Exemplo n.º 9
0
def set_p():
    """Lazily initialize the global HNSW index, embedding net, and dataset.

    No-op when the index ``p`` has already been loaded. Fixes the original
    Python-2 ``print`` statements (a SyntaxError under Python 3, which the
    rest of this codebase targets) and closes the pickle file handle.
    """
    global p
    global net
    global datasets
    if not p:
        p = hnswlib.Index(space='l2', dim=EMBEDDING_SIZE)
        print(config_dic)
        idx_loc = config_dic.get("idx_loc")
        print(idx_loc)
        p.load_index(idx_loc)
        # Query-time ef: recall/speed trade-off for nearest-neighbor search.
        p.set_ef(300)
        net = vision.resnet18_v2(pretrained=True, ctx=ctx)
        net = net.features  # keep only the feature extractor
        pkl_loc = config_dic.get("idx_dir") + "dataset.pkl"
        # Context manager closes the file (the original leaked the handle).
        with open(pkl_loc, "rb") as pkl_file:
            datasets = pickle.load(pkl_file)
Exemplo n.º 10
0
    def __init__(self, class_num, **kwargs):
        """Build an FCN head on a pretrained ResNet-18 v2 backbone (GPU).

        The last two backbone stages are dropped, then a 1x1 conv producing
        ``class_num`` score maps and a 32x-upsampling transposed conv are
        appended.
        """
        super(FCNet, self).__init__(**kwargs)
        pretrained_net = modelv.resnet18_v2(ctx=mx.gpu(), pretrained=True)

        self.net = gluon.nn.HybridSequential()
        for layer in pretrained_net.features[:-2]:
            with self.net.name_scope():     # add name_scope()
                self.net.add(layer)

        with self.net.name_scope():
            self.net.add(gluon.nn.Conv2D(channels=class_num, kernel_size=1),
                         gluon.nn.Conv2DTranspose(channels=class_num, kernel_size=64, padding=16, strides=32))

        # Xavier for the score conv; bilinear init for the upsampler.
        self.net[-2].initialize(init=mx.init.Xavier(), ctx=mx.gpu())
        self.net[-1].initialize(init=mx.init.Bilinear(), ctx=mx.gpu())
Exemplo n.º 11
0
def getNetwork(nb_class):
    """Return an untrained ResNet-18 v2 with ``nb_class`` output classes."""
    return vision.resnet18_v2(pretrained=False, classes=nb_class)
Exemplo n.º 12
0
    net = nn.Sequential()
    with net.name_scope():
        #        net.add(nn.Conv2D(1,3,padding=1))
        net.add(nn.Flatten())
        net.add(nn.Dropout(.5))
        net.add(nn.Dense(256))
        net.add(nn.BatchNorm())
        net.add(nn.Activation('relu'))
        net.add(nn.Dense(num_class))
    return net


#===============define a pretrained network======
from mxnet.gluon.model_zoo import vision as models

# ImageNet-pretrained backbone whose feature extractor is reused below.
pretrained_net = models.resnet18_v2(pretrained=True)
from mxnet import init

# Fine-tune net: pretrained features + a fresh 2-class output layer.
finetune_net = models.resnet18_v2(classes=2)
finetune_net.features = pretrained_net.features
finetune_net.output.initialize(init.Xavier())


class DHash(object):
    @staticmethod
    def calculate_hash(image):
        """
        计算图片的dHash值
        :param image: PIL.Image
        :return: dHash值,string类型
        """
Exemplo n.º 13
0
    pad=4,
    num_parts=store.num_workers,
    part_index=store.rank)

# Validation iterator over the CIFAR-10 record file: no random crop/mirror.
test_data = mx.io.ImageRecordIter(
    path_imgrec="./cifar/cifar10_val.rec",
    # mean_img    = "data/cifar/mean.bin",
    resize=-1,
    rand_crop=False,
    rand_mirror=False,
    pad=4,
    data_shape=(3, 32, 32),
    batch_size=batch_size)

# Use ResNet from model zoo
net = vision.resnet18_v2()

# Initialize the parameters with Xavier initializer
net.collect_params().initialize(mx.init.Xavier(magnitude=2), ctx=ctx)

# Use Adam optimizer. Ask trainer to use the distributer kv store.
# Gradients are rescaled by the global batch size across all workers.
trainer = gluon.Trainer(net.collect_params(),
                        'adam', {
                            'learning_rate': learning_rate,
                            'rescale_grad': 1.0 /
                            (batch_size * store.num_workers),
                            'clip_gradient': 10
                        },
                        kvstore=store)

Exemplo n.º 14
0
import mxnet as mx
import time
import gluoncv

from mxnet import nd, autograd
from mxnet import gluon
from mxnet.gluon import nn

# All model-zoo nets below expect NCHW 224x224 RGB input.
inputShape = (1, 3, 224, 224)

from mxnet.gluon.model_zoo import vision

# Instantiate a sampling of model-zoo architectures (untrained).
alexnet = vision.alexnet()
inception = vision.inception_v3()

resnet18v1 = vision.resnet18_v1()
resnet18v2 = vision.resnet18_v2()
squeezenet = vision.squeezenet1_0()
densenet = vision.densenet121()
mobilenet = vision.mobilenet0_5()

############### graph ###############
import gluoncv
gluoncv.utils.viz.plot_network(resnet18v1, shape=inputShape)
#####################################
Exemplo n.º 15
0
def getNetwork(nb_class):
    """Build a fresh (non-pretrained) ResNet-18 v2 classifier with
    ``nb_class`` output classes."""
    network = vision.resnet18_v2(pretrained=False, classes=nb_class)
    return network
Exemplo n.º 16
0
import mxnet as mx
from mxnet.gluon.model_zoo import vision
import time
import os

# NCHW input shape used for export and executor binding.
batch_shape = (1, 3, 224, 224)
resnet18 = vision.resnet18_v2(pretrained=True)
resnet18.hybridize()
# One dummy forward pass so the hybridized graph can be exported.
resnet18.forward(mx.nd.zeros(batch_shape))
resnet18.export('resnet18_v2')
sym, arg_params, aux_params = mx.model.load_checkpoint('resnet18_v2', 0)

# Create sample input
input = mx.nd.zeros(batch_shape)  # NOTE(review): shadows the `input` builtin

# Execute with MXNet
os.environ['MXNET_USE_TENSORRT'] = '0'
executor = sym.simple_bind(ctx=mx.gpu(0),
                           data=batch_shape,
                           grad_req='null',
                           force_rebind=True)
executor.copy_params_from(arg_params, aux_params)

# Warmup
print('Warming up MXNet')
for i in range(0, 10):
    y_gen = executor.forward(is_train=False, data=input)
    y_gen[0].wait_to_read()  # block until the async forward completes

# Timing
print('Starting MXNet timed run')
Exemplo n.º 17
0
voc_test = VOCSegDataset(False, input_shape)

batch_size = 5
# Drop the last partial batch so every batch has a fixed shape.
train_data = gluon.data.DataLoader(voc_train,
                                   batch_size,
                                   shuffle=True,
                                   last_batch='discard')
test_data = gluon.data.DataLoader(voc_test, batch_size, last_batch='discard')

#for data, label in train_data:
#    print(data.shape)
#    print(label.shape)
#    break

from mxnet.gluon.model_zoo import vision as models
# ImageNet-pretrained backbone for the FCN assembled below.
pretrained_net = models.resnet18_v2(pretrained=True)

from mxnet.gluon import nn
#conv = nn.Conv2D(10, kernel_size=4, padding=1, strides=2)
#conv_trans = nn.Conv2DTranspose(3, kernel_size=4, padding=1, strides=2)

#conv.initialize()
#conv_trans.initialize()
#x = nd.random.uniform(shape=(1,3,64,64))
#y = conv(x)
#print('Input:', x.shape)
#print('After conv:', y.shape)
#print('After transposed conv', conv_trans(y).shape)

net = nn.HybridSequential()
# NOTE(review): the loop body below never uses `layer` — this looks like two
# unrelated snippets fused together by the scraper; the accuracy-averaging
# code appears to belong to a separate k-fold evaluation script. Confirm
# against the original sources before relying on this section.
for layer in pretrained_net.features[:-2]:
    average_acc += his_testacc[i][-1]
plt.xlabel('epoch')
plt.ylabel('test_acc')
plt.legend()
plt.show()
print('The average training accuracy is : %.4f , the average test accuracy is : %.4f '% (np.mean(train_acc) , average_acc/k_cross))




max_acc = 0
max_k = 0
# compare the acc of the 10 net on the entire data , get the best classifier
for i in range(k_cross):
#    net = utils.Perceptron(2)
    net = models.resnet18_v2(classes=2)
    test_data_array = data
    # NOTE(review): the augmented `image` below is never written back into
    # test_data_array, so this inner loop has no visible effect — confirm
    # whether the augmentation was meant to be applied.
    for image in test_data_array:
        image_tem = nd.transpose(image,axes=(1,2,0))*255
        image = utils.apply_aug_list(image_tem,utils.test_augs)
        image = nd.transpose(image,(2,0,1))/255
    test_data = mx.io.NDArrayIter(data = test_data_array,label=label,batch_size=batch_size,shuffle=True)
    net.load_params(os.path.join(path_net,str(i)) , ctx = ctx)
    # Keep the fold whose saved net scores best on the full dataset.
    if utils.evaluate_accuracy(test_data,net,ctx) > max_acc:
        max_acc = utils.evaluate_accuracy(test_data,net,ctx)
        max_k = i
print('The best net is net%i , accuracy of the entire data is :%.4f '% (max_k , max_acc))
os.rename(os.path.join(path_net,str(max_k)),os.path.join(path_net,'bestnet_pretrained'))
        

Exemplo n.º 19
0
def resnet18mxnetload():
    """Load a pretrained ResNet-18 v2 and hybridize it for symbolic
    execution."""
    model = vision.resnet18_v2(pretrained=True)
    model.hybridize()
    return model
Exemplo n.º 20
0
##################
# Hyperparameter #
#----------------#
ctx = mx.cpu()
lr = 0.05
epochs = 10
momentum = 0.9
batch_size = 64
#----------------#
# Hyperparameter #
##################

################## model
from mxnet.gluon.model_zoo import vision
# Untrained ResNet-18 v2 with a 10-class output head.
net = vision.resnet18_v2(classes=10, pretrained=False, ctx=ctx)
# net = vision.resnet34_v2(classes=10, pretrained=False, ctx=ctx)
# net = vision.resnet50_v2(classes=10, pretrained=False, ctx=ctx)
# net = vision.resnet101_v2(classes=10, pretrained=False, ctx=ctx)
# net = vision.resnet152_v2(classes=10, pretrained=False, ctx=ctx)

################## graph
import gluoncv
inputShape = (1, 3, 224, 224)
gluoncv.utils.viz.plot_network(net, shape=inputShape)


##### 전처리 ##############################################
def transformer(data, label):
    data = mx.image.imresize(data, 224, 224)
    data = mx.nd.transpose(data, (2, 0, 1))
Exemplo n.º 21
0
# Copy every DEFAULT-section option into a plain dict (raw: no interpolation).
for option in config['DEFAULT']:
    config_dic[option] = config.get('DEFAULT', option, raw=True)


images_path = "images/"
idx_loc = config_dic.get("idx_loc")
idx_dir = config_dic.get("idx_dir")
BATCH_SIZE = 256
EMBEDDING_SIZE = 512  # length of the ResNet-18 v2 feature vector
SIZE = (224, 224)
# ImageNet channel statistics used for input normalization.
MEAN_IMAGE= mx.nd.array([0.485, 0.456, 0.406])
STD_IMAGE = mx.nd.array([0.229, 0.224, 0.225])

ctx = mx.cpu()
net = vision.resnet18_v2(pretrained=True, ctx=ctx)
net = net.features  # keep only the feature extractor (drop the classifier)

def transform(image, label):
    """Preprocess one image for ResNet feature extraction.

    Resizes the short side to ``SIZE[0]``, center-crops to ``SIZE``, scales
    pixels to [0, 1], normalizes with the ImageNet mean/std, and moves
    channels first (HWC -> CHW). The label is passed through untouched.
    """
    img = mx.image.resize_short(image, SIZE[0]).astype('float32')
    img, _ = mx.image.center_crop(img, SIZE)
    img = img / 255.
    img = mx.image.color_normalize(img, mean=MEAN_IMAGE, std=STD_IMAGE)
    return nd.transpose(img, (2, 0, 1)), label

# Back the dataset with a fresh empty directory; images are added elsewhere.
empty_folder = tempfile.mkdtemp()
# Create an empty image Folder Data Set
dataset = ImageFolderDataset(root=empty_folder, transform=transform)
Exemplo n.º 22
0
def main():
    """Train and evaluate an FCN segmentation net built from a ResNet-18 v2
    backbone on the VOC segmentation dataset, then visualize predictions."""
    input_shape = (320, 480)
    voc_train = VOCSegDataset(True, input_shape)
    voc_test = VOCSegDataset(False, input_shape)

    batch_size = 4
    # Drop partial batches so every batch has a fixed shape.
    train_data = gluon.data.DataLoader(voc_train,
                                       batch_size,
                                       shuffle=True,
                                       last_batch='discard')
    test_data = gluon.data.DataLoader(voc_test,
                                      batch_size,
                                      last_batch='discard')

    # ctx = common.ChoiceGpu()
    ctx = common.ChoiceCpu()

    pretrained_net1 = models.resnet18_v2(pretrained=True)
    # pretrained_net2 = GetNet(10, ctx=ctx)
    # pretrained_net3 = models.vgg13(pretrained=True)
    # pretrained_net2.load_parameters("train.params")

    # Backbone: all pretrained feature blocks except the last two stages.
    net = nn.HybridSequential()
    for layer in pretrained_net1.features[:-2]:
        # for layer in pretrained_net2.net[:-2]:
        net.add(layer)

    num_classes = len(classes)
    # Head: 1x1 conv to per-class scores, then a 32x transposed-conv upsampler.
    with net.name_scope():
        net.add(
            nn.Conv2D(num_classes, kernel_size=1),
            # nn.Conv2DTranspose(num_classes, kernel_size=4, padding=1, strides=2),
            # nn.Conv2DTranspose(num_classes, kernel_size=4, padding=1, strides=2),
            # nn.Conv2DTranspose(num_classes, kernel_size=4, padding=1, strides=2),
            # nn.Conv2DTranspose(num_classes, kernel_size=4, padding=1, strides=2),
            # nn.Conv2DTranspose(num_classes, kernel_size=4, padding=1, strides=2)
            # nn.Conv2DTranspose(num_classes, kernel_size=32, padding=8, strides=16)
            nn.Conv2DTranspose(channels=num_classes,
                               kernel_size=64,
                               padding=16,
                               strides=32),
        )

    # Zero-init the upsampler first; its real (bilinear) weights are set
    # below after a forward pass has fixed the parameter shapes.
    conv_trans1 = net[-1]
    conv_trans1.initialize(init=init.Zero())
    # conv_trans2 = net[-2]
    # conv_trans2.initialize(init=init.Zero())
    # conv_trans3 = net[-3]
    # conv_trans3.initialize(init=init.Zero())
    # conv_trans4 = net[-4]
    # conv_trans4.initialize(init=init.Zero())
    # conv_trans5 = net[-5]
    # conv_trans5.initialize(init=init.Zero())
    #
    net[-2].initialize(init=init.Xavier())
    # Dummy forward pass triggers deferred shape inference.
    x = nd.zeros((batch_size, 3, *input_shape))
    net(x)

    # Replace the zeroed upsampler weights with a fixed bilinear kernel.
    shape = conv_trans1.weight.data().shape
    conv_trans1.weight.set_data(bilinear_kernel(*shape[0:3]))
    conv_trans0 = net[1]
    print(conv_trans0.weight.data())
    # print(conv_trans0.weight.data())
    #
    # shape = conv_trans2.weight.data().shape
    # conv_trans2.weight.set_data(bilinear_kernel(*shape[0:3]))
    #
    # shape = conv_trans3.weight.data().shape
    # conv_trans3.weight.set_data(bilinear_kernel(*shape[0:3]))
    #
    # shape = conv_trans4.weight.data().shape
    # conv_trans4.weight.set_data(bilinear_kernel(*shape[0:3]))
    #
    # shape = conv_trans5.weight.data().shape
    # conv_trans5.weight.set_data(bilinear_kernel(*shape[0:3]))

    # Move everything to GPU for training and load the saved checkpoint.
    ctx = common.ChoiceGpu()
    net.collect_params().reset_ctx(ctx)

    net.load_parameters("train.params", ctx=ctx)

    # Per-pixel softmax cross-entropy over the class axis.
    loss = gluon.loss.SoftmaxCrossEntropyLoss(axis=1)

    if True:
        trainer = gluon.Trainer(net.collect_params(), 'sgd', {
            'learning_rate': 1 / batch_size,
            'wd': 1e-3
        })

        common.Train(train_data, 10, net, loss, trainer, batch_size, ctx)

    # Visualize predictions on the first n test images.
    n = 6
    imgs = []
    data, label = ReadImage(train=False)

    for i in range(n):
        x = data[i]
        pred = label2image(predict(x, net, ctx))
        imgs += [x, pred, label[i]]

    show_images(imgs, nrows=n, ncols=3, figsize=(6, 10))
    print("ok")
Exemplo n.º 23
0
    isprs_test = ISPRSDataset(is_train=False, transform=None)

    # generate the dataloader
    trainloader = gluon.data.DataLoader(isprs_train,
                                        batch_size=args.batch_size,
                                        shuffle=True,
                                        last_batch='discard')
    testloader = gluon.data.DataLoader(isprs_test,
                                       batch_size=1,
                                       last_batch='discard')

    print(len(trainloader))
    print(len(testloader))

    # set the model info
    pretrained_model = models.resnet18_v2(pretrained=True, ctx=ctx)
    model = nn.HybridSequential()
    # build the layer
    for layer in pretrained_model.features[:-2]:
        model.add(layer)

    with model.name_scope():
        model.add(
            nn.Conv2D(args.num_classes, kernel_size=1),
            nn.Conv2DTranspose(args.num_classes,
                               kernel_size=64,
                               padding=16,
                               strides=32))

    model[-2].initialize(init=init.Xavier(), ctx=ctx)
    model[-1].initialize(init=init.Constant(