Code Example #1
def test_contrib_sign_ste():
    in_data = nd.uniform(-10, 10, shape=30)  # 10 and 30 are arbitrary numbers
    w_init = float(nd.uniform(-10, 10, shape=1).asscalar())
    check_ste(net_type_str="SignSTENET", w_init=w_init, hybridize=True, in_data=in_data)
    check_ste(net_type_str="SignSTENET", w_init=w_init, hybridize=False, in_data=in_data)

    # Test 0
    in_data = nd.array([0]*30)  # 30 is an arbitrary length
    w_init = 0.
    check_ste(net_type_str="SignSTENET", w_init=w_init, hybridize=True, in_data=in_data)
    check_ste(net_type_str="SignSTENET", w_init=w_init, hybridize=False, in_data=in_data)
Code Example #2
def test_Conv2D(use_bias, groups):
    x = nd.uniform(shape=(2, 2, 5, 5))

    my_conv = MyConv(10,
                     3,
                     1,
                     1,
                     in_channels=2,
                     groups=groups,
                     use_bias=use_bias)
    my_conv.initialize()

    ref_conv = Conv2D(10,
                      3,
                      1,
                      1,
                      in_channels=2,
                      groups=groups,
                      use_bias=use_bias,
                      bias_initializer=init.Constant(my_conv.bias.data())
                      if use_bias else 'zero',
                      weight_initializer=init.Constant(my_conv.weight.data()))
    ref_conv.initialize()

    return (my_conv(x) - ref_conv(x)).abs().sum().asscalar()
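
Since ref_conv is initialized with my_conv's own weight and bias, a correct MyConv should make this function return a value at (or numerically near) zero. A minimal hedged check one could add; the tolerance is an assumption, not from the source:

assert test_Conv2D(use_bias=True, groups=1) < 1e-5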
Code Example #3
File: mndarray.py Project: mumupy/mmdeeplearning
def operator():
    """
    mxnet的ndarray加减乘除
    :return:
    """
    a = nd.arange(12).reshape(3, 4)
    b = nd.uniform(0, 10, shape=(3, 4))
    logger.info("ndarray a:")
    logger.info(a)
    logger.info("ndarray b:")
    logger.info(b)

    logger.info("ndarray a.exp():")
    logger.info(a.exp())

    logger.info("ndarray a.sum():")
    logger.info(a.sum())

    logger.info("ndarray a.max():")
    logger.info(a.max())

    logger.info("ndarray a.min():")
    logger.info(a.min())

    logger.info("ndarray a.abs():")
    logger.info(a.abs())

    logger.info("ndarray a.norm().asscalar():")
    logger.info(a.norm().asscalar())

    nd_add = a + b
    logger.info("ndarray a+b:")
    logger.info(nd_add)

    nd_sub = a - b
    logger.info("ndarray a-b:")
    logger.info(nd_sub)

    nd_mul = a * b
    logger.info("ndarray a*b:")
    logger.info(nd_mul)

    nd_dev = a / b
    logger.info("ndarray a/b:")
    logger.info(nd_dev)

    nd_dot = nd.dot(a, a.T)
    logger.info("ndarray nd.dot(a,a.T):")
    logger.info(nd_dot)

    # logger.info("ndarray nd.batch_dot(a,a.T):")
    # logger.info(
    #     nd.batch_dot(nd.arange(24).reshape(2, 3, 4), nd.arange(24).reshape(2, 3, 4), nd.arange(24).reshape(2, 3, 4)))

    logger.info("ndarray nd.concat(a,b,dim=0)")
    logger.info(nd.concat(a, b, dim=0))
    logger.info("ndarray nd.concat(a,b,dim=1)")
    logger.info(nd.concat(a, b, dim=1))
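
With a and b both of shape (3, 4), the two concats stack along different axes; a quick self-contained check of the resulting shapes:

from mxnet import nd
a = nd.arange(12).reshape(3, 4)
b = nd.uniform(0, 10, shape=(3, 4))
print(nd.concat(a, b, dim=0).shape)  # (6, 4): rows stacked
print(nd.concat(a, b, dim=1).shape)  # (3, 8): columns stacked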
Code Example #4
def test_contrib_round_ste():
    # Test with random data
    in_data = nd.uniform(-10, 10, shape=30)  # 10 and 30 are arbitrary numbers
    w_init = float(nd.uniform(-10, 10, shape=1).asscalar())
    check_ste(net_type_str="RoundSTENET", w_init=w_init, hybridize=True, in_data=in_data)
    check_ste(net_type_str="RoundSTENET", w_init=w_init, hybridize=False, in_data=in_data)

    # Test 1.5 (verifies that .5 rounds the same as in round)
    in_data = nd.array([1.5]*30)  # 30 is an arbitrary length
    w_init = 1.
    check_ste(net_type_str="RoundSTENET", w_init=w_init, hybridize=True, in_data=in_data)
    check_ste(net_type_str="RoundSTENET", w_init=w_init, hybridize=False, in_data=in_data)

    # Test 0
    in_data = nd.array([0]*30)  # 30 is an arbitrary length
    w_init = 0.
    check_ste(net_type_str="RoundSTENET", w_init=w_init, hybridize=True, in_data=in_data)
    check_ste(net_type_str="RoundSTENET", w_init=w_init, hybridize=False, in_data=in_data)
Code Example #5
File: mndarray.py Project: mumupy/mmdeeplearning
def argmax():
    a = nd.uniform(0, 5, dtype=np.float32, shape=(3, 4))
    logger.info("a:")
    logger.info(a)
    logger.info(nd.ones(shape=(a.shape[0],)) * 3)
    logger.info(a.argmax(axis=1))
    logger.info("a.argmax(axis=1) >= 3")
    b = a.argmax(axis=1) >= nd.ones(shape=(a.shape[0],)) * 3  # elementwise comparison, both shape (3,)
    logger.info(b)
Code Example #6
def test_check_output():
    print("<<TEST: Check whether output is right>>")
    inputs = nd.uniform(shape=(1, 3, 224, 224))

    ref_net = mobilenet1_0(pretrained=True)
    my_net = mobilenet1_0(pretrained=True)
    merge_bn(my_net)

    ref_output = ref_net(inputs)
    my_output = my_net(inputs)

    print(((my_output - ref_output).abs() /
           ref_output.abs()).reshape(-1).sort()[-20:])
    print()
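
merge_bn presumably folds each BatchNorm into its preceding convolution, so the two networks should agree up to floating-point error; the print shows the 20 largest relative deviations. A hedged assertion one could add (the 1e-3 tolerance is an assumption):

rel_err = ((my_output - ref_output).abs() / ref_output.abs()).max().asscalar()
assert rel_err < 1e-3, "BN folding changed the network output"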
Code Example #7
File: mndarray.py Project: mumupy/mmdeeplearning
def random():
    """
    随机生成多维数组
    :return:
    """
    # a = nd.random_randint(low=0, high=5, shape=(2, 2, 2))
    a = nd.uniform(low=0, high=5, shape=(2, 2, 2))
    logger.info(a)

    b = nd.random_normal(0, 1, (3, 4))
    logger.info(b)

    # draw random samples from an exponential distribution
    c = nd.random_exponential(lam=2, shape=(2, 2))
    logger.info(c)
Code Example #8
    def load_model(self):
        net = DarkNet(input_dim=self.input_dim,
                      num_classes=self.num_classes)  # DarkNet backbone
        net.initialize(ctx=self.ctx)

        if self.params_path.endswith(".params"):  # 加载模型
            net.load_params(self.params_path)
        elif self.params_path.endswith(".weights"):
            tmp_batch = nd.uniform(shape=(1, 3, self.input_dim,
                                          self.input_dim),
                                   ctx=self.ctx)
            net(tmp_batch)
            net.load_weights(self.params_path, fine_tune=False)
        else:
            print("params {} load error!".format(self.params_path))
            exit()
        print_info("加载参数: {}".format(self.params_path))
        net.hybridize()

        return net
Code Example #9
File: y3_class.py Project: SpikeKing/XX-ImageLabel
    def __load_model(self):
        """
        加载网络模型
        :return: 网络
        """
        net = DarkNet(input_dim=self.input_dim, num_classes=self.num_classes)  # 基础网络 DarkNet
        net.initialize(ctx=self.ctx)  # 网络环境 cpu or gpu

        print_info("模型: {}".format(self.params_path))

        if self.params_path.endswith(".params"):  # 加载参数
            net.load_params(self.params_path)
        elif self.params_path.endswith(".weights"):  # 加载模型
            tmp_batch = nd.uniform(shape=(1, 3, self.input_dim, self.input_dim), ctx=self.ctx)
            net(tmp_batch)
            net.load_weights(self.params_path, fine_tune=False)
        else:
            raise Exception('unsupported model file')

        net.hybridize()  # compile and optimize the network

        return net
Code Example #10
def test_quantized_Conv2D(use_bias, groups):
    x = nd.uniform(shape=(2, 2, 5, 5))

    my_conv = MyConv(10,
                     3,
                     1,
                     1,
                     in_channels=2,
                     groups=groups,
                     use_bias=use_bias,
                     input_dtype='uint8',
                     weight_dtype='int8',
                     quantized=True)
    my_conv.initialize()
    my_res = my_conv(x)

    ref_conv = Conv2D(10,
                      3,
                      1,
                      1,
                      in_channels=2,
                      groups=groups,
                      use_bias=use_bias,
                      bias_initializer=init.Constant(my_conv.bias.data())
                      if use_bias else 'zero',
                      weight_initializer=init.Constant(my_conv.weight.data()))
    ref_conv.initialize()
    ref_res = ref_conv(x)

    sim_conv = ref_conv
    convert_conv2d = gen_conv2d_converter()
    convert_conv2d(sim_conv)
    qparams_init(sim_conv)
    sim_res = sim_conv(x)

    return (((my_res - ref_res).abs() /
             ref_res.abs()).reshape(-1).sort()[-20:],
            ((my_res - sim_res).abs() /
             sim_res.abs()).reshape(-1).sort()[-20:])
Code Example #11
def test_quantized_mobilenet():
    x = nd.uniform(shape=(1, 3, 224, 224))

    from models.quantized_mobilenet import mobilenet1_0 as my_mobilenet
    my_net = my_mobilenet(pretrained=True)
    my_res = my_net(x)

    from gluoncv.model_zoo import mobilenet1_0 as ref_mobilenet
    ref_net = ref_mobilenet(pretrained=True)
    ref_res = ref_net(x)

    sim_net = ref_mobilenet(pretrained=True)
    convert_model(sim_net, exclude=[sim_net.features[0]])
    qparams_init(sim_net)
    sim_res = sim_net(x)

    # print(">> Difference between my_quantized_mobilenet and simulated_mobilenet -- ")
    # print(">> ", ((my_res - sim_res).abs() / sim_res.abs()).reshape(-1).sort()[-100:])

    return (my_res.argsort(is_ascend=False)[0, :20],
            ref_res.argsort(is_ascend=False)[0, :20],
            sim_res.argsort(is_ascend=False)[0, :20])
Code Example #12
def _sample_bernoulli(probability):
    return nd.greater(probability, nd.uniform(shape=probability.shape))
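
Each output element is 1 with probability equal to the corresponding entry of probability, since a U[0, 1) draw falls below p with probability p. A quick sketch:

from mxnet import nd
p = nd.full((1000,), 0.7)
mask = nd.greater(p, nd.uniform(shape=p.shape))
print(mask.mean())  # ~0.7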
Code Example #13
def test_im2col_2D():
    x = nd.uniform(shape=(2, 3, 5, 5))
    print("Origin:")
    print(x)
    print("im2col_2D:")
    print(_im2col_2D(nd, x, (3, 3), (1, 1), (1, 1)))
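
_im2col_2D is the project's own helper; judging from the call it takes (backend, data, kernel, strides, padding). With a 3x3 kernel, stride 1 and padding 1, the standard output-size formula gives (5 + 2*1 - 3) // 1 + 1 = 5 positions per axis, i.e. 25 sliding-window columns per sample, each of length 3*3*3 = 27. A hedged check of that arithmetic:

k, s, p, h = 3, 1, 1, 5
out = (h + 2 * p - k) // s + 1
print(out, out * out)  # 5 25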
Code Example #14
File: util.py Project: liuyanyi/skeleton-net
    def train(epochs, ctx):
        if isinstance(ctx, mx.Context):
            ctx = [ctx]

        if config.train_cfg.param_init:
            init_func = getattr(mx.init, config.train_cfg.init)
            net.initialize(init_func(), ctx=ctx, force_reinit=True)
        else:
            net.load_parameters(config.train_cfg.param_file, ctx=ctx)

        summary(net, stat_name, nd.uniform(
            shape=(1, 3, imgsize, imgsize), ctx=ctx[0]))
        # net = nn.HybridBlock()
        net.hybridize()

        root = config.dir_cfg.dataset
        train_data = gluon.data.DataLoader(
            gluon.data.vision.CIFAR10(
                root=root, train=True).transform_first(transform_train),
            batch_size=batch_size, shuffle=True, last_batch='discard', num_workers=num_workers)

        val_data = gluon.data.DataLoader(
            gluon.data.vision.CIFAR10(
                root=root, train=False).transform_first(transform_test),
            batch_size=batch_size, shuffle=False, num_workers=num_workers)

        trainer_arg = {'learning_rate': config.lr_cfg.lr,
                       'wd': config.lr_cfg.wd, 'lr_scheduler': lr_sch}
        extra_arg = eval(config.lr_cfg.extra_arg)
        trainer_arg.update(extra_arg)
        trainer = gluon.Trainer(net.collect_params(), optimizer, trainer_arg)
        if config.train_cfg.amp:
            amp.init_trainer(trainer)
        metric = mx.metric.Accuracy()
        train_metric = mx.metric.RMSE()
        loss_fn = gluon.loss.SoftmaxCrossEntropyLoss(
            sparse_label=not config.data_cfg.mixup)
        train_history = TrainingHistory(['training-error', 'validation-error'])
        # acc_history = TrainingHistory(['training-acc', 'validation-acc'])
        loss_history = TrainingHistory(['training-loss', 'validation-loss'])

        iteration = 0

        best_val_score = 0

        # print('start training')
        sig_state.emit(1)
        sig_pgbar.emit(0)
        # signal.emit('Training')
        for epoch in range(epochs):
            tic = time.time()
            train_metric.reset()
            metric.reset()
            train_loss = 0
            num_batch = len(train_data)
            alpha = 1
            for i, batch in enumerate(train_data):
                if epoch == 0 and iteration == 1 and config.save_cfg.profiler:
                    profiler.set_state('run')
                    is_profiler_run = True
                if epoch == 0 and iteration == 1 and config.save_cfg.tensorboard:
                    sw.add_graph(net)
                lam = np.random.beta(alpha, alpha)
                if epoch >= epochs - 20 or not config.data_cfg.mixup:
                    lam = 1

                data_1 = gluon.utils.split_and_load(
                    batch[0], ctx_list=ctx, batch_axis=0)
                label_1 = gluon.utils.split_and_load(
                    batch[1], ctx_list=ctx, batch_axis=0)

                if not config.data_cfg.mixup:
                    data = data_1
                    label = label_1
                else:
                    data = [lam*X + (1-lam)*X[::-1] for X in data_1]
                    label = []
                    for Y in label_1:
                        y1 = label_transform(Y, classes)
                        y2 = label_transform(Y[::-1], classes)
                        label.append(lam*y1 + (1-lam)*y2)

                with ag.record():
                    output = [net(X) for X in data]
                    loss = [loss_fn(yhat, y) for yhat, y in zip(output, label)]
                if config.train_cfg.amp:
                    with ag.record():
                        with amp.scale_loss(loss, trainer) as scaled_loss:
                            ag.backward(scaled_loss)
                            # scaled_loss.backward()
                else:
                    for l in loss:
                        l.backward()
                trainer.step(batch_size)
                train_loss += sum([l.sum().asscalar() for l in loss])

                output_softmax = [nd.SoftmaxActivation(out) for out in output]
                train_metric.update(label, output_softmax)
                metric.update(label_1, output_softmax)
                name, acc = train_metric.get()
                if config.save_cfg.tensorboard:
                    sw.add_scalar(tag='lr', value=trainer.learning_rate,
                                  global_step=iteration)
                if epoch == 0 and iteration == 1 and config.save_cfg.profiler:
                    nd.waitall()
                    profiler.set_state('stop')
                    profiler.dump()
                iteration += 1
                sig_pgbar.emit(iteration)
                if check_flag()[0]:
                    sig_state.emit(2)
                while check_flag()[0] or check_flag()[1]:
                    if check_flag()[1]:
                        print('stop')
                        return
                    else:
                        time.sleep(5)
                        print('pausing')

            epoch_time = time.time() - tic
            train_loss /= batch_size * num_batch
            name, acc = train_metric.get()
            _, train_acc = metric.get()
            name, val_acc, _ = test(ctx, val_data)
            # if config.data_cfg.mixup:
            #     train_history.update([acc, 1-val_acc])
            #     plt.cla()
            #     train_history.plot(save_path='%s/%s_history.png' %
            #                        (plot_name, model_name))
            # else:
            train_history.update([1-train_acc, 1-val_acc])
            plt.cla()
            train_history.plot(save_path='%s/%s_history.png' %
                               (plot_name, model_name))

            if val_acc > best_val_score:
                best_val_score = val_acc
                net.save_parameters('%s/%.4f-cifar-%s-%d-best.params' %
                                    (save_dir, best_val_score, model_name, epoch))

            current_lr = trainer.learning_rate
            name, val_acc, val_loss = test(ctx, val_data)

            logging.info('[Epoch %d] loss=%f train_acc=%f train_RMSE=%f\n     val_acc=%f val_loss=%f lr=%f time: %f' %
                         (epoch, train_loss, train_acc, acc, val_acc, val_loss, current_lr, epoch_time))
            loss_history.update([train_loss, val_loss])
            plt.cla()
            loss_history.plot(save_path='%s/%s_loss.png' %
                              (plot_name, model_name), y_lim=(0, 2), legend_loc='best')
            if config.save_cfg.tensorboard:
                sw._add_scalars(tag='Acc',
                                scalar_dict={'train_acc': train_acc, 'test_acc': val_acc}, global_step=epoch)
                sw._add_scalars(tag='Loss',
                                scalar_dict={'train_loss': train_loss, 'test_loss': val_loss}, global_step=epoch)

            sig_table.emit([epoch, train_loss, train_acc,
                            val_loss, val_acc, current_lr, epoch_time])
            csv_writer.writerow([epoch, train_loss, train_acc,
                                 val_loss, val_acc, current_lr, epoch_time])
            csv_file.flush()

            if save_period and save_dir and (epoch + 1) % save_period == 0:
                net.save_parameters('%s/cifar10-%s-%d.params' %
                                    (save_dir, model_name, epoch))
        if save_period and save_dir:
            net.save_parameters('%s/cifar10-%s-%d.params' %
                                (save_dir, model_name, epochs-1))
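
The mixup branch above blends each batch with its own reversal and mixes the labels with the same coefficient lam; label_transform is the project's own helper (assumed here to produce one-hot vectors). A minimal hedged sketch of the same mixing with plain one-hot labels:

import numpy as np
from mxnet import nd

classes = 10
lam = np.random.beta(1, 1)
X = nd.uniform(shape=(4, 3, 32, 32))
Y = nd.array([0, 1, 2, 3])

X_mix = lam * X + (1 - lam) * X[::-1]   # blend each image with the reversed batch
y_mix = lam * nd.one_hot(Y, classes) + (1 - lam) * nd.one_hot(Y[::-1], classes)
# soft labels, hence SoftmaxCrossEntropyLoss(sparse_label=False) above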
Code Example #15
    def forward(self, inputs, states_forward=None, states_backward=None):
        inputs_reversed = nd.reverse(inputs, axis=0)  # reverse along the time axis (TNC layout: batch_size is inputs.shape[1])

        if not (states_forward and states_backward):
            states_forward, states_backward = self.begin_state(batch_size=inputs.shape[1])

        outputs_forward = []
        outputs_backward = []

        for i in range(self.num_layers):
            if i == 0:
                output, states_forward[i] = getattr(self, 'forward_lstm_{}'.format(i))(inputs, states_forward[i])
                outputs_forward.append(output)

                output, states_backward[i] = getattr(self, 'backward_lstm_{}'.format(i))(inputs_reversed, states_backward[i])
                outputs_backward.append(output)
            else:
                output, states_forward[i] = getattr(self, 'forward_lstm_{}'.format(i))(outputs_forward[i-1], states_forward[i])
                outputs_forward.append(output)

                output, states_backward[i] = getattr(self, 'backward_lstm_{}'.format(i))(outputs_backward[i-1], states_backward[i])
                outputs_backward.append(output)
        return outputs_forward, states_forward, outputs_backward, states_backward

lstm = ElmoLSTMReverse('lstm', 3, 400, 1150, 0.4, 0.5, True)
lstm.collect_params().initialize()
states_forward, states_backward = lstm.begin_state(batch_size=80)
outputs_forward, states_forward, outputs_backward, states_backward = lstm(nd.uniform(-1, 1, (35,80,400)), states_forward, states_backward)

Code Example #16
# Three layers use max pooling with exactly the same parameters (pool_size=3, strides=2),
# so each stage roughly halves the spatial size: with (pool_size, strides) = (3, 2) the output is about half the input.
# NiN removes the overfitting-prone fully connected output layer, replacing it with a NiN block whose output channels
# equal the number of classes plus global average pooling; this generalizes somewhat less but needs far less GPU memory.
net = nn.Sequential()
net.add(NiN_block(channels=96, kernel_size=11, strides=4), nn.MaxPool2D(pool_size=3, strides=2),
        NiN_block(channels=256, kernel_size=5, padding=2), nn.MaxPool2D(pool_size=3, strides=2),
        NiN_block(channels=384, kernel_size=3, padding=1), nn.MaxPool2D(pool_size=3, strides=2), nn.Dropout(0.4),
        # 10 classes; the global average pooling below carries those 10 channels through
        NiN_block(channels=10, kernel_size=3, padding=1, strides=1),
        # global average pooling: pool size equals the input size, so the output is (N, C, 1, 1)
        nn.GlobalAvgPool2D(),
        # reshape the 4D output to 2D (N, 10)
        nn.Flatten())

# Test
X = nd.uniform(shape=(1, 1, 224, 224))
net.initialize()       # defaults to init=initializer.Uniform()
for layer in net:
    X = layer(X)
    print(layer.name, 'output shape:\t', X.shape)
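
For the (1, 1, 224, 224) input this loop should print roughly the following shapes (a hedged trace, assuming NiN_block applies exactly its stated kernel/strides/padding):

# NiN_block(96, k=11, s=4)  -> (1, 96, 54, 54)
# MaxPool2D(3, 2)           -> (1, 96, 26, 26)
# NiN_block(256, k=5, p=2)  -> (1, 256, 26, 26)
# MaxPool2D(3, 2)           -> (1, 256, 12, 12)
# NiN_block(384, k=3, p=1)  -> (1, 384, 12, 12)
# MaxPool2D(3, 2) + Dropout -> (1, 384, 5, 5)
# NiN_block(10, k=3, p=1)   -> (1, 10, 5, 5)
# GlobalAvgPool2D           -> (1, 10, 1, 1)
# Flatten                   -> (1, 10)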



# load the data
batch_size = 32
train_iter, test_iter = d2l.load_data_fashion_mnist(batch_size, resize=224)

# re-initialize the model
ctx = d2l.try_gpu()
net.initialize(force_reinit=True, init=init.Xavier(), ctx=ctx)              # re-initialize the model on the target device
Code Example #17
File: modules.py Project: z01nl1o02/NLP-transformer
def attention(query, key, value, mask=None, dropout=None):
    assert (len(value.shape) == 3)
    d_model = query.shape[-1]

    scores = nd.batch_dot(query, key, transpose_b=True) / math.sqrt(d_model)

    if mask is not None:
        val = nd.ones(scores.shape, ctx=cfg.ctx) * (-1e9)
        scores = nd.where(mask == 1, scores, val)
    p_attn = nd.softmax(scores, axis=-1)
    if dropout is not None:
        p_attn = dropout(p_attn)
    return nd.batch_dot(p_attn, value), p_attn


if cfg.DEBUG_ON:
    x = nd.uniform(0, 1, shape=(1, 9, 512))
    out, attn = attention(x, x, x)
    print('attention debug:')
    print("\t input, out, attn: ", x.shape, out.shape, attn.shape)
# print(attn[0,:,:])
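# With this (1, 9, 512) debug input, scores and the returned attn are
# (1, 9, 9) (one weight per pair of the 9 positions) and out is (1, 9, 512).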


class MultiHeadedAttention(nn.Block):
    def __init__(self, h, d_model, dropout=0.1):
        super(MultiHeadedAttention, self).__init__()
        assert (d_model % h == 0)
        with self.name_scope():
            self.d_k = d_model // h
            self.h = h

            self.linears_0 = nn.Dense(in_units=d_model,
Code Example #18
File: func.py Project: chr5tphr/ecGAN
def fuzzy_one_hot(arr, size):
    x = arr.reshape((-1, ))
    return nd.where(nd.one_hot(x, size),
                    nd.uniform(low=0.7, high=1.2, shape=(x.shape[0], size), ctx=x.context),
                    nd.uniform(low=0.0, high=0.3, shape=(x.shape[0], size), ctx=x.context))
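
This yields "soft" one-hot targets (a label-smoothing trick for GAN training): the hot entry is drawn from [0.7, 1.2) and the others from [0.0, 0.3). A quick usage sketch; the printed numbers are illustrative, each draw differs:

from mxnet import nd
labels = nd.array([0, 2, 1])
print(fuzzy_one_hot(labels, 3))
# e.g. [[1.08 0.11 0.25]
#       [0.02 0.19 0.84]
#       [0.27 0.93 0.14]]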
Code Example #19
    def forward(self, x, y, sample_prob=None):
        if sample_prob is not None:
            self.sample_prob = sample_prob
        batch_size = x.shape[0]
        state = self.init_hidden(batch_size, self.ctx)
        outputs_pgm = []
        outputs_param = []
        seq = y
        for i in range(seq.shape[1]):
            if i == 0:
                xt = x
            else:
                if i >= 1 and self.sample_prob > 0:
                    #print("x.shape:",x.shape)
                    sample_prob = nd.uniform(
                        0, 1, shape=(batch_size,),
                        ctx=self.ctx)  # sample_prob.shape: (batch_size,)
                    sample_mask = sample_prob < self.sample_prob
                    #print("sample_mask:",sample_mask)
                    #print("sample_mask.sum:",sample_mask.sum().asscalar())
                    if sample_mask.sum() == 0:
                        it1 = seq[:, i - 1]
                    else:
                        sample_ind = sample_mask != 0
                        #print("sample_ind:",sample_ind)
                        it1 = seq[:, i - 1]  #it1.shape : (10,)
                        #print("it1:",it1.shape)
                        #print("output_prog:",outputs_pgm[-1])
                        prob_prev = nd.exp(outputs_pgm[-1])
                        #print("prob_pre:",prob_prev)
                        temp = nd.random.multinomial(
                            prob_prev, 1).reshape(-1).astype('int64')
                        #print("prob_prev:",nd.argmax(prob_prev,axis=1).astype('int64')==temp)
                        #print("temp",temp,"\n it1:",it1)
                        it1 = nd.where(sample_ind, temp, it1).astype('float32')
                else:
                    #print("obtain last ground truth")
                    it1 = seq[:, i - 1].copy()
                xt = self.pgm_embed(it1)
                #print("xt after embed:",xt)

            #print("xt                      :",xt)
            output, state = self.core(xt.expand_dims(axis=0), state)

            pgm_feat1 = nd.relu(self.logit1(output.squeeze(0)))
            pgm_feat2 = self.logit2(pgm_feat1)
            pgm_score = nd.log_softmax(pgm_feat2, axis=1)

            trans_prob = nd.softmax(pgm_feat2, axis=1).detach()
            param_feat1 = nd.relu(self.regress1(output.squeeze(0)))
            param_feat2 = nd.concat(trans_prob, param_feat1, dim=1)

            param_score = self.regress2(param_feat2)
            param_score = param_score.reshape(batch_size, self.vocab_size + 1,
                                              self.max_param)
            #index = nd.argmax(trans_prob, axis = 1)
            index = seq[:, i]
            index = index.expand_dims(axis=1).expand_dims(axis=2).broadcast_to(
                shape=(batch_size, 1, self.max_param)).detach()
            param_score = nd.pick(param_score, index, 1)

            outputs_pgm.append(pgm_score)
            outputs_param.append(param_score)

        outputs_pgm = [_.expand_dims(axis=1) for _ in outputs_pgm]
        outputs_param = [_.expand_dims(axis=1) for _ in outputs_param]
        pgms = outputs_pgm[0]
        params = outputs_param[0]
        for i in range(1, len(outputs_pgm)):
            pgms = nd.concat(pgms, outputs_pgm[i], dim=1)
            params = nd.concat(params, outputs_param[i], dim=1)
        #print("params", params.shape)
        #rint("pgm", pgms.shape)

        return [pgms, params]
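
The i >= 1 branch implements scheduled sampling: with probability self.sample_prob each sequence position is fed the model's own sampled prediction instead of the ground-truth token. A minimal hedged sketch of that mask-based mixing:

from mxnet import nd
sample_mask = nd.uniform(0, 1, shape=(10,)) < 0.25  # ~25% of positions resample
ground_truth = nd.arange(10)
model_samples = nd.ones(10) * 7                     # stand-in for the multinomial draws
next_input = nd.where(sample_mask, model_samples, ground_truth)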
Code Example #20
File: func.py Project: chr5tphr/ecGAN
def randint(low=0., high=1., shape=(1, ), ctx=None, dtype='int32'):
    return nd.uniform(low=low, high=high, shape=shape, ctx=ctx).astype(dtype)
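
Because astype('int32') truncates toward zero, this behaves like an integer draw over [low, high): for non-negative integer bounds each value below high is equally likely and high itself never appears. A quick usage sketch (output values are illustrative):

print(randint(0, 5, shape=(8,)))  # e.g. [3 0 4 1 2 2 0 4] -- never 5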
Code Example #23
        x = self.conv2(x_)
        x = x_ + x
        x_ = self.conv3(x)
        x = F.concat(x, x_, dim=1)
        x = self.pool(x)
        x = self.fc1(x)
        x = self.fc2(x)
        return x


if __name__ == "__main__":
    root = "../models/my_testmodel/"
    # Generate model and initialize
    model = TestModel()
    model.initialize()
    input_ = nd.uniform(shape=(1, 3, 224, 224))
    _ = model(input_)
    # Save parameters
    model.save_parameters(root + "my_testmodel.param")
    # Save input
    nd.save(root + "input.nd", input_)
    save(root + "input.dat", input_.asnumpy())
    # Save conv1
    conv = model.conv1
    save(root + "conv1.weight.dat", conv.weight.data().asnumpy())
    # Save conv2
    conv = model.conv2
    save(root + "conv2.weight.dat", conv.weight.data().asnumpy())
    save(root + "conv2.bias.dat", conv.bias.data().asnumpy())
    # Save conv3
    conv = model.conv3