Example #1
def test_full_forward_op():
    inputs = ad.Variable("inputs")
    filters = ad.Variable("filters")
    y_ = ad.Variable(name="y_")

    # init
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 100, 100).reshape((5, 1, 20))
    filters_val = np.ones((1, 1, 20)) * 0.001
    y_val = np.zeros((5, 1))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    y_val = ndarray.array(y_val, ctx)
    outputs = ad.convolution_1d_forward_op(inputs, filters, "NCHW", "VALID", 1)
    outputs_pool = ad.pooling_1d_forward_op(outputs, "NCHW", "max", 0, 1, 1)
    outputs_relu = ad.activation_forward_op(outputs_pool, "NCHW", "relu")
    outputs_f = ad.flatten_op(outputs_relu)
    output = ad.fullyactivation_forward_op(outputs_f, "NCHW", "relu")
    loss = ad.matmul_op(output, output, trans_A=True) * (1 / 5)
    grad_f = ad.gradients(loss, [filters])  # ad.gradients returns a list
    executor = ad.Executor([grad_f[0]], ctx=ctx)
    g_val = executor.run(feed_dict={
        inputs: x_val,
        filters: filters_val
    })  # executor.run returns a list
    print("g_val:", g_val[0].asnumpy())
Example #2
def convolutional_block(inputs,
                        kernel_size,
                        in_filter,
                        out_filters,
                        block_name,
                        stride=1):
    f1, f2, f3 = out_filters

    W1 = ad.Variable(block_name + "W1")
    W2 = ad.Variable(block_name + "W2")
    W3 = ad.Variable(block_name + "W3")
    W_shortcut = ad.Variable(block_name + "W_shortcut")
    rand = np.random.RandomState(seed=123)
    W1_val = rand.normal(scale=0.1, size=(f1, in_filter, 1, 1))
    W2_val = rand.normal(scale=0.1, size=(f2, f1, kernel_size, kernel_size))
    W3_val = rand.normal(scale=0.1, size=(f3, f2, 1, 1))
    W_shortcut_val = rand.normal(scale=0.1, size=(f3, in_filter, 1, 1))  # (out_channels, in_channels, kh, kw)

    # conv1
    conv1 = ad.convolution_2d_forward_op(inputs, W1, "NCHW", "SAME", stride,
                                         stride)
    bn1 = ad.bn_forward_op(conv1, "NCHW", "pre_activation")
    act1 = ad.activation_forward_op(bn1, "NCHW", "relu")

    # conv2
    conv2 = ad.convolution_2d_forward_op(act1, W2, "NCHW", "SAME", stride,
                                         stride)
    bn2 = ad.bn_forward_op(conv2, "NCHW", "pre_activation")
    act2 = ad.activation_forward_op(bn2, "NCHW", "relu")

    # conv3
    conv3 = ad.convolution_2d_forward_op(act2, W3, "NCHW", "VALID", stride,
                                         stride)
    bn3 = ad.bn_forward_op(conv3, "NCHW", "pre_activation")

    # shortcut path
    conv4 = ad.convolution_2d_forward_op(inputs, W_shortcut, "NCHW", "VALID",
                                         stride, stride)
    shortcut = ad.bn_forward_op(conv4, "NCHW", "pre_activation")

    # shortcut
    add = ad.add_op(bn3, shortcut)
    act4 = ad.activation_forward_op(add, "NCHW", "relu")

    param_dict = {W1: W1_val, W2: W2_val, W3: W3_val, W_shortcut: W_shortcut_val}
    return act4, param_dict
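The weight tensors in this block follow the (out_channels, in_channels, kernel_h, kernel_w) layout implied by W1, W2 and W3, and the returned dictionary maps each weight Variable to its initial numpy value. A minimal usage sketch, assuming the same ad API as above:

inputs = ad.Variable("inputs")  # NCHW tensor, here assumed to carry 64 channels
out_node, params = convolutional_block(inputs=inputs,
                                       kernel_size=3,
                                       in_filter=64,
                                       out_filters=[64, 64, 256],
                                       block_name="2a",
                                       stride=1)
# merge the block's parameters into an executor feed_dict:
# feed_dict.update(params)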
Example #3
def test_convolution_3d_forward_op():
    inputs = ad.Variable("inputs")
    filters = ad.Variable("filters")
    y_ = ad.Variable(name="y_")

    # init
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 100, 135).reshape((5, 1, 3, 3, 3))
    filters_val = np.ones((1, 1, 2, 2, 2)) * 0.001
    y_val = np.zeros((5, 1))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    y_val = ndarray.array(y_val, ctx)

    outputs = ad.convolution_3d_forward_op(inputs, filters, "NCHW", "VALID", 1,
                                           1, 1)
    outputs_pool = ad.pooling_3d_forward_op(outputs, "NCHW", "max", 0, 0, 0, 1,
                                            1, 1, 2, 2, 2)
    outputs_relu = ad.activation_forward_op(outputs_pool, "NCHW", "relu")
    outputs_dro = ad.dropout_forward_op(outputs_relu, "NCHW", 0.5, 0)
    outputs_f = ad.flatten_op(outputs_dro)
    loss = ad.matmul_op(outputs_f, outputs_f, trans_A=True) * (1 / 5)
    grad_inputs, grad_f = ad.gradients(loss, [inputs, filters])
    executor = ad.Executor([loss, grad_f], ctx=ctx)

    aph = 1.0e-6  # learning rate
    for i in range(20):
        loss_val, filters_grad_val = executor.run(feed_dict={
            inputs: x_val,
            filters: filters_val
        })

        filters_val = filters_val.asnumpy()
        filters_grad_val = filters_grad_val.asnumpy()
        filters_val = filters_val - aph * filters_grad_val
        filters_val = ndarray.array(filters_val, ctx)

    print("loss_val:", loss_val.asnumpy())
    print("filters_val:", filters_val.asnumpy())
Example #4
def test_sigmoid_conv_1d():
    inputs = ad.Variable("inputs")
    filters = ad.Variable("filters")
    y_ = ad.Variable(name="y_")

    # init
    ctx = ndarray.gpu(0)
    x_val = np.linspace(0, 100, 80).reshape((5, 1, 4, 4))
    filters_val = np.ones((1, 1, 3, 3)) * 0.001
    y_val = np.zeros((5, 1))
    x_val = ndarray.array(x_val, ctx)
    filters_val = ndarray.array(filters_val, ctx)
    y_val = ndarray.array(y_val, ctx)

    outputs = ad.convolution_2d_forward_op(inputs, filters, "NCHW", "VALID", 1,
                                           1)
    # outputs_pool = ad.pooling_2d_forward_op(outputs, "NCHW", "max", 0, 0, 1, 1, 2, 2)
    outputs_relu = ad.activation_forward_op(outputs, "NCHW", "relu")
    executor = ad.Executor([outputs_relu], ctx=ctx)

    out_val = executor.run(feed_dict={inputs: x_val, filters: filters_val})

    print("out_val:", out_val[0].asnumpy())
Example #5
def ResNet50(inputs, n_class):
    X = ad.Placeholder("X")
    y_ = ad.Placeholder("y_")
    W1 = ad.Variable("W1")
    W6 = ad.Variable("W6")
    b6 = ad.Variable("b6")
    W7 = ad.Variable("W7")
    b7 = ad.Variable("b7")
    keep_prob = ad.Placeholder("keep_prob")

    #conv1
    conv1 = ad.convolution_2d_forward_op(X, W1, "NCHW", "VALID", 2, 2)
    bn1 = ad.bn_forward_op(conv1, "NCHW", "pre_activation")
    act1 = ad.activation_forward_op(bn1, "NCHW", "relu")
    pool1 = ad.pooling_2d_forward_op(act1, "NCHW", "max", 0, 0, 2, 2, 3, 3)

    #conv2_x
    conv2, dict2 = convolutional_block(inputs=pool1,
                                       kernel_size=3,
                                       in_filter=64,
                                       out_filters=[64, 64, 256],
                                       block_name="2a",
                                       stride=1)
    iden2_1, dict2_1 = identity_block(inputs=conv2,
                                      kernel_size=3,
                                      in_filter=256,
                                      out_filters=[64, 64, 256],
                                      block_name="2b",
                                      stride=1)
    iden2_2, dict2_2 = identity_block(iden2_1, 3, 256, [64, 64, 256], "2c", 1)

    #conv3_x
    conv3, dict3 = convolutional_block(iden2_2, 3, 256, [128, 128, 512], "3a",
                                       1)
    iden3_1, dict3_1 = identity_block(conv3, 3, 512, [128, 128, 512], "3b", 1)
    iden3_2, dict3_2 = identity_block(iden3_1, 3, 512, [128, 128, 512], "3c",
                                      1)
    iden3_3, dict3_3 = identity_block(iden3_2, 3, 512, [128, 128, 512], "3d",
                                      1)

    #conv4_x
    conv4, dict4 = convolutional_block(iden3_3, 3, 512, [256, 256, 1024], "4a",
                                       1)
    iden4_1, dict4_1 = identity_block(conv4, 3, 1024, [256, 256, 1024], "4b",
                                      1)
    iden4_2, dict4_2 = identity_block(iden4_1, 3, 1024, [256, 256, 1024], "4c",
                                      1)
    iden4_3, dict4_3 = identity_block(iden4_2, 3, 1024, [256, 256, 1024], "4d",
                                      1)
    iden4_4, dict4_4 = identity_block(iden4_3, 3, 1024, [256, 256, 1024], "4e",
                                      1)
    iden4_5, dict4_5 = identity_block(iden4_4, 3, 1024, [256, 256, 1024], "4f",
                                      1)

    #conv5_x
    conv5, dict5 = convolutional_block(iden4_5, 3, 1024, [512, 512, 2048],
                                       "5a", 1)
    iden5_1, dict5_1 = identity_block(conv5, 3, 2048, [512, 512, 2048], "5b",
                                      1)
    iden5_2, dict5_2 = identity_block(iden5_1, 3, 2048, [512, 512, 2048], "5c",
                                      1)
    pool5 = ad.pooling_2d_forward_op(iden5_2, "NCHW", "mean", 0, 0, 1, 1, 2, 2)

    pool5_flat = ad.flatten_op(pool5)
    mul6 = ad.matmul_op(pool5_flat, W6)
    add6 = ad.add_op(mul6, b6)
    act6 = ad.fullyactivation_forward_op(add6, "NCHW", "relu")
    drop_out = ad.fullydropout_forward_op(act6, "NCHW", keep_prob)
    mul7 = ad.matmul_op(drop_out, W7)
    add7 = ad.add_op(mul7, b7)
    act7 = ad.fullyactivation_forward_op(add7, "NCHW", "softmax")

    loss = ad.softmaxcrossentropy_op(act7, y_)

    X_val = np.random.normal(0, 0.5, (10, 3, 230, 230))
    W1_val = np.random.normal(0, 0.5, (64, 3, 7, 7))
    W6_val = np.random.normal(0, 0.5, (7 * 7 * 2048, 50))
    b6_val = np.random.normal(0, 0.5, (10, 50))
    W7_val = np.random.normal(0, 0.5, (50, 6))
    b7_val = np.random.normal(0, 0.5, (10, 6))
    y_val = np.random.normal(0, 0.5, (10, 6))

    feed_dict = {W1: W1_val, W6: W6_val, W7: W7_val, b6: b6_val, b7: b7_val}
    feed_dict.update(dict2)
    feed_dict.update(dict2_1)
    feed_dict.update(dict2_2)
    feed_dict.update(dict3)
    feed_dict.update(dict3_1)
    feed_dict.update(dict3_2)
    feed_dict.update(dict3_3)
    feed_dict.update(dict4)
    feed_dict.update(dict4_1)
    feed_dict.update(dict4_2)
    feed_dict.update(dict4_3)
    feed_dict.update(dict4_4)
    feed_dict.update(dict4_5)
    feed_dict.update(dict5)
    feed_dict.update(dict5_1)
    feed_dict.update(dict5_2)

    time.sleep(5)
    node_list = list(feed_dict.keys())
    executor = ad.Executor(node_list, ctx=ndarray.gpu(0))
    executor.run(feed_dict)

    time.sleep(5)
Example #6
def vgg16():

    n = 10
    n_class = 10

    inputs = ad.Variable("inputs")
    filters1_1 = ad.Variable("filters1_1")
    filters1_2 = ad.Variable("filters1_2")
    filters2_1 = ad.Variable("filters2_1")
    filters2_2 = ad.Variable("filters2_2")
    filters3_1 = ad.Variable("filters3_1")
    filters3_2 = ad.Variable("filters3_2")
    filters3_3 = ad.Variable("filters3_3")
    filters4_1 = ad.Variable("filters4_1")
    filters4_2 = ad.Variable("filters4_2")
    filters4_3 = ad.Variable("filters4_3")
    filters5_1 = ad.Variable("filters5_1")
    filters5_2 = ad.Variable("filters5_2")
    filters5_3 = ad.Variable("filters5_3")
    filters6 = ad.Variable("filters6")
    filters7 = ad.Variable("filters7")
    filters8 = ad.Variable("filters8")
    biases6 = ad.Variable("biases6")
    biases7 = ad.Variable("biases7")
    biases8 = ad.Variable("biases8")
    y_ = ad.Variable(name="y_")

    x_val = np.linspace(0, 0.001, 10*3*224*224).reshape((10, 3, 224, 224))
    filters_val = [np.ones((64, 3, 3, 3))*0.001]
    filters_val.append(np.ones((64, 64, 3, 3))*0.001)
    filters_val.append(np.ones((128, 64, 3, 3))*0.001)
    filters_val.append(np.ones((128, 128, 3, 3))*0.001)
    filters_val.append(np.ones((256, 128, 3, 3))*0.001)
    filters_val.append(np.ones((256, 256, 3, 3))*0.001)
    filters_val.append(np.ones((256, 256, 3, 3))*0.001)
    filters_val.append(np.ones((512, 256, 3, 3))*0.001)
    filters_val.append(np.ones((512, 512, 3, 3))*0.001)
    filters_val.append(np.ones((512, 512, 3, 3))*0.001)
    filters_val.append(np.ones((512, 512, 3, 3))*0.001)
    filters_val.append(np.ones((512, 512, 3, 3))*0.001)
    filters_val.append(np.ones((512, 512, 3, 3))*0.001)
    filters_val.append(np.ones((512*7*7, 4096)) * 0.001)
    filters_val.append(np.ones((4096, 4096)) * 0.001)
    filters_val.append(np.ones((4096, n_class)) * 0.001)
    biases_val = [np.ones((1, 4096))* 0.001]
    biases_val.append(np.ones((1, 4096)) * 0.001)
    biases_val.append(np.ones((1, n_class)) * 0.001)
    y_val = np.zeros((10, n_class))

    ctx = ndarray.gpu(0)
    for i in range(16):
        filters_val[i] = ndarray.array(filters_val[i], ctx)

    # conv 1
    conv1_1 = ad.convolution_2d_forward_op(inputs, filters1_1, "NCHW", "SAME", 1, 1)
    bn1_1 = ad.bn_forward_op(conv1_1, "NCHW", "pre_activation")
    act1_1 = ad.activation_forward_op(bn1_1, "NCHW", "relu")

    conv1_2 = ad.convolution_2d_forward_op(act1_1, filters1_2, "NCHW", "SAME", 1, 1)
    bn1_2 = ad.bn_forward_op(conv1_2, "NCHW", "pre_activation")
    act1_2 = ad.activation_forward_op(bn1_2, "NCHW", "relu")
    pool1 = ad.pooling_2d_forward_op(act1_2, "NCHW", "max", 0, 0, 2, 2, 2, 2)

    # conv 2
    conv2_1 = ad.convolution_2d_forward_op(pool1, filters2_1, "NCHW", "SAME", 1, 1)
    bn2_1 = ad.bn_forward_op(conv2_1, "NCHW", "pre_activation")
    act2_1 = ad.activation_forward_op(bn2_1, "NCHW", "relu")
    conv2_2 = ad.convolution_2d_forward_op(act2_1, filters2_2, "NCHW", "SAME", 1, 1)
    bn2_2 = ad.bn_forward_op(conv2_2, "NCHW", "pre_activation")
    act2_2 = ad.activation_forward_op(bn2_2, "NCHW", "relu")
    pool2 = ad.pooling_2d_forward_op(act2_2, "NCHW", "max", 0, 0, 2, 2, 2, 2)

    # conv 3
    conv3_1 = ad.convolution_2d_forward_op(pool2, filters3_1, "NCHW", "SAME", 1, 1)
    bn3_1 = ad.bn_forward_op(conv3_1, "NCHW", "pre_activation")
    act3_1 = ad.activation_forward_op(bn3_1, "NCHW", "relu")
    conv3_2 = ad.convolution_2d_forward_op(act3_1, filters3_2, "NCHW", "SAME", 1, 1)
    bn3_2 = ad.bn_forward_op(conv3_2, "NCHW", "pre_activation")
    act3_2 = ad.activation_forward_op(bn3_2, "NCHW", "relu")
    conv3_3 = ad.convolution_2d_forward_op(act3_2, filters3_3, "NCHW", "SAME", 1, 1)
    bn3_3 = ad.bn_forward_op(conv3_3, "NCHW", "pre_activation")
    act3_3 = ad.activation_forward_op(bn3_3, "NCHW", "relu")
    pool3 = ad.pooling_2d_forward_op(act3_3, "NCHW", "max", 0, 0, 2, 2, 2, 2)

    # conv 4
    conv4_1 = ad.convolution_2d_forward_op(pool3, filters4_1, "NCHW", "SAME", 1, 1)
    bn4_1 = ad.bn_forward_op(conv4_1, "NCHW", "pre_activation")
    act4_1 = ad.activation_forward_op(bn4_1, "NCHW", "relu")
    conv4_2 = ad.convolution_2d_forward_op(act4_1, filters4_2, "NCHW", "SAME", 1, 1)
    bn4_2 = ad.bn_forward_op(conv4_2, "NCHW", "pre_activation")
    act4_2 = ad.activation_forward_op(bn4_2, "NCHW", "relu")
    conv4_3 = ad.convolution_2d_forward_op(act4_2, filters4_3, "NCHW", "SAME", 1, 1)
    bn4_3 = ad.bn_forward_op(conv4_3, "NCHW", "pre_activation")
    act4_3 = ad.activation_forward_op(bn4_3, "NCHW", "relu")
    pool4 = ad.pooling_2d_forward_op(act4_3, "NCHW", "max", 0, 0, 2, 2, 2, 2)

    # conv 5
    conv5_1 = ad.convolution_2d_forward_op(pool4, filters5_1, "NCHW", "SAME", 1, 1)
    bn5_1 = ad.bn_forward_op(conv5_1, "NCHW", "pre_activation")
    act5_1 = ad.activation_forward_op(bn5_1, "NCHW", "relu")
    conv5_2 = ad.convolution_2d_forward_op(act5_1, filters5_2, "NCHW", "SAME", 1, 1)
    bn5_2 = ad.bn_forward_op(conv5_2, "NCHW", "pre_activation")
    act5_2 = ad.activation_forward_op(bn5_2, "NCHW", "relu")
    conv5_3 = ad.convolution_2d_forward_op(act5_2, filters5_3, "NCHW", "SAME", 1, 1)
    bn5_3 = ad.bn_forward_op(conv5_3, "NCHW", "pre_activation")
    act5_3 = ad.activation_forward_op(bn5_3, "NCHW", "relu")
    pool5 = ad.pooling_2d_forward_op(act5_3, "NCHW", "max", 0, 0, 2, 2, 2, 2)

    # fc6
    pool5_flat = ad.flatten_op(pool5)
    mul6 = ad.matmul_op(pool5_flat, filters6)
    add6 = ad.add_op(mul6, biases6)
    bn6 = ad.fullybn_forward_op(add6, "NCHW")
    fc6 = ad.fullyactivation_forward_op(bn6, "NCHW", "relu")
    drop6 = ad.fullydropout_forward_op(fc6, "NCHW", 0.5)

    # fc7
    mul7 = ad.matmul_op(drop6, filters7)
    add7 = ad.add_op(mul7, biases7)
    bn7 = ad.fullybn_forward_op(add7, "NCHW")
    fc7 = ad.fullyactivation_forward_op(bn7, "NCHW", "relu")
    drop7 = ad.fullydropout_forward_op(fc7, "NCHW", 0.5)

    #fc8
    mul8 = ad.matmul_op(drop7, filters8)
    add8 = ad.add_op(mul8, biases8)
    fc8 = ad.fullyactivation_forward_op(add8, "NCHW", "softmax")

    loss = ad.l2loss_op(fc8, y_)

    grad = ad.gradients(loss, [filters1_1, filters1_2, filters2_1, filters2_2,
                               filters3_1, filters3_2, filters3_3,
                               filters4_1, filters4_2, filters4_3,
                               filters5_1, filters5_2, filters5_3,
                               filters6, filters7])
    executor = ad.Executor(grad + [loss, y_], ctx=ctx)

    aph = 1.0e-6  # learning rate
    for i in range(20):

        select = random.randint(0, n-1)
        tmp_x_val = x_val[select]
        tmp_x_val = np.expand_dims(tmp_x_val, 0)
        tmp_y_val = y_val[select]
        tmp_y_val = np.expand_dims(tmp_y_val, 0)
        grad_val = executor.run(feed_dict={
            inputs: tmp_x_val, y_: tmp_y_val,
            filters1_1: filters_val[0], filters1_2: filters_val[1],
            filters2_1: filters_val[2], filters2_2: filters_val[3],
            filters3_1: filters_val[4], filters3_2: filters_val[5],
            filters3_3: filters_val[6], filters4_1: filters_val[7],
            filters4_2: filters_val[8], filters4_3: filters_val[9],
            filters5_1: filters_val[10], filters5_2: filters_val[11],
            filters5_3: filters_val[12], filters6: filters_val[13],
            filters7: filters_val[14], filters8: filters_val[15],
            biases6: biases_val[0], biases7: biases_val[1],
            biases8: biases_val[2]})


        # apply one SGD step to each of the 15 filter tensors whose gradients were requested
        for j in range(15):
            sgd_update_gpu(filters_val[j], grad_val[j], aph)

    print(filters_val[0].asnumpy())
    return filters_val
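sgd_update_gpu is used above as an in-place parameter update on the GPU arrays. Judging by the explicit update in Example #3 (filters_val = filters_val - aph * filters_grad_val), it is assumed to perform a plain SGD step; a CPU-side numpy sketch of that step, for reference only:

import numpy as np

def sgd_update_numpy(param, grad, lr):
    # vanilla SGD step, mirroring the assumed GPU update: param <- param - lr * grad
    param -= lr * grad
    return param

w = np.ones((4, 4)) * 0.001
g = np.full((4, 4), 0.5)
sgd_update_numpy(w, g, lr=1.0e-6)  # same aph as the training loop above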
Example #7
def ResNet152(inputs, n_class):
    X = ad.Placeholder("X")
    y_ = ad.Placeholder("y_")
    W1 = ad.Variable("W1")
    W6 = ad.Variable("W6")
    b6 = ad.Variable("b6")
    W7 = ad.Variable("W7")
    b7 = ad.Variable("b7")
    keep_prob = ad.Placeholder("keep_prob")

    #conv1
    conv1 = ad.convolution_2d_forward_op(X, W1, "NCHW", "VALID", 2, 2)
    bn1 = ad.bn_forward_op(conv1, "NCHW", "pre_activation")
    act1 = ad.activation_forward_op(bn1, "NCHW", "relu")
    pool1 = ad.pooling_2d_forward_op(act1, "NCHW", "max", 0, 0, 2, 2, 3, 3)

    #conv2_x
    conv2, dict2 = convolutional_block(inputs=pool1,
                                       kernel_size=3,
                                       in_filter=64,
                                       out_filters=[64, 64, 256],
                                       block_name="2a",
                                       stride=1)
    iden2_1, dict2_1 = identity_block(inputs=conv2,
                                      kernel_size=3,
                                      in_filter=256,
                                      out_filters=[64, 64, 256],
                                      block_name="2b",
                                      stride=1)
    iden2_2, dict2_2 = identity_block(iden2_1, 3, 256, [64, 64, 256], "2c", 1)

    #conv3_x
    conv3, dict3 = convolutional_block(iden2_2, 3, 256, [128, 128, 512], "3a",
                                       1)
    iden3_1, dict3_1 = identity_block(conv3, 3, 512, [128, 128, 512], "3b", 1)
    iden3_2, dict3_2 = identity_block(iden3_1, 3, 512, [128, 128, 512], "3c",
                                      1)
    iden3_3, dict3_3 = identity_block(iden3_2, 3, 512, [128, 128, 512], "3d",
                                      1)
    iden3_4, dict3_4 = identity_block(iden3_3, 3, 512, [128, 128, 512], "3e",
                                      1)
    iden3_5, dict3_5 = identity_block(iden3_4, 3, 512, [128, 128, 512], "3f",
                                      1)
    iden3_6, dict3_6 = identity_block(iden3_5, 3, 512, [128, 128, 512], "3g",
                                      1)
    iden3_7, dict3_7 = identity_block(iden3_6, 3, 512, [128, 128, 512], "3h",
                                      1)

    #conv4_x
    conv4, dict4 = convolutional_block(iden3_7, 3, 512, [256, 256, 1024], "4a",
                                       1)
    iden4_1, dict4_1 = identity_block(conv4, 3, 1024, [256, 256, 1024], "4b",
                                      1)
    iden4_2, dict4_2 = identity_block(iden4_1, 3, 1024, [256, 256, 1024], "4c",
                                      1)
    iden4_3, dict4_3 = identity_block(iden4_2, 3, 1024, [256, 256, 1024], "4d",
                                      1)
    iden4_4, dict4_4 = identity_block(iden4_3, 3, 1024, [256, 256, 1024], "4e",
                                      1)
    iden4_5, dict4_5 = identity_block(iden4_4, 3, 1024, [256, 256, 1024], "4f",
                                      1)
    iden4_6, dict4_6 = identity_block(iden4_5, 3, 1024, [256, 256, 1024], "4f",
                                      1)
    iden4_7, dict4_7 = identity_block(iden4_6, 3, 1024, [256, 256, 1024], "4f",
                                      1)
    iden4_8, dict4_8 = identity_block(iden4_7, 3, 1024, [256, 256, 1024], "4f",
                                      1)
    iden4_9, dict4_9 = identity_block(iden4_8, 3, 1024, [256, 256, 1024], "4f",
                                      1)
    iden4_10, dict4_10 = identity_block(iden4_9, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_11, dict4_11 = identity_block(iden4_10, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_12, dict4_12 = identity_block(iden4_11, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_13, dict4_13 = identity_block(iden4_12, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_14, dict4_14 = identity_block(iden4_13, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_15, dict4_15 = identity_block(iden4_14, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_16, dict4_16 = identity_block(iden4_15, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_17, dict4_17 = identity_block(iden4_16, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_18, dict4_18 = identity_block(iden4_17, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_19, dict4_19 = identity_block(iden4_18, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_20, dict4_20 = identity_block(iden4_19, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_21, dict4_21 = identity_block(iden4_20, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_22, dict4_22 = identity_block(iden4_21, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_23, dict4_23 = identity_block(iden4_22, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_24, dict4_24 = identity_block(iden4_23, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_25, dict4_25 = identity_block(iden4_24, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_26, dict4_26 = identity_block(iden4_25, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_27, dict4_27 = identity_block(iden4_26, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_28, dict4_28 = identity_block(iden4_27, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_29, dict4_29 = identity_block(iden4_28, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_30, dict4_30 = identity_block(iden4_29, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_31, dict4_31 = identity_block(iden4_30, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_32, dict4_32 = identity_block(iden4_31, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_33, dict4_33 = identity_block(iden4_32, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_34, dict4_34 = identity_block(iden4_33, 3, 1024, [256, 256, 1024],
                                        "4f", 1)
    iden4_35, dict4_35 = identity_block(iden4_34, 3, 1024, [256, 256, 1024],
                                        "4f", 1)

    #conv5_x
    conv5, dict5 = convolutional_block(iden4_35, 3, 1024, [512, 512, 2048],
                                       "5a", 1)
    iden5_1, dict5_1 = identity_block(conv5, 3, 2048, [512, 512, 2048], "5b",
                                      1)
    iden5_2, dict5_2 = identity_block(iden5_1, 3, 2048, [512, 512, 2048], "5c",
                                      1)
    pool5 = ad.pooling_2d_forward_op(iden5_2, "NCHW", "mean", 0, 0, 1, 1, 2, 2)

    pool5_flat = ad.flatten_op(pool5)
    mul6 = ad.matmul_op(pool5_flat, W6)
    add6 = ad.add_op(mul6, b6)
    act6 = ad.fullyactivation_forward_op(add6, "NCHW", "relu")
    drop_out = ad.fullydropout_forward_op(act6, "NCHW", keep_prob)
    mul7 = ad.matmul_op(drop_out, W7)
    add7 = ad.add_op(mul7, b7)
    act7 = ad.fullyactivation_forward_op(add7, "NCHW", "softmax")

    loss = ad.softmaxcrossentropy_op(act7, y_)

    X_val = np.random.normal(0, 0.5, (10, 3, 230, 230))
    W1_val = np.random.normal(0, 0.5, (64, 3, 7, 7))
    W6_val = np.random.normal(0, 0.5, (7 * 7 * 2048, 50))
    b6_val = np.random.normal(0, 0.5, (10, 50))
    W7_val = np.random.normal(0, 0.5, (50, 6))
    b7_val = np.random.normal(0, 0.5, (10, 6))
    y_val = np.random.normal(0, 0.5, (10, 6))

    aph = 0.001
    t = train.Adam_minimize(loss, aph)
    feed_dict = {W1: W1_val, W6: W6_val, W7: W7_val, b6: b6_val, b7: b7_val}
    feed_dict.update(dict2)
    feed_dict.update(dict2_1)
    feed_dict.update(dict2_2)
    feed_dict.update(dict3)
    feed_dict.update(dict3_1)
    feed_dict.update(dict3_2)
    feed_dict.update(dict3_3)
    feed_dict.update(dict3_4)
    feed_dict.update(dict3_5)
    feed_dict.update(dict3_6)
    feed_dict.update(dict3_7)
    feed_dict.update(dict4)
    feed_dict.update(dict4_1)
    feed_dict.update(dict4_2)
    feed_dict.update(dict4_3)
    feed_dict.update(dict4_4)
    feed_dict.update(dict4_5)
    feed_dict.update(dict4_6)
    feed_dict.update(dict4_7)
    feed_dict.update(dict4_8)
    feed_dict.update(dict4_9)
    feed_dict.update(dict4_10)
    feed_dict.update(dict4_11)
    feed_dict.update(dict4_12)
    feed_dict.update(dict4_13)
    feed_dict.update(dict4_14)
    feed_dict.update(dict4_15)
    feed_dict.update(dict4_16)
    feed_dict.update(dict4_17)
    feed_dict.update(dict4_18)
    feed_dict.update(dict4_19)
    feed_dict.update(dict4_20)
    feed_dict.update(dict4_21)
    feed_dict.update(dict4_22)
    feed_dict.update(dict4_23)
    feed_dict.update(dict4_24)
    feed_dict.update(dict4_25)
    feed_dict.update(dict4_26)
    feed_dict.update(dict4_27)
    feed_dict.update(dict4_28)
    feed_dict.update(dict4_29)
    feed_dict.update(dict4_30)
    feed_dict.update(dict4_31)
    feed_dict.update(dict4_32)
    feed_dict.update(dict4_33)
    feed_dict.update(dict4_34)
    feed_dict.update(dict4_35)
    feed_dict.update(dict5)
    feed_dict.update(dict5_1)
    feed_dict.update(dict5_2)

    # t.init_Variable(feed_dict)
    # t.run({X: X_val, y_: y_val})
    # print(t.get_Variable_node_to_val_map()[W1_val].asnumpy())
    node_list = list(feed_dict.keys())
    executor = ad.Executor(node_list, ctx=ndarray.gpu(0))
    executor.run(feed_dict)
Example #8
def vgg16(num_epochs=10):

    n_class = 10

    X = ad.Placeholder("inputs")
    y_ = ad.Placeholder("y_")
    filters1_1 = ad.Variable("filters1_1")
    filters1_2 = ad.Variable("filters1_2")
    filters2_1 = ad.Variable("filters2_1")
    filters2_2 = ad.Variable("filters2_2")
    filters3_1 = ad.Variable("filters3_1")
    filters3_2 = ad.Variable("filters3_2")
    filters3_3 = ad.Variable("filters3_3")
    filters4_1 = ad.Variable("filters4_1")
    filters4_2 = ad.Variable("filters4_2")
    filters4_3 = ad.Variable("filters4_3")
    filters5_1 = ad.Variable("filters5_1")
    filters5_2 = ad.Variable("filters5_2")
    filters5_3 = ad.Variable("filters5_3")
    filters6 = ad.Variable("filters6")
    filters7 = ad.Variable("filters7")
    filters8 = ad.Variable("filters8")
    b1_1 = ad.Variable("b1_1")
    b1_2 = ad.Variable("b1_2")
    b2_1 = ad.Variable("b2_1")
    b2_2 = ad.Variable("b2_2")
    b3_1 = ad.Variable("b3_1")
    b3_2 = ad.Variable("b3_2")
    b3_3 = ad.Variable("b3_3")
    b4_1 = ad.Variable("b4_1")
    b4_2 = ad.Variable("b4_2")
    b4_3 = ad.Variable("b4_3")
    b5_1 = ad.Variable("b5_1")
    b5_2 = ad.Variable("b5_2")
    b5_3 = ad.Variable("b5_3")
    b6 = ad.Variable("b6")
    b7 = ad.Variable("b7")
    b8 = ad.Variable("b8")

    # conv 1
    conv1_1 = ad.conv2withbias(X, filters1_1, b1_1, "NCHW", "SAME", 1, 1)
    bn1_1 = ad.bn_forward_op(conv1_1, "NCHW", "pre_activation")
    act1_1 = ad.activation_forward_op(bn1_1, "NCHW", "relu")

    conv1_2 = ad.conv2withbias(act1_1, filters1_2, b1_2, "NCHW", "SAME", 1, 1)
    bn1_2 = ad.bn_forward_op(conv1_2, "NCHW", "pre_activation")
    act1_2 = ad.activation_forward_op(bn1_2, "NCHW", "relu")
    pool1 = ad.pooling_2d_forward_op(act1_2, "NCHW", "max", 0, 0, 2, 2, 2, 2)

    # conv 2
    conv2_1 = ad.conv2withbias(pool1, filters2_1, b2_1, "NCHW", "SAME", 1, 1)
    bn2_1 = ad.bn_forward_op(conv2_1, "NCHW", "pre_activation")
    act2_1 = ad.activation_forward_op(bn2_1, "NCHW", "relu")
    conv2_2 = ad.conv2withbias(act2_1, filters2_2, b2_2, "NCHW", "SAME", 1, 1)
    bn2_2 = ad.bn_forward_op(conv2_2, "NCHW", "pre_activation")
    act2_2 = ad.activation_forward_op(bn2_2, "NCHW", "relu")
    pool2 = ad.pooling_2d_forward_op(act2_2, "NCHW", "max", 0, 0, 2, 2, 2, 2)

    # conv 3
    conv3_1 = ad.conv2withbias(pool2, filters3_1, b3_1, "NCHW", "SAME", 1, 1)
    bn3_1 = ad.bn_forward_op(conv3_1, "NCHW", "pre_activation")
    act3_1 = ad.activation_forward_op(bn3_1, "NCHW", "relu")
    conv3_2 = ad.conv2withbias(act3_1, filters3_2, b3_2, "NCHW", "SAME", 1, 1)
    bn3_2 = ad.bn_forward_op(conv3_2, "NCHW", "pre_activation")
    act3_2 = ad.activation_forward_op(bn3_2, "NCHW", "relu")
    conv3_3 = ad.conv2withbias(act3_2, filters3_3, b3_3, "NCHW", "SAME", 1, 1)
    bn3_3 = ad.bn_forward_op(conv3_3, "NCHW", "pre_activation")
    act3_3 = ad.activation_forward_op(bn3_3, "NCHW", "relu")
    pool3 = ad.pooling_2d_forward_op(act3_3, "NCHW", "max", 0, 0, 2, 2, 2, 2)

    # conv 4
    conv4_1 = ad.conv2withbias(pool3, filters4_1, b4_1, "NCHW", "SAME", 1, 1)
    bn4_1 = ad.bn_forward_op(conv4_1, "NCHW", "pre_activation")
    act4_1 = ad.activation_forward_op(bn4_1, "NCHW", "relu")
    conv4_2 = ad.conv2withbias(act4_1, filters4_2, b4_2, "NCHW", "SAME", 1, 1)
    bn4_2 = ad.bn_forward_op(conv4_2, "NCHW", "pre_activation")
    act4_2 = ad.activation_forward_op(bn4_2, "NCHW", "relu")
    conv4_3 = ad.conv2withbias(act4_2, filters4_3, b4_3, "NCHW", "SAME", 1, 1)
    bn4_3 = ad.bn_forward_op(conv4_3, "NCHW", "pre_activation")
    act4_3 = ad.activation_forward_op(bn4_3, "NCHW", "relu")
    pool4 = ad.pooling_2d_forward_op(act4_3, "NCHW", "max", 0, 0, 2, 2, 2, 2)

    # conv 5
    conv5_1 = ad.conv2withbias(pool4, filters5_1, b5_1, "NCHW", "SAME", 1, 1)
    bn5_1 = ad.bn_forward_op(conv5_1, "NCHW", "pre_activation")
    act5_1 = ad.activation_forward_op(bn5_1, "NCHW", "relu")
    conv5_2 = ad.conv2withbias(act5_1, filters5_2, b5_2, "NCHW", "SAME", 1, 1)
    bn5_2 = ad.bn_forward_op(conv5_2, "NCHW", "pre_activation")
    act5_2 = ad.activation_forward_op(bn5_2, "NCHW", "relu")
    conv5_3 = ad.conv2withbias(act5_2, filters5_3, b5_3, "NCHW", "SAME", 1, 1)
    bn5_3 = ad.bn_forward_op(conv5_3, "NCHW", "pre_activation")
    act5_3 = ad.activation_forward_op(bn5_3, "NCHW", "relu")
    pool5 = ad.pooling_2d_forward_op(act5_3, "NCHW", "max", 0, 0, 2, 2, 2, 2)

    # fc6
    pool5_flat = ad.flatten_op(pool5)
    fc6 = ad.dense(pool5_flat, filters6, b6)
    bn6 = ad.fullybn_forward_op(fc6, "NCHW")
    act6 = ad.fullyactivation_forward_op(bn6, "NCHW", "relu")
    drop6 = ad.fullydropout_forward_op(act6, "NCHW", 0.5)

    # fc7
    fc7 = ad.dense(drop6, filters7, b7)
    bn7 = ad.fullybn_forward_op(fc7, "NCHW")
    act7 = ad.fullyactivation_forward_op(bn7, "NCHW", "relu")
    drop7 = ad.fullydropout_forward_op(act7, "NCHW", 0.5)

    #fc8
    fc8 = ad.dense(drop7, filters8, b8)
    bn8 = ad.fullybn_forward_op(fc8, "NCHW")
    act8 = ad.fullyactivation_forward_op(bn8, "NCHW", "softmax")

    loss = ad.softmaxcrossentropy_op(bn8, y_)

    # grad = ad.gradients(loss, [filters1_1, filters1_2, filters2_1, filters2_2, filters3_1, filters3_2, filters3_3
    #                             , filters4_1, filters4_2, filters4_3, filters5_1, filters5_2, filters5_3
    #                             , filters6, filters7])
    # executor = ad.Executor([grad[0], grad[1], grad[2], grad[3], grad[4], grad[5], grad[6], grad[7], grad[8], grad[9]
    #                            , grad[10], grad[11], grad[12], grad[13], grad[14], loss, y_], ctx=ctx)

    train_x, train_y, test_x, test_y = prepare_data()
    n_train_batches = train_x.shape[0] // batch_size
    n_test_batches = test_x.shape[0] // batch_size

    X_val = np.empty(shape=(batch_size, 3, 32, 32), dtype=np.float32)
    y_val = np.empty(shape=(batch_size, n_class), dtype=np.float32)
    filters_val = [np.random.normal(0.0, 0.1, (64, 3, 3, 3))]
    filters_val.append(np.random.normal(0.0, 0.1, (64, 64, 3, 3)))
    filters_val.append(np.random.normal(0.0, 0.1, (128, 64, 3, 3)))
    filters_val.append(np.random.normal(0.0, 0.1, (128, 128, 3, 3)))
    filters_val.append(np.random.normal(0.0, 0.1, (256, 128, 3, 3)))
    filters_val.append(np.random.normal(0.0, 0.1, (256, 256, 3, 3)))
    filters_val.append(np.random.normal(0.0, 0.1, (256, 256, 3, 3)))
    filters_val.append(np.random.normal(0.0, 0.1, (512, 256, 3, 3)))
    filters_val.append(np.random.normal(0.0, 0.1, (512, 512, 3, 3)))
    filters_val.append(np.random.normal(0.0, 0.1, (512, 512, 3, 3)))
    filters_val.append(np.random.normal(0.0, 0.1, (512, 512, 3, 3)))
    filters_val.append(np.random.normal(0.0, 0.1, (512, 512, 3, 3)))
    filters_val.append(np.random.normal(0.0, 0.1, (512, 512, 3, 3)))
    filters_val.append(np.random.normal(0.0, 0.1, (512 * 1 * 1, 4096)))
    filters_val.append(np.random.normal(0.0, 0.1, (4096, 4096)) * 0.001)
    filters_val.append(np.random.normal(0.0, 0.1, (4096, n_class)) * 0.001)
    b_val = [np.ones(64) * 0.1]
    b_val.append(np.ones(64) * 0.1)
    b_val.append(np.ones(128) * 0.1)
    b_val.append(np.ones(128) * 0.1)
    b_val.append(np.ones(256) * 0.1)
    b_val.append(np.ones(256) * 0.1)
    b_val.append(np.ones(256) * 0.1)
    b_val.append(np.ones(512) * 0.1)
    b_val.append(np.ones(512) * 0.1)
    b_val.append(np.ones(512) * 0.1)
    b_val.append(np.ones(512) * 0.1)
    b_val.append(np.ones(512) * 0.1)
    b_val.append(np.ones(512) * 0.1)
    b_val.append(np.ones(4096) * 0.1)
    b_val.append(np.ones(4096) * 0.1)
    b_val.append(np.ones(n_class) * 0.1)

    # ctx = ndarray.gpu(0)
    # for i in range(16):
    #     filters_val[i] = ndarray.array(filters_val[i], ctx)
    # for i in range(16):
    #     b_val[i] = ndarray.array(b_val[i], ctx)

    aph = 0.0001
    t = train.Adam_minimize(loss, aph)
    t.init_Variable({
        filters1_1: filters_val[0],
        filters1_2: filters_val[1],
        filters2_1: filters_val[2],
        filters2_2: filters_val[3],
        filters3_1: filters_val[4],
        filters3_2: filters_val[5],
        filters3_3: filters_val[6],
        filters4_1: filters_val[7],
        filters4_2: filters_val[8],
        filters4_3: filters_val[9],
        filters5_1: filters_val[10],
        filters5_2: filters_val[11],
        filters5_3: filters_val[12],
        filters6: filters_val[13],
        filters7: filters_val[14],
        filters8: filters_val[15],
        b1_1: b_val[0],
        b1_2: b_val[1],
        b2_1: b_val[2],
        b2_2: b_val[3],
        b3_1: b_val[4],
        b3_2: b_val[5],
        b3_3: b_val[6],
        b4_1: b_val[7],
        b4_2: b_val[8],
        b4_3: b_val[9],
        b5_1: b_val[10],
        b5_2: b_val[11],
        b5_3: b_val[12],
        b6: b_val[13],
        b7: b_val[14],
        b8: b_val[15]
    })

    for i in range(num_epochs):
        print("epoch %d" % i)
        X_val = np.empty(shape=(0, 3, 32, 32), dtype=np.float32)
        y_val = np.empty(shape=(0, n_class), dtype=np.float32)
        select = random.randint(0, n_train_batches)
        # X_val = train_x[select]
        # y_val = train_y[select]
        for j in range(batch_size):
            select = random.randint(0, n_train_batches)
            temp_train_x = train_x[select]
            temp_train_x = temp_train_x[np.newaxis, :]
            temp_train_y = train_y[select]
            temp_train_y = temp_train_y[np.newaxis, :]
            X_val = np.append(X_val, temp_train_x, axis=0)
            y_val = np.append(y_val, temp_train_y, axis=0)
        # print("train", X_val.shape)
        # print("train", y_val.shape)
        t.run({X: X_val, y_: y_val})

        if (i + 1) % 30 == 0:
            X_test_val = np.empty(shape=(100, 3, 32, 32), dtype=np.float32)
            y_test_val = np.empty(shape=(100, n_class), dtype=np.float32)
            correct_predictions = []
            for minibatch_index in range(n_test_batches):
                minibatch_start = minibatch_index * batch_size
                minibatch_end = (minibatch_index + 1) * batch_size
                X_test_val[:] = test_x[minibatch_start:minibatch_end]
                y_test_val[:] = test_y[minibatch_start:minibatch_end]
                # print(X_test_val.shape)
                # print(y_test_val.shape)
                feed_dict = {X: X_test_val, y_: y_test_val}
                dict_Variable = t.get_Variable_node_to_val_map()
                feed_dict.update(dict_Variable)
                y_predicted = t.run_get_nodelist_once(feed_dict,
                                                      [act8])[act8].asnumpy()
                correct_prediction = np.equal(
                    np.argmax(y_test_val, 1),
                    np.argmax(y_predicted, 1)).astype(float)
                correct_predictions.extend(correct_prediction)
            accuracy = np.mean(correct_predictions)
            print("validation set accuracy=%f" % accuracy)

    return filters_val
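The evaluation loop above scores accuracy by comparing the argmax of the one-hot labels against the argmax of the softmax outputs. The same computation in isolation, on a small made-up batch:

import numpy as np

# toy batch: 3 samples, 4 classes, one-hot labels vs. predicted probabilities
y_true = np.array([[0, 1, 0, 0],
                   [0, 0, 0, 1],
                   [1, 0, 0, 0]], dtype=np.float32)
y_pred = np.array([[0.1, 0.7, 0.1, 0.1],
                   [0.3, 0.3, 0.2, 0.2],   # wrong: predicts class 0, label is 3
                   [0.6, 0.2, 0.1, 0.1]], dtype=np.float32)

correct = np.equal(np.argmax(y_true, 1), np.argmax(y_pred, 1)).astype(float)
print(np.mean(correct))  # 0.666...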