Example #1
def discriminator(in_ch=3, n_down_layers=4):
    # PatchGAN-style discriminator: n_down_layers downsampling conv blocks,
    # then a final 1-channel conv with no normalization or activation.
    with dnn.Model(opt_gen()) as m:
        base = 64
        h = m.cbr(in_ch,
                  base,
                  bn=False,
                  sample='down',
                  activation=dnn.Node.leaky_relu,
                  dropout=False,
                  noise=True)
        for _ in range(1, n_down_layers):
            h = h.cbr(base,
                      base * 2,
                      bn=True,
                      sample='down',
                      activation=dnn.Node.leaky_relu,
                      dropout=False,
                      noise=True)
            base *= 2
        h = h.cbr(base,
                  1,
                  bn=False,
                  sample='none',
                  activation=None,
                  dropout=False,
                  noise=True)
        return m
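For comparison, the same stack can be sketched in plain Keras. This is a
minimal sketch, assuming cbr() wraps a strided convolution; the kernel size
(4) and stride (2) are assumptions, since cbr()'s internals are not shown.

import tensorflow as tf

def keras_discriminator(in_ch=3, n_down_layers=4, base=64):
    inp = tf.keras.Input(shape=(None, None, in_ch))
    h, ch = inp, base
    for i in range(n_down_layers):
        h = tf.keras.layers.Conv2D(ch, 4, strides=2, padding='same')(h)
        if i > 0:  # the first block above uses bn=False
            h = tf.keras.layers.BatchNormalization()(h)
        h = tf.keras.layers.LeakyReLU(0.2)(h)
        ch *= 2
    # Final block: single output channel, no normalization, no activation.
    out = tf.keras.layers.Conv2D(1, 3, strides=1, padding='same')(h)
    return tf.keras.Model(inp, out)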
Example #2
def feature_extraction(model, data, out_layer_num=-2, out_layer_name=None):
    '''
    Extract features from a pre-trained model.
    data -- input samples to run through the model
    out_layer_num -- index of the layer to extract features from
    out_layer_name -- name of the layer to extract features from; when
                      given, it takes precedence over out_layer_num
    '''
    if out_layer_name is None:
        intermediate_layer_model = dnn.Model(
            inputs=model.layers[0].input,
            outputs=model.layers[out_layer_num].output)
    else:
        intermediate_layer_model = dnn.Model(
            inputs=model.layers[0].input,
            outputs=model.get_layer(out_layer_name).output)

    # Run the truncated model to get the intermediate activations.
    return intermediate_layer_model.predict(data)
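A minimal usage sketch, assuming dnn re-exports the Keras Model class; the
model and layer names below are hypothetical:

import numpy as np
from tensorflow import keras

model = keras.Sequential([
    keras.layers.Dense(64, activation='relu', input_shape=(20,)),
    keras.layers.Dense(32, activation='relu', name='penultimate'),
    keras.layers.Dense(10, activation='softmax'),
])
data = np.random.rand(5, 20).astype('float32')

feats = feature_extraction(model, data)  # features from model.layers[-2]
feats_by_name = feature_extraction(model, data, out_layer_name='penultimate')
print(feats.shape)  # (5, 32)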
Example #3
def feature_extraction(model, data, out_layer_num=-2):
    '''
    Extract features from a pre-trained model whose encoder is wrapped as a
    nested sub-model (model.layers[1]).
    out_layer_num -- index of the sub-model layer to extract features from
    '''
    intermediate_layer_model = dnn.Model(
        inputs=model.layers[1].layers[1].input,
        outputs=model.layers[1].layers[out_layer_num].output)
    intermediate_output = intermediate_layer_model.predict(data)
    return intermediate_output
Example #4
def vgg16F_fe(img_input):
    from keras.layers import Flatten, Dense
    from keras_vggface.vggface import VGGFace

    # net = preprocess_input(img_input)
    # Pre-trained VGGFace backbone without its classification head.
    vgg_model = VGGFace(include_top=False,
                        input_tensor=img_input,
                        pooling='avg')
    # vgg_model.layers.pop()
    last_layer = vgg_model.get_layer('pool5').output
    x = Flatten(name='flatten')(last_layer)
    x = Dense(1024, activation='relu', trainable=True)(x)
    x = Dense(512, activation='relu', trainable=True)(x)
    model = dnn.Model(inputs=vgg_model.input, outputs=x)
    return model.layers[-1].output
Example #5
def dqn(train, lr, history_size, hidden_size, action_size):
    # Flattened size of the final conv feature map (depends on input resolution).
    fc = hidden_size * 704

    with dnn.Model(chainer.optimizers.Adam(alpha=lr) if train else None) as m:
        flat = m\
        .conv2d(history_size, hidden_size, 8, 4).relu()\
        .conv2d(hidden_size, hidden_size * 2, 4, 2).relu()\
        .conv2d(hidden_size * 2, hidden_size * 2, 3, 1).relu()\
        .reshape((1, fc))

        # Dueling heads: advantage A(s, a), state value V(s) tiled over actions,
        # and the mean advantage, also tiled over actions.
        a = flat.dense(fc, 512).relu().dense(512, action_size)
        v = flat.dense(fc, 512).relu().dense(512, 1).tile((1, action_size))
        av = a.average(1, keepdims=True).tile((1, action_size))

        # Q(s, a) = A(s, a) + V(s) - mean_a A(s, a)
        m.gate('merge', (lambda a, v, av, o: a + v - av), a, v, av)
        m.build('dqn_prediction.py', 'dqn')
        return m
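The three branches implement the dueling-DQN aggregation
Q(s, a) = V(s) + A(s, a) - mean_a A(s, a). A NumPy check of the merge gate's
arithmetic (the tiling mirrors the .tile() calls above):

import numpy as np

action_size = 4
a = np.random.randn(1, action_size)                            # advantage head
v = np.tile(np.random.randn(1, 1), (1, action_size))           # value head, tiled
av = np.tile(a.mean(axis=1, keepdims=True), (1, action_size))  # mean advantage

q = a + v - av  # same formula as the 'merge' gate above
# The mean of Q over actions recovers V, which keeps the two heads identifiable.
assert np.allclose(q.mean(axis=1, keepdims=True), v[:, :1])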
Example #6
def resblk_9(in_ch, out_ch):
    # Encoder -> 9 residual blocks -> decoder: the 9-block generator layout
    # commonly used for larger images in CycleGAN-style models.
    with dnn.Model(opt_gen()) as m:
        m.cbr(in_ch, 32, bn=True, sample='none-7')\
              .cbr(32, 64, bn=True, sample='down')\
              .cbr(64, 128, bn=True, sample='down')\
              .resblk(128, bn=True)\
              .resblk(128, bn=True)\
              .resblk(128, bn=True)\
              .resblk(128, bn=True)\
              .resblk(128, bn=True)\
              .resblk(128, bn=True)\
              .resblk(128, bn=True)\
              .resblk(128, bn=True)\
              .resblk(128, bn=True)\
              .cbr(128, 64, bn=True, sample='up')\
              .cbr(64, 32, bn=True, sample='up')\
              .cbr(32, out_ch, bn=True, sample='none-7', activation=dnn.Node.tanh)
        return m
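resblk() is not shown; a residual block in this position is conventionally two
3x3 convolutions with normalization and a skip connection. A hedged Keras
sketch of that assumed structure:

import tensorflow as tf

def resblk(x, ch=128):
    # Assumed layout: conv-BN-ReLU-conv-BN plus identity skip; the actual
    # resblk() internals may differ.
    h = tf.keras.layers.Conv2D(ch, 3, padding='same')(x)
    h = tf.keras.layers.BatchNormalization()(h)
    h = tf.keras.layers.ReLU()(h)
    h = tf.keras.layers.Conv2D(ch, 3, padding='same')(h)
    h = tf.keras.layers.BatchNormalization()(h)
    return tf.keras.layers.Add()([x, h])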
Example #7
    net = dnn.Dense(100, activation='relu', name='feat_ext')(net)
    return net


def classifier(model_input, nclass, l2_weight=0.0):
    net = dnn.Dense(100, activation='relu', name='cl')(model_input)
    net = dnn.Dense(nclass, activation='softmax', name='cl_output')(net)
    return net


#%% Feature extraction as a keras model
main_input = dnn.Input(shape=(n_dim[1], ))
fe = feat_ext(main_input)
fe_size = fe.get_shape().as_list()[1]
# feature extraction model
fe_model = dnn.Model(main_input, fe, name='fe_model')
# Classifier model as a keras model
cl_input = dnn.Input(shape=(fe_size, ))  # input dim for the classifier
net = classifier(cl_input, n_class)
# classifier keras model
cl_model = dnn.Model(cl_input, net, name='classifier')
#%% source model
ms = dnn.Input(shape=(n_dim[1], ))
fes = feat_ext(ms)
nets = classifier(fes, n_class)
source_model = dnn.Model(ms, nets)
source_model.compile(optimizer=optim,
                     loss='categorical_crossentropy',
                     metrics=['accuracy'])
source_model.fit(source_traindata,
Example #8
def get_model(data_set='mnist',
              main_input=None,
              nclass=None,
              classifier=True,
              drop_out=None,
              reconstruction=False,
              unsupervised_reconstruction=False):
    '''
    Returns the keras model for the given dataset.
    When reconstruction is True, the model contains the decoder branch;
    when classifier is False, the model contains only the feature
    extraction layers.
    '''
    def L2_norm(vects):
        '''
        Compute the summed squared L2 distances between two matrices.
        '''
        x, y = vects
        ndim = x.shape
        if len(ndim) == 4:
            x = dnn.K.reshape(x, (-1, ndim[1] * ndim[2] * ndim[3]))
            y = dnn.K.reshape(y, (-1, ndim[1] * ndim[2] * ndim[3]))
        # ||x - y||^2 expanded as ||x||^2 + ||y||^2 - 2 * x . y
        dist = dnn.K.reshape(dnn.K.sum(dnn.K.square(x), 1), (-1, 1))
        dist += dnn.K.reshape(dnn.K.sum(dnn.K.square(y), 1), (1, -1))
        dist -= 2.0 * dnn.K.dot(x, dnn.K.transpose(y))
        return dnn.K.sum(dist)

    def L2_norm_output_shape(shapes):
        shape1, shape2 = shapes
        return (shape1[0], shape2[0])

    def switch_layer(vects):
        # Keep the reconstruction from the branch with the smaller error: when
        # the supervised error x >= the unsupervised error y, use the
        # unsupervised reconstruction, otherwise the supervised one.
        x, y, s_recon, us_recon = vects
        reconst = dnn.K.switch(dnn.K.greater_equal(x, y), us_recon, s_recon)
        return reconst

    if data_set == 'mnist':

        if not reconstruction:
            #fes = mnist_featext(main_input)
            fes_model = dnn.Model(main_input,
                                  mnist_featext(main_input),
                                  name='encoder')  # feature ext model
            fes = fes_model(main_input)
            if classifier:
                net = labelpredictor(fes, nclass)  # classifier
                model = dnn.Model(main_input, net)
            else:
                model = fes_model
        else:
            fes, out_shape = mnist_featext(main_input, out_shape=True)
            fes_model = dnn.Model(main_input, fes, name='encoder')
            fes = fes_model(main_input)
            net = labelpredictor(fes, nclass, drop_out=drop_out)

            reconst = mnist_reconst(fes, out_shape)
            model = dnn.Model(main_input, [net, reconst])

        return model

    elif data_set == 'cifar10':

        if not reconstruction:
            fes_model = dnn.Model(main_input,
                                  cifar10_featext(main_input),
                                  name='encoder')  # feature ext model
            fes = fes_model(main_input)
            if classifier:
                net = labelpredictor(fes, nclass, drop_out=drop_out)
                model = dnn.Model(main_input, net)
            else:
                model = fes_model
        elif reconstruction and not unsupervised_reconstruction:
            fes, out_shape = cifar10_featext(main_input, out_shape=True)
            fes_model = dnn.Model(main_input, fes, name='encoder')
            fes = fes_model(main_input)

            net = labelpredictor(fes, nclass, drop_out=drop_out)
            reconst = cifar10_reconst(fes, out_shape)
            model = dnn.Model(main_input, [net, reconst])

        elif reconstruction and unsupervised_reconstruction:
            sup_fes, out_shape = cifar10_featext(main_input, out_shape=True)
            sup_fes_model = dnn.Model(main_input, sup_fes, name='sup_encoder')
            sup_fes = sup_fes_model(main_input)

            net = labelpredictor(sup_fes, nclass, drop_out=drop_out)
            sup_reconst = cifar10_reconst(sup_fes, out_shape)

            unsup_fes, out_shape = cifar10_featext(main_input,
                                                   out_shape=True,
                                                   name_prefix='unsup_')
            unsup_fes_model = dnn.Model(main_input,
                                        unsup_fes,
                                        name='unsup_encoder')
            unsup_fes = unsup_fes_model(main_input)

            unsup_reconst = cifar10_reconst(unsup_fes,
                                            out_shape,
                                            name_prefix='unsup_')

            stop_grad_sup = dnn.Lambda(lambda x: dnn.K.stop_gradient(x))(
                sup_reconst)
            stop_grad_unsup = dnn.Lambda(lambda x: dnn.K.stop_gradient(x))(
                unsup_reconst)
            total_reconst_unsup = dnn.keras.layers.add(
                [unsup_reconst, stop_grad_sup])
            total_reconst_sup = dnn.keras.layers.add(
                [stop_grad_unsup, sup_reconst])
            sup_diff = dnn.Lambda(L2_norm)([sup_reconst, main_input])
            unsup_diff = dnn.Lambda(L2_norm)([unsup_reconst, main_input])

            tot_recon = dnn.Lambda(switch_layer)(
                [sup_diff, unsup_diff, total_reconst_sup, total_reconst_unsup])
            # total_reconst= dnn.K.switch(dnn.K.greater_equal(unsup_diff,sup_diff), total_reconst_unsup,  total_reconst_sup)
            # # total_reconst = dnn.Lambda(lambda x: x)(total_reconst)
            #con_cat = dnn.keras.layers.concatenate([sup_reconst, unsup_reconst], axis=0)
            model = dnn.Model(main_input, [net, tot_recon])

        return model
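L2_norm above relies on the expansion ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x.y
to get all pairwise squared distances from a single matrix product. A
standalone NumPy check of the same computation:

import numpy as np

x = np.random.randn(3, 5)
y = np.random.randn(4, 5)

dist = (np.sum(x**2, axis=1)[:, None]      # ||x_i||^2 as a column
        + np.sum(y**2, axis=1)[None, :]    # ||y_j||^2 as a row
        - 2.0 * x @ y.T)                   # cross terms

ref = ((x[:, None, :] - y[None, :, :])**2).sum(-1)  # brute-force pairwise
assert np.allclose(dist, ref)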
Example #9
        img = dnn.to_cpu(x.data[batch])
        img = img.reshape(calc_width_height(img.size))
        ax.cla()
        ax.imshow(img)
        ax.set_title("frame {}".format(itr))
        return x

    return self.funcs('plot', proc)


# Try attaching an extra method to Node
dnn.Node.plot = plot

# Build the model
batch = 1
model = dnn.Model(optim.Adam())
with model.module as m:
    m\
    .conv2d(1, 1, 3, 1, 1).prelu()\
    .conv2d(1, 1, 3, 1, 1).prelu()\
    .conv2d(1, 1, 3, 1, 1).prelu()\
    .reshape((batch, 1024))\
    .dense(1024, 512).prelu()\
    .plot(0, ax_in_pred)\
    .dense(512, 512).prelu()\
    .dense(512, 1024).prelu()\
    .reshape((batch, 1, 32, 32)).prelu()\
    .conv2d(1, 1, 3, 1, 1).prelu()\
    .conv2d(1, 1, 3, 1, 1).prelu()\
    .conv2d(1, 1, 3, 1, 1)
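The dnn.Node.plot = plot assignment is ordinary Python monkey-patching: a
function defined at module level becomes a method of every Node instance. A
standalone illustration of the pattern (Node here is a stand-in class, not
the dnn one):

class Node:
    def __init__(self, value):
        self.value = value

def double(self):
    # Defined outside the class...
    return Node(self.value * 2)

# ...and attached afterwards, exactly like dnn.Node.plot = plot above.
Node.double = double

print(Node(21).double().value)  # 42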