Example #1
    def olliNetwork(self):
        self.model = models.Sequential()

        self.model.add(
            layers.Conv2D(64, (5, 5),
                          activation='relu',
                          input_shape=(48, 48, 1)))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(128, (4, 4), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Flatten())
        self.model.add(layers.Dense(3072, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(128, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(3, activation='softmax'))
Example #2
def classifier_model():

    model = models.Sequential()
    model.add(
        layers.Conv2D(NUM_FILTERS_1, [3, 3],
                      strides=(2, 2),
                      padding='same',
                      activation='relu',
                      input_shape=(28, 28, 1),
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    model.add(
        layers.Conv2D(NUM_FILTERS_2, [3, 3],
                      strides=(2, 2),
                      padding='same',
                      activation='relu',
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    model.add(layers.Flatten())
    model.add(
        layers.Dense(NUM_CLASSES,
                     kernel_initializer=initializers.glorot_normal(),
                     bias_initializer=initializers.Zeros()))

    return model
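
The function above returns an uncompiled model whose final Dense layer has no activation, so its outputs are logits. A minimal usage sketch, assuming the constants NUM_FILTERS_1, NUM_FILTERS_2 and NUM_CLASSES are defined elsewhere in the original script; x_train and y_train are hypothetical arrays with the expected (28, 28, 1) shape:

import numpy as np
from tensorflow.keras import losses  # assumed import style

model = classifier_model()
model.compile(optimizer='adam',
              loss=losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])

x_train = np.random.rand(256, 28, 28, 1).astype('float32')  # hypothetical data
y_train = np.random.randint(0, 10, size=(256,))              # hypothetical integer labels
model.fit(x_train, y_train, batch_size=32, epochs=1)
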
Example #3
    def model_definition(self):
        self.model = models.Sequential()

        self.model.add(
            layers.Conv2D(64, (5, 5),
                          activation='relu',
                          input_shape=self.input_shape))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.Conv2D(64, (5, 5), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Conv2D(128, (3, 3), activation='relu'))
        self.model.add(layers.AveragePooling2D())
        self.model.add(layers.Conv2D(128, (1, 1), activation='relu'))
        self.model.add(layers.MaxPooling2D(pool_size=(2, 2)))
        self.model.add(layers.Dropout(0.25))

        self.model.add(layers.Flatten())
        self.model.add(layers.Dense(3072, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(128, activation='relu'))
        self.model.add(layers.Dropout(0.5))
        self.model.add(layers.Dense(3, activation='softmax'))

        adamax = optimizers.Adamax()
        self.model.compile(loss='categorical_crossentropy',
                           optimizer=adamax,
                           metrics=['acc'])
Example #4
def VGG6(inputs, n_class=10):
    # Block 1
    x = layers.Conv2D(32, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv1')(inputs)
    x = layers.Conv2D(32, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block1_conv2')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)

    # Block 2
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv1')(x)
    x = layers.Conv2D(64, (3, 3),
                      activation='relu',
                      padding='same',
                      name='block2_conv2')(x)
    x = layers.MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)

    x = layers.Flatten(name='flatten')(x)
    x = layers.Dense(512, activation='relu', name='fc1')(x)
    features = layers.Dense(512, activation='relu', name='fc2')(x)
    outputs = layers.Dense(n_class, activation='softmax',
                           name='predictions')(features)

    return outputs
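
VGG6 only returns the output tensor, so it still has to be wrapped in a Model before it can be compiled. A short sketch, assuming 32x32 RGB inputs and one-hot labels (neither is fixed by the function itself):

inputs = layers.Input(shape=(32, 32, 3))  # assumed input size
model = models.Model(inputs, VGG6(inputs, n_class=10), name='vgg6')
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
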
Example #5
def create_model(dropout_rate):
    model = models.Sequential()
    conv_base = applications.VGG16(include_top=False,
                                   input_shape=(150, 150, 3),
                                   weights='imagenet')
    conv_base.trainable = False
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(layers.Dropout(dropout_rate))
    model.add(layers.Dense(256, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    return model
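
With the VGG16 base frozen and a single sigmoid output, this reads as binary classification where only the new head is trained. A hedged training sketch; train_generator and validation_generator stand in for whatever 150x150 RGB data pipeline the original code used, and the learning rate is illustrative:

model = create_model(dropout_rate=0.5)
model.compile(optimizer=optimizers.RMSprop(learning_rate=1e-4),
              loss='binary_crossentropy',
              metrics=['accuracy'])
model.fit(train_generator,
          validation_data=validation_generator,
          epochs=5)
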
Example #6
def create_model(dropout_rate):
    model = models.Sequential()
    model.add(
        layers.Conv2D(32, (3, 3), activation='relu',
                      input_shape=(150, 150, 3)))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Conv2D(128, (3, 3), activation='relu'))
    model.add(layers.MaxPool2D((2, 2)))
    model.add(layers.Flatten())
    model.add(layers.Dropout(dropout_rate))
    model.add(layers.Dense(512, activation='relu'))
    model.add(layers.Dense(1, activation='sigmoid'))
    return model
Example #7
def classifier_model():  #Building of the CNN
    model = models.Sequential()

    model.add(
        layers.Conv2D(1, [2, 40],
                      input_shape=(1, 40, 173),
                      strides=(1, 1),
                      padding='valid',
                      activation='relu'))
    #
    #model.add(layers.MaxPool1D(pool_size=2, strides=2, padding='valid'))
    #
    model.add(
        layers.Conv2D(1, [2, 20],
                      strides=(1, 1),
                      padding='valid',
                      activation='relu',
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    #
    # model.add(layers.MaxPool1D(pool_size=2, strides=2, padding='valid'))
    #
    #
    model.add(
        layers.Conv2D(1, [2, 10],
                      strides=(3, 3),
                      padding='valid',
                      activation='relu',
                      kernel_initializer=initializers.glorot_normal(),
                      bias_initializer=initializers.Zeros()))
    #
    # model.add(layers.MaxPool1D(pool_size=2, strides=2, padding='valid'))
    #
    model.add(layers.Flatten())
    #
    model.add(
        layers.Dense(1,
                     kernel_initializer=initializers.glorot_normal(),
                     bias_initializer=initializers.Zeros()))

    print(model.summary())
    return model
Example #8
def generate_model():
    conv_base = tf.contrib.keras.applications.VGG16(include_top=False,
                                                    weights='imagenet',
                                                    input_shape=(IMG_WIDTH,
                                                                 IMG_HEIGHT,
                                                                 3))
    conv_base.trainable = True
    model = models.Sequential()
    model.add(conv_base)
    model.add(layers.Flatten())
    model.add(
        layers.Dense(HIDDEN_SIZE,
                     name='dense',
                     kernel_regularizer=regularizers.l2(L2_LAMBDA)))
    model.add(layers.Dropout(rate=0.3, name='dropout'))
    model.add(
        layers.Dense(NUM_CLASSES, activation='softmax', name='dense_output'))
    model = multi_gpu_model(model, gpus=NUM_GPUS)
    print(model.summary())
    return model
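
generate_model returns the multi-GPU wrapper uncompiled; since dense_output already applies softmax, a natural next step is categorical cross-entropy on one-hot labels. A sketch under that assumption (the optimizer and learning rate are illustrative, not taken from the original code, and the Keras optimizers module is assumed to be imported):

model = generate_model()
model.compile(optimizer=optimizers.Adam(learning_rate=1e-4),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
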
Example #9
def classifier_model():  #Building of the CNN
    model = models.Sequential()

    model.add(
        layers.Conv2D(1, [2, 10],
                      input_shape=(40, 44, 1),
                      strides=(1, 1),
                      padding='valid',
                      activation='relu',
                      data_format='channels_last'))

    model.add(
        layers.MaxPooling2D(pool_size=(2, 2),
                            strides=None,
                            padding='valid',
                            data_format=None))

    model.add(
        layers.Conv2D(1, [2, 6],
                      strides=(1, 1),
                      padding='valid',
                      activation='relu'))

    model.add(
        layers.MaxPooling2D(pool_size=(2, 2),
                            strides=None,
                            padding='valid',
                            data_format=None))

    model.add(
        layers.Conv2D(1, [2, 3],
                      strides=(1, 1),
                      padding='valid',
                      activation='relu'))

    model.add(layers.Flatten())

    model.add(layers.Dense(1))

    print(model.summary())
    return model
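
The final Dense(1) layer has no activation, which suggests a single-value regression target. A minimal compile-and-fit sketch under that assumption, with hypothetical arrays shaped like the (40, 44, 1) input:

import numpy as np

model = classifier_model()
model.compile(optimizer='adam', loss='mse', metrics=['mae'])

x = np.random.rand(64, 40, 44, 1).astype('float32')  # hypothetical features
y = np.random.rand(64, 1).astype('float32')          # hypothetical targets
model.fit(x, y, batch_size=16, epochs=1)
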
Example #10
def classifier_model():  # linear stack of layers

    ################################################################################
    ############################    YOUR CODE HERE   ################################

    # Define a Sequential model
    model = models.Sequential()
    # The first two layers are convolutional layers. For the first layer, we must specify the input shape.
    model.add(
        layers.Conv2D(
            NUM_FILTERS_1,
            3,
            strides=(2, 2),
            activation='relu',
            padding='same',
            input_shape=(28, 28, 1)))  # add a 2-D convolutional layer
    # A conv layer is defined by its number of filters and its kernel size.
    # The stride of 2 is the amount the filter shifts at each step, so the
    # 28x28 input is downsampled to 14x14.
    # padding='same' pads the borders so no input pixels are dropped.
    # The activation is ReLU; only the first layer needs the input shape.
    # Kernel and bias initializers are left at the Keras defaults
    # (glorot_uniform kernels, zero biases).

    model.add(
        layers.Conv2D(NUM_FILTERS_2,
                      3,
                      strides=(2, 2),
                      activation='relu',
                      padding='same'))
    # Also a convolutional layer: 3x3 filters, 2x2 strides.
    # The input shape is inferred from the previous layer, so it is not specified.
    # The final layer is a dense (fully connected) layer, so the 2-D feature maps
    # must first be flattened into a 1-D vector.
    model.add(layers.Flatten())
    model.add(layers.Dense(10))
    return model
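
To check the shape bookkeeping described in the comments (28 -> 14 -> 7 with stride 2 and 'same' padding), the model can be built with assumed filter counts and its summary printed:

NUM_FILTERS_1, NUM_FILTERS_2 = 32, 64  # assumed values for illustration

model = classifier_model()
model.summary()
# Expected shapes: (None, 14, 14, 32) after the first conv,
# (None, 7, 7, 64) after the second, (None, 3136) after Flatten,
# and (None, 10) logits from the final Dense layer.
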
Example #11
    def _create_discriminator(self):
        inputs = layers.Input(shape=(HEIGHT, WIDTH, CHANNELS))

        x = layers.Conv2D(128, kernel_size=3)(inputs)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(128, kernel_size=4, strides=2)(x)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(128, kernel_size=4, strides=2)(x)
        x = layers.LeakyReLU()(x)

        x = layers.Conv2D(128, kernel_size=4, strides=2)(x)
        x = layers.LeakyReLU()(x)

        x = layers.Flatten()(x)
        x = layers.Dropout(self.args.dropout)(x)
        outputs = layers.Dense(1, activation='sigmoid')(x)

        discriminator = models.Model(inputs, outputs)
        return discriminator
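
In a typical GAN training loop the discriminator is compiled on its own with a binary cross-entropy loss before being frozen inside the combined model. A sketch of that step as it might appear in another method of the same class; the optimizer settings are common GAN defaults, not values from the original code, and the Keras optimizers module is assumed to be imported:

        discriminator = self._create_discriminator()
        discriminator.compile(
            optimizer=optimizers.Adam(learning_rate=2e-4, beta_1=0.5),
            loss='binary_crossentropy',
            metrics=['accuracy'])
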
Example #12
###############################
### Define the model with the Keras API ###
###############################
model = models.Sequential()

# Add layers to the model
model.add(
    layers.Conv2D(
        32,
        kernel_size=(5, 5),  # add a conv layer: depth 32, 5x5 filters
        activation=tf.nn.relu,  # use the relu activation
        input_shape=(img_rows, img_cols, 1)))  # the input shape is that of one image: (28, 28, 1)
model.add(layers.MaxPooling2D(pool_size=(2, 2)))  # add a pooling layer with a 2x2 window
model.add(layers.Conv2D(64, (5, 5), activation=tf.nn.relu))  # add a conv layer (shorthand form)
model.add(layers.MaxPooling2D(pool_size=(2, 2)))  # add a pooling layer
model.add(layers.Flatten())  # flatten the pooled output to feed the fully connected layers
model.add(layers.Dense(500, activation=tf.nn.relu))  # fully connected layer with 500 units and relu
model.add(layers.Dense(10,
                       activation=tf.nn.softmax))  # final 10-way output with softmax

# Define the loss, optimizer, and evaluation metric
model.compile(loss=keras.losses.categorical_crossentropy,
              optimizer=keras.optimizers.SGD(),
              metrics=['accuracy'])

# Train the model
model.fit(
    X_train,
    Y_train,  # training set
    batch_size=128,  # batch size
    epochs=10)  # number of training epochs
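
After training, the same high-level API can evaluate the model on a held-out set; X_test and Y_test below are hypothetical arrays shaped like the training data:

test_loss, test_acc = model.evaluate(X_test, Y_test, batch_size=128)
print('test loss:', test_loss, 'test accuracy:', test_acc)
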
Example #13
for stage in range(1, nb_res_stages+1):
    x = residual_layer(x, 
                       nb_in_filters=sz_ly0_filters,
                       nb_bottleneck_filters=nb_res_filters,
                       filter_sz=sz_res_filters, 
                       stage=stage,
                       reg=0.0)

# Complete the last resnet layer
x = layers.BatchNormalization(axis=-1, name='bnF')(x)
x = layers.Activation('relu', name='reluF')(x)


# Final layer
x = layers.AveragePooling2D((16, 16), name='avg_pool')(x)
x = layers.Flatten(name='flat')(x)
x = layers.Dense(10, activation='softmax', name='fc1')(x)

model1 = models.Model(inputs=img_input, outputs=x)
model1.summary()


my_datagen = preprocessing.image.ImageDataGenerator()


# Augmentation for training
train_datagen = preprocessing.image.ImageDataGenerator(
    rescale=1. / 255,
    shear_range=0.2,
    zoom_range=0.2,
    width_shift_range=0.1)  # randomly shift images horizontally (fraction of total width)
Example #14
    def create_network(**kwargs):
        model_input = L.Input(shape=(17, 9, 9))
        print(model_input)

        convolution_path = L.Convolution2D(
            input_shape=(),
            filters=64,
            kernel_size=3,
            activation='linear',
            padding='same',
            kernel_regularizer=R.l2(.0001),
            bias_regularizer=R.l2(.0001))(model_input)
        print(convolution_path)
        convolution_path = L.BatchNormalization(
            beta_regularizer=R.l2(.0001),
            gamma_regularizer=R.l2(.0001))(convolution_path)
        print(convolution_path)
        convolution_path = L.Activation('relu')(convolution_path)

        convolution_path = L.Convolution2D(
            input_shape=(),
            filters=128,
            kernel_size=3,
            activation='linear',
            padding='same',
            kernel_regularizer=R.l2(.0001),
            bias_regularizer=R.l2(.0001))(convolution_path)
        print(convolution_path)
        convolution_path = L.BatchNormalization(
            beta_regularizer=R.l2(.0001),
            gamma_regularizer=R.l2(.0001))(convolution_path)
        print(convolution_path)
        convolution_path = L.Activation('relu')(convolution_path)

        print('------------- policy -------------------')
        # policy head
        policy_path = L.Convolution2D(
            input_shape=(),
            filters=2,
            kernel_size=1,
            activation='linear',
            padding='same',
            kernel_regularizer=R.l2(.0001),
            bias_regularizer=R.l2(.0001))(convolution_path)
        print(policy_path)
        policy_path = L.BatchNormalization(
            beta_regularizer=R.l2(.0001),
            gamma_regularizer=R.l2(.0001))(policy_path)
        policy_path = L.Activation('relu')(policy_path)
        print(policy_path)
        policy_path = L.Flatten()(policy_path)
        print(policy_path)
        policy_path = L.Dense((9 * 9) + 1,
                              kernel_regularizer=R.l2(.0001),
                              bias_regularizer=R.l2(.0001))(policy_path)
        policy_output = L.Activation('softmax')(policy_path)
        print('policy_output', policy_output)

        print('------------- value -------------------')

        # value head
        value_path = L.Convolution2D(
            input_shape=(),
            filters=1,
            kernel_size=1,
            activation='linear',
            padding='same',
            kernel_regularizer=R.l2(.0001),
            bias_regularizer=R.l2(.0001))(convolution_path)
        print(value_path)
        value_path = L.BatchNormalization(
            beta_regularizer=R.l2(.0001),
            gamma_regularizer=R.l2(.0001))(value_path)
        value_path = L.Activation('relu')(value_path)
        print(value_path)
        value_path = L.Flatten()(value_path)
        print(value_path)
        value_path = L.Dense(256,
                             kernel_regularizer=R.l2(.0001),
                             bias_regularizer=R.l2(.0001))(value_path)
        print(value_path)
        value_path = L.Activation('relu')(value_path)
        print(value_path)
        value_path = L.Dense(1,
                             kernel_regularizer=R.l2(.0001),
                             bias_regularizer=R.l2(.0001))(value_path)
        print(value_path)
        value_output = L.Activation('tanh')(value_path)
        print(value_path)

        return M.Model(inputs=[model_input],
                       outputs=[policy_output, value_output])
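
A two-headed network like this is usually trained with one loss per output, e.g. categorical cross-entropy for the policy head and mean squared error for the value head. A compile sketch under that assumption; the losses, weights and optimizer are illustrative, not taken from the original code, and create_network (defined as a method above) is assumed to be reachable from where it is called:

model = create_network()
model.compile(optimizer='adam',
              loss=['categorical_crossentropy', 'mean_squared_error'],  # [policy head, value head]
              loss_weights=[1.0, 1.0])
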
Example #15
    def create_model(self, img_shape, num_class):
        concat_axis = 3
        inputs = layers.Input(shape=img_shape)

        conv1 = layers.Conv2D(32, (3, 3),
                              activation='relu',
                              padding='same',
                              name='conv1_1')(inputs)
        conv1 = layers.Conv2D(32, (3, 3), activation='relu',
                              padding='same')(conv1)
        pool1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)
        conv2 = layers.Conv2D(64, (3, 3), activation='relu',
                              padding='same')(pool1)
        conv2 = layers.Conv2D(64, (3, 3), activation='relu',
                              padding='same')(conv2)
        pool2 = layers.MaxPooling2D(pool_size=(2, 2))(conv2)

        conv3 = layers.Conv2D(128, (3, 3), activation='relu',
                              padding='same')(pool2)
        conv3 = layers.Conv2D(128, (3, 3), activation='relu',
                              padding='same')(conv3)
        pool3 = layers.MaxPooling2D(pool_size=(2, 2))(conv3)

        conv4 = layers.Conv2D(256, (3, 3), activation='relu',
                              padding='same')(pool3)
        conv4 = layers.Conv2D(256, (3, 3), activation='relu',
                              padding='same')(conv4)
        pool4 = layers.MaxPooling2D(pool_size=(2, 2))(conv4)

        conv5 = layers.Conv2D(512, (3, 3), activation='relu',
                              padding='same')(pool4)
        #        conv5 = layers.concatenate([conv5, conv5_2], axis=concat_axis)

        conv5 = layers.Conv2D(512, (3, 3), activation='relu',
                              padding='same')(conv5)
        flat = layers.Flatten()(conv5)
        dense0 = layers.Dense(4096)(flat)
        #        dense1=layers.Dropout(0.5)(dense0)
        dense = layers.Dense(4096)(dense0)
        out1 = layers.Dense(4)(dense)

        up_conv5 = layers.UpSampling2D(size=(2, 2))(conv5)
        ch, cw = self.get_crop_shape(conv4, up_conv5)
        crop_conv4 = layers.Cropping2D(cropping=(ch, cw))(conv4)
        up6 = layers.concatenate([up_conv5, crop_conv4], axis=concat_axis)
        conv6 = layers.Conv2D(256, (3, 3), activation='relu',
                              padding='same')(up6)
        conv6 = layers.Conv2D(256, (3, 3), activation='relu',
                              padding='same')(conv6)

        up_conv6 = layers.UpSampling2D(size=(2, 2))(conv6)
        ch, cw = self.get_crop_shape(conv3, up_conv6)
        crop_conv3 = layers.Cropping2D(cropping=(ch, cw))(conv3)
        up7 = layers.concatenate([up_conv6, crop_conv3], axis=concat_axis)
        conv7 = layers.Conv2D(128, (3, 3), activation='relu',
                              padding='same')(up7)
        conv7 = layers.Conv2D(128, (3, 3), activation='relu',
                              padding='same')(conv7)

        up_conv7 = layers.UpSampling2D(size=(2, 2))(conv7)
        ch, cw = self.get_crop_shape(conv2, up_conv7)
        crop_conv2 = layers.Cropping2D(cropping=(ch, cw))(conv2)
        up8 = layers.concatenate([up_conv7, crop_conv2], axis=concat_axis)
        conv8 = layers.Conv2D(64, (3, 3), activation='relu',
                              padding='same')(up8)
        conv8 = layers.Conv2D(64, (3, 3), activation='relu',
                              padding='same')(conv8)

        up_conv8 = layers.UpSampling2D(size=(2, 2))(conv8)
        ch, cw = self.get_crop_shape(conv1, up_conv8)
        crop_conv1 = layers.Cropping2D(cropping=(ch, cw))(conv1)
        up9 = layers.concatenate([up_conv8, crop_conv1], axis=concat_axis)
        conv9 = layers.Conv2D(32, (3, 3), activation='relu',
                              padding='same')(up9)
        conv9 = layers.Conv2D(32, (3, 3), activation='relu',
                              padding='same')(conv9)

        ch, cw = self.get_crop_shape(inputs, conv9)
        conv9 = layers.ZeroPadding2D(padding=((ch[0], ch[1]), (cw[0],
                                                               cw[1])))(conv9)
        conv10 = layers.Conv2D(num_class, (1, 1))(conv9)

        model = models.Model(inputs=inputs, outputs=[conv10, out1])

        return model
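
The returned model has two outputs, the per-pixel conv10 map and the 4-way out1 vector, so compiling it takes one loss per output. A hedged sketch as it might appear in another method of the same class; the input shape, the loss pairing and the weighting are assumptions (conv10 is linear, hence the from_logits flag), and losses refers to tf.keras.losses:

        model = self.create_model(img_shape=(256, 256, 1), num_class=1)  # assumed shapes
        model.compile(optimizer='adam',
                      loss=[losses.BinaryCrossentropy(from_logits=True),  # conv10 outputs logits
                            'mse'],                                       # out1: assumed regression head
                      loss_weights=[1.0, 0.5])                            # illustrative weighting
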
Example #16
# sys.exit()

# download and use the ImageNet weights for the convolutional base (InceptionV3 here)
conv_base = tf.contrib.keras.applications.InceptionV3(
    include_top=False,
    weights='imagenet',
    input_shape=(IMG_WIDTH, IMG_HEIGHT, 3)  # 3 for the RGB channels
)
conv_base.summary()
# print info about the conv base

# NETWORK
# Final classification layers on top of the conv base
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())

model.add(
    layers.Dense(512,
                 name='dense_1',
                 kernel_regularizer=regularizers.l2(L2_LAMBDA)))
model.add(layers.Activation(activation='relu', name='activation_1'))

model.add(layers.Dense(NUM_CLASSES, activation='softmax', name='dense_output'))
model.summary()

conv_base.trainable = False
model.summary()
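
With the conv base frozen, only the new dense head is trainable. A plausible compile step, assuming one-hot labels (dense_output already applies softmax), an illustrative learning rate, and that the Keras optimizers module is imported:

model.compile(optimizer=optimizers.RMSprop(learning_rate=2e-5),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
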


def load_batch(file_list):
Example #17
    def create_network(**kwargs):
        """construct a convolutional neural network with Residual blocks.
        Arguments are the same as with the default CNNPolicy network, except the default
        number of layers is 20 plus a new n_skip parameter

        Keyword Arguments:
        - input_dim:             depth of features to be processed by first layer (default 17)
        - board:                 width of the go board to be processed (default 19)
        - filters_per_layer:     number of filters used on every layer (default 256)
        - layers:                number of residual blocks (default 19)
        - filter_width:          width of filter
                                 Must be odd.
        """
        defaults = {
            "input_dim": 17,
            "board": 9,
            "filters_per_layer": 64,
            "layers": 9,
            "filter_width": 3
        }

        # copy defaults, but override with anything in kwargs
        params = defaults
        params.update(kwargs)

        # create the network using Keras' functional API,
        model_input = L.Input(shape=(params["input_dim"], params["board"], params["board"]))
        print(model_input)
        # create first layer
        convolution_path = L.Convolution2D(
            input_shape=(),
            filters=params["filters_per_layer"],
            kernel_size=params["filter_width"],
            activation='linear',
            padding='same',
            kernel_regularizer=R.l2(.0001),
            bias_regularizer=R.l2(.0001))(model_input)
        print(convolution_path)
        convolution_path = L.BatchNormalization(
            beta_regularizer=R.l2(.0001),
            gamma_regularizer=R.l2(.0001))(convolution_path)
        print(convolution_path)
        convolution_path = L.Activation('relu')(convolution_path)
        def add_resnet_unit(path, **params):
            block_input = path
            # add Conv2D
            path = L.Convolution2D(
                filters=params["filters_per_layer"],
                kernel_size=params["filter_width"],
                activation='linear',
                padding='same',
                kernel_regularizer=R.l2(.0001),
                bias_regularizer=R.l2(.0001))(path)
            print(path)
            path = L.BatchNormalization(
                    beta_regularizer=R.l2(.0001),
                    gamma_regularizer=R.l2(.0001))(path)
            print(path)
            path = L.Activation('relu')(path)
            print(path)
            path = L.Convolution2D(
                filters=params["filters_per_layer"],
                kernel_size=params["filter_width"],
                activation='linear',
                padding='same',
                kernel_regularizer=R.l2(.0001),
                bias_regularizer=R.l2(.0001))(path)
            print(path)
            path = L.BatchNormalization(
                    beta_regularizer=R.l2(.0001),
                    gamma_regularizer=R.l2(.0001))(path)
            print(path)
            path = L.Add()([block_input, path])
            print(path)
            path = L.Activation('relu')(path)
            print(path)
            return path

        # create all other layers
        for _ in range(params['layers']):
            convolution_path = add_resnet_unit(convolution_path, **params)

        print('------------- policy -------------------')
        # policy head
        policy_path = L.Convolution2D(
            input_shape=(),
            filters=2,
            kernel_size=1,
            activation='linear',
            padding='same',
            kernel_regularizer=R.l2(.0001),
            bias_regularizer=R.l2(.0001))(convolution_path)
        print(policy_path)
        policy_path = L.BatchNormalization(
                beta_regularizer=R.l2(.0001),
                gamma_regularizer=R.l2(.0001))(policy_path)
        policy_path = L.Activation('relu')(policy_path)
        print(policy_path)
        policy_path = L.Flatten()(policy_path)
        print(policy_path)
        policy_path = L.Dense(
                params["board"]*params["board"]+1,
                kernel_regularizer=R.l2(.0001),
                bias_regularizer=R.l2(.0001))(policy_path)
        policy_output = L.Activation('softmax')(policy_path)
        print('policy_output', policy_output)

        print('------------- value -------------------')
        
        # value head
        value_path = L.Convolution2D(
            input_shape=(),
            filters=1,
            kernel_size=1,
            activation='linear',
            padding='same',
            kernel_regularizer=R.l2(.0001),
            bias_regularizer=R.l2(.0001))(convolution_path)
        print(value_path)
        value_path = L.BatchNormalization(
                beta_regularizer=R.l2(.0001),
                gamma_regularizer=R.l2(.0001))(value_path)
        value_path = L.Activation('relu')(value_path)
        print(value_path)
        value_path = L.Flatten()(value_path)
        print(value_path)
        value_path = L.Dense(
                256,
                kernel_regularizer=R.l2(.0001),
                bias_regularizer=R.l2(.0001))(value_path)
        print(value_path)
        value_path = L.Activation('relu')(value_path)
        print(value_path)
        value_path = L.Dense(
                1,
                kernel_regularizer=R.l2(.0001),
                bias_regularizer=R.l2(.0001))(value_path)
        print(value_path)
        value_output = L.Activation('tanh')(value_path)
        print(value_path)

        return M.Model(inputs=[model_input], outputs=[policy_output, value_output])