from tensorflow.keras.layers import (Input, Dense, Conv2D, Conv2DTranspose, Flatten,
                                     Reshape, Activation, BatchNormalization,
                                     LeakyReLU, concatenate)
from tensorflow.keras.models import Model
from tensorflow.keras.initializers import RandomNormal as RN, Constant


def D_model(Height, Width, channel=3):
    base = 32
    inputs = Input((Height, Width, channel))
    x = Conv2D(base, (5, 5), padding='same', strides=(2,2), name='d_conv1',
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(inputs)
    x = LeakyReLU(alpha=0.2)(x)
    #x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='d_conv1_bn')(x)
    x = Conv2D(base*2, (5, 5), padding='same', strides=(2,2), name='d_conv2',
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = LeakyReLU(alpha=0.2)(x)
    #x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='d_conv2_bn')(x)
    x = Conv2D(base*4, (5, 5), padding='same', strides=(2,2), name='d_conv3',
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = LeakyReLU(alpha=0.2)(x)
    #x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='d_conv3_bn')(x)
    x = Conv2D(base*8, (5, 5), padding='same', strides=(2,2), name='d_conv4',
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = LeakyReLU(alpha=0.2)(x)
    #x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='d_conv4_bn')(x)
    x = Flatten()(x)
    #x = Dense(4096, activation='relu', name='d_dense1',
    #    kernel_initializer=RN(mean=0.0, stddev=0.02), bias_initializer=Constant())(x)
    x = Dense(1, activation='sigmoid', name='d_out',
        kernel_initializer=RN(mean=0.0, stddev=0.02), bias_initializer=Constant())(x)
    model = Model(inputs=inputs, outputs=x, name='D')
    return model
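A minimal usage sketch (an illustrative assumption, not part of the original example): build the discriminator for 64x64 RGB inputs and compile it for real/fake classification with the Adam settings commonly used for DCGANs.

from tensorflow.keras.optimizers import Adam

d = D_model(64, 64, channel=3)   # four stride-2 convs reduce 64x64 to a 4x4 feature map
d.compile(optimizer=Adam(learning_rate=2e-4, beta_1=0.5),
          loss='binary_crossentropy', metrics=['accuracy'])
d.summary()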
Example #2
def G_model(Height, Width, channel=3):
    inputs = Input((100, ))
    in_h = int(Height / 16)
    in_w = int(Width / 16)
    d_dim = 256
    base = 128
    x = Dense(in_h * in_w * d_dim,
              name='g_dense1',
              kernel_initializer=RN(mean=0.0, stddev=0.02),
              use_bias=False)(inputs)
    x = Reshape((in_h, in_w, d_dim), input_shape=(d_dim * in_h * in_w, ))(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_dense1_bn')(x)
    # 1/8
    x = Conv2DTranspose(base * 4, (5, 5),
                        name='g_conv1',
                        padding='same',
                        strides=(2, 2),
                        kernel_initializer=RN(mean=0.0, stddev=0.02),
                        use_bias=False)(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv1_bn')(x)
    # 1/4
    x = Conv2DTranspose(base * 2, (5, 5),
                        name='g_conv2',
                        padding='same',
                        strides=(2, 2),
                        kernel_initializer=RN(mean=0.0, stddev=0.02),
                        use_bias=False)(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv2_bn')(x)
    # 1/2
    x = Conv2DTranspose(base, (5, 5),
                        name='g_conv3',
                        padding='same',
                        strides=(2, 2),
                        kernel_initializer=RN(mean=0.0, stddev=0.02),
                        use_bias=False)(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv3_bn')(x)
    # 1/1
    x = Conv2DTranspose(channel, (5, 5),
                        name='g_out',
                        padding='same',
                        strides=(2, 2),
                        kernel_initializer=RN(mean=0.0, stddev=0.02),
                        bias_initializer=Constant())(x)
    x = Activation('tanh')(x)
    model = Model(inputs=inputs, outputs=x, name='G')
    return model
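A hedged sketch of how these two models are typically stacked for DCGAN training; the 64x64 resolution and the optimizer settings are assumptions for illustration.

from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam

g = G_model(64, 64, channel=3)
d = D_model(64, 64, channel=3)
d.compile(optimizer=Adam(learning_rate=2e-4, beta_1=0.5), loss='binary_crossentropy')

d.trainable = False                   # freeze D while G is trained through the stack
gan = Sequential([g, d], name='GAN')  # noise (100,) -> image -> real/fake score
gan.compile(optimizer=Adam(learning_rate=2e-4, beta_1=0.5), loss='binary_crossentropy')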
def G_model():
    inputs = Input([100, ], name="x")
    con_x = Input([num_classes, ], name="con_x")
    con_x2 = Input([img_height, img_width, num_classes], name="con_x2")
    
    #con_x = K.zeros([None, num_classes, 1, 1])
    #print(con_x.shape)
    #con_x = np.zeros([len(_con_x), num_classes, 1, 1], dtype=np.float32)
    #con_x[np.arange(len(_con_x)), _con_x] = 1

    x = concatenate([inputs, con_x], axis=-1)
    
    in_h = int(img_height / 16)
    in_w = int(img_width / 16)
    d_dim = 256
    base = 128
    x = Dense(in_h * in_w * d_dim, name='g_dense1',
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = Reshape((in_h, in_w, d_dim), input_shape=(d_dim * in_h * in_w,))(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_dense1_bn')(x)
    # 1/8
    x = Conv2DTranspose(base*4, (5, 5), name='g_conv1', padding='same', strides=(2,2),
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv1_bn')(x)
    # 1/4
    x = Conv2DTranspose(base*2, (5, 5), name='g_conv2', padding='same', strides=(2,2),
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv2_bn')(x)
    # 1/2
    x = Conv2DTranspose(base, (5, 5), name='g_conv3', padding='same', strides=(2,2),
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = Activation('relu')(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv3_bn')(x)
    # 1/1
    x = Conv2DTranspose(channel, (5, 5), name='g_out', padding='same', strides=(2,2),
        kernel_initializer=RN(mean=0.0, stddev=0.02),  bias_initializer=Constant())(x)
    x = Activation('tanh')(x)

    #con_x = np.zeros([len(_con_x), num_classes, img_height, img_width], dtype=np.float32)
    #con_x[np.arange(len(_con_x)), _con_x] = 1
    x2 = concatenate([x, con_x2], axis=-1)

    model = Model(inputs=[inputs, con_x, con_x2], outputs=[x], name='G')
    gan_g_model = Model(inputs=[inputs, con_x, con_x2], outputs=[x2], name='GAN_G')
    
    return model, gan_g_model
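Illustrative call sketch for the conditional generator (assumed globals: img_height = img_width = 64, num_classes = 10, channel = 3). The label enters twice: once as a one-hot vector concatenated with the noise, and once as a per-pixel condition map that gan_g_model appends to the generated image.

import numpy as np

batch = 8
z = np.random.uniform(-1, 1, (batch, 100)).astype(np.float32)
labels = np.random.randint(0, num_classes, batch)
con = np.eye(num_classes, dtype=np.float32)[labels]                       # (batch, num_classes)
con_map = np.tile(con[:, None, None, :], (1, img_height, img_width, 1))   # (batch, H, W, num_classes)

g, gan_g = G_model()
fake_imgs = g.predict([z, con, con_map])      # (batch, H, W, channel), values in [-1, 1]
d_input   = gan_g.predict([z, con, con_map])  # images with the condition map concatenated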
def D_model():
    base = 32
    inputs = Input([img_height, img_width, channel + num_classes])
    x = Conv2D(base, (5, 5), padding='same', strides=(2,2), name='d_conv1',
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(inputs)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2D(base*2, (5, 5), padding='same', strides=(2,2), name='d_conv2',
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2D(base*4, (5, 5), padding='same', strides=(2,2), name='d_conv3',
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2D(base*8, (5, 5), padding='same', strides=(2,2), name='d_conv4',
        kernel_initializer=RN(mean=0.0, stddev=0.02), use_bias=False)(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Flatten()(x)
    x = Dense(1, activation='sigmoid', name='d_out',
        kernel_initializer=RN(mean=0.0, stddev=0.02), bias_initializer=Constant())(x)
    model = Model(inputs=inputs, outputs=x, name='D')
    return model
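A hedged sketch of the conditional GAN wiring implied by the shapes above: this discriminator expects the image concatenated with the per-pixel condition map, which is exactly what gan_g_model produces. The optimizer settings are illustrative assumptions.

from tensorflow.keras.optimizers import Adam

d = D_model()
d.compile(optimizer=Adam(learning_rate=2e-4, beta_1=0.5), loss='binary_crossentropy')

g, gan_g = G_model()
d.trainable = False
z_in   = Input((100,))
c_in   = Input((num_classes,))
map_in = Input((img_height, img_width, num_classes))
cgan = Model([z_in, c_in, map_in], d(gan_g([z_in, c_in, map_in])), name='cGAN')
cgan.compile(optimizer=Adam(learning_rate=2e-4, beta_1=0.5), loss='binary_crossentropy')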
Example #5
def classifying(Height, Width, channel=3, num_classes=10):
    inputs_x = Input((Height, Width, channel),
                     name='classifying_X')  # takes the generator output
    x = Flatten()(inputs_x)
    x = Dense(num_classes,
              activation='sigmoid',
              name='d_out',
              kernel_initializer=RN(mean=0.0, stddev=0.02),
              bias_initializer=Constant())(x)
    model = Model(inputs=inputs_x, outputs=x, name='D')

    return model
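A minimal compile sketch (shapes and optimizer are assumptions): since the head uses a per-class sigmoid rather than softmax, binary cross-entropy against one-hot labels is used here.

clf = classifying(64, 64, channel=3, num_classes=10)
clf.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])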
Example #6
# InstanceNormalization is not part of core Keras; here it is assumed to come from
# tensorflow_addons (keras_contrib provides an equivalent layer in older setups).
from tensorflow_addons.layers import InstanceNormalization


def conv_x(Height, Width, channel=3):
    inputs_z = Input((Height, Width, channel), name='Z')  # takes the input image
    x = Conv2D(32, (5, 5),
               padding='same',
               strides=(2, 2),
               name='g_conv1',
               kernel_initializer=RN(mean=0.0, stddev=0.02),
               use_bias=False)(inputs_z)
    x = InstanceNormalization()(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv1_bn')(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2D(64, (5, 5),
               padding='same',
               strides=(2, 2),
               name='g_conv2',
               kernel_initializer=RN(mean=0.0, stddev=0.02),
               use_bias=False)(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv2_bn')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2D(128, (5, 5),
               padding='same',
               strides=(2, 2),
               name='g_conv3',
               kernel_initializer=RN(mean=0.0, stddev=0.02),
               use_bias=False)(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv3_bn')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)
    x = Conv2D(256, (5, 5),
               padding='same',
               strides=(2, 2),
               name='g_conv4',
               kernel_initializer=RN(mean=0.0, stddev=0.02),
               use_bias=False)(x)
    x = BatchNormalization(momentum=0.9, epsilon=1e-5, name='g_conv4_bn')(x)
    x = InstanceNormalization()(x)
    x = LeakyReLU(alpha=0.2)(x)
    model = Model(inputs=[inputs_z], outputs=[x], name='G')
    return model
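Usage sketch (sizes are illustrative): the four stride-2 convolutions reduce the spatial resolution by a factor of 16, so a 64x64x3 input yields a 4x4x256 feature map.

enc = conv_x(64, 64, channel=3)
print(enc.output_shape)  # (None, 4, 4, 256)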