X = np.concatenate((x_train, x_test), axis=0)

X = X / 127.5 - 1.
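# rescales pixel values from [0, 255] to [-1, 1]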
##############################################################

img_shape = (28, 28, 1)
latent_dim = 100
optimizer = Adam(0.0002, 0.5)

################ Discriminator Architecture ################################

H = Sequential()

H.add(Flatten(input_shape=img_shape))
H.add(Dense(512))
H.add(LeakyReLU(alpha=0.2))
H.add(Dense(256))
H.add(LeakyReLU(alpha=0.2))
H.add(Dense(1, activation='sigmoid'))
H.summary()

image = Input(shape=img_shape)
fake_valid_class = H(image)

discriminator = Model(image, fake_valid_class)

discriminator.compile(loss='binary_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])

################################# Generator Architecture ################
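
# The generator body does not appear in this excerpt. A minimal sketch (an assumption,
# not the original code) matching latent_dim and img_shape above, and assuming Dense,
# Reshape and LeakyReLU are imported from keras.layers:
G = Sequential()
G.add(Dense(256, input_dim=latent_dim))
G.add(LeakyReLU(alpha=0.2))
G.add(Dense(512))
G.add(LeakyReLU(alpha=0.2))
G.add(Dense(np.prod(img_shape), activation='tanh'))  # 28*28*1 = 784 outputs in [-1, 1]
G.add(Reshape(img_shape))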
Example #2
model = Sequential()  # assumed; the original snippet is truncated above this point
model.add(
    Conv2D(filters=256,
           kernel_size=5,
           activation="relu",
           input_shape=sampleX.shape[1:]))
model.add(Conv2D(filters=256, kernel_size=5, activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 5)))

#model.add(Conv2D(filters=512,kernel_size=5,activation=LeakyReLU(alpha=0.1),input_shape=sampleX.shape[1:]))
#model.add(Conv2D(filters=512,kernel_size=5,activation=LeakyReLU(alpha=0.1)))
#model.add(MaxPooling2D(pool_size=(2,5)))

#model.add(GlobalAveragePooling2D())

model.add(Flatten())
model.add(Dense(512, kernel_regularizer=regularizers.l2(regul)))
model.add(LeakyReLU(alpha=0.1))
model.add(Dropout(0.5))
model.add(Dense(args.zdim, kernel_regularizer=regularizers.l2(regul)))
model.add(LeakyReLU(alpha=0.1))

model.add(
    Dense(sampleT.shape[1],
          kernel_regularizer=regularizers.l2(regul),
          bias_initializer=initializers.Constant(value=1.0)))
model.add(Activation('linear'))


def eachError(n):
    def rel_error_at(t, y):
        v1 = K.sqrt(K.mean(K.square(t[:, n] - y[:, n])))
        v0 = K.sqrt(K.mean(K.square(t[:, n] - 1.0)))
        return v1 / v0  # assumed completion (snippet truncated): relative RMS error for output column n
    return rel_error_at
Example #3
def unet(input_shapes:list, aux_decoder_out_channels:int, decoder_out_channels:int, filters:int, auxiliary_decoder:bool):    
    #inputs: List of tuples denoting the shapes.
    model_inputs = []
    for input_shape in input_shapes:
        model_inputs.append(Input(shape=input_shape))

    #Encoder
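    # eight stride-2 convolutions follow: each halves H and W, so e.g. a 256x256 input reaches 1x1 at the bottleneck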
    x = BatchNormalization()(Conv2D(filters*1, kernel_size = ENCODER_KERNELS[0], strides = 2, padding = "same")(Concatenate()(model_inputs)))
    x = LeakyReLU(0.2)(x); e1 = x
    x = BatchNormalization()(Conv2D(filters*2, kernel_size = ENCODER_KERNELS[0], strides = 2, padding = "same")(x))
    x = LeakyReLU(0.2)(x); e2 = x
    x = BatchNormalization()(Conv2D(filters*4, kernel_size = ENCODER_KERNELS[0], strides = 2, padding = "same")(x))
    x = LeakyReLU(0.2)(x); e3 = x
    x = BatchNormalization()(Conv2D(filters*8, kernel_size = ENCODER_KERNELS[0], strides = 2, padding = "same")(x))
    x = LeakyReLU(0.2)(x); e4 = x
    x = BatchNormalization()(Conv2D(filters*8, kernel_size = ENCODER_KERNELS[1], strides = 2, padding = "same")(x))
    x = LeakyReLU(0.2)(x); e5 = x
    x = BatchNormalization()(Conv2D(filters*8, kernel_size = ENCODER_KERNELS[1], strides = 2, padding = "same")(x))
    x = LeakyReLU(0.2)(x); e6 = x
    x = BatchNormalization()(Conv2D(filters*8, kernel_size = ENCODER_KERNELS[1], strides = 2, padding = "same")(x))
    x = LeakyReLU(0.2)(x); e7 = x
    x = BatchNormalization()(Conv2D(filters*8, kernel_size = ENCODER_KERNELS[1], strides = 2, padding = "same")(x))
    x = LeakyReLU(0.2)(x); 
    #e8 = x
    
    if auxiliary_decoder:
        #Auxiliary Decoder
        encoder = x
        y = BatchNormalization()(Conv2DTranspose(filters*8, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(encoder))
        y = LeakyReLU(0.2)(y); y = Concatenate()([Dropout(0.5)(y), e7])
        y = BatchNormalization()(Conv2DTranspose(filters*8, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(y))
        y = LeakyReLU(0.2)(y); y = Concatenate()([Dropout(0.5)(y), e6])
        y = BatchNormalization()(Conv2DTranspose(filters*8, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(y))
        y = LeakyReLU(0.2)(y); y = Concatenate()([Dropout(0.5)(y), e5])
        y = BatchNormalization()(Conv2DTranspose(filters*8, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(y))
        y = LeakyReLU(0.2)(y); y = Concatenate()([Dropout(0.5)(y), e4])
        y = BatchNormalization()(Conv2DTranspose(filters*4, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(y))
        y = LeakyReLU(0.2)(y); y = Concatenate()([Dropout(0.5)(y), e3])
        y = BatchNormalization()(Conv2DTranspose(filters*2, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(y))
        y = LeakyReLU(0.2)(y); y = Concatenate()([Dropout(0.5)(y), e2])
        y = BatchNormalization()(Conv2DTranspose(filters*1, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(y))
        y = LeakyReLU(0.2)(y); y = Concatenate()([Dropout(0.5)(y), e1])
        y = Conv2DTranspose(aux_decoder_out_channels, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(y)
        y = Activation("sigmoid", name='recon')(y)
    
    #Decoder
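    # mirrors the encoder: stride-2 transposed convolutions double H and W back up; intermediate outputs are concatenated with encoder features e7..e1 after dropout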
    x = BatchNormalization()(Conv2DTranspose(filters*8, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(x))
    x = LeakyReLU(0.2)(x); x = Concatenate()([Dropout(0.5)(x), e7])
    x = BatchNormalization()(Conv2DTranspose(filters*8, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(x))
    x = LeakyReLU(0.2)(x); x = Concatenate()([Dropout(0.5)(x), e6])
    x = BatchNormalization()(Conv2DTranspose(filters*8, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(x))
    x = LeakyReLU(0.2)(x); x = Concatenate()([Dropout(0.5)(x), e5])
    x = BatchNormalization()(Conv2DTranspose(filters*8, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(x))
    x = LeakyReLU(0.2)(x); x = Concatenate()([Dropout(0.5)(x), e4])
    x = BatchNormalization()(Conv2DTranspose(filters*4, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(x))
    x = LeakyReLU(0.2)(x); x = Concatenate()([Dropout(0.5)(x), e3])
    x = BatchNormalization()(Conv2DTranspose(filters*2, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(x))
    x = LeakyReLU(0.2)(x); x = Concatenate()([Dropout(0.5)(x), e2])
    x = BatchNormalization()(Conv2DTranspose(filters*1, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(x))
    x = LeakyReLU(0.2)(x); x = Concatenate()([Dropout(0.5)(x), e1])
    x = Conv2DTranspose(decoder_out_channels, kernel_size = DECODER_KERNEL, strides = 2, padding = "same")(x)
    x = Activation("sigmoid", name='midcurve')(x)
    
    if auxiliary_decoder:
        unet = Model(inputs=model_inputs, outputs = [x, y])
    else:
        unet = Model(inputs=model_inputs, outputs = [x])
    return unet
Example #4
def block(x):
    x = Conv2D(filters * 4, kernel_size=3, padding='same')(x)
    x = LeakyReLU(0.1)(x)
    x = PixelShuffler()(x)
    return x
Example #5
    def __init__(self, input_size):
        input_image = Input(shape=(input_size, input_size, 3))

        # the function that implements the reorganization layer (thanks to github.com/allanzelener/YAD2K)
        def space_to_depth_x2(x):
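            # rearranges each 2x2 spatial block into channels: (H, W, C) -> (H/2, W/2, 4*C)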
            return tf.space_to_depth(x, block_size=2)

        # Layer 1
        x = Conv2D(32, (3,3), strides=(1,1), padding='same', name='conv_1', use_bias=False)(input_image)
        x = BatchNormalization(name='norm_1')(x)
        x = LeakyReLU(alpha=0.1)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

        # Layer 2
        x = Conv2D(64, (3,3), strides=(1,1), padding='same', name='conv_2', use_bias=False)(x)
        x = BatchNormalization(name='norm_2')(x)
        x = LeakyReLU(alpha=0.1)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

        # Layer 3
        x = Conv2D(128, (3,3), strides=(1,1), padding='same', name='conv_3', use_bias=False)(x)
        x = BatchNormalization(name='norm_3')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 4
        x = Conv2D(64, (1,1), strides=(1,1), padding='same', name='conv_4', use_bias=False)(x)
        x = BatchNormalization(name='norm_4')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 5
        x = Conv2D(128, (3,3), strides=(1,1), padding='same', name='conv_5', use_bias=False)(x)
        x = BatchNormalization(name='norm_5')(x)
        x = LeakyReLU(alpha=0.1)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

        # Layer 6
        x = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_6', use_bias=False)(x)
        x = BatchNormalization(name='norm_6')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 7
        x = Conv2D(128, (1,1), strides=(1,1), padding='same', name='conv_7', use_bias=False)(x)
        x = BatchNormalization(name='norm_7')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 8
        x = Conv2D(256, (3,3), strides=(1,1), padding='same', name='conv_8', use_bias=False)(x)
        x = BatchNormalization(name='norm_8')(x)
        x = LeakyReLU(alpha=0.1)(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)

        # Layer 9
        x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_9', use_bias=False)(x)
        x = BatchNormalization(name='norm_9')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 10
        x = Conv2D(256, (1,1), strides=(1,1), padding='same', name='conv_10', use_bias=False)(x)
        x = BatchNormalization(name='norm_10')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 11
        x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_11', use_bias=False)(x)
        x = BatchNormalization(name='norm_11')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 12
        x = Conv2D(256, (1,1), strides=(1,1), padding='same', name='conv_12', use_bias=False)(x)
        x = BatchNormalization(name='norm_12')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 13
        x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_13', use_bias=False)(x)
        x = BatchNormalization(name='norm_13')(x)
        x = LeakyReLU(alpha=0.1)(x)

        skip_connection = x

        x = MaxPooling2D(pool_size=(2, 2))(x)

        # Layer 14
        x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_14', use_bias=False)(x)
        x = BatchNormalization(name='norm_14')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 15
        x = Conv2D(512, (1,1), strides=(1,1), padding='same', name='conv_15', use_bias=False)(x)
        x = BatchNormalization(name='norm_15')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 16
        x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_16', use_bias=False)(x)
        x = BatchNormalization(name='norm_16')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 17
        x = Conv2D(512, (1,1), strides=(1,1), padding='same', name='conv_17', use_bias=False)(x)
        x = BatchNormalization(name='norm_17')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 18
        x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_18', use_bias=False)(x)
        x = BatchNormalization(name='norm_18')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 19
        x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_19', use_bias=False)(x)
        x = BatchNormalization(name='norm_19')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 20
        x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_20', use_bias=False)(x)
        x = BatchNormalization(name='norm_20')(x)
        x = LeakyReLU(alpha=0.1)(x)

        # Layer 21
        skip_connection = Conv2D(64, (1,1), strides=(1,1), padding='same', name='conv_21', use_bias=False)(skip_connection)
        skip_connection = BatchNormalization(name='norm_21')(skip_connection)
        skip_connection = LeakyReLU(alpha=0.1)(skip_connection)
        skip_connection = Lambda(space_to_depth_x2)(skip_connection)

        x = concatenate([skip_connection, x])

        # Layer 22
        x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_22', use_bias=False)(x)
        x = BatchNormalization(name='norm_22')(x)
        x = LeakyReLU(alpha=0.1)(x)

        self.feature_extractor = Model(input_image, x)
        self.feature_extractor.load_weights(FULL_YOLO_BACKEND_PATH)
def conv2d(layer_input, filters, f_size=4):
    """Layers used during downsampling"""
    d = Conv2D(filters, kernel_size=f_size, strides=2, padding='same')(layer_input)
    d = LeakyReLU(alpha=0.2)(d)
    d = InstanceNormalization()(d)
    return d
Example #7
def inception_block(inputs,
                    depth,
                    batch_mode=0,
                    splitted=False,
                    activation='relu'):
    assert depth % 16 == 0
    actv = activation == 'relu' and (lambda: LeakyReLU(
        0.0)) or activation == 'elu' and (lambda: ELU(1.0)) or None

    c1_1 = Convolution2D(depth / 4, 1, 1, init='he_normal',
                         border_mode='same')(inputs)

    c2_1 = Convolution2D(depth / 8 * 3,
                         1,
                         1,
                         init='he_normal',
                         border_mode='same')(inputs)

    c2_1 = actv()(c2_1)

    if splitted:
        c2_2 = Convolution2D(depth / 2,
                             1,
                             3,
                             init='he_normal',
                             border_mode='same')(c2_1)
        c2_2 = BatchNormalization(mode=batch_mode, axis=1)(c2_2)
        c2_2 = actv()(c2_2)
        c2_3 = Convolution2D(depth / 2,
                             3,
                             1,
                             init='he_normal',
                             border_mode='same')(c2_2)
    else:
        c2_3 = Convolution2D(depth / 2,
                             3,
                             3,
                             init='he_normal',
                             border_mode='same')(c2_1)

    c3_1 = Convolution2D(depth / 16,
                         1,
                         1,
                         init='he_normal',
                         border_mode='same')(inputs)
    c3_1 = actv()(c3_1)

    if splitted:
        c3_2 = Convolution2D(depth / 8,
                             1,
                             5,
                             init='he_normal',
                             border_mode='same')(c3_1)
        c3_2 = BatchNormalization(mode=batch_mode, axis=1)(c3_2)
        c3_2 = actv()(c3_2)
        c3_3 = Convolution2D(depth / 8,
                             5,
                             1,
                             init='he_normal',
                             border_mode='same')(c3_2)
    else:
        c3_3 = Convolution2D(depth / 8,
                             5,
                             5,
                             init='he_normal',
                             border_mode='same')(c3_1)

    p4_1 = MaxPooling2D(pool_size=(3, 3), strides=(1, 1),
                        border_mode='same')(inputs)
    c4_2 = Convolution2D(depth / 8, 1, 1, init='he_normal',
                         border_mode='same')(p4_1)

    res = merge([c1_1, c2_3, c3_3, c4_2], mode='concat', concat_axis=1)
    res = BatchNormalization(mode=batch_mode, axis=1)(res)
    res = actv()(res)

    return res
print('x_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

#convert class vectors to one hot encoded vectors
Y_train = np_utils.to_categorical(Y_train, num_classes)
Y_test = np_utils.to_categorical(Y_test, num_classes)

feature_layers = [
    Convolution2D(64,
                  5,
                  5,
                  border_mode='same',
                  subsample=(2, 2),
                  input_shape=(32, 32, 3)),
    LeakyReLU(0.2),
    Dropout(0.5),
    Convolution2D(128, 5, 5, border_mode='same', subsample=(2, 2)),
    LeakyReLU(0.2),
    Dropout(0.5),
    Convolution2D(256, 5, 5, border_mode='same', subsample=(2, 2)),
    LeakyReLU(0.2),
    Dropout(0.5),
    Convolution2D(512, 5, 5, border_mode='same', subsample=(4, 4)),
    LeakyReLU(0.2),
    Dropout(0.5),
    Flatten()
]

classification_layers = [
    Dense(512, W_regularizer=keras.regularizers.l2(0.01), name='fc_layer1'),
Example #9
def build_discriminator(shape, build_disc=True):
    '''
    Build discriminator.
    Set build_disc=False to build an encoder network to test
    the encoding/discrimination capability with autoencoder...
    '''
    def conv2d(x, filters, shape=(4, 4), **kwargs):
        '''
        I don't want to write lengthy parameters so I made a short hand function.
        '''
        x = Conv2D(filters,
                   shape,
                   strides=(2, 2),
                   padding='same',
                   kernel_initializer=Args.kernel_initializer,
                   **kwargs)(x)
        #x = MaxPooling2D()( x )
        x = BatchNormalization(momentum=Args.bn_momentum)(x)
        x = LeakyReLU(alpha=Args.alpha_D)(x)
        return x

    # https://github.com/tdrussell/IllustrationGAN
    # As proposed by them, unlike GAN hacks, MaxPooling works better for anime dataset it seems.
    # However, animeGAN doesn't use it so I'll keep it more similar to DCGAN.

    face = Input(shape=shape)
    x = face

    # Warning: Don't batchnorm the first set of Conv2D.
    x = Conv2D(64, (4, 4),
               strides=(2, 2),
               padding='same',
               kernel_initializer=Args.kernel_initializer)(x)
    x = LeakyReLU(alpha=Args.alpha_D)(x)
    # 32x32

    x = conv2d(x, 128)
    # 16x16

    x = conv2d(x, 256)
    # 8x8

    x = conv2d(x, 512)
    # 4x4

    if build_disc:
        x = Flatten()(x)
        # add 16 features. Run 1D conv of size 3.
        #x = MinibatchDiscrimination(16, 3)( x )

        #x = Dense(1024, kernel_initializer=Args.kernel_initializer)( x )
        #x = LeakyReLU(alpha=Args.alpha_D)( x )

        # 1 when "real", 0 when "fake".
        x = Dense(1,
                  activation='sigmoid',
                  kernel_initializer=Args.kernel_initializer)(x)
        return models.Model(inputs=face, outputs=x)
    else:
        # build encoder.
        x = Conv2D(Args.noise_shape[2], (4, 4), activation='tanh')(x)
        return models.Model(inputs=face, outputs=x)
Example #10
Y = encoder.transform(Y)
Y = np_utils.to_categorical(Y)


input_dim = len(data.columns) - 1

model = Sequential()
model.add(Dense(20, input_dim=input_dim, activation='relu'))
#model.add(Dense(135, activation='relu'))
#model.add(Dense(100, activation='relu'))
#model.add(BatchNormalization())
#model.add(Dense(600))
#model.add(LeakyReLU(alpha=0.05))
model.add(BatchNormalization())
model.add(Dense(8))
model.add(LeakyReLU(alpha=0.05))
model.add(BatchNormalization())
model.add(Dense(2, activation='softmax'))

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

model.fit(X, Y, epochs=10, batch_size=10)
scores = model.evaluate(X, Y)
print("\n%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))

# serialize model to JSON
model_json = model.to_json()
with open("model.json", "w") as json_file:
    json_file.write(model_json)
# serialize weights to HDF5
model.save_weights("model.h5")
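
# Reloading the model later (a sketch; assumes `from keras.models import model_from_json`):
with open("model.json", "r") as json_file:
    loaded_model = model_from_json(json_file.read())
loaded_model.load_weights("model.h5")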
Example #11
def make_discriminator(image_size, use_input_pose, warp_skip, disc_type,
                       warp_agg, use_bg, pose_rep_type):
    input_img = Input(list(image_size) + [3])
    output_pose = Input(
        list(image_size) + [18 if pose_rep_type == 'hm' else 3])
    input_pose = Input(list(image_size) + [18 if pose_rep_type == 'hm' else 3])
    output_img = Input(list(image_size) + [3])
    bg_img = Input(list(image_size) + [3])

    if warp_skip == 'full':
        warp = [Input((10, 8))]
    elif warp_skip == 'mask':
        warp = [Input((10, 8)), Input((10, image_size[0], image_size[1]))]
    else:
        warp = []

    if use_input_pose:
        input_pose = [input_pose]
    else:
        input_pose = []

    if use_bg:
        bg_img = [bg_img]
    else:
        bg_img = []

    assert (not use_bg) or (disc_type == 'call')

    if disc_type == 'call':
        out = Concatenate(axis=-1)([input_img] + input_pose +
                                   [output_img, output_pose] + bg_img)
        out = Conv2D(64, kernel_size=(4, 4), strides=(2, 2))(out)
        out = block(out, 128)
        out = block(out, 256)
        out = block(out, 512)
        out = block(out, 1, bn=False)
        out = Activation('sigmoid')(out)
        out = Flatten()(out)
        return Model(inputs=[input_img] + input_pose +
                     [output_img, output_pose] + bg_img,
                     outputs=[out])
    elif disc_type == 'sim':
        out = Concatenate(axis=-1)([output_img, output_pose])
        out = Conv2D(64, kernel_size=(4, 4), strides=(2, 2))(out)
        out = block(out, 128)
        out = block(out, 256)
        out = block(out, 512)
        m_share = Model(inputs=[output_img, output_pose], outputs=[out])
        output_feat = m_share([output_img, output_pose])
        input_feat = m_share([input_img] + input_pose)

        out = Concatenate(axis=-1)([output_feat, input_feat])
        out = LeakyReLU(0.2)(out)
        out = Flatten()(out)
        out = Dense(1)(out)
        out = Activation('sigmoid')(out)

        return Model(inputs=[input_img] + input_pose +
                     [output_img, output_pose],
                     outputs=[out])
    else:
        out_inp = Concatenate(axis=-1)([input_img] + input_pose)
        out_inp = Conv2D(64, kernel_size=(4, 4), strides=(2, 2))(out_inp)

        out_inp = AffineTransformLayer(10, warp_agg,
                                       image_size)([out_inp] + warp)

        out = Concatenate(axis=-1)([output_img, output_pose])
        out = Conv2D(64, kernel_size=(4, 4), strides=(2, 2))(out)

        out = Concatenate(axis=-1)([out, out_inp])

        out = block(out, 128)
        out = block(out, 256)
        out = block(out, 512)
        out = block(out, 1, bn=False)
        out = Activation('sigmoid')(out)
        out = Flatten()(out)
        return Model(inputs=[input_img] + input_pose +
                     [output_img, output_pose] + warp,
                     outputs=[out])
def LoadModel(in_shape, num_classes):
    # FIXME: 
    # - In the original network, the bias for convolutional layers is set to 1.0
    model = Sequential()

    a = 0.3
    # This looks like what they call l1 in the code
    model.add(Conv2D(32, (3,3), padding='same', input_shape=in_shape))
    model.add(LeakyReLU(alpha=a))
    model.add(Conv2D(16, (3,3), padding='same'))
    model.add(LeakyReLU(alpha=a))
    model.add(MaxPooling2D(pool_size=(3,3), strides=(2,2)))

    # This looks like what they call l2 in the code
    model.add(Conv2D(64, (3,3), padding='same'))
    model.add(LeakyReLU(alpha=a))
    model.add(Conv2D(32, (3,3), padding='same'))
    model.add(LeakyReLU(alpha=a))
    model.add(MaxPooling2D(pool_size=(3,3), strides=(2,2)))

    # This looks like what they call l3 in the code
    model.add(Conv2D(128, (3,3), padding='same'))
    model.add(LeakyReLU(alpha=a))
    model.add(Conv2D(128, (3,3), padding='same'))
    model.add(LeakyReLU(alpha=a))
    model.add(Conv2D(64, (3,3), padding='same'))
    model.add(LeakyReLU(alpha=a))
    model.add(MaxPooling2D(pool_size=(3,3), strides=(2,2)))

    # This looks like what they call l4 in the code
    model.add(Conv2D(256, (3,3), padding='same'))
    model.add(LeakyReLU(alpha=a))
    model.add(Conv2D(256, (3,3), padding='same'))
    model.add(LeakyReLU(alpha=a))
    model.add(Conv2D(128, (3,3), padding='same'))
    model.add(LeakyReLU(alpha=a))
    model.add(MaxPooling2D(pool_size=(3,3), strides=(2,2)))
    model.add(Flatten())
    model.add(Dropout(0.5))

    # This looks like what they call l5
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=a))
    model.add(Dropout(0.5))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=a))
    model.add(Dropout(0.5))
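    # note: num_classes is accepted but unused here; the final softmax layer is presumably added by the caller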

    return model
Example #13
# Define optimizers ...

optimizerG = Adam(lr=results.learning_rate, beta_1=0.5, decay=0, amsgrad=True)
optimizerD = Adam(lr=results.learning_rate, beta_1=0.5, decay=0, amsgrad=True)

# Build Generative model ...

initial_state = Input(shape=(1, 5))

inital_state_to_append = Input(shape=(1, 5))

noise_dims = Input(shape=(1, 3))

H = Dense(int(G_architecture[0]))(initial_state)
H = LeakyReLU(alpha=0.2)(H)
H = BatchNormalization(momentum=0.8)(H)

for layer in G_architecture[1:]:

    H = Dense(int(layer))(H)
    H = LeakyReLU(alpha=0.2)(H)
    H = BatchNormalization(momentum=0.8)(H)

H = concatenate([H, noise_dims], axis=2)
H = Dense(100)(H)
H = LeakyReLU(alpha=0.2)(H)
H = BatchNormalization(momentum=0.8)(H)
# H = concatenate([H, noise_dims],axis=2)

H = Dense(5, activation='tanh')(H)
def advanced_autoencoder(x_in, x, epochs, batch_size, activations, depth, neurons):
    sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
    K.set_session(sess)
    num_stock = len(x_in.columns)

    # activation functions
    if activations == 'elu':
        function = ELU(alpha=1.0)
    elif activations == 'lrelu':
        function = LeakyReLU(alpha=0.1)
    else:
        function = ReLU(max_value=None, negative_slope=0.0, threshold=0.0)

    autoencoder = Sequential()
    # encoding layers of desired depth
    for n in range(1, depth + 1):
        # input layer
        if n == 1:
            # autoencoder.add(GaussianNoise(stddev=0.01, input_shape=(num_stock,)))
            autoencoder.add(Dense(int(neurons / n), input_shape=(num_stock,)))
            autoencoder.add(function)
        else:
            autoencoder.add(Dense(int(neurons / n)))
            autoencoder.add(function)
    # decoding layers of desired depth
    for n in range(depth, 1, -1):
        autoencoder.add(Dense(int(neurons / (n - 1))))
        autoencoder.add(function)
    # output layer
    autoencoder.add(Dense(num_stock, activation='linear'))

    # autoencoder.compile(optimizer='sgd', loss='mean_absolute_error', metrics=['accuracy'])

    autoencoder.compile(optimizer='adam', loss='mean_squared_error', metrics=['accuracy'])

    # checkpointer = ModelCheckpoint(filepath='weights.{epoch:02d}-{val_loss:.2f}.txt', verbose=0, save_best_only=True)
    earlystopper = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=0, mode='auto', baseline=None,
                                 restore_best_weights=True)
    history = autoencoder.fit(x_in, x_in, epochs=epochs, batch_size=batch_size, \
                              shuffle=False, validation_split=0.15, verbose=0, callbacks=[earlystopper])
    # errors = np.add(autoencoder.predict(x_in),-x_in)
    y = autoencoder.predict(x)
    # saving results of error distribution tests
    # A=np.zeros((5))
    # A[0]=chi2test(errors)
    # A[1]=pesarantest(errors)
    # A[2]=portmanteau(errors,1)
    # A[3]=portmanteau(errors,3)
    # A[4]=portmanteau(errors,5)

    # autoencoder.summary()

    # plot accuracy and loss of autoencoder
    # plot_accuracy(history)
    # plot_loss(history)

    # plot original, encoded and decoded data for some stock
    # plot_two_series(x_in, 'Original data', auto_data, 'Reconstructed data')

    # the histogram of the data
    # make_histogram(x_in, 'Original data', auto_data, 'Reconstructed data')

    # CLOSE TF SESSION
    K.clear_session()
    return y
Example #15
    def build_models(self, input_shape):
        self.discriminator = Sequential()
        self.discriminator.add(
            Conv2D(64, (5, 5),
                   strides=(2, 2),
                   padding='same',
                   input_shape=input_shape))
        self.discriminator.add(LeakyReLU(0.2))
        self.discriminator.add(Dropout(0.5))
        self.discriminator.add(
            Conv2D(128, (5, 5), strides=(2, 2), padding='same'))
        self.discriminator.add(LeakyReLU(0.2))
        self.discriminator.add(Dropout(0.5))
        self.discriminator.add(
            Conv2D(256, (5, 5), strides=(2, 2), padding='same'))
        self.discriminator.add(LeakyReLU(0.2))
        self.discriminator.add(Dropout(0.5))
        # 8x8 for CIFAR
        #self.discriminator.add(Conv2D(512, (5, 5), strides=(2, 2), padding = 'same', activation='relu'))
        #self.discriminator.add(LeakyReLU(0.2))
        #self.discriminator.add(Dropout(0.5))
        self.discriminator.add(Flatten())
        self.discriminator.add(
            Dense(1 + self.num_classes, activation='softmax'))
        self.discriminator.summary()

        self.generator = Sequential()
        self.generator.add(Dense(8 * 8 * 256, input_shape=(100, )))
        #self.generator.add(BatchNormalization())
        self.generator.add(Activation(selu))
        if keras.backend.image_data_format() == 'channels_first':
            self.generator.add(Reshape([256, 8, 8]))
        else:
            self.generator.add(Reshape([8, 8, 256]))
        self.generator.add(Dropout(0.5))
        self.generator.add(UpSampling2D(size=(2, 2)))
        self.generator.add(Conv2D(128, (5, 5), padding='same'))
        #self.generator.add(BatchNormalization())
        self.generator.add(Activation(selu))
        self.generator.add(Dropout(0.5))
        self.generator.add(UpSampling2D(size=(2, 2)))
        self.generator.add(Conv2D(64, (5, 5), padding='same'))
        #self.generator.add(BatchNormalization())
        self.generator.add(Activation(selu))
        #self.generator.add(Dropout(0.5))
        #self.generator.add(UpSampling2D(size=(2, 2)))
        #self.generator.add(Conv2D(64, (5, 5), padding='same'))
        #self.generator.add(BatchNormalization())
        #self.generator.add(Activation('relu'))
        # we're ignoring input shape - just assuming it's 4,4,3
        self.generator.add(Conv2D(3, (5, 5), padding='same'))
        self.generator.add(Activation('sigmoid'))
        self.generator.summary()

        #self.real_image_model = Sequential()
        #self.real_image_model.add(self.discriminator)
        #self.real_image_model.compile(loss='categorical_crossentropy',
        #                              optimizer=Adam(lr=1e-4),
        #                              metrics=['accuracy'])

        self.generator.compile(loss='categorical_crossentropy',
                               optimizer=Adam(lr=1e-6),
                               metrics=['accuracy'])

        self.discriminator.compile(loss='categorical_crossentropy',
                                   optimizer=Adam(lr=1e-5),
                                   metrics=['accuracy'])

        self.real_image_model = self.discriminator

        self.fake_image_model = Sequential()
        self.fake_image_model.add(self.generator)
        self.discriminator.trainable = False
        self.fake_image_model.add(self.discriminator)
        self.fake_image_model.compile(loss='categorical_crossentropy',
                                      optimizer=Adam(lr=1e-6),
                                      metrics=['accuracy'])
Example #16
def build_gen(shape):
    def deconv2d(x, filters, shape=(4, 4)):
        '''
        Conv2DTransposed gives me checkerboard artifact...
        Select one of the 3.
        '''
        # Simpe Conv2DTranspose
        # Not good, compared to upsample + conv2d below.

        # Run the input through transposed 2D Convolution (deconvolution), with:
        # - filters number of output filters
        # - Use a shape[0] x shape[1] kernel
        # and use the kernel_initializer from args.py, which is currently
        # using "glorot_uniform".
        x = Conv2DTranspose(filters,
                            shape,
                            padding='same',
                            strides=(2, 2),
                            kernel_initializer=Args.kernel_initializer)(x)

        # simple and works
        # UpSampling2D repeats the rows and columns of the data by size[0] and
        # size[1], respectively.
        #x = UpSampling2D( (2, 2) )( x )

        # Conv2D runs the inputs through 2D convolution with:
        # - filters number of output filters
        # - Use a shape[0] by shape[1] kernel
        #x = Conv2D( filters, shape, padding='same' )( x )

        # Bilinear2x... Not sure if it is without bug, not tested yet.
        # Tend to make output blurry though
        #x = bilinear2x( x, filters )
        #x = Conv2D( filters, shape, padding='same' )( x )

        x = BatchNormalization(momentum=Args.bn_momentum)(x)
        x = LeakyReLU(alpha=Args.alpha_G)(x)
        return x

    # https://github.com/tdrussell/IllustrationGAN  z predictor...?
    # might help. Not sure.

    # 01. Build input layer.
    noise = Input(shape=Args.noise_shape)
    x = noise
    # 1x1x256
    # noise is not useful for generating images.

    # 02. Run the input through transposed 2D Convolution (deconvolution), with:
    # - 512 output filters
    # - Use a 4 x 4 kernel
    # and use the kernel_initializer from args.py, which is currently
    # using "glorot_uniform".
    x = Conv2DTranspose(512, (4, 4),
                        kernel_initializer=Args.kernel_initializer)(x)

    # 03. Add another layer, this time we normalize, keep the activation
    # mean close to 0 and standard deviation close to 1.
    x = BatchNormalization(momentum=Args.bn_momentum)(x)

    # 04. Add another layer for Leaky ReLU, and is recommended by GANHacks.
    # LeakyReLU is similar to standard ReLU, except we allow some of it to
    # go through by multiplying it by alpha.
    # f(x) = alpha * x    , x < 0
    # f(x) = x            , x >= 0
    x = LeakyReLU(alpha=Args.alpha_G)(x)

    # 4x4
    x = deconv2d(x, 256)
    # 8x8
    x = deconv2d(x, 128)
    # 16x16
    x = deconv2d(x, 64)
    # 32x32

    # Extra layer
    x = Conv2D(64, (3, 3),
               padding='same',
               kernel_initializer=Args.kernel_initializer)(x)
    x = BatchNormalization(momentum=Args.bn_momentum)(x)
    x = LeakyReLU(alpha=Args.alpha_G)(x)
    # 32x32

    x = Conv2DTranspose(3, (4, 4),
                        padding='same',
                        activation='tanh',
                        strides=(2, 2),
                        kernel_initializer=Args.kernel_initializer)(x)
    # 64x64

    return models.Model(inputs=noise, outputs=x)
article = LSTM(latent_dim, return_state=True, dropout=0.2)
article_outputs, articleState_h, articleState_c = article(article_inputs)
article_states = [articleState_h, articleState_c]

headline_inputs = Input(shape=(None, 300))
headline = LSTM(latent_dim,
                return_sequences=True,
                return_state=True,
                dropout=0.2)
headline_outputs, headlineState_h, headlineState_c = headline(
    headline_inputs, initial_state=article_states)

added = Add()(
    [headlineState_c, articleState_c, headlineState_h, articleState_h])
x = Dense(128)(added)
x = LeakyReLU()(x)
x = Dropout(0.2)(x)
x = Dense(64)(x)
x = LeakyReLU()(x)
out = Dense(4, activation='softmax')(x)

model = Model([article_inputs, headline_inputs], out)
adadelta = Adadelta(lr=2)
model.compile(optimizer=adadelta, loss='categorical_crossentropy')

class_weight = {0: 1., 1: 7., 2: 7., 3: 5.}

print(model.summary())

model.fit([article_input_data, headline_input_data],
          target_stance,
def cnn_model(X_train, y_train, channels, nb_gpus, nb_classes, batch_size,
              nb_epoch, cl_weights, leakiness):

    print("X_train:- ", X_train.shape, "\ny_train:- ", y_train.shape,
          "\nClass Weights:- ", cl_weights)
    print("channels:- ", channels, "\nnb_gpus:- ", nb_gpus, "\nLeakiness:- ",
          leakiness)
    print("batch_size:- ", batch_size, "\nnb_epoch:- ", nb_epoch)

    if not os.path.exists('saved_model'):
        os.makedirs('saved_model')
        print("Path Set!")

    input_layer = InputLayer(input_shape=(64, 2))

    model = Sequential()
    #RetiNet has one convolution layer with a 7x7 filter, but it is expensive, so convert
    #it into two 3x3 filters
    model.add(
        Conv2D(32, (7, 7),
               padding='same',
               strides=(2, 2),
               input_shape=(img_rows, img_cols, channels)))
    model.add(LeakyReLU(alpha=leakiness))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(Conv2D(32, (3, 3), strides=(1, 1), padding='same'))
    model.add(LeakyReLU(alpha=leakiness))
    model.add(Conv2D(32, (3, 3), strides=(1, 1), padding='same'))
    model.add(LeakyReLU(alpha=leakiness))
    #model.add(Conv2D(32, (3, 3), strides = (1, 1), padding = 'same'))
    #model.add(LeakyReLU(alpha = leakiness))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same'))
    model.add(LeakyReLU(alpha=leakiness))
    model.add(Conv2D(64, (3, 3), strides=(1, 1), padding='same'))
    model.add(LeakyReLU(alpha=leakiness))
    #model.add(Conv2D(64, (3, 3), strides = (1, 1), padding = 'same'))
    #model.add(LeakyReLU(alpha = leakiness))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same'))
    model.add(LeakyReLU(alpha=leakiness))
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same'))
    model.add(LeakyReLU(alpha=leakiness))
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same'))
    model.add(LeakyReLU(alpha=leakiness))
    model.add(Conv2D(128, (3, 3), strides=(1, 1), padding='same'))
    model.add(LeakyReLU(alpha=leakiness))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same'))
    model.add(LeakyReLU(alpha=leakiness))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same'))
    model.add(LeakyReLU(alpha=leakiness))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same'))
    model.add(LeakyReLU(alpha=leakiness))
    model.add(Conv2D(256, (3, 3), strides=(1, 1), padding='same'))
    model.add(LeakyReLU(alpha=leakiness))
    model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))

    #model.add(AveragePooling1D(pool_size = 2))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=leakiness))
    model.add(Dropout(0.5))

    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=leakiness))
    model.add(Dropout(0.5))

    model.add(Dense(nb_classes, activation='softmax'))

    #model = multi_gpu_model(model, gpus=nb_gpus)
    model.summary()

    sgd = SGD(lr=0.01, momentum=0.9, decay=1e-6, nesterov=False)
    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])

    stop = EarlyStopping(monitor="val_loss",
                         min_delta=0.001,
                         patience=2,
                         verbose=0,
                         mode="auto")
    tensorboard = TensorBoard(log_dir='tensorboard_logs/',
                              histogram_freq=0,
                              write_graph=True,
                              write_images=True)

    model.fit(X_train,
              y_train,
              batch_size=batch_size,
              epochs=nb_epoch,
              verbose=1,
              validation_split=0.2,
              class_weight=cl_weights,
              callbacks=[stop, tensorboard])

    return model
Example #19
def DarknetConv2D_BN_Leaky(*args, **kwargs):
    """Darknet Convolution2D followed by BatchNormalization and LeakyReLU."""
    no_bias_kwargs = {'use_bias': False}
    no_bias_kwargs.update(kwargs)
    return compose(DarknetConv2D(*args, **no_bias_kwargs),
                   BatchNormalization(), LeakyReLU(alpha=0.1))
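
# usage sketch (an assumption, following YAD2K where DarknetConv2D wraps Conv2D):
#   x = DarknetConv2D_BN_Leaky(32, (3, 3))(x)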
Example #20
    def residueadd(self):
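        # encoder-decoder autoencoder with additive skip connections at the 24, 12, 6 and 3 resolutions (see the add() calls below)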

        dis_input = Input(shape=self.image_shape)  #48
        model = Conv2D(filters=64, kernel_size=3, strides=1,
                       padding="same")(dis_input)
        model = LeakyReLU(alpha=0.2)(model)
        model = MaxPooling2D((2, 2), padding='same',
                             name="pool_24")(model)  #24
        model_24 = model
        model = Conv2D(filters=128, kernel_size=3, strides=1,
                       padding="same")(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = MaxPooling2D((2, 2), padding='same',
                             name="pool_12")(model)  #12
        model_12 = model
        model = Conv2D(filters=256, kernel_size=3, strides=1,
                       padding="same")(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = MaxPooling2D((2, 2), padding='same', name="pool_6")(model)  #6
        model_6 = model
        model = Conv2D(filters=512, kernel_size=3, strides=1,
                       padding="same")(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = MaxPooling2D((2, 2), padding='same', name="pool_3")(model)  #3
        model_3 = model
        model = Conv2D(filters=512, kernel_size=3, strides=1,
                       padding="same")(model)
        model = LeakyReLU(alpha=0.2)(model)
        encoded = MaxPooling2D((3, 3), padding='same',
                               name="pool_1")(model)  #1

        model = Conv2D(filters=512, kernel_size=3, strides=1,
                       padding="same")(encoded)
        model = LeakyReLU(alpha=0.2)(model)
        model = UpSampling2D((3, 3), name="sample_3")(model)  #3
        model = add([model_3, model])

        model = Conv2D(filters=256, kernel_size=3, strides=1,
                       padding="same")(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = UpSampling2D((2, 2), name="sample_6")(model)
        model = add([model_6, model])

        model = Conv2D(filters=128, kernel_size=3, strides=1,
                       padding="same")(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = UpSampling2D((2, 2), name="sample_12")(model)
        model = add([model_12, model])

        model = Conv2D(filters=64, kernel_size=3, strides=1,
                       padding="same")(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = UpSampling2D((2, 2), name="sample_24")(model)
        model = add([model_24, model])

        model = Conv2D(filters=64, kernel_size=3, strides=1,
                       padding="same")(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = UpSampling2D((2, 2))(model)

        model = Conv2D(filters=64, kernel_size=3, strides=1,
                       padding="same")(model)
        model = LeakyReLU(alpha=0.2)(model)
        model = UpSampling2D((2, 2))(model)

        model = Conv2D(filters=3, kernel_size=3, strides=1,
                       padding="same")(model)
        model = LeakyReLU(alpha=0.2)(model)
        decoded = UpSampling2D((2, 2))(model)

        residue_model = Model(inputs=dis_input, outputs=decoded)

        return residue_model
Example #21
def block(x):
    x = Conv2D(filters, kernel_size=5, strides=2, padding='same')(x)
    x = LeakyReLU(0.1)(x)
    return x
    def _make_model(self):
        if self.is_hgg:
            dropout_rate = 0.1
        else:
            dropout_rate = 0.5
        step = 0
        print('******************************************', step)
        step += 1
        model_to_make = Sequential()
        print('******************************************', step)
        step += 1
        model_to_make.add(
            Conv2D(64, (3, 3),
                   kernel_initializer=glorot_normal(),
                   bias_initializer='zeros',
                   padding='same',
                   data_format='channels_first',
                   input_shape=(4, 33, 33)))
        print(model_to_make.input_shape)
        print(model_to_make.output)
        print('******************************************', step)
        step += 1
        model_to_make.add(LeakyReLU(alpha=0.333))
        print(model_to_make.output)
        print('******************************************', step)
        step += 1
        model_to_make.add(
            Conv2D(filters=64,
                   kernel_size=(3, 3),
                   padding='same',
                   data_format='channels_first',
                   input_shape=(64, 33, 33)))
        print(model_to_make.output)
        print('******************************************', step)
        step += 1
        model_to_make.add(LeakyReLU(alpha=0.333))
        print(model_to_make.output)
        if self.is_hgg:
            model_to_make.add(
                Conv2D(filters=64,
                       kernel_size=(3, 3),
                       padding='same',
                       data_format='channels_first',
                       input_shape=(64, 33, 33)))
            print('******************************************', step)
            step += 1
            print(model_to_make.output)

            model_to_make.add(LeakyReLU(alpha=0.333))
            print('******************************************', step)
            step += 1
            print(model_to_make.output)

        model_to_make.add(
            MaxPool2D(pool_size=(3, 3),
                      strides=(2, 2),
                      data_format='channels_first',
                      input_shape=(64, 33, 33)))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)

        model_to_make.add(
            Conv2D(filters=128,
                   kernel_size=(3, 3),
                   padding='same',
                   data_format='channels_first',
                   input_shape=(64, 16, 16)))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)

        model_to_make.add(LeakyReLU(alpha=0.333))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)

        model_to_make.add(
            Conv2D(filters=128,
                   kernel_size=(3, 3),
                   padding='same',
                   data_format='channels_first',
                   input_shape=(128, 16, 16)))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)

        if self.is_hgg:
            model_to_make.add(
                Conv2D(filters=128,
                       kernel_size=(3, 3),
                       padding='same',
                       data_format='channels_first',
                       input_shape=(128, 16, 16)))
            print('******************************************', step)
            step += 1
            print(model_to_make.output)
            model_to_make.add(LeakyReLU(alpha=0.333))
            print('******************************************', step)
            step += 1
            print(model_to_make.output)
        model_to_make.add(
            MaxPool2D(pool_size=(3, 3),
                      strides=(2, 2),
                      data_format='channels_first',
                      input_shape=(128, 16, 16)))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        print('******************************************', 'flattened')
        model_to_make.add(Flatten())
        print(model_to_make.output)
        model_to_make.add(Dense(units=256, input_dim=6272))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        model_to_make.add(LeakyReLU(alpha=0.333))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        model_to_make.add(Dropout(dropout_rate))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        model_to_make.add(Dense(units=256, input_dim=256))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        model_to_make.add(LeakyReLU(alpha=0.333))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        model_to_make.add(Dropout(dropout_rate))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        model_to_make.add(Dense(units=5, input_dim=256))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        model_to_make.add(Activation('softmax'))
        print('******************************************', step)
        step += 1
        print(model_to_make.output)
        self.model = model_to_make
Example #23
    modelE.add(Conv2D(128, kernel_size=(3, 2), padding="same"))
    modelE.add(BatchNormalization(momentum=0.8))
    modelE.add(Activation("relu"))
    modelE.add(Flatten())
    modelE.add(Dense(latent_dim))

    img = Input(shape=input_shape)
    z = modelE(img)
    encoder = Model(img, z)

    # Discriminator
    z = Input(shape=(latent_dim, ))
    img = Input(shape=input_shape)

    modelDx = Conv2D(64, kernel_size=(3, 2))(img)
    modelDx = LeakyReLU(alpha=0.2)(modelDx)
    modelDx = Dropout(0.5)(modelDx)
    modelDx = Conv2D(64, kernel_size=(3, 1))(modelDx)
    modelDx = BatchNormalization(momentum=0.8)(modelDx)
    modelDx = LeakyReLU(alpha=0.2)(modelDx)
    modelDx = Dropout(0.5)(modelDx)
    modelDx = Flatten()(modelDx)

    modelDz = Dense(512)(z)
    modelDz = LeakyReLU(alpha=0.2)(modelDz)
    modelDz = Dropout(0.5)(modelDz)

    d_in = concatenate([modelDx, modelDz])

    modelD = Dense(1024)(d_in)
    modelD = LeakyReLU(alpha=0.2)(modelD)
Example #24
# #demo G
# g = Sequential()
# g.add(Dense(256, input_dim=z_dim, kernel_initializer=initializers.RandomNormal(stddev=0.02)))
# g.add(LeakyReLU(0.2))
# g.add(Dense(512))
# g.add(LeakyReLU(0.2))
# g.add(Dense(1024))
# g.add(LeakyReLU(0.2))
# g.add(Dense(784, activation='tanh'))
# g.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])

# Generator
g = Sequential()
g.add(Dense(units=256, input_dim=z_dim))
g.add(LeakyReLU(alpha=0.2))
g.add(Dense(units=512))
g.add(LeakyReLU(alpha=0.2))
g.add(Dense(units=1024))
g.add(LeakyReLU(alpha=0.2))
g.add(Dense(784, activation='tanh'))
g.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])

# Discriminator
d = Sequential()
d.add(
    Dense(1024,
          input_dim=784,
          kernel_initializer=initializers.RandomNormal(stddev=0.02)))
d.add(LeakyReLU(0.2))
d.add(Dropout(0.3))
Example #25
x.shape

total_epochs=50
batch_size=128
no_of_batches = int(x.shape[0]/batch_size)
half_batch =128
noise_dim = 100 ##upsample into 784
adam = Adam(lr=2e-4,beta_1=0.5)  ##special parameters for GAN

##Generator
##input noise 100 dim, outputs vector 784 dim - upsampling

generator = Sequential()
generator.add(Dense(256,input_shape=(noise_dim,)))
generator.add(LeakyReLU(0.2))
generator.add(Dense(512))
generator.add(LeakyReLU(0.2))
generator.add(Dense(1024))
generator.add(LeakyReLU(0.2))
generator.add(Dense(784,activation='tanh'))
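# 784 = 28*28: a flattened image vector; tanh keeps outputs in [-1, 1]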

generator.compile(loss='binary_crossentropy',optimizer=adam)
generator.summary()

##Discriminator
##784->1  - downsampling

discriminator = Sequential()
discriminator.add(Dense(512,input_shape=(784,)))
discriminator.add(LeakyReLU(0.2))
Example #26
def __bottleneck_block(input,
                       filters=64,
                       cardinality=8,
                       strides=1,
                       weight_decay=5e-4):
    ''' Adds a bottleneck block
    Args:
        input: input tensor
        filters: number of output filters
        cardinality: cardinality factor described number of
            grouped convolutions
        strides: performs strided convolution for downsampling if > 1
        weight_decay: weight decay factor
    Returns: a keras tensor
    '''
    init = input

    grouped_channels = int(filters / cardinality)
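    # e.g. filters=64, cardinality=8 -> 8 channels per group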
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    # Check if input number of filters is same as 16 * k, else create convolution2d for this input
    if K.image_data_format() == 'channels_first':
        if init._keras_shape[1] != 2 * filters:
            init = Conv2D(filters * 2, (1, 1),
                          padding='same',
                          strides=(strides, strides),
                          use_bias=False,
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(weight_decay))(init)
            init = BatchNormalization(axis=channel_axis)(init)
    else:
        if init._keras_shape[-1] != 2 * filters:
            init = Conv2D(filters * 2, (1, 1),
                          padding='same',
                          strides=(strides, strides),
                          use_bias=False,
                          kernel_initializer='he_normal',
                          kernel_regularizer=l2(weight_decay))(init)
            init = BatchNormalization(axis=channel_axis)(init)

    x = Conv2D(filters, (1, 1),
               padding='same',
               use_bias=False,
               kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(input)
    x = BatchNormalization(axis=channel_axis)(x)
    x = LeakyReLU()(x)

    x = __grouped_convolution_block(x, grouped_channels, cardinality, strides,
                                    weight_decay)

    x = Conv2D(filters * 2, (1, 1),
               padding='same',
               use_bias=False,
               kernel_initializer='he_normal',
               kernel_regularizer=l2(weight_decay))(x)
    x = BatchNormalization(axis=channel_axis)(x)

    x = add([init, x])
    x = LeakyReLU()(x)

    return x
Example #27
hist_array_test = xr.DataArray(hists[test_mask],
                               coords={'dim_0': event_ids[test_mask]})

hist_array_train = hist_array_train.loc[df_sim_train.index]
hist_array_test = hist_array_test.loc[df_sim_test.index]

# ## CNN model

y_cat = to_categorical(y)
y_train_cat = to_categorical(y_train)
y_test_cat = to_categorical(y_test)

inputs = Input(shape=(24, 24, 1), name='hist_input')

x = Conv2D(12, (3, 3), padding='same')(inputs)
x = LeakyReLU(alpha=0.3)(x)
x = Conv2D(12, (3, 3), padding='same')(x)
x = LeakyReLU(alpha=0.3)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.2)(x)

x = Conv2D(24, (3, 3), padding='same')(x)
x = LeakyReLU(alpha=0.3)(x)
x = Conv2D(24, (3, 3), padding='same')(x)
x = LeakyReLU(alpha=0.3)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Dropout(0.2)(x)

x = Conv2D(64, (3, 3), padding='same')(x)
x = LeakyReLU(alpha=0.3)(x)
x = Conv2D(64, (3, 3), padding='same')(x)
def CNN_Model(data,
              back_hist=10,
              drop_out_rate=0.2,
              number_neurons=600,
              num_epochs=50,
              filter_num=10,
              num_batches=200,
              n_atraso=1):
    #
    #    import tensorflow as tf
    #    config = tf.ConfigProto()
    #    config.gpu_options.allow_growth=True
    #    sess = tf.Session(config=config)

    # Part 1 - Data Preprocessing

    # Importing the libraries
    import numpy as np
    import matplotlib.pyplot as plt
    import pandas as pd
    import datetime
    import math
    from sklearn.metrics import mean_absolute_error
    from sklearn.metrics import mean_squared_error
    from math import sqrt
    import os
    from keras.models import Sequential
    from keras.layers.core import Dense, Dropout, Activation, Flatten
    from keras.layers.recurrent import LSTM, GRU
    from keras.layers import Convolution1D, MaxPooling1D, AtrousConvolution1D, RepeatVector
    from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, CSVLogger
    from keras.layers.wrappers import Bidirectional
    from keras import regularizers
    from keras.layers.normalization import BatchNormalization
    from keras.layers.advanced_activations import LeakyReLU
    from keras.optimizers import RMSprop, Adam, SGD, Nadam
    #from keras.initializers import *

    import seaborn as sns
    sns.despine()

    def mean_absolute_percentage_error(y_true, y_pred):
        #, y_pred = check_arrays(y_true, y_pred)

        ## Note: does not handle mix 1d representation
        #if _is_1d(y_true):
        #    y_true, y_pred = _check_1d_array(y_true, y_pred)

        return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
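        # e.g. y_true = [100.0], y_pred = [90.0] -> 10.0 (percent)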

    def mean_percentage_error(y_true, y_pred):
        #y_true, y_pred = check_arrays(y_true, y_pred)

        ## Note: does not handle mix 1d representation
        #if _is_1d(y_true):
        #    y_true, y_pred = _check_1d_array(y_true, y_pred)

        return np.mean((y_true - y_pred) / y_true) * 100

    print("Janela: %.0f" % back_hist)
    print("Drouout: %.1f" % drop_out_rate)
    print("N de Neuronios: %.0f" % number_neurons)
    print("N de Epocas: %.0f" % num_epochs)
    print("N de Batches: %.0f" % num_batches)
    print("Dias de Previsao: %.0f" % n_atraso)

    #*************************************************************************************
    # Part 1 - Extraction and Data Preparation
    #*************************************************************************************
    dataset_total = data
    test_size = 60
    dataset_total_sem_teste = dataset_total[test_size:len(dataset_total)]
    dataset_total_sem_teste = dataset_total_sem_teste.reset_index()
    dataset_total_sem_teste = dataset_total_sem_teste.drop(["index"], axis=1)

    #int(round(len(dataset_total.index)*0.3,0))
    num_columns = len(dataset_total_sem_teste.columns)
    dataset_train = dataset_total_sem_teste.iloc[:, :]

    # The -1 refers to the last column - the data needs to be in this layout
    training_x = dataset_train.iloc[n_atraso:, 2:num_columns - 9].values
    training_y = dataset_train.iloc[:len(dataset_train) - n_atraso,
                                    num_columns - 1:num_columns].values

    from sklearn.preprocessing import MinMaxScaler
    sc = MinMaxScaler(feature_range=(0, 1))
    training_y = sc.fit_transform(training_y)
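    # Note: only the target series is scaled to [0, 1] here; predictions are
    # mapped back to price units later with sc.inverse_transform.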

    num_columns = training_x.shape[1]
    # Creating a data structure with back_hist timesteps and 1 output
    X_train = []
    y_train = []
    for i in range(back_hist, len(training_x)):
        X_train.append(training_x[i - back_hist:i, 0:num_columns])
        y_train.append(training_y[i, 0])
    X_train, y_train = np.array(X_train), np.array(y_train)

    # Reshaping
    X_train = np.reshape(
        X_train, (X_train.shape[0], X_train.shape[1], X_train.shape[2]))
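    # X_train now has shape (samples, back_hist, num_columns): each sample is a
    # sliding window of back_hist consecutive rows, paired with the target just
    # after the window; the reshape above simply keeps this 3-D layout for the
    # Conv1D layers built below.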

    # Generating test samples

    dataset_test = dataset_total.iloc[:test_size + back_hist + n_atraso, :]

    real_stock_price = dataset_test.iloc[:len(dataset_test) - back_hist -
                                         n_atraso, dataset_test.shape[1] -
                                         9:dataset_test.shape[1] - 8].values
    # needs adjusting whenever the dataset layout changes.
    test_x = dataset_test.iloc[n_atraso:, 2:dataset_test.shape[1] - 9].values
    X_test = []

    for i in range(back_hist, len(test_x)):
        X_test.append(test_x[i - back_hist:i, 0:num_columns])
    X_test = np.array(X_test)
    X_test = np.reshape(X_test,
                        (X_test.shape[0], X_test.shape[1], num_columns))
    # Part 2 - Building the RNN
    #*************************************************************************************

    # Importing the Keras libraries and packages

    model = Sequential()
    model.add(
        Conv1D(input_shape=(X_train.shape[1], X_train.shape[2]),
               filters=number_neurons,
               kernel_size=filter_num,
               padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Dropout(drop_out_rate))

    model.add(
        Conv1D(filters=int(round(number_neurons / 2)),
               kernel_size=int(round(filter_num * 0.8)),
               padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Dropout(drop_out_rate))

    model.add(
        Conv1D(filters=int(round(number_neurons / 4)),
               kernel_size=int(round(filter_num * 0.4)),
               padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU())
    model.add(Dropout(drop_out_rate))

    model.add(Flatten())

    model.add(Dense(64))
    model.add(BatchNormalization())
    model.add(LeakyReLU())

    model.add(Dense(1))

    #model.add(Activation('softmax'))
    #
    #opt = Nadam(lr=0.002)
    #
    #reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.9, patience=30, min_lr=0.000001, verbose=1)
    #checkpointer = ModelCheckpoint(filepath="lolkek.hdf5", verbose=1, save_best_only=True)

    model.compile(
        optimizer='adam',
        loss='mean_squared_error',
    )

    history = model.fit(X_train,
                        y_train,
                        epochs=num_epochs,
                        batch_size=num_batches,
                        verbose=1,
                        # scale validation targets like the training targets
                        validation_data=(X_test, sc.transform(real_stock_price)),
                        shuffle=True)

    #Saving the Model

    #*************************************************************************************
    # Part 3 - Making the predictions and visualising the results
    #*************************************************************************************

    predicted_stock_price = model.predict(X_test)
    predicted_stock_price = sc.inverse_transform(predicted_stock_price)

    #Making predictions for the training
    real_stock_price_train = dataset_train.iloc[:len(dataset_train) -
                                                back_hist - n_atraso,
                                                dataset_test.shape[1] -
                                                9:dataset_test.shape[1] -
                                                8].values
    predicted_stock_price_train = model.predict(X_train)
    predicted_stock_price_train = sc.inverse_transform(
        predicted_stock_price_train)

    #*************************************************************************************
    # Part 4 - Results Visualization
    #*************************************************************************************
    mae_train = mean_absolute_error(real_stock_price_train,
                                    predicted_stock_price_train)
    mse_train = mean_squared_error(real_stock_price_train,
                                   predicted_stock_price_train)
    rmse_train = sqrt(mse_train)
    mape_train = mean_absolute_percentage_error(real_stock_price_train,
                                                predicted_stock_price_train)
    mpe_train = mean_percentage_error(real_stock_price_train,
                                      predicted_stock_price_train)

    mae_test = mean_absolute_error(real_stock_price, predicted_stock_price)
    mse_test = mean_squared_error(real_stock_price, predicted_stock_price)
    rmse_test = sqrt(mse_test)
    mape_test = mean_absolute_percentage_error(real_stock_price,
                                               predicted_stock_price)
    mpe_test = mean_percentage_error(real_stock_price, predicted_stock_price)

    #    plt.plot(predicted_stock_price)
    #    plt.plot(real_stock_price)
    #    plt.legend(['Previsto','Real'])
    #    plt.savefig('Resultado_test.png',dpi=200)
    #    plt.show()
    #
    #    plt.plot(predicted_stock_price_train)
    #    plt.plot(real_stock_price_train)
    #    plt.legend(['Previsto','Real'])
    #    plt.savefig('Resultado_train.png',dpi=200)
    #    plt.show()

    #    from keras import backend as K
    #    del regressor
    #    K.clear_session()
    #
    import tensorflow as tf
    import keras
    if keras.backend.tensorflow_backend._SESSION:
        tf.reset_default_graph()
        keras.backend.tensorflow_backend._SESSION.close()
        keras.backend.tensorflow_backend._SESSION = None

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)

    return mae_test, mse_test, rmse_test, mape_test, mpe_test, mae_train, mse_train, rmse_train, mape_train, mpe_train,
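A possible call site for CNN_Model, assuming a DataFrame `df` laid out the way the slicing inside the function expects (features from column 2 onward, target in the last column); the names and parameter values below are illustrative only:

# metrics = CNN_Model(df, back_hist=10, drop_out_rate=0.2, number_neurons=600,
#                     num_epochs=50, filter_num=10, num_batches=200, n_atraso=1)
# (mae_test, mse_test, rmse_test, mape_test, mpe_test,
#  mae_train, mse_train, rmse_train, mape_train, mpe_train) = metrics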
Exemple #29
0
#%%
# Create Model
# Set the dimensions of the noise
z_dim = 100

# Optimizer
adam = Adam(lr=0.0002, beta_1=0.5)

# Generator
g = Sequential()
g.add(
    Dense(256,
          input_dim=z_dim,
          kernel_initializer=initializers.RandomNormal(stddev=.02)))
g.add(LeakyReLU(0.2))
g.add(Dense(512))
g.add(LeakyReLU(0.2))
g.add(Dense(1024))
g.add(LeakyReLU(0.2))
g.add(Dense(784, activation='tanh'))
g.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])

# Discriminator
d = Sequential()
d.add(
    Dense(1024,
          input_dim=784,
          kernel_initializer=initializers.RandomNormal(stddev=.02)))
d.add(LeakyReLU(0.2))
d.add(Dropout(0.4))
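The discriminator here is cut off after its first block. A minimal sketch of how this kind of fully-connected MNIST GAN is typically completed, mirroring the generator above (assumed continuation, not the original snippet):

# --- assumed continuation (not original) ---
d.add(Dense(512))
d.add(LeakyReLU(0.2))
d.add(Dropout(0.4))
d.add(Dense(256))
d.add(LeakyReLU(0.2))
d.add(Dropout(0.4))
d.add(Dense(1, activation='sigmoid'))
d.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])

# Combined model: freeze the discriminator so only the generator is updated
# when training on the stacked generator -> discriminator pass.
d.trainable = False
gan = Sequential([g, d])
gan.compile(loss='binary_crossentropy', optimizer=adam)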
Exemple #30
0
        # data_dir=top_dir + "/test/TIMIT_mini", cache_directory=top_dir + "/test/cache",
        data_dir=top_dir + "/TIMIT",
        cache_directory=top_dir + "/test/cache",
        min_cluster_count=1,
        max_cluster_count=5,
        return_1d_audio_data=False,
        test_classes=TIMIT_lst,
        validate_classes=TIMIT_lst,
        concat_audio_files_of_speaker=True,
        minimum_snippets_per_cluster=[(200, 200), (100, 100)],
        window_width=[(100, 200)])
    en = CnnEmbedding(output_size=256,
                      cnn_layers_per_block=1,
                      block_feature_counts=[32, 64, 128],
                      fc_layer_feature_counts=[256],
                      hidden_activation=LeakyReLU(),
                      final_activation=LeakyReLU(),
                      batch_norm_for_init_layer=False,
                      batch_norm_after_activation=True,
                      batch_norm_for_final_layer=True)

    c_nn = ClusterNNTry00_V51(dp,
                              20,
                              en,
                              lstm_layers=7,
                              internal_embedding_size=96,
                              cluster_count_dense_layers=1,
                              cluster_count_dense_units=256,
                              output_dense_layers=1,
                              output_dense_units=256,
                              cluster_count_lstm_layers=1,