コード例 #1
0
 def fit(self, X):
     """Train the stacked autoencoder on X.

     Splits off 10% of X as a validation set, builds a 4-layer
     tanh/relu autoencoder sized from self._encoding_dim, trains for
     self._nb_epoch epochs, and stores the trained model on
     self.autoencoder and the Keras history dict on self.history.

     X: 2-D array-like, samples x features.
     """
     X_train, X_test = train_test_split(X, test_size=0.1)
     self._input_dim = X_train.shape[1]
     input_layer = Input(shape=(self._input_dim, ))
     # Encoder: L1 activity penalty (10e-5 == 1e-4) encourages sparse codes.
     encoder = Dense(
         self._encoding_dim,
         activation='tanh',
         activity_regularizer=regularizers.activity_l1(10e-5))(input_layer)
     encoder = Dense(int(self._encoding_dim / 2),
                     activation='relu')(encoder)
     decoder = Dense(int(self._encoding_dim / 2),
                     activation='tanh')(encoder)
     decoder = Dense(self._input_dim, activation='relu')(decoder)
     autoencoder = Model(input=input_layer, output=decoder)
     autoencoder.compile(optimizer='adam',
                         loss='mean_squared_error',
                         metrics=['accuracy'])
     # Bug fix: the history dict was bound to a local that was never used;
     # store it on the instance so callers can inspect the loss curves.
     self.history = autoencoder.fit(X_train,
                                    X_train,
                                    nb_epoch=self._nb_epoch,
                                    batch_size=self._batch_size,
                                    shuffle=True,
                                    validation_data=(X_test, X_test),
                                    verbose=0).history
     self.autoencoder = autoencoder
コード例 #2
0
ファイル: autoencoder.py プロジェクト: joshloyal/Aeolus
def AutoEncoder(input_dim, encoding_dim, add_noise=None, dropout_proba=None, l1=1e-4):
    """Build and compile a one-hidden-layer autoencoder (Keras 1.x API).

    input_dim / encoding_dim size the input and hidden layers.  When
    `add_noise` is given the input is corrupted via a Lambda layer
    (denoising variant); `l1` sets the L1 activity penalty on the code
    (None disables it); a truthy `dropout_proba` drops code units.
    Returns the compiled Model.
    """
    inputs = Input(shape=(input_dim,))

    # Optional denoising: corrupt the input before encoding.
    hidden = inputs if add_noise is None else Lambda(
        add_noise, output_shape=noise_output_shape)(inputs)

    # Sparse bottleneck unless the L1 penalty is disabled.
    if l1 is None:
        encoded = Dense(encoding_dim, activation='relu')(hidden)
    else:
        encoded = Dense(encoding_dim, activation='relu',
                        activity_regularizer=regularizers.activity_l1(l1))(hidden)

    if dropout_proba:
        encoded = Dropout(dropout_proba)(encoded)

    decoded = Dense(input_dim, activation='sigmoid')(encoded)

    net = Model(input=inputs, output=decoded)
    net.compile(optimizer='adadelta',
                loss='binary_crossentropy',
                metrics=['accuracy'])
    return net
コード例 #3
0
ファイル: test_regularizers.py プロジェクト: 1avgur1/keras
def test_A_reg():
    """Smoke-test: models with L1 and L2 activity regularizers fit/evaluate.

    Relies on module-level create_model, data arrays and hyperparameters.
    """
    candidate_regs = (regularizers.activity_l1(), regularizers.activity_l2())
    for activity_reg in candidate_regs:
        net = create_model(activity_reg=activity_reg)
        net.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        net.fit(X_train, Y_train, batch_size=batch_size,
                nb_epoch=nb_epoch, verbose=0)
        net.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
コード例 #4
0
ファイル: autoencoder.py プロジェクト: joshloyal/Baumkuchen
def AutoEncoder(input_dim,
                encoding_dim,
                add_noise=None,
                dropout_proba=None,
                l1=1e-4):
    """Build and compile a one-hidden-layer autoencoder (Keras 1.x API).

    Parameters:
        input_dim: width of the input/reconstruction layers.
        encoding_dim: width of the hidden (code) layer.
        add_noise: optional callable applied to the input via a Lambda
            layer (denoising variant); uses the module-level
            `noise_output_shape` for the output shape.
        dropout_proba: if truthy, dropout probability applied to the code.
        l1: L1 activity-penalty strength on the code; None disables it.

    Returns the compiled Model.
    """
    model_input = Input(shape=(input_dim, ))

    # Optional denoising: corrupt the input before encoding.
    if add_noise is not None:
        x = Lambda(add_noise, output_shape=noise_output_shape)(model_input)
    else:
        x = model_input

    # Sparse code via L1 activity penalty, unless disabled.
    if l1 is not None:
        encoded = Dense(encoding_dim,
                        activation='relu',
                        activity_regularizer=regularizers.activity_l1(l1))(x)
    else:
        encoded = Dense(encoding_dim, activation='relu')(x)

    if dropout_proba:
        encoded = Dropout(dropout_proba)(encoded)

    # Sigmoid reconstruction -- presumably inputs are scaled to [0, 1];
    # confirm with callers.
    decoded = Dense(input_dim, activation='sigmoid')(encoded)

    autoencoder = Model(input=model_input, output=decoded)
    autoencoder.compile(optimizer='adadelta',
                        loss='binary_crossentropy',
                        metrics=['accuracy'])

    return autoencoder
コード例 #5
0
def op_create_model(optimizer='adam',
                    activation='relu',
                    learn_rate=0.01,
                    momentum=0,
                    init_mode='uniform',
                    dropout_rate=0.0,
                    weight_constraint=0,
                    neurons=inter_dim):
    """Build a one-hidden-layer sparse autoencoder (Keras 1.x API).

    Relies on module-level `input_dim` and `inter_dim`.
    NOTE(review): `learn_rate` and `momentum` are accepted but never used
    here -- the optimizer string goes straight to compile(); confirm
    whether they were meant to configure an optimizer instance.
    """
    # create model
    model = Sequential()
    model.add(
        Dense(neurons,
              input_dim=input_dim,
              init=init_mode,
              activation=activation,
              # L1 activity penalty (10e-5 == 1e-4) for sparse codes
              activity_regularizer=regularizers.activity_l1(10e-5),
              W_constraint=maxnorm(weight_constraint)))
    model.add(Dropout(dropout_rate))
    model.add(Dense(input_dim, init=init_mode, activation=activation))

    # Compile model
    model.compile(loss='mean_squared_error',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    return model
コード例 #6
0
def test_A_reg():
    """Smoke-test: models with L1/L2 activity regularizers fit and evaluate.

    Relies on module-level create_model, data arrays and hyperparameters.
    """
    for reg in [regularizers.activity_l1(), regularizers.activity_l2()]:
        model = create_model(activity_reg=reg)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
        model.fit(X_train, Y_train, batch_size=batch_size,
                  nb_epoch=nb_epoch, verbose=0)
        model.evaluate(X_test[test_ids, :], Y_test[test_ids, :], verbose=0)
コード例 #7
0
    def build(self, input_shape):
        """Keras layer build hook: initialize the dictionary weights and
        attach the sparsity and reconstruction regularizers (Keras 1.x
        custom-layer protocol).

        input_shape[1] is taken as the number of input features.
        """
        nb_features = input_shape[1]

        if self.np_weights is not None:
            # Bug fix: this was a Python 2 `print` statement (a syntax
            # error under Python 3); the call form works on both.
            print("Using provided weights")
        else:
            # Random dictionary rows, L2-normalized per row.
            self.np_weights = np.random.normal(
                size=[self.dict_size, nb_features])
            self.np_weights = np.float32(normalize(self.np_weights, axis=1))

        self.W = K.variable(self.np_weights, name='{}_W'.format(self.name))

        # All-zeros companion matrix (features x dict_size).
        Wzero = np.float32(np.zeros(shape=[nb_features, self.dict_size]))
        self.Wzero = K.variable(Wzero, name='{}_Wzero'.format(self.name))

        self.trainable_weights = [self.W]

        # set initial alpha
        #        eigvals = np.linalg.eigvals(self.np_weights.dot(K.transpose(self.np_weights)))
        #        maxEigval = np.max(np.absolute(eigvals))
        #        self.alpha = np.float32(1/maxEigval)

        # Sparsity penalty scaled down by the feature count; regularizers
        # are registered on the layer via the Keras 1.x set_layer protocol.
        self.activity_regularizer = activity_l1(self.threshold / nb_features)
        self.activity_regularizer.set_layer(self)
        self.regularizers.append(self.activity_regularizer)

        self.recons_regularizer = reconsRegularizer(l2=self.reconsCoef)
        self.recons_regularizer.set_layer(self)
        self.regularizers.append(self.recons_regularizer)
コード例 #8
0
    def __init__(self, train_set, test_set):
        """Build a dense sparse autoencoder whose code width equals the
        feature count of train_set, plus standalone encoder/decoder models.

        train_set / test_set: 2-D arrays, samples x features; stored on
        the instance.  The models are compiled but not trained here.
        """
        self.train_data = train_set
        self.test_data = test_set
        # NOTE: the "encoding" width equals the input width, so compression
        # comes from the L1 sparsity penalty, not a narrow bottleneck.
        self.encoding_dim = train_set.shape[1]

        input_img = Input(shape=(self.encoding_dim, ))
        encoded = Dense(
            self.encoding_dim,
            activation='relu',
            activity_regularizer=regularizers.activity_l1(10e-5))(input_img)
        decoded = Dense(self.encoding_dim, activation='sigmoid')(encoded)

        self.autoencoder = Model(input=input_img, output=decoded)

        # this model maps an input to its encoded representation
        self.encoder = Model(input=input_img, output=encoded)

        # create a placeholder for an encoded input
        encoded_input = Input(shape=(self.encoding_dim, ))
        # retrieve the last layer of the autoencoder model
        decoder_layer = self.autoencoder.layers[-1]
        # create the decoder model by replaying that layer on the placeholder
        self.decoder = Model(input=encoded_input,
                             output=decoder_layer(encoded_input))

        # First, we'll configure our model to use a per-pixel binary crossentropy loss, and the Adadelta optimizer:
        self.autoencoder.compile(optimizer='adadelta',
                                 loss='binary_crossentropy')
コード例 #9
0
def sample_model():
    """Build the baseline CNN classifier for 48x48x1 inputs.

    Two conv/pool blocks followed by a dense head ending in an
    L1-regularized `num_classes`-way softmax (module-level constant).
    Returns the uncompiled Sequential model.
    """
    net = Sequential()

    # Conv block 1: 64 filters, 5x5 kernels, relu.
    net.add(Convolution2D(64, 5, 5, input_shape=(48, 48, 1),
                          activation='relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))

    # Conv block 2: 128 filters, then dropout against overfitting.
    net.add(Convolution2D(128, 5, 5, activation='relu'))
    net.add(MaxPooling2D(pool_size=(2, 2)))
    net.add(Dropout(0.2))

    # Dense classifier head.
    net.add(Flatten())
    net.add(Dense(600, activation='relu'))
    net.add(Dropout(0.2))
    net.add(Dense(200, activation='relu'))
    net.add(Dense(num_classes,
                  W_regularizer=l1(0.01),
                  activity_regularizer=activity_l1(0.01),
                  activation='softmax'))
    return net
コード例 #10
0
ファイル: playdot.py プロジェクト: hobson/toynet
def multiplay(N=1,
              M=1000,
              nb_epoch=200,
              activation='relu',
              lr=0.001,
              momentum=0.001,
              decay=0.001,
              nesterov=False):
    """Learn to multiply paired inputs together with a small MLP.

    Trains on M random samples of dimension 2*N whose target is the sum
    of elementwise products of the two halves, prints the held-out loss,
    and returns (model, X, y, X_test, y_test).

    NOTE(review): uses the deprecated `pd.np` NumPy alias and the
    Keras 1.x `nb_epoch` argument.
    """

    from keras.regularizers import l1, activity_l1

    model = Sequential()
    # Three dense layers, each with L1 penalties on both weights and
    # activations to keep the learned mapping sparse.
    model.add(
        Dense(4 * N,
              input_dim=N * 2,
              W_regularizer=l1(0.03),
              activity_regularizer=activity_l1(0.03)))
    model.add(Activation(activation))
    model.add(
        Dense(4 * N,
              W_regularizer=l1(0.03),
              activity_regularizer=activity_l1(0.03)))
    model.add(Activation(activation))
    model.add(
        Dense(1,
              W_regularizer=l1(0.03),
              activity_regularizer=activity_l1(0.03)))
    model.add(Activation(activation))

    model.compile(optimizer=SGD(lr=lr,
                                momentum=momentum,
                                decay=decay,
                                nesterov=nesterov),
                  loss='mse')

    # Random training set; y sums the products of the two N-dim halves.
    X = pd.DataFrame(pd.np.random.randn(M, N * 2))
    y = (X.T.loc[:N] * X.T.loc[N:]).sum().T.values
    model.fit(X.values, y, nb_epoch=nb_epoch)

    # 10%-sized held-out set for the final evaluation.
    X_test = pd.DataFrame(pd.np.random.randn(int(M * 0.1), N * 2))
    y_test = (X_test.T.loc[:N] * X_test.T.loc[N:]).sum().T.values
    print('test set loss: {}'.format(model.evaluate(X_test.values, y_test)))

    return model, X, y, X_test, y_test
コード例 #11
0
def tf_autoencoder(code, start_date, end_date, collapse, do_shuffle, input_dimension,
                   latent_dimension, epochs, batch, loss, optimizer):
    """Train a one-layer sparse autoencoder on data from split_inputs.

    Returns (train, train_reconstructed, valid, valid_reconstructed).
    """
    train, valid = split_inputs(code, start_date, end_date, collapse, do_shuffle, input_dimension)
    # L1 activity penalty (10e-5 == 1e-4) keeps the latent code sparse.
    sparsity_reg = regularizers.activity_l1(10e-5)
    inputs = Input(shape=(input_dimension, ))
    code_layer = Dense(latent_dimension, activation='relu', activity_regularizer=sparsity_reg)(inputs)
    outputs = Dense(input_dimension, activation='sigmoid')(code_layer)
    net = Model(input=inputs, output=outputs)
    net.compile(optimizer=optimizer, loss=loss)
    net.fit(train, train, nb_epoch=epochs, batch_size=batch, verbose=0)
    return train, net.predict(train), valid, net.predict(valid)
コード例 #12
0
def hyperas_sdae_features(feature_list, encoding_dim = 92):
    """Hyperas search objective: train a denoising autoencoder on
    feature_list[0]/[1] (train/validation) and report validation MSE.

    NOTE(review): the {{choice([...])}} token below is hyperas template
    syntax -- this function is only valid Python after hyperas
    preprocessing.
    NOTE(review): `encoding_dim` is never used; the code layer is
    hard-coded to 92 units.  Confirm intended.
    NOTE(review): the returned 'model' entry is this function object
    itself, not the trained Keras model -- verify against the hyperas
    driver.
    """

    noise_factor = 0.5

    # Additive Gaussian corruption for the denoising objective.
    train_noise = feature_list[0] + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=feature_list[0].shape)
    vali_noise = feature_list[1] + noise_factor * np.random.normal(loc=0.0, scale=1.0, size=feature_list[1].shape)


    train_noisy = np.clip(train_noise, 0., 1.)
    vali_noisy = np.clip(vali_noise, 0., 1.)


    input_dim= Input(shape=(np.shape(feature_list[0])[-1],))



    encoded = Dense({{choice([500, 600, 700])}}, activation='tanh')(input_dim)

    # 92-unit sparse code (L1 activity penalty, 10e-5 == 1e-4).
    encoded = Dense(92, activation='tanh', activity_regularizer=regularizers.activity_l1(10e-5))(encoded)

    decoded = Dense(np.shape(feature_list[0])[-1], activation='linear')(encoded)



    autoencoder = Model(input=input_dim, output=decoded)

    encoder = Model(input=input_dim, output=encoded)

    autoencoder.compile(optimizer='sgd', loss='mse')

    start = timeit.default_timer()

    # Denoising fit: noisy inputs, clean targets.
    history = autoencoder.fit(train_noisy, feature_list[0],
                    nb_epoch=10,
                    batch_size=100,
                    shuffle=True,
                    verbose = 0,
                    validation_data=(vali_noisy, feature_list[1])
                    )

    stop = timeit.default_timer()

    print ("The running takes %r min" %((stop-start)/60))

    # Encoded features for the first and last entries of feature_list.
    sdae_train, sdae_test = encoder.predict(feature_list[0]), encoder.predict(feature_list[-1])

    score, mse = autoencoder.evaluate(vali_noisy, feature_list[1], verbose = 0)

    print ('test mse: ', mse)

    return {'loss': mse, 'status':STATUS_OK, 'model':hyperas_sdae_features}
コード例 #13
0
ファイル: autoencoders.py プロジェクト: Debanjan1234/DL
def sparse_autoencoder(X, lam=1e-5):
    """Fit a 64-unit sparse autoencoder on flattened X.

    X is flattened to (samples, features); `lam` is the L1 activity
    penalty on the hidden layer.  Returns (autoencoder, encoder) models.
    """
    flat = X.reshape(X.shape[0], -1)
    n_samples, n_features = flat.shape

    inp = Input(shape=(n_features, ))
    hidden = Dense(64, activation='sigmoid',
                   activity_regularizer=activity_l1(lam))(inp)
    out = Dense(n_features)(hidden)

    net = Model(input=inp, output=out)
    net.compile(optimizer='adam', loss='mse')
    net.fit(flat, flat, batch_size=64, nb_epoch=3)

    return net, Model(input=inp, output=hidden)
コード例 #14
0
ファイル: main.py プロジェクト: ajeet28/recommender
def autoencoder(x_train, args):
	'''
	Build and compile a single-hidden-layer sparse autoencoder.

	input : x_train (samples x features array; sets the input width),
	        args (namespace with lr, beta_1, beta_2, epsilon, decay)
	output : compiled autoencoder model

	Bug fix: the Adam optimizer configured from `args` was constructed
	but never used -- compile() hard-coded 'sgd', silently ignoring all
	the optimizer arguments.  The configured optimizer is now passed to
	compile().
	'''
	encoding_dim = 250
	input_size = x_train.shape[1]
	input_vector = Input(shape=(input_size,))
	# L1 activity penalty on the 250-unit code encourages sparsity.
	encoded = Dense(encoding_dim, activation='relu',activity_regularizer=regularizers.activity_l1(0.01), init='glorot_normal')(input_vector)
	decoded = Dense(input_size, activation='linear', init='glorot_normal')(encoded)
	autoencoder = Model(input_vector, decoded)
	# Encoder model kept for parity with the original; not returned.
	encoder = Model(input_vector, encoded)
	opt = Adam(lr=args.lr, beta_1=args.beta_1, beta_2=args.beta_2, epsilon=args.epsilon, decay=args.decay)
	autoencoder.compile(optimizer=opt, loss='mse', metrics=['acc'])
	return autoencoder
コード例 #15
0
def tf_autoencoder(code, start_date, end_date, collapse, do_shuffle,
                   input_dimension, latent_dimension, epochs, batch, loss,
                   optimizer):
    """Train a one-layer sparse autoencoder on data from split_inputs.

    Returns (train, train_reconstructed, valid, valid_reconstructed).
    """
    train, valid = split_inputs(code, start_date, end_date, collapse,
                                do_shuffle, input_dimension)
    # L1 activity penalty (10e-5 == 1e-4) keeps the latent code sparse.
    the_fed = regularizers.activity_l1(10e-5)
    input_seq = Input(shape=(input_dimension, ))
    encoded = Dense(latent_dimension,
                    activation='relu',
                    activity_regularizer=the_fed)(input_seq)
    decoded = Dense(input_dimension, activation='sigmoid')(encoded)
    autoencoder = Model(input=input_seq, output=decoded)
    autoencoder.compile(optimizer=optimizer, loss=loss)
    autoencoder.fit(train, train, nb_epoch=epochs, batch_size=batch, verbose=0)
    train_reconstructed = autoencoder.predict(train)
    valid_reconstructed = autoencoder.predict(valid)
    return train, train_reconstructed, valid, valid_reconstructed
コード例 #16
0
ファイル: main.py プロジェクト: Shaofanl/Old_Ideas
def sparseAE():
    """Assemble a 784-d sparse autoencoder plus standalone encoder/decoder.

    Uses the module-level `encoding_dim`; returns the tuple
    (autoencoder, encoder, decoder).
    """
    img = Input(shape=(784, ))
    # L1 activity penalty on the bottleneck keeps the code sparse.
    bottleneck = Dense(
        encoding_dim,
        activation='relu',
        activity_regularizer=regularizers.activity_l1(10e-5))(img)
    reconstruction = Dense(784, activation='sigmoid')(bottleneck)
    autoencoder = Model(input=img, output=reconstruction)

    encoder = Model(input=img, output=bottleneck)

    # Stand-alone decoder: replay the autoencoder's final layer on a
    # fresh encoding placeholder.
    code_in = Input(shape=(encoding_dim, ))
    final_layer = autoencoder.layers[-1]
    decoder = Model(input=code_in, output=final_layer(code_in))
    return autoencoder, encoder, decoder
コード例 #17
0
def create_model(optimizer='adam',
                 activation='relu',
                 learn_rate=0.01,
                 momentum=0,
                 init_mode='uniform',
                 dropout_rate=0.0,
                 weight_constraint=0,
                 neurons=inter_dim):
    """Build a one-hidden-layer sparse autoencoder for hyperparameter search.

    Relies on module-level `input_dim` and `inter_dim`.  `optimizer` may be
    one of the recognized class names ("SGD", "Adam", ...) to get a
    configured instance, or any Keras optimizer string passed through
    unchanged (e.g. the default 'adam').
    """
    # create model
    model = Sequential()
    model.add(
        Dense(neurons,
              input_dim=input_dim,
              init=init_mode,
              activation=activation,
              # L1 activity penalty (10e-5 == 1e-4) for sparse codes
              activity_regularizer=regularizers.activity_l1(10e-5),
              W_constraint=maxnorm(weight_constraint)))
    model.add(Dropout(dropout_rate))
    model.add(Dense(input_dim, init=init_mode, activation=activation))

    # Map optimizer names to configured instances.  Bug fix: only SGD
    # accepts a `momentum` argument -- passing momentum= to the other
    # optimizers raised TypeError at construction time, so they now
    # receive just the learning rate.
    if optimizer == "SGD":
        optimizer = SGD(lr=learn_rate, momentum=momentum)
    elif optimizer == "Adam":
        optimizer = Adam(lr=learn_rate)
    elif optimizer == "RMSprop":
        optimizer = RMSprop(lr=learn_rate)
    elif optimizer == "Adagrad":
        optimizer = Adagrad(lr=learn_rate)
    elif optimizer == "Adadelta":
        optimizer = Adadelta(lr=learn_rate)
    elif optimizer == "Adamax":
        optimizer = Adamax(lr=learn_rate)
    elif optimizer == "Nadam":
        optimizer = Nadam(lr=learn_rate)

    # Compile model
    model.compile(loss='mean_squared_error',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    return model
コード例 #18
0
ファイル: neuralNetwork.py プロジェクト: Tornadoofsoul/MRAE
    def buildAutoencoder(self,
                         inputDim):
        '''
        Network definition layer by layer.

        Builds a one-hidden-layer sparse autoencoder from instance
        configuration (encoding_dim, activFirstLayer, activSecondLayer,
        optimizer, loss), stores it on self.autoencoder, and returns True.
        '''
        # Input placeholder
        input_data = Input(shape=(inputDim,))

        # "encoded" is the encoded representation of the input
        # (L1 activity penalty, 10e-5 == 1e-4, encourages sparse codes)
        encoded = Dense(self.encoding_dim,
                        activation=self.activFirstLayer,
                        activity_regularizer=regularizers.activity_l1(10e-5))(input_data)

        # "decoded" is the lossy reconstruction of the input
        decoded = Dense(inputDim, activation=self.activSecondLayer)(encoded)

        # This model maps an input to its reconstruction
        self.autoencoder = Model(input=input_data, output=decoded)
        self.autoencoder.compile(optimizer=self.optimizer,
                                 loss=self.loss)
        return True
コード例 #19
0
    def __init__(self, train_set, test_set):
        """Build a dense sparse autoencoder whose code width equals the
        feature count of train_set, plus standalone encoder/decoder models.

        The models are compiled but not trained here; train_set/test_set
        are only stored on the instance.
        """
        self.train_data = train_set
        self.test_data = test_set
        # NOTE: the "encoding" width equals the input width, so compression
        # comes from the L1 sparsity penalty, not a narrow bottleneck.
        self.encoding_dim = train_set.shape[1]

        input_img = Input(shape=(self.encoding_dim,))
        encoded = Dense(self.encoding_dim, activation='relu', activity_regularizer=regularizers.activity_l1(10e-5))(input_img)
        decoded = Dense(self.encoding_dim, activation='sigmoid')(encoded)

        self.autoencoder = Model(input=input_img, output=decoded)

        # this model maps an input to its encoded representation
        self.encoder = Model(input=input_img, output=encoded)

        # create a placeholder for an encoded input
        encoded_input = Input(shape=(self.encoding_dim,))
        # retrieve the last layer of the autoencoder model
        decoder_layer = self.autoencoder.layers[-1]
        # create the decoder model by replaying that layer on the placeholder
        self.decoder = Model(input=encoded_input, output=decoder_layer(encoded_input))


        # First, we'll configure our model to use a per-pixel binary crossentropy loss, and the Adadelta optimizer:
        self.autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
コード例 #20
0
def transform_model(weight_loss_pix=5e-4):
    """Residual image-to-image transform network for 128x128x3 inputs.

    A conv/LeakyReLU/BatchNorm encoder downsamples twice, two
    Deconvolution2D layers upsample back, and the 3-channel output is
    added to the input (merge mode='sum') so the network learns a
    residual.  `weight_loss_pix` is the L1 activity-regularization weight
    on the final deconvolution, penalizing large per-pixel residuals.
    Returns the uncompiled Model.
    """
    inputs = Input(shape=(128, 128, 3))
    x1 = Convolution2D(64, 5, 5, border_mode='same')(inputs)
    x2 = LeakyReLU(alpha=0.3, name='wkcw')(x1)
    x3 = BatchNormalization()(x2)
    # Strided convs halve spatial resolution twice: 128 -> 64 -> 32.
    x4 = Convolution2D(128, 4, 4, border_mode='same', subsample=(2, 2))(x3)
    x5 = LeakyReLU(alpha=0.3)(x4)
    x6 = BatchNormalization()(x5)
    x7 = Convolution2D(256, 4, 4, border_mode='same', subsample=(2, 2))(x6)
    x8 = LeakyReLU(alpha=0.3)(x7)
    x9 = BatchNormalization()(x8)
    # Deconvolutions upsample back: 32 -> 64 -> 128 (Keras 1.x requires
    # the explicit output_shape).
    x10 = Deconvolution2D(128,
                          3,
                          3,
                          output_shape=(None, 64, 64, 128),
                          border_mode='same',
                          subsample=(2, 2))(x9)
    x11 = BatchNormalization()(x10)
    x12 = Deconvolution2D(64,
                          3,
                          3,
                          output_shape=(None, 128, 128, 64),
                          border_mode='same',
                          subsample=(2, 2))(x11)
    x13 = BatchNormalization()(x12)
    x14 = Deconvolution2D(
        3,
        4,
        4,
        output_shape=(None, 128, 128, 3),
        border_mode='same',
        activity_regularizer=activity_l1(weight_loss_pix))(x13)
    # Residual connection: output = input + predicted residual.
    output = merge([inputs, x14], mode='sum')
    model = Model(input=inputs, output=output)

    return model
コード例 #21
0
def sparse_auto_encoder(X_train, X_test):
    """Train a tanh sparse autoencoder and return model outputs for both sets.

    Returns (X_train_encoded, X_test_encoded).

    NOTE(review): despite the names, the returned arrays are full-model
    reconstructions (predict on the whole autoencoder), not bottleneck
    encodings.
    NOTE(review): validation_data reuses the training set -- confirm this
    is intended rather than (X_test, X_test).
    """
    encoding_dim = 80
    input_img = Input(shape=(len(X_train[0]), ))
    # add a Dense layer with a L1 activity regularizer
    encoded = Dense(
        encoding_dim,
        activation='tanh',
        activity_regularizer=regularizers.activity_l1(10e-5))(input_img)
    decoded = Dense(len(X_train[0]), activation='tanh')(encoded)

    autoencoder = Model(input=input_img, output=decoded)

    autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

    autoencoder.fit(X_train,
                    X_train,
                    nb_epoch=100,
                    batch_size=256,
                    shuffle=True,
                    validation_data=(X_train, X_train))

    X_test_encoded = autoencoder.predict(X_test)
    X_train_encoded = autoencoder.predict(X_train)
    return X_train_encoded, X_test_encoded
コード例 #22
0
# Convolutional encoder for 224x224x1 images (Keras 1.x API).
input_img = Input(shape=(224, 224, 1))
x = Convolution2D(16,
                  3,
                  3,
                  activation='relu',
                  border_mode='same',
                  input_shape=(224, 224, 1))(input_img)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling2D((2, 2), border_mode='same')(x)
# Final conv carries an L1 activity penalty (sparse feature maps).
x = Convolution2D(8,
                  3,
                  3,
                  activation='relu',
                  border_mode='same',
                  activity_regularizer=regularizers.activity_l1(10e-5))(x)
encoded = MaxPooling2D((2, 2), border_mode='same')(x)
model = Model(input_img, encoded)
# NOTE(review): compile() has no documented `verbose` argument in Keras;
# confirm this kwarg is not silently ignored or an error.
model.compile(loss='binary_crossentropy', optimizer='adagrad', verbose=0)

# In[4]:

# Load pretrained weights from the path given on the command line.
model.load_weights(sys.argv[3], by_name=True)

# In[5]:


def push_pqueue(queue, priority, value):
    if len(queue) > 10:
        heapq.heappushpop(queue, (priority, value))
    else:
コード例 #23
0
ファイル: VanillaAE.py プロジェクト: snehpahilwani/mana-deep
from PIL import Image
import os
import numpy as np
import PIL
import matplotlib.pyplot as plt
import matplotlib.cm as cm


# this is the size of our encoded representations
encoding_dim = 32  # 32 floats; 50176-float (224x224) input -> compression factor 1568

# this is our input placeholder (flattened 224x224 grayscale image)
input_img = Input(shape=(50176,))

# "encoded" is the encoded representation of the input
# (L1 activity penalty, 10e-5 == 1e-4, encourages sparse codes)
encoded = Dense(encoding_dim, activation='relu',activity_regularizer=regularizers.activity_l1(10e-5))(input_img)

# "decoded" is the lossy reconstruction of the input
decoded = Dense(50176, activation='sigmoid')(encoded)

# this model maps an input to its reconstruction
autoencoder = Model(input=input_img, output=decoded)

# this model maps an input to its encoded representation
encoder = Model(input=input_img, output=encoded)

# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim,))

# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
コード例 #24
0
'''
Train the model with a recent version of Keras, using GPU acceleration
(mine is a GTX 960).  The Bidirectional wrapper currently requires the
GitHub (development) version of Keras.
'''
from keras.layers import Dense, LSTM, Lambda, TimeDistributed, Input, Masking, Bidirectional
from keras.models import Model
from keras.utils import np_utils
from keras.regularizers import activity_l1  # L1 activity penalty makes the output sparser

# Sequence-tagging BLSTM: masked input, two stacked bidirectional LSTMs
# (forward/backward merged by summation), then a per-timestep 5-way softmax.
sequence = Input(shape=(maxlen, word_size))
mask = Masking(mask_value=0.)(sequence)
blstm = Bidirectional(LSTM(64, return_sequences=True), merge_mode='sum')(mask)
blstm = Bidirectional(LSTM(32, return_sequences=True), merge_mode='sum')(blstm)
output = TimeDistributed(
    Dense(5, activation='softmax',
          activity_regularizer=activity_l1(0.01)))(blstm)
model = Model(input=sequence, output=output)
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# Translation of the original note below: gen_matrix builds a training
# sample from a tokenized word list; gen_target converts the output
# sequence into one-hot targets; sequences longer than maxlen are
# truncated, shorter ones zero-padded.
'''
gen_matrix实现从分词后的list来输出训练样本
gen_target实现将输出序列转换为one hot形式的目标
超过maxlen则截断,不足补0
'''
gen_matrix = lambda z: np.vstack(
    (word2vec[z[:maxlen]], np.zeros((maxlen - len(z[:maxlen]), word_size))))
gen_target = lambda z: np_utils.to_categorical(
    np.array(z[:maxlen] + [0] * (maxlen - len(z[:maxlen]))), 5)

コード例 #25
0
ファイル: VAE.py プロジェクト: snehpahilwani/mana-deep
    images.append(np.array([np.flipud(img)]))
    images.append(np.array([np.fliplr(np.flipud(img))]))

print('Training with ', len(images), ' samples')

# this is the size of our encoded representations
encoding_dim = 32  # 32 floats; 50176-float (224x224) input -> compression factor 1568

# this is our input placeholder
input_img = Input(shape=(50176, ))

# "encoded" is the encoded representation of the input
# NOTE(review): activity_l1(10e-1) is a penalty of 1.0 -- far stronger
# than the 1e-4/1e-5 used in sibling examples; confirm it is not a typo.
encoded = Dense(
    encoding_dim,
    activation='relu',
    activity_regularizer=regularizers.activity_l1(10e-1))(input_img)

# "decoded" is the lossy reconstruction of the input
decoded = Dense(50176, activation='sigmoid')(encoded)

# this model maps an input to its reconstruction
autoencoder = Model(input=input_img, output=decoded)

# this model maps an input to its encoded representation
encoder = Model(input=input_img, output=encoded)

# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim, ))

# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
コード例 #26
0
# Tiled-feature autoencoder: a strided conv "encoder" over image chunks,
# then a 1x1 conv "decoder" reconstructing 16x16x3 tiles.
num_feature_detectors = 16 * 16 * 4
tile_size = 16
receptive_field_size = 24
model = Sequential()
model.add(Dropout(0.1, input_shape=ichunkshape[1:]))

# 24x24 receptive fields with a 16-pixel stride (one window per tile);
# a tiny L1 activity penalty keeps the tiled feature maps sparse.
input_features = Convolution2D(name='encode_tiled_features',
                               nb_filter=num_feature_detectors,
                               nb_row=receptive_field_size,
                               nb_col=receptive_field_size,
                               subsample=(tile_size, tile_size),
                               border_mode='valid',
                               trainable=True,
                               dim_ordering='tf',
                               activity_regularizer=activity_l1(0.0000005))

model.add(input_features)
model.add(PReLU())

# 1x1 conv maps each tile's features back to tile_size*tile_size*3 values.
decoder = Convolution2D(name='decode_tiled_features',
                        nb_filter=tile_size * tile_size * 3,
                        nb_row=1,
                        nb_col=1,
                        subsample=(1, 1),
                        border_mode='same',
                        trainable=True,
                        dim_ordering='tf')

model.add(decoder)
model.add(
コード例 #27
0
ファイル: main.py プロジェクト: yjpark1/kerasConvAutoEncoder
# Hyperparameters for the layer-wise conv-AE pretraining loop that follows.
input_dim = 1;  # NOTE(review): stray semicolon -- harmless but unidiomatic
filter_length=21
batch_size = 1
nb_epoch = 1

for n in range(nb_hidden_layer):
    print('Pre-training the layer: {}'.format(n))
    # Create AE and training
    encoder = Sequential()
    encoder.add(Convolution1D(input_length=1000,
                            nb_filter = nb_filter,
                            input_dim = input_dim,
                            filter_length = filter_length,
                            border_mode = "same",
                            activation = "tanh",
                            activity_regularizer=activity_l1(0.001),
                            W_regularizer=l2(.001),
                            subsample_length = 1))
    decoder = Sequential()
    decoder.add(Convolution1D(input_length=1000,
                            input_dim = nb_filter,
                            nb_filter = 1,
                            filter_length = filter_length,
                            border_mode = "same",
                            activation = "linear",
                            W_regularizer=l2(.001),
                            subsample_length = 1))
    ae=Sequential()
    ae.add(AutoEncoder(encoder=encoder, decoder=decoder,output_reconstruction=False))
    layer_utils.print_layer_shapes(ae,[(1,1000,1)]) 
    print "....compile"
コード例 #28
0
ファイル: trainold.py プロジェクト: snehpahilwani/mana-deep
import os
from os import listdir
from os.path import isfile, join
import numpy as np
from matplotlib import pyplot as plt
import  cv2
import scipy.misc

# Convolutional autoencoder for 224x224 grayscale images (Keras 1.x API).
input_img = Input(shape=(224, 224,1))

# Encoder: three conv blocks; the last carries an L1 activity penalty.
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same', input_shape=(224,224,1))(input_img)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
x = MaxPooling2D((2, 2), border_mode='same')(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same', activity_regularizer=regularizers.activity_l1(10e-5))(x)
encoded = MaxPooling2D((2, 2), border_mode='same')(x)

# at this point the representation is (8, 4, 4) i.e. 128-dimensional
# NOTE(review): for 224x224 input the encoded maps are 28x28x8, not 4x4x8;
# the line above looks copied from the 28x28 MNIST tutorial.

# Decoder: mirror conv/upsample blocks back to a 224x224x1 sigmoid output.
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(encoded)
x = UpSampling2D((2, 2))(x)
x = Convolution2D(8, 3, 3, activation='relu', border_mode='same')(x)
x = UpSampling2D((2, 2))(x)
x = Convolution2D(16, 3, 3, activation='relu', border_mode='same')(x)
x = UpSampling2D((2, 2))(x)
decoded = Convolution2D(1, 3, 3, activation='sigmoid', border_mode='same')(x)

autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adagrad', loss='mse')
コード例 #29
0
from audio_preprocessing.pipeline import AudioPipeline, plot_signal_simple

from keras.layers import Input, Dense
from keras.models import Model
from keras.datasets import mnist
from keras import regularizers
import numpy as np
import matplotlib.pyplot as plt

# this is the size of our encoded representations
encoding_dim = 40  # 40 floats; 882-sample frames -> compression factor ~22

# this is our input placeholder (one 882-sample audio frame)
input_img = Input(shape=(882,))
# "encoded" is the encoded representation of the input
# (L1 activity penalty, 10e-5 == 1e-4, encourages sparse codes)
encoded = Dense(encoding_dim, activation='relu', activity_regularizer=regularizers.activity_l1(10e-5))(input_img)
# "decoded" is the lossy reconstruction of the input (tanh: signed audio)
decoded = Dense(882, activation='tanh')(encoded)

# this model maps an input to its reconstruction
autoencoder = Model(input=input_img, output=decoded)

# this model maps an input to its encoded representation
encoder = Model(input=input_img, output=encoded)

# create a placeholder for an encoded (40-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(input=encoded_input, output=decoder_layer(encoded_input))
コード例 #30
0
        if cp not in compress and 0.01 <= bar.mean() < 0.028:
            nbeats.append(bar)
            compress.append(cp)
beats = np.array(nbeats)
# two measures is more fun
#beats = beats.reshape((-1,beats.shape[1]*2,20))
print('two measures', beats.shape)
x_train, x_test, _, _ = train_test_split(beats, beats, test_size=0.25, random_state=0)
print('size', x_train.shape, x_test.shape)

# encoder: flatten the (timesteps x 20) bars, then a 256 -> 64 -> 8 funnel
input_dim = 20
inputs = Input(shape=(x_train.shape[1], input_dim))
encoded = Reshape((x_train.shape[1]*x_train.shape[2],))(inputs)
encoded = Dropout(0.3)(encoded)
# L1 activity penalty (10e-5 == 1e-4) on the widest dense layer
encoded = Dense(256, activity_regularizer=regularizers.activity_l1(10e-5))(encoded)
encoded = Dense(64)(encoded)
encoded = Dense(8)(encoded)
# decoder: mirror the encoder back to (timesteps, 20)
decoded = Dense(64)(encoded)
decoded = Dense(256)(decoded)
decoded = Dense(x_train.shape[1]*x_train.shape[2])(decoded)
decoded = Reshape((x_train.shape[1], 20))(decoded)
m = Model(inputs, decoded)
e = Model(inputs, encoded)
# stand-alone decoder: replay the last four layers on an 8-d code input
enc_in = Input(shape=(8,))
d = Model(enc_in, m.layers[-1](m.layers[-2](m.layers[-3](m.layers[-4](enc_in)))))
# compile
m.compile(optimizer='adadelta', loss='binary_crossentropy', metrics=['accuracy'])
m.summary()
# train
コード例 #31
0
ファイル: AEbasic.py プロジェクト: qizhust/pooling
# Scale pixel values into [0, 1].
x_test = x_test.astype('float32') / 255.

# prepare patches for autoencoder training
# (samplePatches, x_train, input_shape, encoding_dim and sparsity are
#  defined earlier in this file, outside this excerpt)
patch_num = 60000
patch_train = samplePatches(x_train, input_shape, patch_num)

# Flatten each image to a single feature vector.
x_train = x_train.reshape(len(x_train), np.prod(x_train.shape[1:]))
x_test = x_test.reshape(len(x_test), np.prod(x_test.shape[1:]))

# build an autoencoder and train: one sparse ReLU encoding layer,
# dropout, then a linear reconstruction layer.
model = Sequential()
model.add(
    Dense(encoding_dim,
          input_dim=input_shape,
          activation='relu',
          activity_regularizer=regularizers.activity_l1(sparsity)))
model.add(Dropout(0.5))
model.add(Dense(input_shape, activation='linear'))
# NOTE(review): binary_crossentropy with a linear (unbounded) output layer
# is unusual; 'mse' (or a sigmoid output) would be conventional — confirm.
model.compile(optimizer='adadelta', loss='binary_crossentropy')
model.fit(patch_train,
          patch_train,  # autoencoder: targets are the inputs themselves
          nb_epoch=100,
          batch_size=256,
          shuffle=True,
          validation_data=None)

# get weights and bias of the encoder (layer 0) and decoder (layer 2;
# layer 1 is the Dropout, which has no weights)
w1 = model.layers[0].get_weights()[0]
b1 = model.layers[0].get_weights()[1]
w2 = model.layers[2].get_weights()[0]
b2 = model.layers[2].get_weights()[1]
コード例 #32
0
ファイル: test_code.py プロジェクト: civilTANG/TASINFER
# Auto-generated type/shape tracing (TASINFER): the first time each variable
# name is seen it is recorded in the TANGSHAN list and a row
# [name, line-number, type, shape] is appended to tas.csv.
if 'inp' not in TANGSHAN:
    import csv
    # Record a shape for array-likes, a length for lists, 'any' otherwise.
    if isinstance(inp, np.ndarray) or isinstance(inp, pd.DataFrame
        ) or isinstance(inp, pd.Series):
        shape_size = inp.shape
    elif isinstance(inp, list):
        shape_size = len(inp)
    else:
        shape_size = 'any'
    check_type = type(inp)
    with open('tas.csv', 'a+') as f:
        TANGSHAN.append('inp')
        writer = csv.writer(f)
        # 145 is presumably the source line number being traced — confirm.
        writer.writerow(['inp', 145, check_type, shape_size])
# Hidden layer: 1024 tanh units with an L1 activity penalty on top of D1.
L1 = Dense(1024, init='uniform', activation='tanh', activity_regularizer=
    regularizers.activity_l1(0.01))(D1)
# Same auto-generated tracing for D1 (a Keras tensor, so shape_size='any').
if 'D1' not in TANGSHAN:
    import csv
    if isinstance(D1, np.ndarray) or isinstance(D1, pd.DataFrame
        ) or isinstance(D1, pd.Series):
        shape_size = D1.shape
    elif isinstance(D1, list):
        shape_size = len(D1)
    else:
        shape_size = 'any'
    check_type = type(D1)
    with open('tas.csv', 'a+') as f:
        TANGSHAN.append('D1')
        writer = csv.writer(f)
        writer.writerow(['D1', 146, check_type, shape_size])
# Softmax output over the label set.
L2 = Dense(len(unique_labels), init='uniform', activation='softmax')(L1)
コード例 #33
0
    def __init__(self, ds_shape, latent_dim=2, input_noise=0.2, dropout_p=0.5, activ='tanh', final_activ='tanh',
                      nb_classes=2, batch_size=100, compression_factor=None):
        """Build a denoising autoencoder with a classifier head on the latent code.

        Constructs four models sharing one layer graph:
          * self.autoencoder: input -> reconstruction (compiled, adadelta/BCE)
          * self.encoder:     input -> latent code
          * self.decoder:     latent code -> reconstruction (unrolled layers)
          * self.classer:     input -> class scores (compiled, adam/cat-CE)

        Args:
            ds_shape: dataset shape; dims after the first are flattened into
                the input vector.
            latent_dim: width of the latent bottleneck.
            input_noise: GaussianNoise sigma applied after the first Dense.
            dropout_p: dropout rate in the encoder stack (halved on the
                decoder side).
            activ: activation for hidden layers.
            final_activ: activation of the reconstruction layer.
            nb_classes: number of classifier outputs.
            batch_size: only used for the diagnostic batch-shape printout.
            compression_factor: unused; kept for interface compatibility.
        """
        print('DS shape: {}'.format(ds_shape))
        in_dims = np.prod(ds_shape[1:])  # flattened input dimensionality
        in_shape = ds_shape[1:]
        # Hidden-layer widths scale with the input size.
        sizes = [4*int(in_dims**0.5), 4*int(in_dims**0.25)]
        batch_shape = (batch_size,) + in_shape
        print('Batch Shape: ', batch_shape )
        x_in = Input(shape=(in_dims,))
        print('Input Dims: {}, input shape: {}, encoding dims: {}'.format(in_dims, in_shape, latent_dim))
        print('Sizes', sizes)

        # --- encoder: sparse Dense -> noise -> BN -> dropout, twice ---
        encoded = Dense(sizes[0], activation=activ, activity_regularizer=regularizers.activity_l1(10e-5))(x_in)
        encoded = GaussianNoise(input_noise)(encoded)
        encoded = BatchNormalization()(encoded)
        encoded = Dropout(dropout_p)(encoded)  # batch norm before dropout
        encoded = Dense(sizes[1], activation=activ)(encoded)
        encoded = BatchNormalization()(encoded)
        encoded = Dropout(dropout_p)(encoded)

        latent = Dense(latent_dim, activation=activ)(encoded)
        # NOTE(review): the original applied GaussianNoise(0.02) to `encoded`
        # here, but the result was never consumed (the decoder and all models
        # read `latent`), so that dead layer has been removed. If mid-network
        # noise was intended, it should wrap `latent` instead.

        # --- decoder: lossy reconstruction of the input ---
        decoded = Dense(sizes[1], activation=activ)(latent)
        decoded = Dense(sizes[0], activation=activ)(decoded)
        decoded = Dropout(dropout_p/2)(decoded)
        decoded = Dense(in_dims, activation=final_activ)(decoded)

        # --- models ---
        self.autoencoder = Model(input=x_in, output=decoded)

        # Placeholder for feeding latent codes straight into the decoder.
        encoded_input = Input(shape=(latent_dim,))

        # The last four autoencoder layers form the decoder stack:
        # Dense(sizes[1]), Dense(sizes[0]), Dropout, Dense(in_dims).
        decoder_layer0 = self.autoencoder.layers[-4]
        decoder_layer1 = self.autoencoder.layers[-3]
        decoder_layer2 = self.autoencoder.layers[-2]
        decoder_layer3 = self.autoencoder.layers[-1]
        # todo: make this into a dedicated unrolling function

        # NOTE(review): sigmoid output paired with categorical_crossentropy is
        # an odd combination; softmax is conventional for multi-class — confirm.
        class_out = Dense(nb_classes, activation='sigmoid')(latent)

        self.encoder = Model(input=x_in, output=latent)

        # Decoder model: reapply the shared layers over the latent placeholder.
        self.decoder = Model(input=encoded_input, output=decoder_layer3(decoder_layer2(
            decoder_layer1(decoder_layer0(encoded_input)))))

        self.classer = Model(input=x_in, output=class_out)
        self.autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
        self.classer.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

        self.autoencoder.model_name = 'Autoencoder 1'
コード例 #34
0
ファイル: Transcript_Prosody.py プロジェクト: caroljew/NRT
	 input_shape=(MAX_SEQUENCE_LENGTH, EMBEDDING_DIM), name='LSTM3')

])

# Hyper-parameters shared by the convolutional branches.
num_hidden_units_conv=1024
nb_hidden_units=1024
nb_filter=128            # number of convolution kernels
filter_length=60         # temporal width of each kernel
MAX_SEQUENCE_LENGTH_2 = 3000


def _make_conv_branch(conv_name):
    """Build one Conv1D -> Flatten -> Dense branch.

    branch4 and branch5 were byte-identical apart from the convolution
    layer's name, so the construction is factored into this helper.
    """
    return Sequential([
        Convolution1D(nb_filter=nb_filter,
                      filter_length=filter_length,
                      border_mode='valid',
                      activation='tanh',
                      input_shape=(MAX_SEQUENCE_LENGTH_2, 1),
                      activity_regularizer=regularizers.activity_l1(0.01),
                      name=conv_name),
        Flatten(),
        Dense(nb_hidden_units, init='uniform', activation='tanh'),
    ])


branch4 = _make_conv_branch('CONV11')
branch5 = _make_conv_branch('CONV21')
コード例 #35
0
def autoencoder3(ds, compression_factor=16, input_noise=0.2, dropout_p=0.1, activ='tanh', final_activ='tanh'):
    """
    This one works really well!
    :param ds: Data set, just used to get dimension of input (need to refactor this)
    :param compression_factor: Compression ratio
    :param input_noise: Gaussian sigma to apply to input vector
    :param dropout_p: Dropout rate in the 3 input dropouts and one output dropout
    :param activ: activation function used in all but the last layer
    :param final_activ: activation function of the last layer
    :return: (autoencoder, encoder, decoder) tuple of Keras models; only the
        autoencoder is compiled (adadelta / binary_crossentropy)
    """
    # compression_factor=20
    print('DS shape: {}'.format(ds.shape))
    in_dims = np.prod(ds.shape[1:])  # flattened input dimensionality
    encoding_dim = int(in_dims // compression_factor)
    in_shape = ds[0].shape
    print('Input Dims: {}, input shape: {}, encoding dims: {}'.format(in_dims, in_shape, encoding_dim))

    # this is our input placeholder; input is denoised by training against
    # the clean vector after GaussianNoise corruption
    input_img = Input(shape=(in_dims,))
    encoded = GaussianNoise(input_noise)(input_img)

    # encoder: 4x -> 2x -> 1x encoding_dim, with an L1 activity penalty
    # (10e-5 == 1e-4) on the widest layer to encourage sparse codes
    encoded = Dense(encoding_dim * 4, activation=activ, activity_regularizer=regularizers.activity_l1(10e-5))(encoded)
    #     encoded = Dense(encoding_dim*4, activation='sigmoid')(input_img)
    encoded = BatchNormalization()(encoded)
    encoded = Dropout(dropout_p)(encoded)  # batch norm before dropout
    #     encoded = Dense(encoding_dim*3, activation=activ)(encoded)
    #     encoded = Dropout(dropout_p)(encoded)
    encoded = Dense(encoding_dim * 2, activation=activ)(encoded)
    encoded = Dropout(dropout_p)(encoded)

    encoded = Dense(encoding_dim, activation=activ)(encoded)
    # Middle Noise — note this is part of the graph, so the encoder model
    # below also emits the noised code
    encoded = GaussianNoise(0.02)(encoded)

    # DECODED LAYER
    # "decoded" is the lossy reconstruction of the input
    decoded = Dense(encoding_dim * 2, activation=activ)(encoded)
    #     decoded = Dropout(dropout_p)(decoded)
    decoded = Dense(encoding_dim * 4, activation=activ)(decoded)
    decoded = Dropout(dropout_p)(decoded)
    decoded = Dense(in_dims, activation=final_activ)(decoded)

    # MODEL
    autoencoder = Model(input=input_img, output=decoded)

    # SEPARATE ENCODER MODEL (input -> encoding_dim code)
    encoder = Model(input=input_img, output=encoded)

    # create a placeholder for an encoded (encoding_dim-dimensional) input
    encoded_input = Input(shape=(encoding_dim,))

    # retrieve the last four layers of the autoencoder model, which form the
    # decoder stack: Dense(2x), Dense(4x), Dropout, Dense(in_dims)
    decoder_layer0 = autoencoder.layers[-4]
    decoder_layer1 = autoencoder.layers[-3]
    decoder_layer2 = autoencoder.layers[-2]
    decoder_layer3 = autoencoder.layers[-1]
    # todo: make this into a dedicated unrolling function

    # create the decoder model - unrolling the model as we go
    decoder = Model(input=encoded_input, output=decoder_layer3(decoder_layer2(
        decoder_layer1(decoder_layer0(encoded_input)))))

    #     model.add(GaussianNoise(0.1), input_shape=(n_input_len,))
    autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

    autoencoder.model_name = 'Autoencoder 1'
    return autoencoder, encoder, decoder
コード例 #36
0
def model_dense(train_rep, valid_rep, test_rep, y_tr, y_val, y_test, nb_class,
                nb_epoch, batch_size, lr, model_path, weight_path, hidden_size,
                idim, dr):
    """Train, persist, reload and evaluate a two-layer dense classifier.

    Trains on stylometric feature vectors with early stopping and
    best-weights checkpointing, saves the architecture as JSON, then reloads
    model + weights from disk and evaluates on validation and test data.

    Args:
        train_rep/valid_rep/test_rep: feature matrices of width ``idim``.
        y_tr/y_val/y_test: one-hot label matrices of width ``nb_class``.
        nb_class: number of output classes.
        nb_epoch, batch_size, lr: training hyper-parameters.
        model_path: where the JSON architecture is written.
        weight_path: where the best weights are checkpointed.
        hidden_size: width of the hidden ReLU layer.
        idim: input feature dimensionality.
        dr: dropout rate after the hidden layer.

    Returns:
        (val_loss, val_acc, test_loss, test_acc) from the reloaded model.
    """
    model_stylo = Sequential()
    model_stylo.add(
        Dense(hidden_size,
              input_shape=(idim, ),
              init='glorot_uniform',
              activation='relu'))
    model_stylo.add(Dropout(dr))
    # Softmax output with an L1 activity penalty on the class scores.
    model_stylo.add(
        Dense(nb_class,
              init='glorot_uniform',
              activation='softmax',
              name="output_author",
              activity_regularizer=regularizers.activity_l1(0.0005)))
    print(model_stylo.summary())
    adam = Adam(lr=lr)
    model_stylo.compile(loss='categorical_crossentropy',
                        optimizer=adam,
                        metrics=['accuracy'])

    # Keep only the best weights by validation loss; stop after 5 stale epochs.
    checkPoint = keras.callbacks.ModelCheckpoint(weight_path,
                                                 monitor='val_loss',
                                                 verbose=2,
                                                 save_best_only=True,
                                                 save_weights_only=True,
                                                 mode='auto')
    earlystop_cb = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                 patience=5,
                                                 verbose=2,
                                                 mode='auto')
    print("Training the model...")
    model_stylo.fit(train_rep,
                    y_tr,
                    nb_epoch=nb_epoch,
                    batch_size=batch_size,
                    validation_data=(valid_rep, y_val),
                    callbacks=[earlystop_cb, checkPoint],
                    verbose=2)

    # serialize model to JSON
    model_json = model_stylo.to_json()
    with open(model_path, "w") as json_file:
        json_file.write(model_json)

    # load json and create model (with-statement guarantees the handle closes
    # even if reading raises)
    with open(model_path, 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights(weight_path)
    print("Loaded model from disk")

    # evaluate loaded model on test data
    loaded_model.compile(loss='categorical_crossentropy',
                         optimizer=adam,
                         metrics=['accuracy'])
    l, a = loaded_model.evaluate(test_rep, y_test, verbose=2)
    l_valid, a_valid = loaded_model.evaluate(valid_rep, y_val, verbose=2)
    print("Evaluation on the validation and test data using saved model")
    print("validation", l_valid, a_valid)
    print("test", l, a)
    return l_valid, a_valid, l, a
コード例 #37
0
def model_woc(train, valid, test, train_rep, valid_rep, test_rep, y_tr, y_val,
              y_test, max_features, nb_class, emb_size, seq_length, nb_epoch,
              batch_size, lr, model_path, weight_path, dr, idim):
    """Train and evaluate a two-branch (content + stylometric) classifier.

    One branch embeds token sequences and average-pools them; the other is a
    small dense network over stylometric features. The branches are merged by
    concatenation and fed to a softmax output. The model is trained with
    early stopping and checkpointing, saved to JSON, reloaded from disk, and
    evaluated again.

    Args:
        train/valid/test: token-id sequence inputs for the embedding branch.
        train_rep/valid_rep/test_rep: stylometric feature inputs (width idim).
        y_tr/y_val/y_test: one-hot label matrices of width nb_class.
        max_features: vocabulary size for the Embedding layer.
        nb_class: number of output classes.
        emb_size: embedding dimensionality.
        seq_length: input sequence length.
        nb_epoch, batch_size, lr: training hyper-parameters.
        model_path: where the JSON architecture is written.
        weight_path: where the best weights are checkpointed.
        dr: dropout rate after the merge.
        idim: stylometric feature dimensionality.

    Returns:
        (val_loss, val_acc, test_loss, test_acc) from the reloaded model.
    """
    # Content branch: embed, average over time, flatten.
    model_cont = Sequential()
    model_cont.add(
        Embedding(max_features,
                  output_dim=emb_size,
                  input_length=seq_length,
                  dropout=0.75,
                  init='glorot_uniform'))
    model_cont.add(AveragePooling1D(pool_length=model_cont.output_shape[1]))
    model_cont.add(Flatten())

    # syntactic features
    model_stylo = Sequential()
    model_stylo.add(
        Dense(2,
              input_shape=(idim, ),
              init='glorot_uniform',
              activation='relu'))
    model_stylo.add(Dropout(0.75))

    # Concatenate both branches and classify with a softmax layer that has
    # an L1 activity penalty on its outputs.
    model_merge = Sequential()
    model_merge.add(Merge([model_cont, model_stylo], mode="concat"))
    model_merge.add(Dropout(dr))
    model_merge.add(
        Dense(nb_class,
              init='glorot_uniform',
              activation='softmax',
              name="output_author",
              activity_regularizer=regularizers.activity_l1(0.0005)))

    adam = Adam(lr=lr)
    model_merge.compile(loss='categorical_crossentropy',
                        optimizer=adam,
                        metrics=['accuracy'])

    # Keep only the best weights by validation loss; stop after 5 stale epochs.
    checkPoint = keras.callbacks.ModelCheckpoint(weight_path,
                                                 monitor='val_loss',
                                                 verbose=2,
                                                 save_best_only=True,
                                                 save_weights_only=True,
                                                 mode='auto')
    earlystop_cb = keras.callbacks.EarlyStopping(monitor='val_loss',
                                                 patience=5,
                                                 verbose=2,
                                                 mode='auto')
    print("Training the model...")
    model_merge.fit([train, train_rep],
                    y_tr,
                    nb_epoch=nb_epoch,
                    batch_size=batch_size,
                    validation_data=([valid, valid_rep], y_val),
                    callbacks=[earlystop_cb, checkPoint],
                    verbose=2)
    loss, acc = model_merge.evaluate([test, test_rep], y_test, verbose=2)
    loss_val, acc_val = model_merge.evaluate([valid, valid_rep],
                                             y_val,
                                             verbose=2)
    print("Evaluation on test data using direct model (not saved one)")
    print("validation", loss_val, acc_val)
    print("test", loss, acc)

    # serialize model to JSON
    model_json = model_merge.to_json()
    with open(model_path, "w") as json_file:
        json_file.write(model_json)

    # load json and create model (with-statement guarantees the handle closes
    # even if reading raises)
    with open(model_path, 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights(weight_path)
    print("Loaded model from disk")

    # evaluate loaded model on test data
    loaded_model.compile(loss='categorical_crossentropy',
                         optimizer=adam,
                         metrics=['accuracy'])
    l, a = loaded_model.evaluate([test, test_rep], y_test, verbose=2)
    l_valid, a_valid = loaded_model.evaluate([valid, valid_rep],
                                             y_val,
                                             verbose=2)
    print("Evaluation on the validation and test data using saved model")
    print("validation", l_valid, a_valid)
    print("test", l, a)
    return l_valid, a_valid, l, a