コード例 #1
0
ファイル: train.py プロジェクト: ldricci3/roboracing-software
def make_model():
    """Build and compile the small CNN classifier.

    Relies on the module-level names ``input_shape`` (spatial size of the
    input, 128x48) and ``categories`` (label set sizing the softmax).
    Compiled with categorical cross-entropy and Adadelta.
    """
    layers = [
        # 128 x 48 input; small Gaussian noise acts as a regularizer.
        GaussianNoise(0.05, input_shape=input_shape),
        Conv2D(32, (3, 3), activation='relu', padding='same'),
        MaxPooling2D((4, 4)),   # -> 32 x 12
        Conv2D(64, (3, 3), activation='relu', padding='same'),
        MaxPooling2D((2, 2)),   # -> 16 x 6
        Flatten(),
        Dropout(0.25),
        Dense(128, activation='relu'),
        Dense(32, activation='relu'),
        Dropout(0.35),
        Dense(len(categories), activation='softmax'),
    ]

    model = Sequential()
    for layer in layers:
        model.add(layer)

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(),
                  metrics=['accuracy'])
    return model
コード例 #2
0
 def __init__(self):
     """Build the discriminator MLP over flattened 28x28 (=784) inputs.

     Creates two Model handles:
       * ``self.feature`` -- input to the last 250-unit hidden layer
         (taken before the final noise layer).
       * ``self.model``   -- full network ending in a 10-unit output.
     """
     # discriminator
     self.input = Input([28**2])
     # Gaussian noise after (almost) every Dense layer regularizes training;
     # no activation kwargs are given, so each Dense is linear by default.
     layer = GaussianNoise(stddev=0.3)(self.input)
     layer = Dense(1000)(layer)
     layer = GaussianNoise(stddev=0.5)(layer)
     layer = Dense(500)(layer)
     layer = GaussianNoise(stddev=0.5)(layer)
     layer = Dense(250)(layer)
     layer = GaussianNoise(stddev=0.5)(layer)
     layer = Dense(250)(layer)
     layer = GaussianNoise(stddev=0.5)(layer)
     layer = Dense(250)(layer)
     # Feature extractor captures the representation before the final noise.
     self.feature = Model(inputs=self.input, outputs=layer)
     layer = GaussianNoise(stddev=0.5)(layer)
     self.output = Dense(10)(layer)  # 10-way output, no softmax here
     self.model = Model(inputs=self.input, outputs=self.output)
コード例 #3
0
def prepare_model(max_sequence_length, embedding_matrix, max_position, n_out):
    """Build and compile a CNN relation classifier.

    Inputs are a word-id sequence plus two relative-position sequences;
    their embeddings are concatenated, perturbed with Gaussian noise,
    convolved, globally max-pooled, and classified with an ``n_out``-way
    softmax. Compiled with Adam / categorical cross-entropy.
    """
    words_input = Input(shape=(max_sequence_length, ),
                        dtype='int32',
                        name='words_input')
    # Frozen, pre-trained word embeddings.
    words = Embedding(embedding_matrix.shape[0],
                      embedding_matrix.shape[1],
                      weights=[embedding_matrix],
                      trainable=False)(words_input)

    def _position_branch(name):
        # Trainable embedding of relative positions w.r.t. one entity.
        branch_input = Input(shape=(max_sequence_length, ),
                             dtype='int32',
                             name=name)
        branch = Embedding(max_position,
                           HYPERPARAMETERS['position_dims'])(branch_input)
        return branch_input, branch

    distance1_input, distance1 = _position_branch('distance1_input')
    distance2_input, distance2 = _position_branch('distance2_input')

    features = concatenate([words, distance1, distance2])
    features = GaussianNoise(TRAINING_PARAMETERS['std_noise'])(features)
    features = Convolution1D(filters=HYPERPARAMETERS['num_filters'],
                             kernel_size=HYPERPARAMETERS['filter_sizes'],
                             padding='valid',
                             activation='relu',
                             strides=1)(features)
    features = GlobalMaxPooling1D()(features)
    features = Dropout(TRAINING_PARAMETERS['dropout_rate'])(features)
    predictions = Dense(n_out, activation='softmax')(features)

    model = Model(inputs=[words_input, distance1_input, distance2_input],
                  outputs=[predictions])
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
コード例 #4
0
    def _build_model(self, nfeatures, architecture, supervised, confusion,
                     confusion_incr, confusion_max, activations, noise,
                     droprate, mmd_layer_idx, optimizer):
        """Build the twin (weight-shared) network over two input domains.

        Parameters
        ----------
        nfeatures : int
            Width of both input placeholders.
        architecture : sequence
            Layer spec: an int adds a linear Dense of that width; the
            strings 'noise', 'bn', 'drop' and 'act' add GaussianNoise,
            BatchNormalization, Dropout and an activation layer.
        activations : str
            'prelu', 'elu' and 'leakyrelu' map to their advanced-activation
            layers; anything else goes through Activation(...).
        noise, droprate : float
            Stddev for GaussianNoise / rate for Dropout layers.
        mmd_layer_idx : container of int
            Layer indices flagged as '(MMD)' in the printout.

        The remaining parameters (supervised, confusion, confusion_incr,
        confusion_max, optimizer) are unused in this method and kept for
        interface compatibility.
        """
        self.inp_a = tf.placeholder(tf.float32, shape=(None, nfeatures))
        self.inp_b = tf.placeholder(tf.float32, shape=(None, nfeatures))
        self.labels_a = tf.placeholder(tf.float32, shape=(None, 1))

        layers_a = [self.inp_a]
        layers_b = [self.inp_b]

        for i, nunits in enumerate(architecture):

            # Fixed: the original used Python 2 print statements, which are
            # syntax errors under Python 3 (the rest of the file uses the
            # print() function).
            print(nunits, end=' ')
            if i in mmd_layer_idx:
                print('(MMD)')
            else:
                print()

            if isinstance(nunits, int):
                shared_layer = Dense(nunits, activation='linear')
            elif nunits == 'noise':
                shared_layer = GaussianNoise(noise)
            elif nunits == 'bn':
                shared_layer = BatchNormalization()
            elif nunits == 'drop':
                shared_layer = Dropout(droprate)
            elif nunits == 'act':
                if activations == 'prelu':
                    shared_layer = PReLU()
                elif activations == 'elu':
                    shared_layer = ELU()
                elif activations == 'leakyrelu':
                    shared_layer = LeakyReLU()
                else:
                    shared_layer = Activation(activations)
            # NOTE(review): an unrecognized spec silently reuses the previous
            # shared_layer (or raises NameError on the first iteration).

            # The same layer instance is applied to both branches, so the
            # two domains share weights.
            layers_a += [shared_layer(layers_a[-1])]
            layers_b += [shared_layer(layers_b[-1])]
コード例 #5
0
def discriminator_for_text(text_len, embedding_len, conv_filters,
                           conv_window_len, dense_units, lr):
    """Build and compile a CNN discriminator over embedded text.

    Pipeline: Gaussian input noise -> sigmoid Conv1D -> max-pool ->
    flatten -> sigmoid Dense + dropout -> L2-regularized two-way softmax.
    Compiled with binary cross-entropy and a clipped RMSprop.
    """
    texts = Input(shape=(text_len, embedding_len),
                  dtype="float32",
                  name="texts")

    # Small input perturbation in place of input dropout.
    net = GaussianNoise(0.01)(texts)

    # Convolutional feature extraction.
    net = Conv1D(conv_filters,
                 conv_window_len,
                 padding='valid',
                 activation='sigmoid',
                 strides=1)(net)
    net = MaxPool1D()(net)
    net = Flatten()(net)

    net = Dense(dense_units, activation="sigmoid")(net)
    net = Dropout(0.5)(net)

    # L2 pressure on activity, kernel and bias of the output layer.
    outputs = Dense(2,
                    activation="softmax",
                    name="outputs",
                    activity_regularizer=l1_l2(l1=0, l2=0.02),
                    kernel_regularizer=l1_l2(l1=0, l2=0.02),
                    bias_regularizer=l1_l2(l1=0, l2=0.02))(net)

    dis_model = Model(inputs=[texts], outputs=[outputs])
    dis_model.compile(loss="binary_crossentropy",
                      optimizer=RMSprop(lr=lr, clipvalue=1.0, decay=1e-8),
                      metrics=["accuracy"])
    return dis_model
コード例 #6
0
def lstm(emb_matrix, max_len):
    """Siamese LSTM for sentence-pair classification.

    Both inputs share one frozen embedding layer and one LSTM encoder;
    the encodings are combined as (x1 - y1)^2 concatenated with x1 + y1,
    then classified by a single sigmoid unit. Compiled with binary
    cross-entropy and Nadam.
    """
    embedding_layer = Embedding(input_dim=emb_matrix.shape[0],
                                output_dim=emb_matrix.shape[1],
                                weights=[emb_matrix],
                                input_length=max_len,
                                trainable=False)
    lstm_layer = LSTM(75, recurrent_dropout=0.2)

    def _encode():
        # Shared embedding + shared LSTM over one input sequence.
        seq_input = Input(shape=(max_len, ), dtype="int32")
        return seq_input, lstm_layer(embedding_layer(seq_input))

    sequence_1_input, x1 = _encode()
    sequence_2_input, y1 = _encode()

    addition = add([x1, y1])
    # Squared difference built from add/multiply: (x1 + (-y1))^2.
    diff = add([x1, Lambda(lambda t: -t)(y1)])
    merged = multiply([diff, diff])
    merged = concatenate([merged, addition])
    merged = Dropout(0.4)(merged)

    merged = BatchNormalization()(merged)
    merged = GaussianNoise(0.1)(merged)

    merged = Dense(150, activation="relu")(merged)
    merged = Dropout(0.2)(merged)
    merged = BatchNormalization()(merged)

    out = Dense(1, activation="sigmoid")(merged)

    model = Model(inputs=[sequence_1_input, sequence_2_input], outputs=out)
    model.compile(loss="binary_crossentropy",
                  optimizer="nadam",
                  metrics=['acc'])

    return model
コード例 #7
0
def CRNN(input_shape):
    """Convolutional-recurrent classifier ending in a 4-way softmax.

    Two Conv1D/pool blocks feed a single LSTM; compiled with categorical
    cross-entropy and Adam. Prints the model summary as a side effect.
    """
    layers = [
        # --- CNN front-end ---
        Conv1D(8, 4, padding='same', name='conv1', input_shape=input_shape),
        BatchNormalization(),
        Activation('relu'),
        MaxPooling1D(pool_size=2, name='max1'),
        Conv1D(16, 4, padding='same', name='conv2'),
        BatchNormalization(),
        Activation('relu'),
        MaxPooling1D(pool_size=2, name='max2'),
        # --- CNN-to-RNN bridge ---
        Reshape((1, 16)),
        # --- RNN back-end ---
        LSTM(200),
        BatchNormalization(),
        Dropout(0.5),
        GaussianNoise(0.2),  # noise regularization before the classifier
        Dense(4, name='dense'),
        Activation('softmax', name='softmax'),
    ]

    model = Sequential()
    for layer in layers:
        model.add(layer)

    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adam(),
                  metrics=['accuracy'])

    print(model.summary())

    return model
コード例 #8
0
def gen_model():
    """Build and compile a 1-D CNN classifier over offset-sliced input.

    Relies on module-level names: ``x`` (the dataset; only the shape and
    std of ``x[0][0]`` are read here), ``offset_slice`` (slicing function
    wrapped in a Lambda layer) and ``np`` (NumPy). Three conv blocks feed
    two tanh Dense layers and a 3-way softmax; compiled with Adam.
    """
    m_in = Input(shape=x[0][0].shape)
    m_off = Lambda(offset_slice)(m_in)
    # Noise stddev tied to the data scale; NOTE(review): the /100 factor is
    # ad hoc ("how much noise to have?") -- tune as needed.
    m_noise = GaussianNoise(np.std(x[0][0] / 100))(m_off)

    # Block 1: causal conv, BN, ELU, average-pool, dropout.
    m_t = Conv1D(30, 10, padding='causal')(m_noise)
    m_t = BatchNormalization()(m_t)
    m_t = ELU()(m_t)
    m_t = AveragePooling1D(2)(m_t)
    m_t = Dropout(0.2)(m_t)

    # Block 2: same pattern with a smaller kernel.
    m_t = Conv1D(30, 5, padding='causal')(m_t)
    m_t = BatchNormalization()(m_t)
    m_t = ELU()(m_t)
    m_t = AveragePooling1D(2)(m_t)
    m_t = Dropout(0.2)(m_t)

    # Block 3: same pattern again.
    m_t = Conv1D(30, 5, padding='causal')(m_t)
    m_t = BatchNormalization()(m_t)
    m_t = ELU()(m_t)
    m_t = AveragePooling1D(2)(m_t)
    m_t = Dropout(0.2)(m_t)

    # Classifier head: 50 -> 20 tanh units, then a 3-way softmax.
    m_t = Flatten()(m_t)
    m_t = Dense(50)(m_t)
    m_t = BatchNormalization()(m_t)
    m_t = Activation('tanh')(m_t)
    m_t = Dense(20)(m_t)
    m_t = BatchNormalization()(m_t)
    m_t = Activation('tanh')(m_t)
    m_out = Dense(3, activation='softmax')(m_t)

    model = Model(inputs=m_in, outputs=m_out)

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
コード例 #9
0
def create_model(num_classes, image_rows, image_cols, image_channels):
    """Uncompiled CNN image classifier with a noisy input layer.

    The noise stddev is 0.1% of full intensity, assuming inputs have been
    scaled by 1/255. Returns the model without compiling it.
    """
    model = Sequential()

    percent_noise = 0.1
    noise = (1.0 / 255) * percent_noise
    model.add(GaussianNoise(noise,
                            input_shape=(image_rows, image_cols,
                                         image_channels)))

    # Two conv stages: (32, 32) filters then (16, 8), each followed by
    # 2x2 max-pooling and heavy dropout.
    for filters_a, filters_b in ((32, 32), (16, 8)):
        model.add(Convolution2D(filters_a, 3))
        model.add(Activation('relu'))
        model.add(Convolution2D(filters_b, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(Dropout(0.5))

    model.add(Flatten())

    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    return model
コード例 #10
0
ファイル: CNN.py プロジェクト: vchauhan-ai/CNN
def build_classifier(optimizer):
    """Build and compile a deep CNN binary classifier (Keras 1 style API).

    Parameters
    ----------
    optimizer : str or keras optimizer
        Passed straight through to ``compile``.

    Bug fixed: the original compiled with the undefined name ``opt``,
    which raised NameError at call time; it now uses the ``optimizer``
    parameter as intended.
    """
    from keras.models import Sequential
    from keras.layers import Convolution2D
    from keras.layers import MaxPooling2D
    from keras.layers import Flatten
    from keras.layers import Dense
    from keras.layers import Dropout
    from keras.layers import GaussianNoise
    from sklearn.model_selection import GridSearchCV
    from keras.wrappers.scikit_learn import KerasClassifier

    classifier = Sequential()
    # NOTE(review): Convolution2D(n, 3, 3) and Dense(output_dim=...) are
    # Keras 1 signatures -- kept as-is for the Keras version this project
    # apparently targets.
    classifier.add(
        Convolution2D(512, 3, 3, input_shape=(32, 32, 3), activation='relu'))
    classifier.add(MaxPooling2D(pool_size=(2, 2)))
    classifier.add(Dropout(0.25))
    classifier.add(Convolution2D(1024, 3, 3, activation='relu'))
    classifier.add(MaxPooling2D(pool_size=(2, 2)))
    classifier.add(Dropout(0.15))
    classifier.add(Convolution2D(2048, 3, 3, activation='relu'))
    classifier.add(MaxPooling2D(pool_size=(2, 2)))
    classifier.add(Dropout(0.10))
    classifier.add(Flatten())

    classifier.add(Dense(output_dim=256, activation='relu'))
    classifier.add(Dense(output_dim=256, activation='relu'))
    classifier.add(Dense(output_dim=256, activation='relu'))
    classifier.add(Dense(output_dim=256, activation='relu'))
    classifier.add(GaussianNoise(0.25))  # mid-stack noise regularization
    classifier.add(Dense(output_dim=256, activation='relu'))
    classifier.add(Dense(output_dim=256, activation='relu'))
    classifier.add(Dense(output_dim=256, activation='relu'))
    classifier.add(Dense(output_dim=256, activation='relu'))
    classifier.add(Dense(output_dim=256, activation='relu'))
    classifier.add(Dense(output_dim=1, activation='sigmoid'))
    # Bug fix: was ``optimizer=opt`` (undefined name).
    classifier.compile(optimizer=optimizer, loss='hinge',
                       metrics=['accuracy'])
    return classifier
コード例 #11
0
def NN_generator_mix_noise(n_layers : int, n_nodes : int, input_dim : int, output_dim : int) -> Sequential:
    """Uncompiled feed-forward price network with a noisy input layer.

    Parameters
    ----------
    n_layers : int
        Total number of hidden layers (1 tanh layer plus n_layers - 1
        softplus layers).
    n_nodes : int
        Width of every hidden layer.
    input_dim, output_dim : int
        Input feature count and output (price) dimensionality.

    Cleanup: the ``if n_layers > 1`` guard was redundant (the range is
    already empty then) and the loop variable was unused.
    """
    model = Sequential()

    # Input-level noise for regularization.
    model.add(GaussianNoise(0.1, input_shape = (input_dim,)))

    # Layer 1, input
    model.add(Dense(n_nodes, activation = 'tanh',
        kernel_initializer=VarianceScaling()))

    # Hidden layers; range(n_layers - 1) is empty when n_layers <= 1,
    # so no explicit guard is needed.
    for _ in range(n_layers - 1):
        model.add(Dense(n_nodes, activation = 'softplus',
            kernel_initializer=VarianceScaling()))

    # Output layer, price
    model.add(Dense(output_dim, kernel_initializer=VarianceScaling()))

    return model
コード例 #12
0
def getModel(config):
    """Stateful two-layer LSTM forecaster built from a config dict.

    Expects config entries whose first list element is used: 'window',
    'noise', 'batchsize', 'cs1', 'cs2', 'lr'. Relies on the module-level
    constant FORECASTING_STEPS for the output width. Compiled with MSE.
    """
    window = config['window'][0]
    stddev = config['noise'][0]
    batch_size = config['batchsize'][0]

    model = Sequential()
    # Stateful LSTMs require a fully specified batch input shape.
    model.add(GaussianNoise(stddev,
                            batch_input_shape=(batch_size, window, 1)))
    model.add(LSTM(config['cs1'][0],
                   activation='tanh',
                   stateful=True,
                   return_sequences=True))
    model.add(LSTM(config['cs2'][0], stateful=True, activation='tanh'))
    model.add(Dense(FORECASTING_STEPS, activation='sigmoid'))

    # config['lr'] stores the exponent: learning rate = 10 ** -lr.
    optimizer = Adam(lr=math.pow(10, -config['lr'][0]),
                     beta_1=0.9,
                     beta_2=0.999,
                     epsilon=None,
                     decay=0.0,
                     amsgrad=False)
    model.compile(loss='mse', optimizer=optimizer,
                  metrics=['mean_absolute_error'])
    return model
コード例 #13
0
def discriminator_for_image_or_leaf(inputs_size, dense_units, lr):
    """Two-hidden-layer sigmoid MLP discriminator with a softmax pair output.

    Gaussian input noise (stddev 0.01), 50% dropout after each hidden
    layer, and L2 regularization on the output layer's activity, kernel
    and bias. Compiled with binary cross-entropy and clipped RMSprop.
    """
    inputs = Input(shape=(inputs_size, ), name='inputs')
    net = GaussianNoise(0.01)(inputs)

    # Two identical hidden blocks: sigmoid Dense followed by Dropout(0.5).
    for _ in range(2):
        net = Dense(dense_units, activation="sigmoid")(net)
        net = Dropout(0.5)(net)

    outputs = Dense(2,
                    activation="softmax",
                    name="outputs",
                    activity_regularizer=l1_l2(l1=0, l2=0.02),
                    kernel_regularizer=l1_l2(l1=0, l2=0.02),
                    bias_regularizer=l1_l2(l1=0, l2=0.02))(net)

    dis_model = Model(inputs=[inputs], outputs=[outputs])
    dis_model.compile(loss="binary_crossentropy",
                      optimizer=RMSprop(lr=lr, clipvalue=1.0, decay=1e-8),
                      metrics=["accuracy"])

    return dis_model
コード例 #14
0
def MotionFixModel_lstm_one(batch_size, timestep, data_dim, gpu=True):
    """Stateful two-layer CuDNNLSTM sequence-to-sequence regressor.

    Maps (batch_size, timestep, data_dim) inputs back to the same feature
    dimension per timestep. Compiled with MSE loss and RMSprop.
    The ``gpu`` parameter is currently unused inside this function.
    """
    model = Sequential()

    # Input noise for regularization; also fixes the batch input shape,
    # which the stateful LSTMs below require.
    model.add(
        GaussianNoise(0.1, batch_input_shape=(batch_size, timestep, data_dim)))

    # batch_input_shape here is redundant (already set by the noise layer).
    model.add(
        CuDNNLSTM(150,
                  batch_input_shape=(batch_size, timestep, data_dim),
                  stateful=True,
                  return_sequences=True))

    model.add(Dropout(0.2))

    model.add(CuDNNLSTM(150, stateful=True, return_sequences=True))

    # Per-timestep linear projection back to the input feature width.
    model.add(Dense(data_dim, activation='linear'))

    model.compile(loss=mean_squared_error,
                  optimizer='RMSprop',
                  metrics=['acc'])

    return model
コード例 #15
0
ファイル: generator.py プロジェクト: mislam5285/point_process
	def create_trainable_model(self,sequences, pred_length, proxy_layer=None, need_noise_dropout=False, stddev=5.,sample_stddev=None):
		"""Wrap a HawkesLayer in a trainable Model, optionally with output noise.

		Parameters
		----------
		sequences, pred_length : forwarded to HawkesLayer.
		proxy_layer : optional layer forwarded to HawkesLayer.
		need_noise_dropout : bool
			When True, GaussianNoise(stddev) is applied to the output.
		stddev : float
			Noise standard deviation (default 5.0).
		sample_stddev : forwarded to HawkesLayer.

		Side effects: stores the model on ``self.model`` and the layer on
		``self.hawkes_layer``; warns on stderr when the generator has not
		been pretrained (``self.sequence_weights`` is None).
		"""
		from keras.layers import Input, GaussianNoise
		from keras.models import Model

		from pp_layer import HawkesLayer

		if self.sequence_weights is None:
			sys.stderr.write(str({
				'error info':'unpretrained generator',
			}) + '\n')
			sys.stderr.flush()

		# Single scalar index input; batch size fixed to 1.
		x = Input(batch_shape=(1,1), dtype='int32')
		hawkes_layer = HawkesLayer(sequences,pred_length,sequence_weights=self.sequence_weights,proxy_layer=proxy_layer,sample_stddev=sample_stddev)
		y = hawkes_layer(x)
		if need_noise_dropout == True:
			y = GaussianNoise(stddev)(y)

		model = Model(inputs=[x], outputs=[y], name='hawkes_output')

		self.model = model
		self.hawkes_layer = hawkes_layer
		return model
コード例 #16
0
def create_net(batch_size):
    """Stateful LSTM classifier shaped by the module-level training tensors.

    Uses globals ``F_train`` (features; only its shape is read) and
    ``T_train`` (targets; the last dimension sets the softmax width).
    Returns an uncompiled Model.
    """
    inputs = Input(batch_shape=(batch_size, F_train.shape[1],
                                F_train.shape[2]))
    # Dense front-end followed by light noise regularization.
    x = Dense(512, activation='tanh')(inputs)
    x = GaussianNoise(.1)(x)
    # Two stacked stateful LSTMs, both returning full sequences.
    x = LSTM(256,
             activation='tanh',
             dropout=0.,
             stateful=True,
             return_sequences=True,
             implementation=2)(x)
    x = LSTM(256,
             activation='tanh',
             dropout=0.,
             stateful=True,
             return_sequences=True,
             implementation=2)(x)
    x = Dropout(0.5)(x)
    x = Dense(128, activation='tanh')(x)
    # Per-timestep softmax over T_train's class dimension.
    outputs = Dense(T_train.shape[2], activation='softmax')(x)

    net = Model(inputs, outputs)
    return net
コード例 #17
0
def three_layer_Model_Noisy(loss='categorical_crossentropy',
                            optimizer='adadelta',
                            img_rows=28,
                            img_cols=28,
                            depth=1,
                            classes=10,
                            init='adam'):
    """Three-conv-layer CNN with a noisy input, channels-first layout.

    Relies on module-level ``nb_filters`` and ``nb_pool``. The ``init``
    parameter is not used inside this function.

    NOTE(review): ``GaussianNoise(sigma=...)`` is the Keras 1 keyword;
    Keras 2 renamed it to ``stddev`` -- confirm against the installed
    Keras version.
    """
    model = Sequential()
    model.add(
        GaussianNoise(sigma=0.01, input_shape=(depth, img_rows, img_cols)))
    # input_shape here is redundant (already fixed by the noise layer).
    model.add(
        Convolution2D(nb_filters,
                      3,
                      3,
                      border_mode='valid',
                      input_shape=(depth, img_rows, img_cols)))

    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))

    model.add(Convolution2D(nb_filters, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))

    model.add(Convolution2D(nb_filters, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(nb_pool, nb_pool)))

    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(classes))
    model.add(Activation('softmax'))
    model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
    return model
コード例 #18
0
    def create_model_1d(cfg):
        """Configurable 1-D CNN: conv/pool blocks, FF trunk, optional softmax.

        All hyperparameters come from the ``cfg`` dict; a Softmax head is
        appended only when cfg['task'] == 'classification'. Returns an
        uncompiled Sequential model.
        """
        # 23 steps x 3 channels per frame in the context window.
        input_shape = (23, 3 * (2 * cfg['feature_context'] + 1))

        model = Sequential()
        model.add(InputLayer(batch_input_shape=(None, input_shape[0], input_shape[1]), name='input'))
        model.add(GaussianNoise(stddev=cfg['gaussian_noise']))

        # Convolutional feature extractor.
        for _ in range(cfg['num_cnn_layers']):
            model.add(Conv1D(filters=cfg['filters'],
                             kernel_size=cfg['kernel_size'],
                             padding=cfg['padding'],
                             kernel_initializer='he_normal',
                             kernel_regularizer=regularizers.l2(cfg['weight_decay'])))
            model.add(Activation(activation=cfg['activation']))
            if cfg['batch_normalization']:
                model.add(BatchNormalization())
            model.add(MaxPooling1D(pool_size=cfg['pool_size']))
            model.add(Dropout(cfg['dropout']))

        model.add(Flatten())

        # Fully connected trunk.
        for _ in range(cfg['num_ff_layers']):
            model.add(Dense(cfg['ff_layer_size']))
            model.add(Activation(cfg['activation']))
            if cfg['batch_normalization']:
                model.add(BatchNormalization())
            model.add(Dropout(cfg['dropout']))

        model.add(Dense(cfg['output_dim']))
        model.add(BatchNormalization())
        # Optional softmax head for classification tasks.
        if cfg['task'] == 'classification':
            model.add(Softmax())

        return model
コード例 #19
0
def cnn_multi_filters(wv, sent_length, nfilters, nb_filters, **kwargs):
    """Multi-filter-width CNN text classifier (Keras 1 style API).

    Parameters
    ----------
    wv : embedding matrix forwarded to ``embeddings_layer``.
    sent_length : int
        Input sequence length.
    nfilters : iterable of int
        Filter widths; one Convolution1D branch is built per width.
    nb_filters : int
        Number of feature maps per branch.
    **kwargs : noise, trainable, drop_text_input, drop_conv, activity_l2.

    Returns a compiled 3-class softmax model (Adam, categorical CE).
    NOTE(review): nb_filter / border_mode / subsample_length / pool_length
    and Model(input=..., output=...) are Keras 1 keyword spellings.
    """
    noise = kwargs.get("noise", 0)
    trainable = kwargs.get("trainable", False)
    drop_text_input = kwargs.get("drop_text_input", 0.)
    drop_conv = kwargs.get("drop_conv", 0.)
    activity_l2 = kwargs.get("activity_l2", 0.)

    input_text = Input(shape=(sent_length,), dtype='int32')

    emb_text = embeddings_layer(max_length=sent_length, embeddings=wv,
                                trainable=trainable, masking=False)(input_text)
    # Noise + dropout directly on the embeddings for regularization.
    emb_text = GaussianNoise(noise)(emb_text)
    emb_text = Dropout(drop_text_input)(emb_text)

    # One conv + pool + flatten branch per filter width.
    pooling_reps = []
    for i in nfilters:
        feat_maps = Convolution1D(nb_filter=nb_filters,
                                  filter_length=i,
                                  border_mode="valid",
                                  activation="relu",
                                  subsample_length=1)(emb_text)
        pool_vecs = MaxPooling1D(pool_length=2)(feat_maps)
        pool_vecs = Flatten()(pool_vecs)
        # pool_vecs = GlobalMaxPooling1D()(feat_maps)
        pooling_reps.append(pool_vecs)

    # Concatenate all branches into one representation vector.
    representation = concatenate(pooling_reps)

    representation = Dropout(drop_conv)(representation)

    probabilities = Dense(3, activation='softmax',
                          activity_regularizer=l2(activity_l2))(representation)

    model = Model(input=input_text, output=probabilities)
    model.compile(optimizer="adam", loss='categorical_crossentropy')

    return model
def create_model(label_count):
    """Deep bottleneck-conv classifier with a golden-ratio dropout head.

    Relies on module-level HEIGHT, WIDTH, PHI and ``regularizers``.
    Input: (HEIGHT, WIDTH, 5) tensors; output: ``label_count``-way
    softmax. Returned uncompiled.
    """
    model = Sequential()
    model.add(GaussianNoise(stddev=0.01, input_shape=(HEIGHT,WIDTH,5)))

    # Bottleneck conv blocks (1 x k x 1 kernel pattern). Only the first
    # three blocks carry BatchNormalization before pooling.
    conv_blocks = (
        (32, 3, True),
        (64, 2, True),
        (128, (2, 3), True),
        (256, 2, False),
    )
    for n_filters, mid_kernel, with_bn in conv_blocks:
        model.add(Conv2D(filters=n_filters, kernel_size=1, padding='valid', activation='elu'))
        model.add(Conv2D(filters=n_filters, kernel_size=mid_kernel, padding='valid', activation='elu'))
        model.add(Conv2D(filters=n_filters, kernel_size=1, padding='valid', activation='elu'))
        if with_bn:
            model.add(BatchNormalization())
        model.add(MaxPooling2D(pool_size=2))

    model.add(GlobalAveragePooling2D())
    model.add(BatchNormalization())
    model.add(Dropout(2-PHI))

    # Dense head: two plain elu blocks, then one with L1 activity pressure.
    for _ in range(2):
        model.add(Dense(256, activation='elu'))
        model.add(BatchNormalization())
        model.add(Dropout(2-PHI))
    model.add(Dense(256, activation='elu', activity_regularizer=regularizers.l1(0.0001)))
    model.add(BatchNormalization())
    model.add(Dropout(2-PHI))

    model.add(Dense(label_count, kernel_regularizer=regularizers.l1(0.01/(label_count*256))))
    model.add(Activation('softmax'))
    return model
コード例 #21
0
ファイル: lstm_with_classes.py プロジェクト: INTER-ACT/ilai
def create_Network():
    """Build, compile and summarize the global LSTM text classifier.

    Architecture (the former "model3" variant): GaussianNoise(0.1) on
    (None, 300) embedding sequences, two stacked 32-unit LSTMs with
    batch_size=1 and dropout after each, and a 10-way softmax head.
    Compiled with Adam and mean-squared-error loss.

    Side effects: binds the compiled model to the module-level ``model``
    and prints its summary. Also returns the model (backward compatible;
    the original returned None).

    Cleanup: two abandoned experiment variants (model1 / model2) that were
    kept as commented-out code have been removed.
    """
    global model
    model = Sequential()
    model.add(GaussianNoise(0.1, input_shape=(None, 300)))
    model.add(
        LSTM(32, input_shape=(None, 300), return_sequences=True, batch_size=1))
    model.add(Dropout(0.3))
    model.add(
        LSTM(32, input_shape=(None, 300), return_sequences=False,
             batch_size=1))
    model.add(Dropout(0.3))
    model.add(Dense(10, activation='softmax'))

    model.compile(optimizer='adam',
                  loss='mean_squared_error',
                  metrics=['accuracy'])
    print(model.summary())
    return model
コード例 #22
0
ファイル: model.py プロジェクト: zswitten/KerasDeepSpeech
def graves(input_dim=26, rnn_size=512, output_dim=29, std=0.6):
    """ Implementation of Graves 2006 model

    Architecture:
        Gaussian Noise on input
        BiDirectional LSTM
        TimeDistributed softmax projection, trained via CTC loss

    Parameters:
        input_dim: features per input frame (default 26)
        rnn_size: LSTM units per direction
        output_dim: size of the output alphabet (default 29)
        std: stddev of the input Gaussian noise

    Returns:
        Model whose single output is the CTC loss value; its inputs are
        [the_input, the_labels, input_length, label_length].

    Reference:
        ftp://ftp.idsia.ch/pub/juergen/icml2006.pdf
    """

    K.set_learning_phase(1)  # force training-phase behavior (noise active)
    input_data = Input(name='the_input', shape=(None, input_dim))
    # x = BatchNormalization(axis=-1)(input_data)

    x = GaussianNoise(std)(input_data)
    x = Bidirectional(LSTM(rnn_size,
                      return_sequences=True,
                      implementation=0))(x)
    # Per-timestep softmax over the output alphabet.
    y_pred = TimeDistributed(Dense(output_dim, activation='softmax'))(x)

    # Input of labels and other CTC requirements
    labels = Input(name='the_labels', shape=[None,], dtype='int32')
    input_length = Input(name='input_length', shape=[1], dtype='int32')
    label_length = Input(name='label_length', shape=[1], dtype='int32')

    # Keras doesn't currently support loss funcs with extra parameters
    # so CTC loss is implemented in a lambda layer
    loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name='ctc')([y_pred,
                                                                       labels,
                                                                       input_length,
                                                                       label_length])


    model = Model(inputs=[input_data, labels, input_length, label_length], outputs=[loss_out])

    return model
コード例 #23
0
def Res_model():
    """1-D residual regression network over (20, 1) inputs.

    Two residual stages: each adds a nonlinear branch (tanh/relu convs,
    max-pool, dropout) to a linear branch (activation-free convs,
    average-pool). Ends in a single linear unit; compiled with log-cosh
    loss and Adam.

    NOTE(review): ``Model(input=..., output=...)`` is the Keras 1 keyword
    spelling (Keras 2 uses inputs/outputs) -- confirm the target version.
    """

    x_input = Input(shape=(
        20,
        1,
    ))

    # Input noise; NOTE(review): stddev=10 is large -- presumably inputs
    # are unnormalized, verify against the caller.
    x = GaussianNoise(stddev=10)(x_input)
    # Stage 1, nonlinear branch.
    y = Conv1D(filters=20, kernel_size=3, activation='tanh')(x)
    y = Conv1D(filters=20, kernel_size=3, activation='relu')(y)
    y = MaxPooling1D(pool_size=2)(y)
    y = Dropout(0.2)(y)

    # Stage 1, linear branch, then residual merge.
    x = Conv1D(filters=20, kernel_size=3, activation=None)(x)
    x = Conv1D(filters=20, kernel_size=3, activation=None)(x)
    x = AveragePooling1D(pool_size=2)(x)
    x = Add()([x, y])

    # Stage 2, nonlinear branch (three convs this time).
    y = Conv1D(filters=20, kernel_size=3, activation='tanh')(x)
    y = Conv1D(filters=20, kernel_size=3, activation='relu')(y)
    y = Conv1D(filters=20, kernel_size=3, activation='relu')(y)
    y = MaxPooling1D(pool_size=2)(y)
    y = Dropout(0.2)(y)

    # Stage 2, linear branch, then residual merge.
    x = Conv1D(filters=20, kernel_size=3, activation=None)(x)
    x = Conv1D(filters=20, kernel_size=3, activation=None)(x)
    x = Conv1D(filters=20, kernel_size=3, activation=None)(x)
    x = AveragePooling1D(pool_size=2)(x)
    x = Add()([x, y])

    x = Flatten()(x)
    x = Dense(units=1)(x)

    Res1D = Model(input=x_input, output=x)
    Res1D.compile(loss='logcosh', optimizer=Adam(), metrics=['MSE'])

    return (Res1D)
コード例 #24
0
def get_2d_conv_model(input_shape):
    ''' CNN-LSTM'''
    # 2-D conv front-end, 1-D conv, two bidirectional LSTMs, and a 7-way
    # softmax head. Returned uncompiled.
    inp = Input(shape=input_shape)

    # Add a channel axis, perturb with noise, run a (6, 1) conv + pool.
    features = Reshape((*input_shape, 1))(inp)
    features = GaussianNoise(0.05)(features)
    features = Conv2D(64, (6, 1), strides=1, padding='same')(features)
    features = Activation("relu")(features)
    features = MaxPooling2D(pool_size=(4, 1))(features)

    # Collapse (freq, channel) into one feature axis for 1-D processing.
    features = Reshape((-1, 64 * input_shape[1]))(features)
    features = Conv1D(128,
                      3,
                      strides=1,
                      padding='same',
                      kernel_regularizer=l1(0.0005))(features)
    features = Activation('relu')(features)
    features = MaxPooling1D(2)(features)
    features = BatchNormalization()(features)

    features = Dropout(0.5)(features)
    # First BiLSTM keeps the sequence; the second reduces to one vector.
    features = Bidirectional(LSTM(48, return_sequences=True),
                             merge_mode='concat')(features)
    features = Dropout(0.5)(features)  # TODO: Try Attention
    features = Bidirectional(LSTM(48, return_sequences=False),
                             merge_mode='concat')(features)

    out = Dense(7, activation='softmax')(features)

    model = keras.models.Model(inputs=inp, outputs=out)
    return model
コード例 #25
0
    def init_default_model_1D(self):
        """Build the default 1-D classifier and store it on ``self.model``.

        Reads input/output sizes from ``self.model_data`` and finishes by
        calling ``self.set_optimizer("adam")``.
        """
        inputs = Input(shape=self.model_data.input)

        # Hard-coded switch between a Conv1D front-end and a single Dense.
        use_cnn = True
        if use_cnn:
            # Same-padded convs with shrinking kernels: 12 -> 8 -> 4 -> 2.
            x = Conv1D(filters=32,
                       kernel_size=12,
                       activation="relu",
                       padding="same")(inputs)
            x = Conv1D(filters=32,
                       kernel_size=8,
                       activation="relu",
                       padding="same")(x)
            x = Conv1D(filters=32,
                       kernel_size=4,
                       activation="relu",
                       padding="same")(x)
            x = Dropout(rate=0.25)(x)
            x = Conv1D(filters=32,
                       kernel_size=2,
                       activation="relu",
                       padding="same")(x)
            x = Flatten()(x)
        else:
            # Linear projection; the activation is applied after the noise.
            x = Dense(64, activation=None)(inputs)
        # Noise before the ReLU regularizes the extracted features.
        x = GaussianNoise(0.1)(x)
        x = Activation(activation="relu")(x)
        x = Dense(64, activation="relu")(x)
        x = Dropout(rate=0.25)(x)
        x = Dense(64, activation="relu")(x)
        x = Dense(64, activation="relu")(x)

        predictions = Dense(self.model_data.output, activation="softmax")(x)

        self.model = Model(inputs=inputs, outputs=predictions)

        self.set_optimizer("adam")
コード例 #26
0
def generator_func(latent_dim):
    """GAN generator: latent vector -> 3-channel image in tanh range.

    Uses project-local layers ``self_attention`` and ``pixel_normalization``.
    Seeds an 8x8x256 tensor from a Dense layer, then upsamples with three
    stride-2 Conv2DTranspose layers (8 -> 16 -> 32 -> 64 spatially).
    """
    init = RandomNormal(stddev=0.02)  # small-stddev normal weight init
    model = Sequential()
    n_nodes = 256 * 8 * 8  # units required for the 8x8x256 reshape below
    model.add(Dense(n_nodes, kernel_initializer=init, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Reshape((8, 8, 256)))
    model.add(GaussianNoise(0.3))  # noise on the seed tensor
    model.add(
        Conv2DTranspose(16, (5, 5),
                        strides=(2, 2),
                        padding='same',
                        kernel_initializer=init))
    model.add(LeakyReLU(alpha=0.2))
    model.add(
        Conv2DTranspose(16, (5, 5),
                        strides=(2, 2),
                        padding='same',
                        kernel_initializer=init))
    model.add(self_attention(ch=16))  # project-local attention block
    model.add(LeakyReLU(alpha=0.2))
    model.add(
        Conv2DTranspose(32, (5, 5),
                        strides=(2, 2),
                        padding='same',
                        kernel_initializer=init))
    model.add(self_attention(ch=32))
    model.add(pixel_normalization())  # project-local normalization layer
    model.add(LeakyReLU(alpha=0.2))
    model.add(GaussianDropout(0.4))
    # Final stride-1 conv to 3 channels; tanh keeps outputs in [-1, 1].
    model.add(
        Conv2D(3, (5, 5),
               strides=(1, 1),
               activation='tanh',
               padding='same',
               kernel_initializer=init))
    return model
コード例 #27
0
def EmNet_creator(input_shape):
    """Build the EmNet classifier.

    Pipeline: add a channel axis -> noisy 2-D temporal convolution ->
    1-D convolution -> two stacked bidirectional LSTMs -> pooling ->
    dense head with a 7-way softmax output.

    `input_shape` is a 2-D shape; assumes the Keras default
    "channels_last" image data format.
    """
    model = Sequential([
        # Append a trailing channel axis so Conv2D can consume the input.
        Reshape((*input_shape, 1), input_shape=input_shape),
        GaussianNoise(0.05),
        # 6x1 convolution: filters slide along the first (time) axis only.
        Conv2D(64, (6, 1), strides=1, padding='same'),
        Activation('relu'),
        MaxPooling2D(pool_size=(4, 1)),
        # Flatten feature/channel axes into one so Conv1D runs over time.
        Reshape((-1, 64 * input_shape[1])),
        Conv1D(128, 3, strides=1, padding='same'),
        Activation('relu'),
        MaxPooling1D(2),
        BatchNormalization(),
        Dropout(0.5),
        # Two stacked BiLSTMs, both returning full sequences.
        Bidirectional(LSTM(48, return_sequences=True), merge_mode='concat'),
        Dropout(0.5),
        Bidirectional(LSTM(48, return_sequences=True), merge_mode='concat'),
        MaxPooling1D(pool_size=64),
        Flatten(),
        Dense(32, activation='relu'),
        Dropout(0.5),
        Dense(7, activation='softmax'),
    ])
    return model
Code example #28
0
def create_network(nb_features, padding_value, lr=0.00001):
    """Build the three-head (notes / octaves / rythms) network.

    A shared trunk (noise -> ``cnn_base`` -> pooling -> BiLSTM) feeds
    three independent heads built via ``build_head``, one per target.

    Parameters
    ----------
    nb_features : int
        Image height, i.e. the per-timestep feature dimension.
    padding_value :
        Not used in this function; kept for caller compatibility.
    lr : float
        Learning rate forwarded to each head.

    Returns
    -------
    dict
        Maps "notes", "octaves" and "rythms" to the corresponding model.
    """
    # Variable-length input: (time, height, 1 channel).
    input_data = Input(name='input', shape=(None, nb_features, 1))

    # Shared trunk.
    cnn = cnn_base(GaussianNoise(0.01)(input_data))
    pooled = MaxPooling2D(pool_size=(1, nb_features))(cnn)
    # Drop the now-singleton feature axis so the LSTM sees (time, channels).
    squeezed = Lambda(lambda t: K.squeeze(t, axis=2))(pooled)
    blstm = Bidirectional(LSTM(256, return_sequences=True,
                               dropout=0.3))(squeezed)

    # One independently compiled model per prediction target.
    head_sizes = {"notes": 23, "octaves": 15, "rythms": 60}
    return {
        name: build_head(input_data, blstm, size, name, lr)
        for name, size in head_sizes.items()
    }
Code example #29
0
def getArgmaxPlusBlock(input_dim, proj_dim, num_classes=10):
    """Build and return an argmax-plus block model.

    Architecture: input of size ``input_dim`` -> Gaussian noise
    (stddev 0.2) -> bias-free ReLU linear layer of size ``proj_dim``
    -> ``ArgMaxPlusDense`` output of size ``num_classes``.

    Parameters
    ----------
    input_dim : int
        Dimension of the input vector (neurons in the input layer).
    proj_dim : int
        Number of neurons in the linear layer.
    num_classes : int
        Number of classes, which is also the output dimension.

    Returns
    -------
    The argmax-plus block model.
    """
    inputs = Input(shape=(input_dim, ))
    noisy = GaussianNoise(.2)(inputs)
    projected = Dense(proj_dim, activation='relu', name='linear',
                      use_bias=False)(noisy)
    outputs = ArgMaxPlusDense(num_classes, name='argMaxPlus')(projected)
    return Model(inputs, outputs)
Code example #30
0
ファイル: phaze_a.py プロジェクト: s884812/faceswap
    def _g_block(cls, inputs, style, filters, recursions=2):
        """ G_block adapted from ADAIN StyleGAN.

        Each recursion projects the style tensor into two (1, 1, filters)
        maps for AdaIN, injects a learned per-channel noise branch, and
        applies a leaky ReLU. The final recursion additionally runs the
        activations through a 3x3 convolution before normalization.

        Parameters
        ----------
        inputs: tensor
            The input tensor to the G-Block model
        style: tensor
            The input combined 'style' tensor to the G-Block model
        filters: int
            The number of filters to use for the G-Block Convolutional layers
        recursions: int, optional
            The number of recursive Convolutions to process. Default: `2`

        Returns
        -------
        tensor
            The output tensor from the G-Block model
        """
        var_x = inputs
        for recursion in range(recursions):
            # Two style projections reshaped to broadcastable channel maps.
            scale_shift = []
            for _ in range(2):
                projected = Dense(filters)(style)
                scale_shift.append(Reshape([1, 1, filters])(projected))

            # Noise branch is taken from var_x BEFORE the optional 3x3 conv.
            noise = KConv2D(filters, 1,
                            padding="same")(GaussianNoise(1.0)(var_x))

            if recursion == recursions - 1:
                var_x = KConv2D(filters, 3, padding="same")(var_x)

            var_x = AdaInstanceNormalization(dtype="float32")(
                [var_x, *scale_shift])
            var_x = Add()([var_x, noise])
            var_x = LeakyReLU(0.2)(var_x)

        return var_x