Example #1
def create_model(train_data):

    model = Sequential()

    model.add(Flatten(input_shape=train_data.shape[1:]))

    model.add(Dense(128, activation='sigmoid', name='dense'))

    model.add(Dropout(0.75))

    model.add(
        Dense(config.num_classes,
              activation='sigmoid',
              kernel_regularizer=L1L2(0, 0.01),
              activity_regularizer=L1L2(0, 0.01),
              name='out'))

    #optimizer = Adam(lr=0.01)
    optimizer = RMSprop(lr=0.005)
    #optimizer = SGD(lr=0.005, decay=1e-6, momentum=0.9, nesterov=True)

    # In Keras we need to compile the model so it can be trained.
    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model
Example #2
def lstm_keras(inp_dim,
               vocab_size,
               embed_size,
               use_word_embeddings=False,
               embedding_matrix=None,
               embedding_trainable=False):
    #     K.clear_session()
    model = Sequential()
    if use_word_embeddings:
        model.add(
            Embedding(vocab_size,
                      embed_size,
                      weights=[embedding_matrix],
                      input_length=inp_dim,
                      trainable=embedding_trainable))
    else:
        model.add(
            Embedding(vocab_size,
                      embed_size,
                      input_length=inp_dim,
                      trainable=True))
    model.add(Dropout(0.25))
    model.add(
        LSTM(embed_size,
             kernel_regularizer=L1L2(l1=0.0, l2=0.00000001),
             bias_regularizer=L1L2(l2=0.00000001)))
    model.add(Dropout(0.50))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    #print (model.summary())
    return model
Example #3
    def build_module(self):
        '''
        Build the module from the initialized arguments above

        # Returns
        ---------
            collection: list
                list of layers according to the specified hyperparameters
        '''
        collection = []
        if self.timedistributed:
            for i in range(self.layers):
                tmp = TimeDistributed(Dense(self.sizes[i]))
                activation = self.activations[i]
                collection.append(tmp)
                collection.append(activation)

                if self.dropout:
                    tmp = Dropout(0.25)
                    collection.append(tmp)
                if (self.L1 + self.L2) > 0.0:
                    tmp = L1L2(self.L1, self.L2)
                    collection.append(tmp)
        else:
            for i in range(self.layers):
                tmp = Dense(self.sizes[i], activation=self.activations[i])
                collection.append(tmp)
                if self.dropout:
                    tmp = Dropout(0.3)
                    collection.append(tmp)
                if (self.L1 + self.L2) > 0.0:
                    tmp = L1L2(self.L1, self.L2)
                    collection.append(tmp)

        return collection
Example #4
def run():
    # load dataset
    series = read_csv('shampoo-sales.csv',
                      header=0,
                      parse_dates=[0],
                      index_col=0,
                      squeeze=True,
                      date_parser=parser)
    # configure the experiment
    n_lag = 1
    n_repeats = 30
    n_epochs = 1000
    n_batch = 4
    n_neurons = 3
    regularizers = [
        L1L2(l1=0.0, l2=0.0),
        L1L2(l1=0.01, l2=0.0),
        L1L2(l1=0.0, l2=0.01),
        L1L2(l1=0.01, l2=0.01)
    ]
    # run the experiment
    results = DataFrame()
    for reg in regularizers:
        name = ('l1 %.2f,l2 %.2f' % (reg.l1, reg.l2))
        results[name] = experiment(series, n_lag, n_repeats, n_epochs, n_batch,
                                   n_neurons, reg)
    # summarize results
    print(results.describe())
    # save boxplot
    results.boxplot()
    pyplot.savefig('experiment_reg_bias.png')
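The experiment() helper called above (and in several later examples) is not included in this listing. Below is a minimal sketch of what such a helper could look like; the name and signature come from the call site, but the body is an assumption: it frames the series for one-step-ahead forecasting, fits a small LSTM with the supplied regularizer applied to the bias (to match the experiment_reg_bias.png file name), and returns one test RMSE per repeat.

import numpy as np
from math import sqrt
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense, LSTM

def experiment(series, n_lag, n_repeats, n_epochs, n_batch, n_neurons, reg):
    # Frame as supervised learning: X = value at t - n_lag, y = value at t.
    values = series.values.astype('float32')
    X, y = values[:-n_lag], values[n_lag:]
    n_train = len(X) - 12  # hold out the last 12 observations for testing
    X_train, y_train = X[:n_train].reshape(-1, 1, 1), y[:n_train]
    X_test, y_test = X[n_train:].reshape(-1, 1, 1), y[n_train:]
    scores = []
    for _ in range(n_repeats):
        model = Sequential()
        model.add(LSTM(n_neurons, input_shape=(1, 1), bias_regularizer=reg))
        model.add(Dense(1))
        model.compile(loss='mean_squared_error', optimizer='adam')
        model.fit(X_train, y_train, epochs=n_epochs, batch_size=n_batch,
                  verbose=0, shuffle=False)
        yhat = model.predict(X_test, batch_size=n_batch, verbose=0)
        scores.append(sqrt(mean_squared_error(y_test, yhat)))
    return scores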
Example #5
def tune_weight_regularization(train_X, train_y, validation_X, validation_y):
    # define scope of search
    regularizers = {
        1: L1L2(l1=0.0, l2=0.01),
        2: L1L2(l1=0.01, l2=0.0),
        3: L1L2(l1=0.0, l2=0.0),
        4: L1L2(l1=0.01, l2=0.01)
    }
    n_repeats = 5
    # grid search parameter values
    scores = DataFrame()
    for reg in regularizers.keys():
        # repeat each experiment multiple times
        loss_values = list()
        for i in range(n_repeats):
            loss = fit_weight_regularization_model(regularizers[reg], train_X,
                                                   train_y, validation_X,
                                                   validation_y)
            loss_values.append(loss)
            print('>%d/%d param=%f, loss=%f' % (i + 1, n_repeats, reg, loss))
        # store results for this parameter
        scores[str(reg)] = loss_values
    # summary statistics of results
    print(scores.describe())
    # box and whisker plot of results
    scores.boxplot()
    pyplot.show()
Example #6
def experiment():
    dataset = pd.read_csv(
        r'G:\Final Dev\12_Classifier\Data Merged\train_file.csv', header=None)
    neuron = 26
    n_epochs = 5
    n_batch = 144
    k_fold = 3
    x, y = read_data(dataset)
    results = pd.DataFrame()
    regularizer = [
        L1L2(l1=0.0, l2=0.0),
        L1L2(l1=0.001, l2=0.0),
        L1L2(l1=0.0, l2=0.001),
        L1L2(l1=0.001, l2=0.001)
    ]
    for reg in regularizer:
        name = ("l1 %.3f l2 %.3f" % (reg.l1, reg.l2))
        results[name] = model_fit(x, y, neuron, n_epochs, n_batch, k_fold, reg)
    print(results)
    print(results.describe())

    stop = timeit.default_timer()
    print('Time :', stop - start)

    # save boxplot
    results.boxplot()
    plt.ylabel('Accuracy')
    plt.xlabel('Regularizer')
    Box_Name = input("Nama Figure: ")
    plt.savefig(r'G:\Final Dev\12_Classifier\Box Plot Model\%s.png' % Box_Name)
Example #7
def default_model(n_fft):
    """
    This model is a bit too large for Tegra, but it is proven
    """
    assert n_fft == 257, "Default model cannot handle non-257 fft sizes"
    input_lower = Input((None, 257), name="input_lf")
    layer = Lambda(K.expand_dims)(input_lower)
    layer = LeakyReLU(0.01)(Conv2D(12, kernel_size=(9, 1),
                                   activation='linear')(layer))
    layer = LeakyReLU(0.01)(Conv2D(12, kernel_size=(1, 5),
                                   activation='linear')(layer))
    layer = LeakyReLU(0.01)(Conv2D(12, kernel_size=(9, 1),
                                   activation='linear')(layer))
    layer = LeakyReLU(0.01)(Conv2D(12, kernel_size=(1, 5),
                                   activation='linear')(layer))
    layer = TimeDistributed(Flatten())(layer)
    layer = LeakyReLU(0.01)(Dense(1024,
                                  kernel_regularizer=L1L2(l1=1e-5))(layer))
    layer = LeakyReLU(0.01, name='hidden')(Dense(
        512, kernel_regularizer=L1L2(l1=1e-5))(layer))
    layer = LeakyReLU(0.01)(Dense(350,
                                  kernel_regularizer=L1L2(l2=1e-5))(layer))
    layer = Dense(257)(layer)
    mdl = Model(input_lower, layer)
    mdl.summary()
    return mdl
Example #8
    def fit(self, X, y, weights):
        input_shape = X.shape[1]
        self.reg.add(
            Dense(int(np.floor(input_shape / 2)),
                  input_dim=input_shape,
                  activation='relu',
                  kernel_regularizer=L1L2(l1=0.0, l2=0.1)))
        self.reg.add(Dense(1, kernel_regularizer=L1L2(l1=0.0, l2=0.1)))
        self.reg.compile(optimizer='adam', loss='mean_squared_error')
        self.reg.fit(X, y, sample_weight=weights, verbose=True)
Example #9
    def create_model(self):
        lstm_units = 100

        # Comment input
        encoder_inputs = Input(shape=(self.comlen, ))
        encoder_embedding = Embedding(output_dim=10,
                                      input_dim=self.comvocabsize,
                                      mask_zero=False)(encoder_inputs)
        encoder = Bidirectional(
            CuDNNLSTM(lstm_units,
                      return_state=True,
                      bias_regularizer=L1L2(0.01, 0.0)))
        encoder_outputs, forward_h, forward_c, backward_h, backward_c = encoder(
            encoder_embedding)
        state_h = concatenate([forward_h, backward_h])
        state_c = concatenate([forward_c, backward_c])

        encoder_states = [state_h, state_c]

        # Tag input
        decoder_inputs = Input(shape=(self.taglen, ))
        decoder_embedding = Embedding(output_dim=2,
                                      input_dim=self.tagvocabsize,
                                      mask_zero=False)(decoder_inputs)
        decoder_lstm = CuDNNLSTM(lstm_units * 2,
                                 return_sequences=True,
                                 return_state=True,
                                 bias_regularizer=L1L2(0.01, 0.0))
        decoder_outputs, _, _ = decoder_lstm(decoder_embedding,
                                             initial_state=encoder_states)
        #decoder_dropout = Dropout(0.5)(decoder_outputs)
        #decoder_d = TimeDistributed(Dense(400))(decoder_dropout)
        decoder_dense = Dense(self.tagvocabsize, activation='softmax')
        decoder_outputs = decoder_dense(decoder_outputs)
        train_model = Model(inputs=[encoder_inputs, decoder_inputs],
                            outputs=decoder_outputs)

        # Define Inference Model
        # Inference Encoder
        encoder_model = Model(encoder_inputs, encoder_states)

        # Inference Decoder
        decoder_state_h = Input(shape=(lstm_units * 2, ))
        decoder_state_c = Input(shape=(lstm_units * 2, ))
        decoder_state_inputs = [decoder_state_h, decoder_state_c]
        decoder_outputs, state_h, state_c = decoder_lstm(
            decoder_embedding, initial_state=decoder_state_inputs)
        #decoder_dropout = Dropout(0.5)(decoder_outputs)
        #decoder_d = TimeDistributed(Dense(400))(decoder_dropout)
        decoder_states = [state_h, state_c]
        decoder_outputs = decoder_dense(decoder_outputs)
        decoder_model = Model([decoder_inputs] + decoder_state_inputs,
                              [decoder_outputs] + decoder_states)

        return train_model, encoder_model, decoder_model
Example #10
def build_model(stateful, batch_size=None):
    inp = Input(shape=IN_SHAPE[1:])  # keep a handle on the raw Input so Model() can use it below
    i = Permute((1, 3, 4, 2), batch_input_shape=IN_SHAPE)(inp)
    R = representation_rnn()
    C = consciousness_rnn()
    G = generator_rnn()
    D = decoder_rnn(nb_actions)

    h = R(i)  # Get h from R
    c_A, c_B, c_A_soft, c_B_soft = C(h)  # Get masks c_A and c_B from C
    b = multiply([h, c_B],
                 name='b')  # Get b through elementwise multiplication
    a_hat = G([c_A, c_B, b])  # Send c_A, c_B and b to G to get a_hat

    a_hat = Lambda(lambda x: x[:, :-1, :], output_shape=(None, latent_dim))(
        a_hat)  # Slice dimensions to align vectors
    h_A = Lambda(lambda x: x[:, 1:, :], output_shape=(None, latent_dim))(
        h)  # Slice dimensions to align vectors
    c_A = Lambda(lambda x: x[:, :-1, :], output_shape=(None, latent_dim))(
        c_A)  # Slice dimensions to align vectors

    h_A = multiply([h_A, c_A])  # Calculate h[A] to compare against a_hat
    a_hat = multiply([a_hat, c_A])  # Mask a_hat
    consciousness_error = subtract([a_hat, h_A])
    consciousness_error = Regularize(
        L1L2(l1=0., l2=1. * reg_lambda),
        name='Consciousness_Generator_Error')(consciousness_error)

    b_transformed = Dense(latent_dim, activation='linear')(
        b)  # Create a layer that attempts to make b independent from h[A]
    b_transformed = Lambda(lambda x: x[:, :-1, :],
                           output_shape=(None, latent_dim))(b_transformed)
    b_transformed = multiply([b_transformed, c_A])
    transformation_error = subtract([b_transformed, h_A])
    transformation_error = Regularize(
        L1L2(l1=0., l2=1. * reg_lambda),
        name='Transformation_Error')(transformation_error)

    intelligence_error = concatenate([
        c_A_soft, c_B_soft
    ])  # The more elements we choose to predict, the more "intelligent" we are
    intelligence_error = Flatten()(intelligence_error)
    intelligence_error = Regularize(
        LinearRegularizer(c=1. * reg_lambda),
        name='Intelligence_Level')(intelligence_error)

    x_hat = D(a_hat)
    x_hat = ApplyRegularization()(
        [x_hat, consciousness_error, transformation_error, intelligence_error])

    ## Compile the model and start training
    CN = Model(inputs=inp, outputs=[x_hat])

    return CN
Example #11
def lstm_bert(embed_size):
    model = Sequential()
    model.add(
        LSTM(embed_size,
             kernel_regularizer=L1L2(l1=0.0, l2=0.00000001),
             bias_regularizer=L1L2(l2=0.00000001)))
    model.add(Dropout(0.50))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #12
    def build(self, input_shape):
        self.W = self.add_weight(name='Attention_Dot_Weight',
                                 shape=(input_shape[1], input_shape[1]),
                                 regularizer=L1L2(0.0000032),
                                 initializer='uniform',
                                 trainable=True)
        self.b = self.add_weight(name='Attention_Dot_Bias',
                                 regularizer=L1L2(0.00032),
                                 shape=(input_shape[1], ),
                                 initializer='uniform',
                                 trainable=True)
        super().build(input_shape)
Example #13
    def setup(self, input_dim: tuple, **kwargs) -> None:
        """
            Method that builds the NN architecture

        :param input_dim:   The dimension of the inputs
        :param kwargs:      Other arguments. Not used here
        :return:            -
        """
        assert not self.built
        # Defining layers
        batchNormLayer1 = BatchNormalization(input_shape=input_dim)

        dropoutInputLayer = Dropout(rate=.2)

        denseLayer1 = Dense(32,
                            activation='relu',
                            kernel_regularizer=L1L2(l1=.0, l2=.1),
                            kernel_initializer='uniform')

        batchNormLayer2 = BatchNormalization()

        dropoutDenseLayer1 = Dropout(rate=.2)

        denseLayer2 = Dense(16,
                            activation='relu',
                            kernel_regularizer=L1L2(l1=.0, l2=.1),
                            kernel_initializer='uniform')

        batchNormLayer3 = BatchNormalization()

        dropoutDenseLayer2 = Dropout(rate=.2)

        outputLayer = Dense(2,
                            activation='softmax',
                            kernel_initializer='uniform')

        # adding the layers
        self.model.add(batchNormLayer1)
        self.model.add(dropoutInputLayer)
        self.model.add(denseLayer1)
        self.model.add(batchNormLayer2)
        self.model.add(dropoutDenseLayer1)
        self.model.add(denseLayer2)
        self.model.add(batchNormLayer3)
        self.model.add(dropoutDenseLayer2)
        self.model.add(outputLayer)

        # compiling the model
        self.model.compile(optimizer=Adam(),
                           loss='categorical_crossentropy',
                           metrics=['accuracy'])

        self.built = True
Example #14
def model_CNN_LSTM_layers():
    '''
    returns a CNN+LSTM model with 2 conv and 3 lstm layers. 
    
    Parameters
    ----------
    None
    
    Returns
    -------
    model: keras model with the architecture described
    '''
    model = Sequential()

    model.add(
        TimeDistributed(Conv1D(filters=32,
                               kernel_size=2,
                               padding='valid',
                               activation='relu'),
                        input_shape=(None, nTimeSteps, nFeatures)))
    model.add(TimeDistributed(Dropout(0.05)))

    model.add(
        TimeDistributed(Conv1D(filters=64,
                               kernel_size=2,
                               padding='valid',
                               activation='relu'),
                        input_shape=(None, nTimeSteps, nFeatures)))
    model.add(TimeDistributed(Dropout(0.05)))

    model.add(TimeDistributed(Flatten()))

    model.add(
        LSTM(50,
             return_sequences=True,
             activation='relu',
             dropout=0.2,
             kernel_regularizer=L1L2(l1=0.01, l2=0.01)))
    model.add(
        LSTM(50,
             return_sequences=True,
             activation='relu',
             dropout=0.2,
             kernel_regularizer=L1L2(l1=0.01, l2=0.01)))
    model.add(LSTM(30))
    model.add(Dense(1, activation='tanh'))  #Final activation tanh function

    print("\nModel Architecture \n")
    model.summary()
    model.compile(loss='mean_absolute_error',
                  optimizer=keras.optimizers.Adam(learning_rate=0.001),
                  metrics=['accuracy'])
    return model
Example #15
def main():
    # load data set
    open_db()
    origin = raw_input("Origin: ")
    destination = raw_input("Destination: ")
    end = raw_input("End Date (MM/YY): ")
    end_date = "20" + end[3:6] + "-" + end[0:2]
    segments = concatenate_segment(origin, destination, end_date)

    series = segments[pd.notnull(segments.passengers)].passengers

    # configure the experiment
    n_lag = 1
    n_repeats = 10
    n_epochs = 1000
    n_batch = (len(series) - 2) % 12
    n_neurons = 3
    n_dropout = [0.0, 0.2, 0.4, 0.6]
    input_regularizers = [
        L1L2(l1=0.0, l2=0.0),
        L1L2(l1=0.01, l2=0.0),
        L1L2(l1=0.0, l2=0.01),
        L1L2(l1=0.01, l2=0.01)
    ]
    # run the experiment
    results = pd.DataFrame()
    for dropout in n_dropout:
        for input_reg in input_regularizers:
            name = ('l1 %.2f,l2 %.2f - dropout: %.2f' %
                    (input_reg.l1, input_reg.l2, dropout))
            results[name] = experiment(series, n_lag, n_repeats, n_epochs,
                                       n_batch, n_neurons, input_reg, dropout)
    """
    n_dim = series.shape[1]
    for dropout in n_dropout:
        for input_reg in input_regularizers:
            name = ('l1 %.2f,l2 %.2f - dropout: %.2f' % (input_reg.l1, input_reg.l2, dropout))
            results[name] = experiment(series, n_lag, n_repeats, n_epochs, n_batch, n_neurons, input_reg, dropout)
    
    """

    # summarize results
    print(results.describe())
    # save boxplot
    pyplot.title("%s - %s" % (origin, destination))
    results.boxplot()
    pyplot.savefig('~/experiment_baseline.png')

    pyplot.figure()
    pyplot.boxplot(results)
Example #16
def generate_model2(con_cols, lstm_list, M, cells):

    # Initialize input
    k = 0
    inputs = [[], [], []]
    for m in M:
        inputs[0].append(Input(shape=(None, 1)))
        inputs[1].append(Embedding(m[0], min(m[0], m[1]))(inputs[0][-1]))
        inputs[2].append(Reshape((-1, min(m[0], m[1])))(inputs[1][-1]))
        k += min(m[0], m[1])

    cont_input = Input(shape=(None, len(con_cols)), name='cont_input')
    inputs[2].append(cont_input)
    # input concatenation
    concat1 = Concatenate(name='profile_concat')(inputs[2])

    # LSTM layers
    lstm_input = Input(shape=(None, len(lstm_list)))
    k += len(lstm_list)
    lstm = LSTM(cells,
                return_sequences=True,
                input_shape=(None, k),
                stateful=False,
                dropout=0.1,
                recurrent_regularizer=L1L2(l1=0.0))(lstm_input)
    lstm1 = LSTM(12,
                 return_sequences=True,
                 input_shape=(None, k),
                 stateful=False,
                 dropout=0.1,
                 recurrent_regularizer=L1L2(l1=0.0))(lstm)
    lstm2 = Concatenate(axis=-1)(inputs[2] + [lstm1])
    print(k)
    # Dense layers
    dns1 = Dense(128, activation='relu')(lstm2)
    con3 = Dropout(0.1)(dns1)
    dns2 = Dense(64, activation='relu')(con3)
    dp1 = Dropout(0.2)(dns2)
    dns3 = Dense(1, activation='sigmoid')(dp1)

    # Model
    model = Model(inputs[0] + [cont_input, lstm_input], dns3)
    model.summary(line_length=90)

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  sample_weight_mode='temporal',
                  metrics=['binary_crossentropy'])
    return model
Example #17
    def _build_model(self, input_dim, output_dim):
        model = Sequential()
        model.add(
            Dense(500,
                  activation='relu',
                  kernel_regularizer=L1L2(l1=0.0, l2=0.002),
                  input_dim=input_dim))
        model.add(
            Dense(output_dim,
                  activation='softmax',
                  kernel_regularizer=L1L2(l1=0.0, l2=0.002)))
        model.compile(optimizer='sgd',
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        return model
Example #18
def LR(inp_dim):
    print("Model LR")
    model = Sequential()
    model.add(
        Dense(1,
              activation='sigmoid',
              input_dim=inp_dim,
              kernel_regularizer=L1L2(l1=0.0, l2=0.00000001),
              bias_regularizer=L1L2(l2=0.00000001)))
    model.compile(optimizer="adam",
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    #print(model.summary())
    return model
Example #19
def model_discriminator():
    nch = 256
    h = 5
    reg = lambda: L1L2(l1=1e-7, l2=1e-7)

    c1 = Conv2D(int(nch / 4), (h, h), padding='same', kernel_regularizer=reg(),
                input_shape=(32, 32, 3))
    c2 = Conv2D(int(nch / 2), (h, h), padding='same', kernel_regularizer=reg())
    c3 = Conv2D(nch, (h, h), padding='same', kernel_regularizer=reg())
    c4 = Conv2D(1, (h, h), padding='same', kernel_regularizer=reg())

    model = Sequential()
    model.add(c1)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(LeakyReLU(0.2))
    model.add(c2)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(LeakyReLU(0.2))
    model.add(c3)
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(LeakyReLU(0.2))
    model.add(c4)
    model.add(AveragePooling2D(pool_size=(4, 4), padding='valid'))
    model.add(Flatten())
    model.add(Activation('sigmoid'))
    return model
Example #20
	def __init__(self):
		'''
		self.data_dir="data/images_data/the-simpsons-characters-dataset/simpsons_dataset"
		self.model_dir = "model/image_synthesis_GAN"
		self.result_dir = "results/gan_images"
		self.loss_fn = "binary_crossentropy"
		self.generator_layers=[256,256,512]
		self.discriminator_layers=[512,256]
		self.latent_dim = 100
		self.img_rows = 32
		self.img_cols = 32
		self.n_channels = 3
		self.use_channel_in_img = True
		self.learning_rate=0.01
		self.batch_size=200
		self.batch_num = 10
		self.generate_images_flag = False
		self.load_previous_model = False

		self.use_cifar_flag=True
		self.plot_rows=5
		self.plot_cols=5
		'''
		self.read_configuration("config/image_synthesis_GAN.prop")
		self.reg = lambda: L1L2(1e-5, 1e-5)
		self.img_shape = (self.img_rows,self.img_cols,self.n_channels)
		self.run_image_synthesis()
Example #21
    def setup(self, input_dim: tuple, **kwargs) -> None:
        """
            Method used to setup the Keras machine learning model

        :param input_dim:        Dimensions of the input vectors
        :param kwargs:           Other potential arguments. Not applicable here.
        :return:                 -
        """

        # Defining layers
        batchNormLayer = BatchNormalization(input_shape=input_dim)

        denseLayer = Dense(2,
                           activation='softmax',
                           kernel_regularizer=L1L2(l1=.0, l2=.1))

        # Adding layers
        self.model.add(batchNormLayer)
        self.model.add(denseLayer)

        # Compiling model

        self.model.compile(loss='categorical_crossentropy',
                           optimizer=SGD(lr=0.001),
                           metrics=['accuracy'])

        self.built = True
Example #22
def classify_participant_independent(csv, trainingid, validationid, testingid, normalizer_type, normalizer1, normalizer2, totalDays):
    X_train = []
    Y_train = []

    X_val = []
    Y_val = []

    X_test = []
    Y_test = []

    for i in range(totalDays, len(csv)):
        days = data_collector.collectDayData(csv, i, totalDays)

        userid = days[0][1]

        if data_collector.isSameUserAcross(days):
            x = transform_to_train_vec.transform(
                days,
                False,
                "LSTM",
                totalDays,
                normalizer_type,
                normalizer1,
                normalizer2
            )

            # put the x vector into the appropriate set (i.e. training, validation, testing)
            if days[0][EMA_INDEX] != '':

                X_train, Y_train, X_val, Y_val, X_test, Y_test = assign_data.independent_assign(
                    True,
                    days[0][1],
                    trainingid,
                    validationid,
                    testingid,
                    X_train,
                    Y_train,
                    X_val,
                    Y_val,
                    X_test,
                    Y_test,
                    x,
                    days[0][EMA_INDEX]
                )

    model = Sequential()
    model.add(LSTM(64, return_sequences=True, input_shape=(totalDays, len(X_train[0][0]))))
    model.add(LSTM(64, return_sequences=True, recurrent_dropout=0.2))
    model.add(LSTM(64))

    model.add(Dense(4, activation='softmax', kernel_regularizer=L1L2(l1=0.0, l2=0.0)))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['categorical_accuracy'])

    if len(X_val) == 0:
        model.fit(X_train, Y_train, epochs=5)
    else:
        model.fit(X_train, Y_train, epochs=5,  validation_data=(X_val, Y_val))

    y_pred = model.predict(X_test)
    return prediction_utilities.convert_preds_into_ema(y_pred), Y_test
Example #23
    def build_mlp(input_layer, num_units=16, activation="selu", n_layers=1, p_dropout=0.0,
                  l2_weight=0.0, with_bn=False, with_bias=True, **kwargs):
        last_layer = input_layer
        for i in range(n_layers):
            last_layer = Dense(num_units,
                               kernel_regularizer=L1L2(l2=l2_weight),
                               bias_regularizer=L1L2(l2=l2_weight),
                               use_bias=with_bias)(last_layer)
            if with_bn:
                last_layer = BatchNormalization(beta_regularizer=L1L2(l2=l2_weight),
                                                gamma_regularizer=L1L2(l2=l2_weight))(last_layer)
            last_layer = Activation(activation)(last_layer)

            if p_dropout != 0.0:
                last_layer = Dropout(p_dropout)(last_layer)
        return last_layer
Example #24
    def get_model(self, batch_size):

        dengue_input = Input(batch_shape=(batch_size, self.history_length, 1))
        dropout_input = Dropout(self.dropout_input)(dengue_input)

        emb_input = Input(batch_shape=(batch_size, self.history_length))
        emb = Embedding(52, self.neurons_emb,
                        input_length=self.history_length)(emb_input)
        dropout_emb = Dropout(self.dropout)(emb)

        merged = keras.layers.concatenate([dropout_input, dropout_emb])

        lstm = LSTM(self.neurons,
                    stateful=True,
                    kernel_regularizer=L1L2(l1=self.regularization_l1,
                                            l2=self.regularization_l2))(merged)

        dropout_lstm = Dropout(self.dropout)(lstm)

        output_dense = Dense(1, activation='sigmoid',
                             use_bias=True)(dropout_lstm)

        model_temp = Model(inputs=[dengue_input, emb_input],
                           outputs=output_dense)

        op = None
        if self.optimizer == OpUtil.RMSPROP.value:
            op = keras.optimizers.RMSprop(lr=self.learning_rate,
                                          rho=0.9,
                                          epsilon=1e-6,
                                          decay=0.0)
        model_temp.compile(loss='mean_squared_error', optimizer=op)

        return model_temp
Example #25
def model_discriminator():
    nch = 256
    h = 5
    reg = lambda: L1L2(l1=1e-7, l2=1e-7)

    c1 = Conv2D(int(nch / 4), (h, h),
                padding="same",
                kernel_regularizer=reg(),
                input_shape=dim_ordering_shape((3, 32, 32)))
    c2 = Conv2D(int(nch / 2), (h, h), padding="same", kernel_regularizer=reg())
    c3 = Conv2D(nch, (h, h), padding="same", kernel_regularizer=reg())
    c4 = Conv2D(1, (h, h), padding="same", kernel_regularizer=reg())

    def m(dropout):
        model = Sequential()
        model.add(c1)
        model.add(SpatialDropout2D(dropout))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(LeakyReLU(0.2))
        model.add(c2)
        model.add(SpatialDropout2D(dropout))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(LeakyReLU(0.2))
        model.add(c3)
        model.add(SpatialDropout2D(dropout))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        model.add(LeakyReLU(0.2))
        model.add(c4)
        model.add(AveragePooling2D(pool_size=(4, 4), padding="valid"))
        model.add(Flatten())
        model.add(Activation("sigmoid"))
        return model

    return m
Example #26
def model_generator():
    model = Sequential()
    nch = 256
    reg = lambda: L1L2(l1=1e-7, l2=1e-7)
    h = 5
    model.add(Dense(nch * 4 * 4, input_dim=100, kernel_regularizer=reg()))
    model.add(BatchNormalization())
    model.add(Reshape(dim_ordering_shape((nch, 4, 4))))
    model.add(
        Conv2D(int(nch / 2), (h, h), padding="same", kernel_regularizer=reg()))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(
        Conv2D(int(nch / 2), (h, h), padding="same", kernel_regularizer=reg()))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(
        Conv2D(int(nch / 4), (h, h), padding="same", kernel_regularizer=reg()))
    model.add(BatchNormalization())
    model.add(LeakyReLU(0.2))
    model.add(UpSampling2D(size=(2, 2)))
    model.add(Conv2D(3, (h, h), padding="same", kernel_regularizer=reg()))
    model.add(Activation("sigmoid"))
    return model
Example #27
def create_model(train_X, train_y, val_X, val_y):
    def root_mean_squared_error(y_true, y_pred):
        return K.sqrt(K.mean(K.square(y_pred - y_true)))

    # design network
    model = Sequential()
    model.add(
        LSTM(units=128,
             return_sequences=True,
             input_shape=(train_X.shape[1], train_X.shape[2]),
             bias_regularizer=L1L2(l1=0.1, l2=0.05)))
    model.add(Dropout({{uniform(0, 1)}}))

    model.add(LSTM(units=64))
    model.add(Dropout({{uniform(0, 1)}}))

    # model.add(Dense(16,init='uniform',activation='relu'))
    model.add(Dense({{choice([64, 32, 16, 8])}}))

    model.add(Dense(units=1))
    model.compile(optimizer={{choice(['adam'])}}, loss=root_mean_squared_error)
    # fit network
    history = model.fit(train_X,
                        train_y,
                        epochs=500,
                        batch_size={{choice([25])}},
                        verbose=2,
                        shuffle=False,
                        validation_split=0.1)
    # get the lowest validation loss across the training epochs
    validation_acc = np.amin(history.history['val_loss'])
    print('Best validation loss of epoch:', validation_acc)
    return {'loss': validation_acc, 'status': STATUS_OK, 'model': model}
Example #28
    def network_initializer(self, max_timesteps=None):
        ### build encoder
        enc_input = Input(shape=(self.max_length, ),
                          dtype='int32',
                          name='input')
        enc_embedding = Embedding(self.vocab_size,
                                  self.embedding_size,
                                  name='embedding')(enc_input)

        # stacking encoding LSTMs
        convos = []
        for i, hs in enumerate(self.kernel_sizes):
            if hs > 0:
                convo_layer = Conv1D(filters=hs,
                                     kernel_size=i + 1,
                                     padding='same')(enc_embedding)
                if self.L_pooling > 1:
                    convo_layer = MaxPooling1D(
                        pool_size=self.L_pooling)(convo_layer)
                convo_layer = Flatten()(convo_layer)
                convos.append(convo_layer)
        enc_output = Concatenate()(convos) if len(convos) > 1 else convos[0]
        enc_output = Lambda(lambda x: K.l2_normalize(x, axis=1),
                            name=BioactivityLSTM.ENCODING_NAME)(enc_output)

        ### output layer

        out_layer = Dense(1, kernel_regularizer=L1L2(l2=self.l2))(enc_output)
        self.model = Model(inputs=enc_input, outputs=out_layer)
        self.model.compile(optimizer=self.optimizer, loss='mse')
Example #29
    def network_initializer(self, max_timesteps=None):
        ### build encoder
        enc_input = Input(shape=(None, ), dtype='int32', name='input')
        enc_embedding = Embedding(self.vocab_size,
                                  self.embedding_size,
                                  mask_zero=True,
                                  name='embedding')(enc_input)

        # stacking encoding LSTMs
        hidden_states = []
        enc_layer = enc_embedding
        for i, layer_size in enumerate(self.layer_sizes):
            return_sequences = (i != len(self.layer_sizes) - 1)
            enc_layer, hidden_state, cell_state = LSTM(
                layer_size,
                return_sequences=return_sequences,
                return_state=True,
                name='lstm_%d' % (i + 1))(enc_layer)
            hidden_states += [hidden_state, cell_state]

        # concatenating LSTMs' states and normalizing their norms
        enc_output = Concatenate()(hidden_states)
        enc_output = Lambda(lambda x: K.l2_normalize(x, axis=1),
                            name=BioactivityLSTM.ENCODING_NAME)(enc_output)

        ### output layer
        out_layer = Dense(1, kernel_regularizer=L1L2(l2=self.l2))(enc_output)
        self.model = Model(inputs=enc_input, outputs=out_layer)
        self.model.compile(optimizer=self.optimizer, loss='mse')
Example #30
def dcgan_discriminator_max_pool(channel=1):
    nch = 256
    h = 5
    reg = lambda: L1L2(l1=1e-7, l2=1e-7)  #L1L2(l1=1e-7, l2=1e-7)

    c1 = Convolution2D(int(nch / 4), (h, h),
                       padding='same',
                       kernel_regularizer=reg(),
                       input_shape=(64, 64, channel))
    c2 = Convolution2D(int(nch / 2), (h, h),
                       padding='same',
                       kernel_regularizer=reg())
    c3 = Convolution2D(nch, (h, h), padding='same', kernel_regularizer=reg())
    c4 = Convolution2D(nch, (h, h), padding='same', kernel_regularizer=reg())

    model = Sequential()
    model.add(c1)
    model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_last'))
    model.add(LeakyReLU(0.2))
    model.add(c2)
    model.add(MaxPooling2D(pool_size=(2, 2), data_format='channels_last'))
    model.add(LeakyReLU(0.2))
    model.add(c3)
    model.add(MaxPooling2D(pool_size=(4, 4), data_format='channels_last'))
    model.add(LeakyReLU(0.2))
    model.add(c4)
    model.add(
        MaxPooling2D(pool_size=(4, 4),
                     data_format='channels_last'))  #, border_mode='valid')
    model.add(LeakyReLU(0.2))
    model.add(Flatten())
    #model.add(Dense(1, activation='linear'))
    model.add(Dense(1, activation='sigmoid'))
    return model
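Across all of these examples the recurring pattern is the same: an L1L2 object is constructed with the desired l1/l2 penalty weights and passed to a layer's kernel_regularizer, bias_regularizer, recurrent_regularizer, or activity_regularizer argument. A minimal, self-contained sketch of that pattern, assuming TensorFlow 2.x / tf.keras (the layer sizes and penalty values below are illustrative only):

from tensorflow.keras import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.regularizers import L1L2

model = Sequential([
    # L2-penalize the hidden layer's kernel weights and, optionally, its bias.
    Dense(64, activation='relu', input_shape=(20,),
          kernel_regularizer=L1L2(l1=0.0, l2=0.01),
          bias_regularizer=L1L2(l2=0.01)),
    Dense(1, activation='sigmoid'),
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
model.summary()

The penalty terms are collected by the layers and added to the training objective automatically when the model is compiled and fit; no extra code is required beyond passing the regularizer objects.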