Esempio n. 1
0
def CNN_model():
    """Build a VGG-style CNN for 10-class classification of single-channel
    INPUT_SIZE x INPUT_SIZE images.

    Relies on module-level ``INPUT_SIZE`` and ``OUT_PATH``. As a side effect,
    saves an architecture diagram to ``OUT_PATH + 'model.png'`` and prints
    the parameter count.

    Returns:
        The uncompiled keras ``Sequential`` model.
    """
    model = Sequential()

    # Pad the input so the conv/pool stack sees a slightly larger image.
    model.add(
        keras.layers.ZeroPadding2D(padding=(2, 2),
                                   input_shape=(INPUT_SIZE, INPUT_SIZE, 1)))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(256, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(512, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dense(4096, activation='relu'))
    # model.add(Dropout(0.5))  # disabled: would need a smaller batch_size
    model.add(Dense(4096, activation='relu'))
    # model.add(Dropout(0.5))
    model.add(Dense(10, activation='softmax'))

    plot_model(model, to_file=OUT_PATH + 'model.png', show_shapes=True)
    # BUG FIX: was a Python 2 print statement, which is a syntax error on
    # Python 3; print(...) emits the same output on both.
    print(model.count_params())
    return model
Esempio n. 2
0
    def fit(self, X_train, Y_train):
        """Fit a one-neuron logistic model and score it with AIC/BIC.

        Trains a single sigmoid unit on (X_train, Y_train) with early
        stopping, then computes a Gaussian log-likelihood of the in-sample
        residuals and derives AIC and BIC from it.

        Returns:
            Tuple ``(model, AIC, BIC)``.
        """
        model = Sequential()
        model.add(Dense(1, input_dim=len(X_train[0]), activation='sigmoid'))
        model.compile(optimizer='rmsprop',
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

        # Stop once validation loss has not improved for 5 epochs.
        stopper = EarlyStopping(monitor='val_loss',
                                patience=5,
                                verbose=1,
                                mode='auto')
        model.fit(X_train,
                  Y_train,
                  verbose=0,
                  batch_size=self.batch_size,
                  epochs=self.epochs,
                  callbacks=[stopper],
                  validation_split=self.validation_split,
                  shuffle=True)

        # In-sample predictions; column 0 is the lone sigmoid output.
        predictions = model.predict(X_train)
        acc_y = np.array([float(row[0]) for row in predictions])

        # Residual sum of squares and its ML variance estimate.
        SSR = sum([(Y_train[i] - acc_y[i])**2 for i in range(len(acc_y))])
        N = len(Y_train)
        s2 = SSR / float(N)
        # Gaussian log-likelihood of the residuals.
        L = (N * np.log(1.0 / np.sqrt(2 * np.pi * s2)) - (1.0 /
                                                          (2 * s2)) * SSR)
        AIC = 2 * (model.count_params()) - 2 * L
        BIC = (model.count_params()) * np.log(N) - 2 * L
        return model, AIC, BIC
Esempio n. 3
0
def PrepModel(count, embedding_matrix, l, lrate=0.001):
    """Build an LSTM + Attention binary classifier over pretrained
    100-d embeddings, compiled with rmsprop.

    Args:
        count: vocabulary size for the Embedding layer.
        embedding_matrix: pretrained (count, 100) weight matrix.
        l: input sequence length.
        lrate: NOTE(review): currently unused — the model compiles with
            'rmsprop'; the ``Adam(lr=lrate)`` call is commented out below.

    Returns:
        The compiled keras Sequential model.
    """
    model = Sequential()
    # Frozen pretrained embeddings.
    model.add(
        Embedding(count,
                  100,
                  weights=[embedding_matrix],
                  input_length=l,
                  trainable=False))

    model.add(
        LSTM(100,
             kernel_initializer='he_normal',
             activation='sigmoid',
             dropout=0.5,
             recurrent_dropout=0.5,
             unroll=False,
             return_sequences=True))

    model.add(Attention())
    model.add(Dense(1, activation='sigmoid'))
    # model.compile(optimizer=Adam(lr=lrate), loss='binary_crossentropy', metrics=["accuracy"])
    model.compile(optimizer="rmsprop",
                  loss='binary_crossentropy',
                  metrics=["acc", f1, precision, recall])

    print('No of parameter:', model.count_params())

    print(model.summary())
    print("Learning rate:", K.eval(model.optimizer.lr))
    return model
Esempio n. 4
0
def build_and_test_lstm(x_train, y_train, x_test, y_test, word_index,
                        max_seq_length, glove_dim, embed_matrix, state_dim,
                        num_dense, num_epochs, batch_size):
    """Train an Embedding + CuDNNLSTM 2-class classifier and summarize it.

    Returns a flat list:
    ``[glove_dim, state_dim, num_dense, param_count, final_train_loss,
    final_train_acc, test_loss, test_acc, elapsed_seconds]``
    """
    model = Sequential()
    # Frozen GloVe embeddings; index 0 is reserved for padding.
    model.add(
        Embedding(len(word_index) + 1,
                  glove_dim,
                  embeddings_initializer=Constant(embed_matrix),
                  input_length=max_seq_length,
                  mask_zero=True,
                  trainable=False))
    model.add(CuDNNLSTM(state_dim, activation='tanh'))
    model.add(Dense(num_dense, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc'])

    started = time.time()
    history = model.fit(x_train,
                        y_train,
                        epochs=num_epochs,
                        batch_size=batch_size)
    test_scores = model.evaluate(x_test, y_test)
    # NOTE(review): the timer stops after evaluate(), so the reported time
    # covers evaluation as well as fitting.
    elapsed = time.time() - started
    return [
        glove_dim, state_dim, num_dense,
        model.count_params(), history.history['loss'][-1],
        history.history['acc'][-1], test_scores[0], test_scores[1], elapsed
    ]
def PrepModel(count, embedding_matrix, l, lrate=0.01):
    """Build an LSTM + Attention binary classifier over pretrained
    300-d embeddings, optimized with Adam at learning rate ``lrate``.

    Args:
        count: vocabulary size for the Embedding layer.
        embedding_matrix: pretrained (count, 300) weight matrix.
        l: input sequence length.
        lrate: Adam learning rate.

    Returns:
        The compiled keras Sequential model.
    """
    model = Sequential()
    # Frozen pretrained embeddings.
    model.add(
        Embedding(count,
                  300,
                  weights=[embedding_matrix],
                  input_length=l,
                  trainable=False))

    model.add(
        LSTM(100,
             kernel_initializer='he_normal',
             activation='sigmoid',
             dropout=0.5,
             recurrent_dropout=0.5,
             unroll=False,
             return_sequences=True))

    model.add(Attention())
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer=Adam(lr=lrate),
                  loss='binary_crossentropy',
                  metrics=[f1])

    print('No of parameter:', model.count_params())

    print(model.summary())
    print(K.eval(model.optimizer.lr))
    return model
Esempio n. 6
0
def build_single_layer_model(data_dim, num_hidden_units):
    """Compile a small fully-connected binary classifier.

    Two relu hidden layers with dropout, a sigmoid output, and
    binary-crossentropy loss. (An earlier regression head is kept below
    as commented-out code.)

    Args:
        data_dim: number of input features.
        num_hidden_units: width of each hidden layer.

    Returns:
        The compiled keras Sequential model.
    """
    model = Sequential()

    model.add(
        Dense(num_hidden_units,
              input_dim=data_dim,
              kernel_initializer='normal',
              activation='relu'))
    model.add(Dropout(0.2))
    model.add(
        Dense(num_hidden_units, kernel_initializer='normal',
              activation='relu'))
    model.add(Dropout(0.2))

    # Previous regression variant, kept for reference:
    # model.add(Dense(1, kernel_initializer='normal'))
    # model.compile(loss='mean_squared_error', optimizer='adam')

    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss='binary_crossentropy', optimizer='adam')
    print("Model parameters: {}".format(model.count_params()))
    return model
Esempio n. 7
0
def VGG_like_convnet(data_shape, opt):
    """Build and compile a small VGG-style convnet (Keras 1 API).

    Args:
        data_shape: (height, width, channels) tuple of the input images.
        opt: optimizer instance or name, passed straight to compile().

    Returns:
        The compiled keras Sequential model with a 2-way softmax head.
    """
    print('Training VGG net.')
    model = Sequential()

    # Stage 1: two 3x3 convs of 32 filters, then 2x2 downsample.
    model.add(Convolution2D(32, 3, 3, border_mode='valid',
                            input_shape=(data_shape[0], data_shape[1],
                                         data_shape[2])))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Stage 2: two 3x3 convs of 64 filters, then 2x2 downsample.
    model.add(Convolution2D(64, 3, 3, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Dense classifier head; Keras infers the flattened shape.
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    print('VGG_like_convnet... nb params: {}'.format(model.count_params()))
    model.compile(loss='categorical_crossentropy', optimizer=opt)
    return model
Esempio n. 8
0
    def _build_network(self,
                       vocab_size,
                       maxlen,
                       embedding_dimension=256,
                       hidden_units=256,
                       trainable=False):
        """Assemble and compile the Embedding -> Conv1D x2 -> LSTM x2 ->
        Dense softmax classifier.

        Args:
            vocab_size: embedding vocabulary size.
            maxlen: input sequence length.
            embedding_dimension: embedding output width.
            hidden_units: channel/unit width used throughout the stack.
            trainable: unused here; kept for signature compatibility.

        Returns:
            The compiled 2-class keras Sequential model.
        """
        print('Build model...')
        model = Sequential()

        model.add(
            Embedding(vocab_size,
                      embedding_dimension,
                      input_length=maxlen,
                      embeddings_initializer='glorot_normal'))

        # Two valid-padded sigmoid convolutions. NOTE(review): input_shape
        # on a non-first layer appears to have no effect in Keras; the
        # arguments are reproduced verbatim for fidelity.
        for width in (maxlen, maxlen - 2):
            model.add(
                Convolution1D(hidden_units,
                              3,
                              kernel_initializer='he_normal',
                              padding='valid',
                              activation='sigmoid',
                              input_shape=(1, width)))

        # Stacked LSTMs; only the first returns the full sequence.
        for returns_sequence in (True, False):
            model.add(
                LSTM(hidden_units,
                     kernel_initializer='he_normal',
                     activation='sigmoid',
                     dropout=0.5,
                     return_sequences=returns_sequence))

        model.add(
            Dense(hidden_units,
                  kernel_initializer='he_normal',
                  activation='sigmoid'))
        model.add(Dense(2))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=0.0001),
                      metrics=['accuracy'])
        print('No of parameter:', model.count_params())

        print(model.summary())
        return model
Esempio n. 9
0
 def __init__(self,
              n_pop=10,
              n_neurons=5,
              F=0.2,
              Cr=0.9,
              p=1,
              change_scheme=True,
              scheme='rand',
              bounds=None,
              max_sp_evals=int(1e5)):
     """Configure the differential-evolution optimizer and build the fixed
     100-input -> n_neurons -> 1 tanh network whose parameter count
     defines the search dimensionality (self.n_dim).

     Args:
         n_pop: population size.
         n_neurons: hidden-layer width of the generic model.
         F: mutation factor, broadcast to one value per individual.
         Cr: crossover rate, broadcast to one value per individual.
         p: stored verbatim (project-specific knob).
         change_scheme: NOTE(review): stored in the (typo'd) attribute
             ``change_schame`` but unconditionally overwritten with False
             at the end of __init__, so this argument currently has no
             effect. Behavior preserved; attribute name kept for callers.
         scheme: DE scheme name, e.g. 'rand'.
         bounds: [low, high] search bounds; defaults to [-1, 1].
             BUG FIX: was a shared mutable default list; now built fresh
             per instance via a None sentinel (same value, no aliasing).
         max_sp_evals: evaluation budget.
             BUG FIX: default was ``np.int(1e5)``; ``np.int`` was removed
             in NumPy 1.24 and was a plain-int alias anyway.
     """
     #self.n_gens=n_gens
     self.n_pop = n_pop
     self.n_neurons = n_neurons
     self.F = F * np.ones(self.n_pop)
     self.Cr = Cr * np.ones(self.n_pop)
     self.bounds = [-1, 1] if bounds is None else bounds
     self.p = p
     self.scheme = scheme
     self.change_schame = change_scheme
     self.max_sp_evals = max_sp_evals
     self.sp_evals = 0
     self.interactions = 0
     # Build generic model
     model = Sequential()
     model.add(Dense(self.n_neurons, input_dim=100, activation='tanh'))
     model.add(Dense(1, activation='tanh'))
     model.compile(loss='mean_squared_error',
                   optimizer='rmsprop',
                   metrics=['accuracy'])
     self.model = model
     self.change_schame = False
     self.n_dim = model.count_params()
def model_v9(input_shape, nb_actions):
    """Build an uncompiled conv + dense network with ``nb_actions`` linear
    outputs, permuting the input to match the backend's dim ordering.

    Raises:
        RuntimeError: if the backend reports an unknown dim ordering.

    Returns:
        The uncompiled keras Sequential model.
    """
    model = Sequential()
    ordering = common.image_dim_ordering()
    if ordering == 'tf':
        # (width, height, channels)
        model.add(Permute((2, 3, 1), input_shape=input_shape))
    elif ordering == 'th':
        # (channels, width, height) — identity permutation.
        model.add(Permute((1, 2, 3), input_shape=input_shape))
    else:
        raise RuntimeError('Unknown image_dim_ordering.')

    model.add(Convolution2D(15, (4, 4), strides=(1, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(30, (3, 3), strides=(1, 1)))
    model.add(Activation('relu'))
    model.add(Convolution2D(60, (3, 3), strides=(1, 1)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Flatten())

    # Dense head: 600 -> 300 -> 200 relu, then linear outputs.
    for width in (600, 300, 200):
        model.add(Dense(width))
        model.add(Activation('relu'))
    model.add(Dense(nb_actions))
    model.add(Activation('linear'))

    print(model.summary())
    print(model.count_params())

    return model
Esempio n. 11
0
def build_keras_model():
    """Build a simple Keras model and return the number of parameters.
    >>> build_keras_model()
    528
    """
    # 32 inputs * 16 units + 16 biases = 528 trainable parameters.
    layer = Dense(16, input_dim=32)
    model = Sequential()
    model.add(layer)
    return model.count_params()
Esempio n. 12
0
    def createNetwork(self):
        """Translate self.conv_layers / self.dense_layers descriptors into
        a compiled keras Sequential model.

        Conv part: ConvLayer entries become same-padded Conv2D + Activation;
        MaxPoolLayer entries become MaxPooling2D, silently skipped when the
        current feature map is smaller than the pool size. Dense part:
        Dense + Activation (+ Dropout when l.dropout > 0), then a
        self.noutputs head with softmax for classification tasks.

        Side effects: stores the parameter count in self.nparams.

        Raises:
            TypeError: on an unrecognized conv-layer descriptor type.

        Returns:
            The compiled model.
        """
        model = Sequential()

        firstlayer = True

        # convolutional part
        for l in self.conv_layers:
            if type(l) is ConvLayer:
                if firstlayer:
                    # Only the very first layer carries the input shape.
                    model.add(
                        Conv2D(
                            l.filters,
                            (l.kernel_size, l.kernel_size),
                            padding='same',  # let not the shape vanish
                            input_shape=self.input_shape))
                    firstlayer = False
                else:
                    model.add(
                        Conv2D(l.filters, (l.kernel_size, l.kernel_size),
                               padding='same'))
                model.add(Activation(l.activation))

            elif type(l) is MaxPoolLayer:
                if firstlayer:
                    model.add(
                        MaxPooling2D(pool_size=(l.pool_size, l.pool_size),
                                     input_shape=self.input_shape))
                    firstlayer = False
                else:
                    # check if pooling is possible: both spatial dims of the
                    # previous layer's output must be >= pool_size.
                    if model.layers[-1].output_shape[
                            1] >= l.pool_size and model.layers[
                                -1].output_shape[2] >= l.pool_size:
                        model.add(
                            MaxPooling2D(pool_size=(l.pool_size, l.pool_size)))

            else:
                raise TypeError("unknown type of layer")

        # dense part
        model.add(Flatten())
        for l in self.dense_layers:
            model.add(Dense(l.size))
            model.add(Activation(l.activation))
            if l.dropout > 0:
                model.add(Dropout(l.dropout))

        # final part
        model.add(Dense(self.noutputs))
        if Config.task_type == "classification":
            model.add(Activation('softmax'))

        model.compile(loss=Config.loss, optimizer=RMSprop())

        self.nparams = model.count_params()

        return model
Esempio n. 13
0
def main(args):
    """Train a fully-connected classifier of args.depth hidden layers x
    args.width units on 28x28x1 image data (Keras 1 API), with optional
    CSV/stats logging, early stopping, and model saving.

    Relies on module-level load_data, SAVEDIR and StatsCallback.
    """
    x_train, x_test, y_train, y_test = load_data()
    print('BUILDING MODEL ---')
    width, depth = args.width, args.depth
    model_name = 'dnn-depth_{}-width_{}'.format(depth, width)
    model = Sequential()
    # Flatten 28x28x1 images into 784-d vectors.
    model.add(Reshape((784, ), input_shape=(28, 28, 1)))
    model.add(Dense(output_dim=width, init='he_normal', bias=True))
    model.add(Activation("relu"))
    # Remaining depth-1 hidden layers.
    for _ in range(depth - 1):
        model.add(Dense(output_dim=width, init='he_normal', bias=True))
        model.add(Activation("relu"))
    model.add(Dense(output_dim=10, init='he_normal', bias=True))
    model.add(Activation("softmax"))

    # Chose Loss and Compile
    model.compile(loss='categorical_crossentropy',
                  optimizer=args.optimizer,
                  metrics=['accuracy'])

    print('Num Params: %d' % model.count_params())

    # Callbacks
    csv_logger = keras.callbacks.CSVLogger(SAVEDIR + model_name + '.log')
    stats = StatsCallback(model_name, savedir=SAVEDIR + 'stats/')
    early_stop = keras.callbacks.EarlyStopping(monitor='val_loss',
                                               patience=6,
                                               verbose=args.verbose,
                                               mode='auto')
    # NOTE(review): history, remote and tensorboard are created but never
    # appended to callback_list below, so they are currently inert.
    history = keras.callbacks.History()
    remote = keras.callbacks.RemoteMonitor(root='http://localhost:9000/')
    tensorboard = keras.callbacks.TensorBoard(log_dir=SAVEDIR + 'tf-logs/' +
                                              model_name,
                                              histogram_freq=0,
                                              write_graph=True,
                                              write_images=True)

    callback_list = []
    if args.save:
        # Create the output tree only on first use.
        if not os.path.exists(SAVEDIR):
            os.makedirs(SAVEDIR)
            os.makedirs(SAVEDIR + 'models')
            os.makedirs(SAVEDIR + 'stats')
        callback_list.append(csv_logger)
        callback_list.append(stats)
    if args.early_stop:
        callback_list.append(early_stop)

    # Train Model
    print('STARTING TRAINING ---')
    model.fit(x_train, y_train, nb_epoch=args.epochs, batch_size=args.batch_size, \
            validation_data=(x_test, y_test), callbacks=callback_list, verbose=args.verbose)

    if args.save:
        print('Saving results...')
        model.save(SAVEDIR + 'models/' + model_name + '.h5')

    print('Complete.')
Esempio n. 14
0
def test_sequential_count_params():
    """count_params() must match the hand-computed total, both before and
    after compile()."""
    input_dim = 20
    nb_units = 10
    nb_classes = 2

    # Weights + biases for each of the three Dense layers.
    expected = (input_dim * nb_units + nb_units
                + nb_units * nb_units + nb_units
                + nb_units * nb_classes + nb_classes)

    model = Sequential()
    model.add(Dense(nb_units, input_shape=(input_dim,)))
    model.add(Dense(nb_units))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    assert expected == model.count_params()

    model.compile('sgd', 'binary_crossentropy')
    assert expected == model.count_params()
Esempio n. 15
0
def test_sequential_count_params():
    """count_params() equals the analytic weight+bias total and is
    unchanged by compiling."""
    input_dim, nb_units, nb_classes = 20, 10, 2

    # Each layer contributes fan_in * fan_out weights plus fan_out biases.
    n = 0
    for fan_in, fan_out in ((input_dim, nb_units),
                            (nb_units, nb_units),
                            (nb_units, nb_classes)):
        n += fan_in * fan_out + fan_out

    model = Sequential()
    model.add(Dense(nb_units, input_shape=(input_dim,)))
    model.add(Dense(nb_units))
    model.add(Dense(nb_classes))
    model.add(Activation('softmax'))
    assert n == model.count_params()

    model.compile('sgd', 'binary_crossentropy')
    assert n == model.count_params()
 def compute(self, config, budget, *args, **kwargs):
     """Train a two-block CNN for int(budget) epochs under the given
     hyperparameter configuration and report validation accuracy.

     Args:
         config: dict with keys 'num_filters_1', 'num_filters_2',
             'dropout', 'num_dense_layers', 'num_dense_nodes',
             'dense_activation', 'learning_rate', 'batch_size'.
         budget: number of training epochs (HpBandSter budget).

     Returns:
         dict with 'loss' (negated val accuracy, since HpBandSter always
         minimizes) and 'info' (the config plus the parameter count).
     """
     with tf.Session(graph=tf.Graph()) as sess:
         K.set_session(sess)

         model = Sequential()
         # Block 1: two convs at num_filters_1, then pool and dropout.
         model.add(Conv2D(filters=config['num_filters_1'],
                          kernel_size=(3, 3),
                          padding='same', activation='relu',
                          input_shape=self.input_shape,
                          kernel_initializer='he_normal'))
         model.add(Conv2D(filters=config['num_filters_1'],
                          kernel_size=(3, 3),
                          activation='relu',
                          kernel_initializer='he_normal'))
         model.add(MaxPooling2D(pool_size=(2, 2)))
         model.add(Dropout(config['dropout']))

         # Block 2: two convs at num_filters_2, then pool and dropout.
         model.add(Conv2D(filters=config['num_filters_2'],
                          kernel_size=(3, 3),
                          activation='relu', padding='same',
                          kernel_initializer='he_normal'))
         model.add(Conv2D(filters=config['num_filters_2'],
                          kernel_size=(3, 3),
                          activation='relu',
                          kernel_initializer='he_normal'))
         model.add(MaxPooling2D(pool_size=(2, 2)))
         model.add(Dropout(config['dropout']))

         # Dense head: configurable depth/width, dropout, softmax output.
         model.add(Flatten())
         for _ in range(config['num_dense_layers']):
             model.add(Dense(config['num_dense_nodes'],
                             activation=config['dense_activation']))
         model.add(Dropout(config['dropout']))
         model.add(Dense(self.num_classes, activation='softmax'))

         model.compile(optimizer=Adam(lr=config['learning_rate']),
                       loss='categorical_crossentropy',
                       metrics=['accuracy'])

         history = model.fit(x=self.x_train,
                             y=self.y_train,
                             epochs=int(budget),
                             batch_size=config['batch_size'],
                             validation_data=(self.x_test, self.y_test),
                             verbose=0)

         accuracy = history.history['val_acc'][-1]

         self.total_epochs = self.total_epochs + int(budget)

         time.sleep(self.sleep_interval)

         return {
             'loss': -accuracy,  # remember: HpBandSter always minimizes!
             'info': {
                 'configuration': config,
                 'number of parameters': model.count_params(),
             },
         }
Esempio n. 17
0
    def run(audio_name):
        """Classify MFCC chunks of *audio_name* with the pretrained
        2-class CNN stored in ./model-mcnn.h5.

        Returns:
            The (num_chunks, 2) softmax prediction array, or None when the
            audio yields no MFCC chunks.
        """
        nb_classes = 2
        n_mfcc = 80
        sample_size = 32000
        sample_rate = 16000
        mfcc_hop_size = 512
        # Number of MFCC frames per chunk (empirical 31.4 frames/s factor).
        mfcc_size = math.ceil(sample_size / float(sample_rate) * 31.4 *
                              sample_rate / 16000 * 512 / mfcc_hop_size)
        input_shape = (n_mfcc, mfcc_size, 1)
        # CLEANUP: removed unused locals nb_filters, pool_size and
        # kernel_size — the layers below hard-code their sizes.

        model = Sequential()
        model.add(
            Convolution2D(16,
                          3,
                          3,
                          border_mode='valid',
                          input_shape=input_shape))
        model.add(Activation('relu'))

        model.add(Convolution2D(16, 3, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Convolution2D(16, 3, 3))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        print(model.output_shape)
        model.add(Dropout(0.5))
        model.add(Flatten())
        print(model.output_shape)
        # sleep(100)
        model.add(Dense(128))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
        model.add(Dense(nb_classes))
        print(model.count_params())
        model.add(Activation('softmax'))
        # print(model.count_params())
        model.compile(loss='categorical_crossentropy',
                      optimizer='adadelta',
                      metrics=['accuracy'])
        # The architecture above must match this checkpoint exactly.
        model.load_weights('./model-mcnn.h5')
        mfcc_chunks = Predict.process_data(audio_name)

        mfcc_chunks = np.reshape(mfcc_chunks, [-1, n_mfcc, mfcc_size, 1])
        if mfcc_chunks.shape[0] == 0:
            return
        predict = model.predict(mfcc_chunks, batch_size=mfcc_chunks.shape[0])
        return predict
Esempio n. 18
0
def test_sequential_count_params():
    """The analytic parameter total matches count_params() before and
    after compilation (explicit build() variant)."""
    input_dim = 20
    nb_units = 10
    nb_classes = 2

    # Per-layer weights + biases.
    dense1 = input_dim * nb_units + nb_units
    dense2 = nb_units * nb_units + nb_units
    dense3 = nb_units * nb_classes + nb_classes
    n = dense1 + dense2 + dense3

    model = Sequential()
    model.add(Dense(nb_units, input_shape=(input_dim,)))
    model.add(Dense(nb_units))
    model.add(Dense(nb_classes))
    model.add(Activation("softmax"))
    model.build()

    assert n == model.count_params()

    model.compile("sgd", "binary_crossentropy")
    assert n == model.count_params()
Esempio n. 19
0
    def test_count_params(self):
        """count_params() matches the analytic total for the old-style
        Dense(input, output) API, before and after compile()."""
        print('test count params')
        nb_units = 100
        nb_classes = 2

        # Two square hidden layers plus the classifier head.
        n = (nb_units * nb_units + nb_units) * 2
        n += nb_units * nb_classes + nb_classes

        model = Sequential()
        model.add(Dense(nb_units, nb_units))
        model.add(Dense(nb_units, nb_units))
        model.add(Dense(nb_units, nb_classes))
        model.add(Activation('softmax'))

        self.assertEqual(n, model.count_params())

        model.compile('sgd', 'binary_crossentropy')

        self.assertEqual(n, model.count_params())
Esempio n. 20
0
    def test_count_params(self):
        """Compiling must not change the reported parameter count."""
        print('test count params')
        nb_units = 100
        nb_classes = 2

        # fan_in/fan_out pairs for the three Dense layers (old Keras API).
        layer_shapes = [(nb_units, nb_units),
                        (nb_units, nb_units),
                        (nb_units, nb_classes)]
        n = sum(fan_in * fan_out + fan_out
                for fan_in, fan_out in layer_shapes)

        model = Sequential()
        for fan_in, fan_out in layer_shapes:
            model.add(Dense(fan_in, fan_out))
        model.add(Activation('softmax'))

        self.assertEqual(n, model.count_params())

        model.compile('sgd', 'binary_crossentropy')

        self.assertEqual(n, model.count_params())
    def _build_emotion_network(self,
                               vocab_size,
                               maxlen,
                               emb_weights=None,
                               hidden_units=256,
                               trainable=False):
        """Build and compile the 6-class emotion CNN over a 128-d
        embedding reshaped into a (30, 128, 1) single-channel image.

        Args:
            vocab_size: embedding vocabulary size.
            maxlen: input sequence length (the Reshape below hard-codes
                30, so 30 is expected).
            emb_weights: unused here; kept for signature compatibility.
            hidden_units: width of the dense layer before the head.
            trainable: unused here; kept for signature compatibility.

        Returns:
            The compiled keras Sequential model.
        """
        print('Build model...')
        model = Sequential()

        model.add(
            Embedding(vocab_size,
                      128,
                      input_length=maxlen,
                      embeddings_initializer='glorot_normal'))

        # Treat the (maxlen, 128) embedding output as a 1-channel image.
        model.add(Reshape((30, 128, 1)))
        model.add(BatchNormalization(momentum=0.9))

        # CNN: two conv / pool / dropout stages (64 then 128 filters).
        for filters in (64, 128):
            model.add(
                Convolution2D(filters, (3, 5),
                              kernel_initializer='he_normal',
                              padding='valid',
                              activation='relu'))
            model.add(MaxPooling2D(2, 2))
            model.add(Dropout(0.5))

        model.add(Flatten())

        # DNN head.
        model.add(
            Dense(hidden_units,
                  kernel_initializer='he_normal',
                  activation='relu'))
        model.add(BatchNormalization(momentum=0.9))

        model.add(Dense(6))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=0.0001),
                      metrics=['accuracy'])
        print('No of parameter:', model.count_params())
        print(model.summary())
        return model
Esempio n. 22
0
def create_model(input_length):
    """Build and compile a small LSTM regressor (Keras 1 API):
    LSTM(8) -> Dense 8 tanh -> Dense 4 tanh -> Dense 1 linear, MSE loss.

    Args:
        input_length: number of timesteps per sample (one feature each).

    Returns:
        The compiled keras Sequential model.
    """
    model = Sequential()
    model.add(LSTM(8, input_length=input_length, input_dim=1))
    model.add(core.Dense(8))
    model.add(core.Activation('tanh'))
    model.add(core.Dense(4))
    model.add(core.Activation('tanh'))
    model.add(core.Dense(1))
    model.add(core.Activation('linear'))
    model.compile(optimizer='rmsprop', loss='mse', metrics=['mse', 'mae'])
    # BUG FIX: was a Python 2 print statement (syntax error on Python 3);
    # print(...) emits the identical line on both.
    print('num_params = ' + str(model.count_params()))
    return model
Esempio n. 23
0
def main():
    """Build and compile a stacked LSTM (2 layers, hidden size 32) mapping
    (timesteps=12, data_dim=4) sequences to a 4-d linear output.

    BUG FIX: the final print was a Python 2 print statement (a syntax
    error under Python 3); converted to print() with identical output.

    Returns:
        The compiled keras Sequential model.
    """
    # NOTE: an earlier commented-out experiment here loaded train/val CSVs,
    # MinMax-scaled them and fitted create_model(201); removed as dead
    # code — recover it from version control if needed.
    model = Sequential()

    data_dim = 4
    hidden_dim = 32
    output_dim = 4
    batch_size = 9   # currently unused: no fit() call in this function
    nb_epoch = 100   # currently unused: no fit() call in this function
    timesteps = 12

    # First LSTM returns the full sequence so the second can stack on it.
    model.add(
        LSTM(hidden_dim,
             return_sequences=True,
             input_shape=(timesteps, data_dim))
    )  # returns a sequence of vectors of dimension hidden_dim
    model.add(core.Dropout(0.2))

    model.add(
        LSTM(hidden_dim))  # return a single vector of dimension hidden_dim
    model.add(core.Dropout(0.2))

    model.add(core.Dense(output_dim=output_dim, input_dim=hidden_dim))
    model.add(core.Activation("linear"))
    model.compile(loss="mean_squared_error", optimizer="rmsprop")
    print('model.params = ' + str(model.count_params()))
    return model
Esempio n. 24
0
    def _build_network(self, vocab_size, maxlen, emb_weights=[], hidden_units=256, trainable=False):
        """Build and compile the masked Embedding -> CNN -> LSTM binary
        classifier.

        Args:
            vocab_size: embedding vocabulary size.
            maxlen: input sequence length.
            emb_weights: optional pretrained embedding matrix; when empty,
                a trainable-flagged 20-d he_normal embedding is used
                instead. NOTE(review): mutable default argument — harmless
                here since it is never mutated, but a None sentinel would
                be safer.
            hidden_units: base width; the conv layers use 1/8 and 1/4 of
                it, the LSTM uses 1/8.
            trainable: whether the embedding weights are trainable.

        Returns:
            The compiled 2-class keras Sequential model.
        """
        print('Build model...')

        model = Sequential()

        # Mask padding positions (value 0) for the downstream layers.
        model.add(Masking(mask_value=0, input_shape=(maxlen,)))

        if (len(emb_weights) == 0):
            model.add(Embedding(vocab_size, 20, input_length=maxlen, embeddings_initializer='he_normal',
                                trainable=trainable, mask_zero=True))
        else:
            model.add(Embedding(vocab_size, emb_weights.shape[1], input_length=maxlen, weights=[emb_weights],
                                trainable=trainable))

        # Add a channel axis so the sequence can be convolved as an image.
        model.add(Reshape((model.output_shape[1], model.output_shape[2], 1)))

        model.add(Convolution2D(int(hidden_units / 8), (5, 1), kernel_initializer='he_normal', padding='valid',
                                activation='relu'))
        model.add(MaxPooling2D((2, 1)))
        model.add(Dropout(0.5))

        model.add(Convolution2D(int(hidden_units / 4), (5, 1), kernel_initializer='he_normal', padding='valid',
                                activation='relu'))
        model.add(MaxPooling2D((2, 1)))
        model.add(Dropout(0.5))

        # Collapse back to (timesteps, features) for the LSTM.
        model.add(Reshape((model.output_shape[1], -1)))

        # model.add(Bidirectional(
        #     LSTM(hidden_units, kernel_initializer='he_normal', activation='sigmoid', dropout=0.5, recurrent_dropout=0.5,
        #          return_sequences=False), merge_mode='sum'))
        model.add(
            LSTM(int(hidden_units / 8), kernel_initializer='he_normal', activation='sigmoid', dropout=0.5,
                 recurrent_dropout=0.5,
                 return_sequences=False))

        # model.add(Dense(int(hidden_units / 2), kernel_initializer='he_normal', activation='sigmoid'))

        model.add(Dropout(0.5))

        model.add(Dense(2, activation='softmax'))

        adam = Adam(lr=0.001)
        model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
        print('No of parameter:', model.count_params())

        print(model.summary())
        return model
def createModel(args, opt):
    """Build and compile a VGG-style CNN for 32x32x3 inputs with 100 classes.

    Args:
        args: namespace with kernel_size, w8s (initializer name), gauss_noise,
            dropout_conv and dropout_dense.
        opt: optimizer passed straight to model.compile().

    Returns:
        The compiled Sequential model.
    """
    kernel = (args.kernel_size * 2) + 1
    initializers = {"Zeros": Zeros(), "Ones": Ones(), "Constant": Constant(value=0.2),
                    "RandNormal": RandomNormal(mean=0.0, stddev=0.05, seed=0)}
    init = initializers[args.w8s]  # one shared initializer instance, as before

    model = Sequential()
    # Input-side regularization: additive Gaussian noise on the raw pixels.
    model.add(GaussianNoise(args.gauss_noise, input_shape=(32, 32, 3)))

    # Three conv stages; each stage is two conv+ReLU layers followed by
    # dropout and 2x2 max pooling, with filter counts growing stage by stage.
    for stage_filters in ((8, 16), (32, 64), (128, 256)):
        for filters in stage_filters:
            model.add(Conv2D(filters=filters, kernel_size=(kernel, kernel), padding='same',
                             kernel_initializer=init, bias_initializer=init))
            model.add(Activation('relu'))
        model.add(Dropout(args.dropout_conv))
        model.add(MaxPooling2D(pool_size=2))

    # Fully connected classifier head.
    model.add(Flatten())
    model.add(Dense(512, kernel_initializer=init, bias_initializer=init))
    model.add(Activation('relu'))
    model.add(Dropout(args.dropout_dense))
    model.add(Dense(100, activation='softmax',
                    kernel_initializer=init, bias_initializer=init))

    print("Model has {} paramters".format(model.count_params()))
    model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])

    return model
Esempio n. 26
0
def make_model():
    """Assemble and compile the CIFAR-style CNN (Keras 1 API).

    Relies on module-level hyperparameters: maps_count_param, input_size,
    lambda_reg, learn_rate and decay_param.
    """
    def conv(maps, **extra):
        # 3x3 'same' convolution with he_normal init and L2 weight decay.
        return Convolution2D(maps, 3, 3, border_mode='same',
                             init='he_normal', W_regularizer=l2(lambda_reg), **extra)

    model = Sequential()

    # Stage 1: two convs on the (3, input_size, input_size) input, then pool.
    model.add(conv(maps_count_param, input_shape=(3, input_size, input_size)))
    model.add(Activation('relu'))
    model.add(conv(maps_count_param))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Stage 2: doubled feature maps.
    model.add(conv(maps_count_param * 2))
    model.add(Activation('relu'))
    model.add(conv(maps_count_param * 2))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Stage 3: quadrupled feature maps, single conv.
    model.add(conv(maps_count_param * 4))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    # Fully connected head with dropout after each hidden layer.
    model.add(Flatten())
    for width in (2048, 1024):
        model.add(Dense(width, W_regularizer=l2(lambda_reg)))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
    model.add(Dense(10, W_regularizer=l2(lambda_reg)))
    model.add(Activation('softmax'))

    optimizer = SGD(lr=learn_rate, decay=decay_param, momentum=0.9, nesterov=True)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=["accuracy"])

    print('model parameters:', model.count_params())
    print('model characteristics:', model.summary())
    print('----------------------------------------------------------------------------------------')

    return model
Esempio n. 27
0
def fully_convnet(data_shape, opt):
    """Build and compile a VGG-like CNN with a 2-way softmax head (Keras 1 API).

    Args:
        data_shape: sequence giving the three input tensor dimensions.
        opt: optimizer passed to model.compile().

    Returns:
        The compiled Sequential model (categorical_crossentropy, no metrics).
    """
    print('Training VGG net.')
    model = Sequential()

    input_dims = (data_shape[0], data_shape[1], data_shape[2])

    # Stage 1: two 32-filter 3x3 convs, pooled.
    model.add(Convolution2D(32, 3, 3, border_mode='valid', input_shape=input_dims))
    model.add(Activation('relu'))
    model.add(Convolution2D(32, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # Stage 2: two 64-filter convs, pooled.
    model.add(Convolution2D(64, 3, 3, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # Stage 3: two 128-filter convs, deliberately left unpooled.
    model.add(Convolution2D(128, 3, 3, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(Convolution2D(128, 3, 3))
    model.add(Activation('relu'))
    model.add(Dropout(0.25))

    # Dense head; Keras infers the flattened shape automatically.
    model.add(Flatten())
    model.add(Dense(64))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    print('Almost Fully Convnet... nb params: {}'.format(model.count_params()))
    model.compile(loss='categorical_crossentropy', optimizer=opt)
    return model
Esempio n. 28
0
def AlexNet_like_convnet(data_shape, opt):
    """Build and compile an AlexNet-inspired CNN with a 2-way softmax head.

    Args:
        data_shape: sequence giving the three input tensor dimensions.
        opt: optimizer instance or name passed to model.compile().

    Returns:
        A compiled Sequential model (categorical_crossentropy, no metrics).
    """
    print('Training AlexNet net.')
    model = Sequential()
    # Stage 1: 96 large 10x10 filters on the raw input, then pool + dropout.
    model.add(
        Convolution2D(96,
                      10,
                      10,
                      border_mode='valid',
                      input_shape=(data_shape[0], data_shape[1],
                                   data_shape[2])))
    model.add(Activation('relu'))
    #model.add(BatchNormalization(epsilon=1e-06, mode=0))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # Stage 2: 128 5x5 filters.
    model.add(Convolution2D(128, 5, 5, border_mode='valid'))
    model.add(Activation('relu'))
    #model.add(BatchNormalization(epsilon=1e-06, mode=0))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # Stage 3: 256 3x3 filters.
    model.add(Convolution2D(256, 3, 3, border_mode='valid'))
    model.add(Activation('relu'))
    #model.add(BatchNormalization(epsilon=1e-06, mode=0))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # Fully connected classifier head with heavy dropout.
    model.add(Flatten())
    model.add(Dense(768, init='normal'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    #model.add(BatchNormalization(epsilon=1e-06, mode=0))
    model.add(Dense(256, init='normal'))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    #model.add(BatchNormalization(epsilon=1e-06, mode=0))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    print('AlexNet_like_convnet... nb params: {}'.format(model.count_params()))
    model.compile(loss='categorical_crossentropy', optimizer=opt)
    return model
Esempio n. 29
0
class AI():
    """Board-game value-network agent: a tiny MLP scoring 9-cell states."""

    def __init__(self, weights_path=None):
        """Build the network and optionally restore weights from weights_path."""
        self.create_model()
        if weights_path:
            print('loading model weights from path: {}'.format(weights_path))
            self.model.load_weights(weights_path, by_name=False)
            print('loaded weights')

    def create_model(self, hdim=32):
        """Create and compile the value network: 9 inputs -> hdim tanh -> 1 linear."""
        self.model = Sequential([
            Dense(hdim, input_dim=9),
            Activation('tanh'),
            Dense(1),
            Activation('linear'),
        ])
        self.model.compile(loss=keras.losses.mean_squared_error,
                           optimizer=keras.optimizers.Adadelta(),
                           metrics=['accuracy'])
        print('model created with # params: {}'.format(self.model.count_params()))

    def fit(self, x_train, y_train, epochs=1):
        """Train on state/value pairs, checkpointing the best validation model."""
        saver = ModelCheckpoint(filepath='./t3_weights-{epoch:02d}-{val_acc:.2f}.hdf5',
                                verbose=1, save_best_only=True)
        self.model.fit(x_train, y_train, epochs=epochs,
                       validation_split=.1, callbacks=[saver])

    def best_move(self, state):
        """Return the index of the empty cell whose resulting state scores highest.

        Tries our mark (1) in every empty cell and asks the network for the
        value of each hypothetical position; returns -1 when no cell is free.
        """
        best_value = -100000
        best_action = -1
        for action in np.argwhere(state == 0).flatten():
            candidate = state.copy()
            candidate[action] = 1  # hypothetically play here
            score = self.model.predict(candidate.reshape(1, 9))[0, 0]
            if score > best_value:
                best_value = score
                best_action = action

        if best_action == -1:
            print('whoops, dunno the answer')
        return best_action
Esempio n. 30
0
    def createNetwork(self, input_layer=None):
        """Materialize this object's conv and dense layer specs as a Sequential model.

        Args:
            input_layer: optional pre-built input layer; when None, an
                InputLayer is created from config.global_config["input_shape"].

        Returns:
            The (uncompiled) Sequential model; also records the parameter
            count in self.nparams as a side effect.

        Raises:
            TypeError: if a conv-spec entry is neither ConvLayer nor MaxPoolLayer.
        """
        model = Sequential()

        # Fall back to a fresh InputLayer when the caller supplies none.
        model.add(input_layer
                  or InputLayer(config.global_config["input_shape"]))

        # convolutional part
        for l in self.conv_layers:
            if type(l) is ConvLayer:
                model.add(
                    Conv2D(l.filters, (l.kernel_size, l.kernel_size),
                           padding='same'))
                model.add(Activation(l.activation))
            elif type(l) is MaxPoolLayer:
                # check if pooling is possible
                # (silently skip the layer when the current feature map is
                # already smaller than the pool window in either dimension)
                if model.output_shape[1] >= l.pool_size and model.output_shape[
                        2] >= l.pool_size:
                    model.add(
                        MaxPooling2D(pool_size=(l.pool_size, l.pool_size)))
            else:
                raise TypeError("unknown type of layer")

        # dense part
        model.add(Flatten())
        for l in self.dense_layers:
            model.add(Dense(l.size))
            model.add(Activation(l.activation))
            if l.dropout > 0:
                model.add(Dropout(l.dropout))

        # final part
        # NOTE(review): any task_type other than the two below falls through
        # with a linear output -- presumably regression; confirm in config.
        model.add(Dense(self.noutputs))
        if config.global_config["main_alg"]["task_type"] == "classification":
            model.add(Activation('softmax'))
        elif config.global_config["main_alg"][
                "task_type"] == "binary_classification":
            model.add(Activation('sigmoid'))

        self.nparams = model.count_params()

        return model
Esempio n. 31
0
def AlexNet_like_convnet(data_shape, opt):
    """Build and compile an AlexNet-inspired CNN with a 2-way softmax output.

    Args:
        data_shape: sequence giving the three input tensor dimensions.
        opt: optimizer passed to model.compile().

    Returns:
        The compiled Sequential model (categorical_crossentropy, no metrics).
    """
    print('Training AlexNet net.')
    model = Sequential()

    # Conv stage 1: 96 large 10x10 filters on the raw input.
    model.add(Convolution2D(96, 10, 10, border_mode='valid',
                            input_shape=(data_shape[0], data_shape[1], data_shape[2])))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # Conv stage 2: 128 filters, 5x5 kernels.
    model.add(Convolution2D(128, 5, 5, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # Conv stage 3: 256 filters, 3x3 kernels.
    model.add(Convolution2D(256, 3, 3, border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    # Fully connected classifier head with heavy dropout.
    model.add(Flatten())
    for width in (768, 256):
        model.add(Dense(width, init='normal'))
        model.add(Activation('relu'))
        model.add(Dropout(0.5))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    print('AlexNet_like_convnet... nb params: {}'.format(model.count_params()))
    model.compile(loss='categorical_crossentropy', optimizer=opt)
    return model
Esempio n. 32
0
    model.add(
        Dense(unit,
              activation='relu',
              kernel_initializer='normal',
              input_shape=(2, )))

    model.add(keras.layers.BatchNormalization())
    model.add(Dense(1))
    st = time.time()
    model.compile(loss='mean_squared_error',
                  optimizer=optimizers.SGD(lr=0.0001,
                                           momentum=0.9,
                                           nesterov=True))
    #model.compile(loss='mean_squared_error', optimizer=optimizers.adam())
    model.fit(x_train, y_train, batch_size=1000, epochs=epochs, verbose=0)
    param[0].append(model.count_params())

    acc[0].append(np.sqrt(model.evaluate(x_test, y_test)))
    tm[0].append(time.time() - st)
    units[0].append(unit)
    print(model.summary())
    print(acc[0][len(acc[0]) - 1])

    print(tm[0][len(tm[0]) - 1])

conf = [[16, 16], [16, 64], [64, 64], [16, 128], [64, 128], [64, 256],
        [128, 256]]
for c in conf:
    model = Sequential()
    model.add(Dense(c[0], activation='relu', input_shape=(2, )))
    model.add(keras.layers.BatchNormalization())
Esempio n. 33
0
# --- Script fragment: 1-D conv binary classifier with experiment logging.
# NOTE(review): `model`, `params`, X_train/y_train/X_test/y_test and
# `experiment` are defined earlier in the original script.
model.add(Dropout(params['dropout']))
model.add(
    Conv1D(filters=params['filter'],
           kernel_size=params['filter_size'],
           padding='same',
           activation=params['activation']))
model.add(Dropout(params['dropout']))

# Single-unit sigmoid head for binary classification.
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy',
              optimizer=params['optimizer'],
              metrics=['accuracy'])
#print model.summary() to preserve automatically in `Output` tab
print(model.summary())
params.update({'total_number_of_parameters': model.count_params()})

#will log metrics with the prefix 'train_'
with experiment.train():
    model.fit(X_train,
              y_train,
              epochs=params['epochs'],
              batch_size=params['batch_size'],
              verbose=1,
              validation_data=(X_test, y_test))

#will log metrics with the prefix 'test_'
with experiment.test():
    loss, accuracy = model.evaluate(X_test, y_test)
    metrics = {'loss': loss, 'accuracy': accuracy}
    experiment.log_multiple_metrics(metrics)
Esempio n. 34
0
    validation_data=validation_generator,
    validation_steps=(int(validation_samples // batch_size) + 1),
    callbacks=[tbCallBack, save_callback])

# --- Script fragment: post-training bookkeeping for a generator-based run.
# NOTE(review): `model`, `weights_dir`, `history`, the generators, the
# save_* helpers and train_std/train_mean come from earlier in the script.
print('Saving the weights into ' + weights_dir + ' \n')
model.save_weights(
    weights_dir + "model_weights"
)  # always save your weights after training or during training

# Evaluate on the held-out generator; first return value (loss) is discarded.
_, validation_accuracy = model.evaluate_generator(
    test_generator, val_samples=validation_samples)
print("validation accuracy = {}".format(validation_accuracy))

# save accuracy and loss plot curves
save_accuracy(history,
              results_dir,
              baseline=0.78,
              legend_name='Baseline',
              xmax=number_of_epoch)
save_loss(history,
          results_dir,
          baseline=1.2,
          legend_name='Baseline',
          xmax=number_of_epoch)

# Accuracy per 100k parameters: a rough size/quality trade-off figure.
num_parameters = model.count_params()
ratio = validation_accuracy / (num_parameters / 100000)
print("This model std_feat has {} parameters and a ratio of {}".format(
    num_parameters, ratio))
print("This model std_ has {} parameters and a mean of {}".format(
    train_std, train_mean))
Esempio n. 35
0
# --- Script fragment: compile + diagnostics for an RNN on the "copying"
# task. NOTE(review): `model`, `plot`, `FOLDER` and `LossHistory` are
# defined earlier in the original script.
model.add(Activation('sigmoid'))

model.compile(
    loss='binary_crossentropy',
    # loss='mse',
    optimizer='adam',
    metrics=['accuracy'])

# Dump architecture diagram and text summary before training starts.
print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Model architecture")
plot(model, show_shapes=True, to_file=FOLDER + "simple_rnn_for_copying.png")
print("Model summary")
print(model.summary())
print("Model parameter count")
print(model.count_params())

print()
print(time.strftime('%Y-%m-%d %H:%M:%S'))
print("Training...")
# Train the model each generation and show predictions against the
# validation dataset
for iteration in range(1, 200):
    print()
    print('-' * 78)
    print(time.strftime('%Y-%m-%d %H:%M:%S'))
    print('Iteration', iteration)
    history = LossHistory()
    check_pointer = ModelCheckpoint(filepath=FOLDER +
                                    "copying_model_weights.hdf5",
                                    verbose=1,
Esempio n. 36
0
# --- Script fragment: dense softmax head, compile, and augmentation setup
# for an image classifier. NOTE(review): `model`, `num_classes` and `ctx`
# (experiment channel sink) are defined earlier in the original script.
model.add(Flatten())
model.add(Dense(1024, activation='elu'))
model.add(Dense(1024, activation='elu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))

optimizer = keras.optimizers.Adam(lr=0.0005)

model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])

print(model.summary())
# Report model size to the experiment-tracking channels.
ctx.channel_send('n_layers', len(model.layers))
ctx.channel_send('n_parameters', model.count_params())

early_stopping = EarlyStopping(patience=10)

# Light augmentation: small shifts and horizontal flips only.
datagen = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=0,
        width_shift_range=0.1,
        height_shift_range=0.1,
        horizontal_flip=True,
        vertical_flip=False)
# --- Script fragment: TimeDistributed conv + BiLSTM head emitting a 12-way
# sigmoid per timestep (multi-label tagging). NOTE(review): `model` and its
# earlier layers are defined above in the original script.
model.add(TimeDistributed(MaxPooling1D(2, 2)))
model.add(Dropout(0.25))
model.add(TimeDistributed(Convolution1D(64, 3, activation='relu')))
model.add(TimeDistributed(Convolution1D(64, 3, activation='relu')))
model.add(TimeDistributed(MaxPooling1D(2, 2)))
model.add(Dropout(0.25))
model.add(TimeDistributed(Flatten()))
model.add(BatchNormalization())
# Two stacked bidirectional LSTMs keep the time axis (return_sequences=True).
model.add(Bidirectional(LSTM(256, return_sequences=True)))
model.add(Bidirectional(LSTM(256, return_sequences=True)))
model.add(Dropout(0.25))
model.add(TimeDistributed(Dense(12, activation='sigmoid')))

model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

print('param count:', model.count_params())
print('input shape:', model.input_shape)
print('output shape:', model.output_shape)

def new_model_id():
    """Generate a unique model identifier from the current timestamp."""
    timestamp = arrow.get().format('YYYY-MM-DD-HH-mm-ss')
    return 'model_' + timestamp

def save_model_arch(model_id, model):
    """Serialize the model architecture to '<model_dir>/<model_id>_arch.json'.

    Args:
        model_id: identifier used to name the output file.
        model: object providing to_json() (a keras model).
    """
    arch_file = '%s/%s_arch.json' % (model_dir, model_id)
    print('Saving model architecture:', arch_file)
    # Use a context manager so the handle is closed (and the write flushed)
    # deterministically; the original leaked the handle returned by open().
    with open(arch_file, 'w') as f:
        f.write(model.to_json())

def weights_file(model_id, suffix=''):
    """Return the weights-file path for model_id, with an optional suffix."""
    filename = '{}_weights{}.h5'.format(model_id, suffix)
    return '{}/{}'.format(model_dir, filename)

model_id = new_model_id()
Esempio n. 38
0
def run():
    """End-to-end experiment driver: load data, normalize, train an MLP,
    compute metrics, and push run statistics to the journal database.

    Relies on module-level configuration (RP, RD, cc), the db/metrics/utility
    helpers and OPTIMIZER. Returns nothing; all results are emitted via
    db.sendStatistics() and plot files.
    """
    stats = {}
    stats['runtime_second'] = time.time()

    # NOTE(review): startTime is never used below; runtime is measured via
    # stats['runtime_second'] instead.
    startTime = time.time()

    # Initialize using the same seed (to get stable results on comparisons)
    np.random.seed(RP['seed'])

    rawData = db.getData()

    # filter infs and nans from data cols
    cols = rawData.columns.tolist()[1:-1]

    print(cols)

    for col in cols:
        rawData = rawData.drop(rawData[np.isinf(rawData[col])].index)
        rawData = rawData.drop(rawData[np.isnan(rawData[col])].index)

    rawData.reset_index(drop=True,inplace=True)
    # NOTE(review): DataFrame.reindex returns a new frame; the result here is
    # discarded, so this intended shuffle is a no-op -- likely should be
    # rawData = rawData.reindex(...). Left unchanged to preserve behavior.
    rawData.reindex(np.random.permutation(rawData.index))

    # print(rawData)

    # Column 1 is the label, columns 2..n-1 are features (column 0 and the
    # last column are skipped).
    X_raw = rawData.iloc[:, 2:-1]
    y_raw = rawData.iloc[:, 1:2]

    scalerX = preprocessing.StandardScaler(copy=False)
    scalerX.fit(X_raw)
    scalery = preprocessing.StandardScaler(copy=False)
    scalery.fit(y_raw)

    # Optional z-score normalization of both features and labels.
    if RP['zscore_norm']:
        X = pd.DataFrame(scalerX.transform(X_raw), columns=X_raw.columns.values)
        y = pd.DataFrame(scalery.transform(y_raw), columns=y_raw.columns.values)
    else:
        X = X_raw
        y = y_raw

    # print(X.head(), y.head())

    model = Sequential()

    # hidden
    model.add(Dense(300, W_regularizer=l2(0.0),activity_regularizer=activity_l2(0.0), input_shape=(X.shape[1], )))
    model.add(Activation('relu'))
    model.add(Dropout(0.300))
    model.add(Dense(300, W_regularizer=l2(0.0),activity_regularizer=activity_l2(0.0)))
    model.add(Activation('relu'))
    model.add(Dropout(0.200))
    model.add(Dense(1))

    model.compile(loss = 'mse', optimizer = OPTIMIZER)

    # Train/test split: either explicit per-row flags from the dataset, or a
    # fixed 80/20 positional split.
    if RD['use_test_flags']:
        maskTrain = np.zeros(len(X),dtype=bool)
        maskTest = np.zeros(len(X),dtype=bool)
        for i in range(len(X)):
            maskTrain[i] = rawData[RD['testing']][i] == 0
            maskTest[i] = rawData[RD['testing']][i] == 1

        trainX = X.loc[maskTrain]
        testX = X.loc[maskTest]
        trainy = y.loc[maskTrain]
        testy = y.loc[maskTest]

    else:
        ratio = 0.8
        split = int(X.shape[0] * ratio)


        trainX, testX = X.iloc[:split], X.iloc[split:]
        trainy, testy = y.iloc[:split], y.iloc[split:]

    trainX.reset_index(drop=True,inplace=True)
    testX.reset_index(drop=True,inplace=True)
    trainy.reset_index(drop=True,inplace=True)
    testy.reset_index(drop=True,inplace=True)



    stats['training_row_count'] = len(trainX)
    stats['testing_row_count'] = len(testX)


    print(trainX.shape, testX.shape, trainy.shape, testy.shape)

    early = keras.callbacks.EarlyStopping(monitor = 'val_loss',
            patience = 20)

    history = model.fit(trainX.values, trainy.values, nb_epoch = RP['epochs'],
            batch_size = RP['batch'], callbacks = [early],
            validation_data = (testX.values, testy.values))


    preprocessMeta = {
        'scaler': scalery
    }

    # compute metrics for the model based on the task for both testing and training data
    print('\nGetting metrics for training data:')
    if RP['classify']:
        trainMetrics = metrics.classify(model, trainX.values, trainy.values, preprocessMeta)
    else:
        trainMetrics = metrics.predict(model, trainX.values, trainy.values, preprocessMeta)

    print('\nGetting metrics for test data:')
    if RP['classify']:
        testMetrics = metrics.classify(model, testX.values, testy.values, preprocessMeta)
    else:
        testMetrics = metrics.predict(model, testX.values, testy.values, preprocessMeta)


    # Collect per-epoch train/validation loss into a 2-column array for plotting.
    print('Plot:')
    values = np.zeros((len(history.history['loss']), 2))
    for i in range(len(history.history['loss'])):
        values[i][0] = history.history['loss'][i]
        values[i][1] = history.history['val_loss'][i]
    utility.plotLoss(values)

    print('Dump csv pred')
    pred = model.predict(testX.values, batch_size = RP['batch'])


    # Undo label scaling so predictions are reported in original units.
    if RP['zscore_norm']:
        predScaled = pd.DataFrame(scalery.inverse_transform(pred), columns=['pred'])
        testScaled = pd.DataFrame(scalery.inverse_transform(testy), columns=['true'])
    else:
        predScaled = pd.DataFrame(pred,columns=['pred'])
        testScaled = pd.DataFrame(testy,columns=['true'])

    predByTruth = pd.concat([predScaled, testScaled],axis=1)

    # predByTruth.plot(x='pred',y='true', kind='scatter')
    # plt.show()
    # predByTruth.to_csv('local/pred.csv')


    # statistics to send to journal
    stats['runtime_second'] = time.time() - stats['runtime_second']
    stats['memory_pm_mb'], stats['memory_vm_mb'] = utility.getMemoryUsage()
    stats['git_commit'] = utility.getGitCommitHash()
    stats['comment'] = RP['comment']
    stats['hostname'] = socket.gethostname()
    stats['experiment_config'] = yaml.dump(cc.exp,default_flow_style=False)

    stats['model'] = utility.modelToString(model)
    stats['loaded_model'] = RP['load_model']
    stats['parameter_count'] = model.count_params()
    stats['task'] = 'classification' if RP['classify'] else 'regression'

    stats['dataset_name'] = cc.exp['fetch']['table']
    stats['split_name'] = RD['testing']
    stats['label_name'] = ','.join(RD['labels'])

    stats['epoch_max'] = RP['epochs']
    stats['learning_rate'] = RP['learning_rate']
    stats['optimization_method'] = OPTIMIZER.__class__.__name__
    stats['batch_size'] = RP['batch']
    stats['seed'] = RP['seed']
    stats['objective'] = RP['objective']
    # NOTE(review): this open() handle is never closed; the learning-curve
    # image bytes are embedded directly into the stats payload.
    stats['learning_curve'] = {'val':open('{}/{}'.format(cc.cfg['plots']['dir'], utility.PLOT_NAME),'rb').read(),'type':'bin'}

    # metric statistics to send
    metricStats = {}

    if RP['classify']:
        metricStats['relevance_training'] = trainMetrics['acc_avg']
        metricStats['relevance_training_std'] = trainMetrics['acc_std']
        metricStats['relevance_testing'] = testMetrics['acc_avg']
        metricStats['relevance_testing_std'] = testMetrics['acc_std']
        metricStats['log_loss'] = testMetrics['log_loss_avg']
        metricStats['log_loss_std'] = testMetrics['log_loss_std']
        metricStats['auc'] = testMetrics['auc_avg']
        metricStats['auc_std'] = testMetrics['auc_std']
    else:
        metricStats['relevance_training'] = trainMetrics['r2_avg']
        metricStats['relevance_training_std'] = trainMetrics['r2_std']
        metricStats['relevance_testing'] = testMetrics['r2_avg']
        metricStats['relevance_testing_std'] = testMetrics['r2_std']
        metricStats['mse'] = testMetrics['mse_avg']
        metricStats['mse_std'] = testMetrics['mse_std']

    stats.update(metricStats)
    db.sendStatistics(**stats)
def constructDNNModel(modelIndex):
    """Build and compile one of two predefined CNN regression models.

    Parameters
    ----------
    modelIndex : int
        1 -> VGG-style model: 3x3 convs (64/128/128 filters) + 1024-unit head.
        2 -> narrower model with asymmetric kernels (1x1, 2x3, 3x4) and a
             400-unit head.
        Any other value returns an empty list (original fallback preserved).

    Returns
    -------
    A compiled Keras ``Sequential`` model, or ``[]`` when ``modelIndex`` is
    not recognized.

    NOTE(review): depends on module-level globals defined elsewhere in this
    file: channels, patchHeight, patchWidth, initialization, regularizer,
    nb_output, learningRate, weightSavePath, doWeightLoadSaveTest,
    printing, linear_correlation_loss.
    """

    def _conv(nb_filter, nb_row, nb_col):
        # Shared Convolution2D configuration used by every conv layer:
        # valid padding, stride 1, relu, L2 weight regularization.
        return Convolution2D(nb_filter, nb_row, nb_col, border_mode='valid',
                             trainable=True, init=initialization,
                             W_regularizer=l2(regularizer),
                             subsample=(1, 1), activation="relu")

    def _dense(units, act):
        # Shared fully-connected layer configuration.
        return Dense(units, trainable=True, init=initialization,
                     W_regularizer=l2(regularizer), activation=act)

    def _finalize(model):
        # Optional weight save/load round-trip sanity check, then compile
        # with SGD + Nesterov momentum (identical epilogue for both models).
        if doWeightLoadSaveTest:
            # pdb.set_trace()
            model.save_weights(weightSavePath + 'weightsLoadSaveTest.h5', overwrite=True)
            model.load_weights(weightSavePath + 'weightsLoadSaveTest.h5')
            printing("Weight load/save test passed...")
        sgd = SGD(lr=learningRate, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss=linear_correlation_loss, optimizer=sgd)
        printing("Compilation Finished")

    model = []  # fallback return value for unrecognized modelIndex
    if modelIndex == 1:
        model = Sequential()
        # 'linear' activation is an identity layer used only to declare the
        # input shape.  Spatial sizes below track the original comments.
        model.add(Activation('linear', input_shape=(channels, patchHeight, patchWidth)))  # 23 x 31

        # Block 1: 2 x conv 64@3x3, then overlapping 2x2/stride-1 max-pool.
        model.add(_conv(64, 3, 3))   # 21 x 29
        model.add(_conv(64, 3, 3))   # 19 x 27
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))  # 18 x 26

        # Block 2: 3 x conv 128@3x3 + pool.
        model.add(_conv(128, 3, 3))
        model.add(_conv(128, 3, 3))
        model.add(_conv(128, 3, 3))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))  # 11 x 19

        # Block 3: 3 x conv 128@3x3 + pool.
        model.add(_conv(128, 3, 3))
        model.add(_conv(128, 3, 3))
        model.add(_conv(128, 3, 3))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))  # 2 x 6

        # Regression head: two 1024-unit relu layers with dropout, linear out.
        model.add(Flatten())
        model.add(_dense(1024, "relu"))
        model.add(Dropout(0.5))
        model.add(_dense(1024, "relu"))
        model.add(Dropout(0.5))
        model.add(_dense(nb_output, "linear"))

        printing("Built the model")
        print("Model parameters = " + str(model.count_params()))
        _finalize(model)
    elif modelIndex == 2:
        model = Sequential()
        model.add(Activation('linear', input_shape=(channels, patchHeight, patchWidth)))  # 23 x 31

        # Four blocks of asymmetric-kernel convs (1x1 -> 2x3 -> 3x4) each
        # followed by an overlapping 2x2/stride-1 max-pool.
        for nb_filter in (32, 48, 48, 48):
            model.add(_conv(nb_filter, 1, 1))
            model.add(_conv(nb_filter, 2, 3))
            model.add(_conv(nb_filter, 3, 4))
            model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))

        # Final block: 1x1 then two 3x3 convs + pool -> 2x2x48 feature map.
        model.add(_conv(48, 1, 1))
        model.add(_conv(48, 3, 3))
        model.add(_conv(48, 3, 3))
        model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))  # 2, 2

        # Regression head: flatten via Reshape, two 400-unit relu layers.
        model.add(Reshape((2 * 2 * 48,)))
        model.add(_dense(400, "relu"))
        model.add(Dropout(0.5))
        model.add(_dense(400, "relu"))
        model.add(Dropout(0.5))
        model.add(_dense(nb_output, "linear"))
        printing("Built the model")
        # NOTE(review): unlike model 1, the original did not print the
        # parameter count here; that asymmetry is preserved.
        _finalize(model)
    return model