Example #1
from keras.models import Model
from keras.layers import (Input, Embedding, Bidirectional, LSTM,
                          TimeDistributed, Dense, Dropout)
from keras import optimizers


def create_model(embedding_matrix, tag_index):
    # Build the model
    input = Input(shape=(None, ))
    # word_index comes from the tokenizer; embedding_matrix is the word-embedding matrix
    word_emb = Embedding(len(embedding_matrix),
                         emb_dim,
                         weights=[embedding_matrix])(input)
    # Embedding has no dropout argument in Keras 2, so apply Dropout separately
    word_emb = Dropout(0.5)(word_emb)
    bilstm = Bidirectional(LSTM(100, return_sequences=True))(word_emb)
    # tag_index maps tags to indices. TimeDistributed is a wrapper that applies a
    # layer to every time step of the input (one word per time step, so the layer
    # must run at each step to predict a tag for every word). The output shape is
    # (None, None, len(tag_index)); each step's output can go straight through the
    # softmax, or be fed into a CRF layer for further decoding.
    # print("bilstm:", bilstm)
    dense = TimeDistributed(Dense(len(tag_index),
                                  activation='softmax'))(bilstm)
    # print("dense:", dense)
    model = Model(inputs=input, outputs=dense)
    # crf_layer = CRF(len(tag_index), sparse_target=True)  # if adding a CRF on top
    # crf = crf_layer(dense)
    # model = Model(inputs=input, outputs=crf)
    model.summary()

    # 编译模型
    optmr = optimizers.Adam(lr=lr, beta_1=0.5)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optmr,
                  metrics=['accuracy'])
    # If a CRF is the last layer, change the compile configuration instead:
    # model.compile(loss=crf_layer.loss_function,
    #               optimizer=optmr,
    #               metrics=[crf_layer.accuracy])

    return model
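
# A minimal usage sketch (hypothetical data, not from the source). create_model
# reads the module-level globals `emb_dim` and `lr`, so they are set first here.
import numpy as np

emb_dim, lr = 100, 1e-3
embedding_matrix = np.random.rand(5000, emb_dim)   # 5000-word vocabulary
tag_index = {'O': 0, 'B-PER': 1, 'I-PER': 2}
ner_model = create_model(embedding_matrix, tag_index)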
Example #2
from keras.models import Sequential
from keras.layers import Conv2D, Dense, Dropout, Flatten
from keras import optimizers


def get_model():
    # implement the NVIDIA self-driving-car network
    model = Sequential()

    model.add(
        Conv2D(filters=24,
               kernel_size=(5, 5),
               strides=(2, 2),
               batch_input_shape=(None, 80, 160, 3),
               kernel_initializer='normal',
               padding='valid',
               activation='linear'))

    model.add(
        Conv2D(filters=34,
               kernel_size=(5, 5),
               kernel_initializer='normal',
               strides=(2, 2),
               padding='valid',
               activation='linear'))

    model.add(
        Conv2D(filters=44,
               kernel_size=(5, 5),
               kernel_initializer='normal',
               strides=(2, 2),
               padding='valid',
               activation='linear'))

    model.add(
        Conv2D(filters=52,
               kernel_size=(3, 3),
               kernel_initializer='normal',
               strides=(1, 1),
               padding='valid',
               activation='linear'))

    model.add(
        Conv2D(filters=52,
               kernel_size=(3, 3),
               kernel_initializer='normal',
               strides=(1, 1),
               padding='valid',
               activation='linear'))
    model.add(Flatten())
    model.add(Dense(1124, activation='relu', name='dense1'))
    model.add(Dropout(0.5))
    model.add(Dense(100, activation='relu', name='dense2'))
    model.add(Dense(50, activation='relu', name='dense3'))
    model.add(Dense(10, activation='relu', name='dense4'))
    model.add(Dense(1, activation='sigmoid', name='dense5'))
    model.summary()
    # Adam with a very small fixed learning rate (5e-6) for fine-grained
    # updates on the small image set
    optimizer = optimizers.Adam(lr=0.000005)
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    return model
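
# Hypothetical smoke test (not from the source): the first layer fixes the
# input at 80x160 RGB frames and the head is a single sigmoid unit, so a batch
# of four random frames yields predictions of shape (4, 1).
import numpy as np

model = get_model()
print(model.predict(np.random.rand(4, 80, 160, 3)).shape)   # (4, 1)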
Example #3
    def __init__(self, lr, a_size, n_states):
        # These lines established the feed-forward part of the network.
        # The agent takes a state and produces an action.
        self.model = Sequential()
        self.model.add(Dense(10, input_shape=(n_states,), activation="relu"))

        self.model.add(Dense(a_size, activation="softmax"))
        adam = optimizers.Adam(lr)
        self.model.compile(loss='categorical_crossentropy', optimizer=adam)
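
    # A sketch (assumption, not from the source) of how an agent built this way
    # is typically queried: the softmax output is a probability distribution
    # over the a_size actions, so an action can be sampled from it.
    def act(self, state):
        import numpy as np
        probs = self.model.predict(state[np.newaxis, :])[0]   # add a batch dim
        return int(np.random.choice(len(probs), p=probs))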
Example #4
    def build(self, lr=0.0002):
        self.input_img = Input(shape=(self.height, self.width, self.channel))

        conv1 = Conv2D(48, (7, 7), activation='relu',
                       padding='same')(self.input_img)
        conv1 = Conv2D(48, (7, 7), activation='relu', padding='same')(conv1)

        conv2 = Conv2D(64, (5, 5), activation='relu', padding='same')(conv1)
        conv2 = Conv2D(64, (5, 5), activation='relu', padding='same')(conv2)

        conv3 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv2)
        conv3 = Conv2D(64, (3, 3), activation='relu', padding='same')(conv3)

        conv4 = Conv2D(96, (3, 3), activation='relu', padding='same')(conv3)
        conv4 = Conv2D(96, (3, 3), activation='relu', padding='same')(conv4)

        dconv1 = Deconv2D(96, (3, 3), activation='relu', padding='same')(conv4)
        dconv1 = Deconv2D(96, (3, 3), activation='relu',
                          padding='same')(dconv1)
        dconv1 = Add()([dconv1, conv4])

        dconv2 = Deconv2D(64, (3, 3), activation='relu',
                          padding='same')(dconv1)
        dconv2 = Deconv2D(64, (3, 3), activation='relu',
                          padding='same')(dconv2)
        dconv2 = Add()([dconv2, conv3])

        dconv3 = Deconv2D(64, (5, 5), activation='relu',
                          padding='same')(dconv2)
        dconv3 = Deconv2D(64, (5, 5), activation='relu',
                          padding='same')(dconv3)
        dconv3 = Add()([dconv3, conv2])

        dconv4 = Deconv2D(48, (7, 7), activation='relu',
                          padding='same')(dconv3)
        dconv4 = Deconv2D(48, (7, 7), activation='relu',
                          padding='same')(dconv4)
        dconv4 = Add()([dconv4, conv1])

        self.output_img = Conv2D(self.channel, (7, 7),
                                 activation='relu',
                                 padding='same')(dconv4)

        optimizer = optimizers.Adam(lr=lr)
        self.model = Model(self.input_img, self.output_img)
        self.model.compile(optimizer=optimizer, loss='mse')
        self.model.summary()
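
        # Hypothetical usage (not from the source): the network maps an image
        # to an image under MSE loss, so training pairs are (degraded, clean)
        # images of the configured height/width/channel, e.g.:
        #   self.model.fit(noisy_imgs, clean_imgs, batch_size=32, epochs=10)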
Example #5
import os

from keras import optimizers
from keras.preprocessing.image import ImageDataGenerator


def training(model, LR):
    train_datagen = ImageDataGenerator(
        rescale=1. / 255,
        horizontal_flip=True,
        # vertical_flip=True,
        width_shift_range=0.2,
        height_shift_range=0.2,
        zoom_range=0.2)

    test_datagen = ImageDataGenerator(rescale=1. / 255)

    train_generator = train_datagen.flow_from_directory(
        TRAIN_DATA_DIR,
        target_size=(128, 128),
        batch_size=BATCH_SIZE,
        class_mode='categorical')
    print(train_generator.class_indices)

    validation_generator = test_datagen.flow_from_directory(
        TEST_DATA_DIR,
        target_size=(128, 128),
        batch_size=BATCH_SIZE,
        class_mode='categorical')

    optimizer = optimizers.Adam(lr=LR,
                                beta_1=0.9,
                                beta_2=0.999,
                                epsilon=1e-08,
                                decay=0.0)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    if os.path.exists(SAVED_WEIGHT_NAME):
        model.load_weights(SAVED_WEIGHT_NAME)

    for i in range(EPOCH):
        print("starting step {}".format(i))

        model.fit_generator(train_generator,
                            steps_per_epoch=100,
                            epochs=2,
                            validation_data=validation_generator,
                            validation_steps=20)
        model.save_weights(SAVED_WEIGHT_NAME)
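
# flow_from_directory infers one class per subdirectory, so the snippet assumes
# a layout like the following (hypothetical paths):
#   TRAIN_DATA_DIR/
#       class_a/  img001.jpg ...
#       class_b/  img002.jpg ...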
Example #6
def train_model(model, input, output):
    loss = 'categorical_crossentropy'
    opt = optimizers.Adam(lr=config.params['learning_rate'],
                          beta_1=0.9,
                          beta_2=0.999,
                          epsilon=1e-08,
                          decay=0.0)
    met = ['categorical_accuracy']
    model.compile(
        optimizer=opt,
        loss=loss,
        metrics=met,
    )
    model.summary(print_fn=logging.info)
    monitor_quant = 'val_categorical_accuracy'  # 'val_loss'
    model_format = os.path.join(config.model_save_dir, dt.datetime.now().strftime('%m_%d_%H') + \
                                '_{epoch:03d}_ACC_{val_categorical_accuracy:.3f}.hdf5')
    checkpoint_val = ModelCheckpoint(model_format,
                                     monitor=monitor_quant,
                                     verbose=1,
                                     save_best_only=True,
                                     mode='auto')
    early_stop_val = EarlyStopping(monitor=monitor_quant,
                                   patience=3,
                                   mode='auto',
                                   min_delta=0)
    csvlogger = CSVLogger(os.path.join(config.model_save_dir, 'log.csv'),
                          append=False,
                          separator=',')
    callbacks_list = [
        early_stop_val,
        checkpoint_val,
        csvlogger,
    ]
    hist = model.fit(
        input,
        output,
        verbose=0,
        epochs=config.params['epoch'],
        shuffle=True,
        batch_size=config.params['batch_size'],
        callbacks=callbacks_list,
        validation_split=0.2,
    )
Example #7
def train(model, train_data, validation_data, batch_size, epochs):

    train_generator = generator(train_data,
                                PATH,
                                is_augment=IS_AUGMENT,
                                batch=batch_size)
    validation_generator = generator(validation_data,
                                     PATH,
                                     is_augment=False,
                                     batch=batch_size)

    n_train_samples = (len(train_data) * AUG_MULTIPLY
                       if IS_AUGMENT else len(train_data))
    n_train_steps = int(np.ceil(n_train_samples / float(batch_size)))

    n_valid_samples = len(validation_data)
    n_valid_steps = int(np.ceil(n_valid_samples / float(batch_size)))
    print(n_train_samples, n_train_steps, n_valid_samples, n_valid_steps)

    cbks = [EarlyStopping(patience=2)]
    weights_path = 'my_model_weights.h5'
    if os.path.isfile(weights_path):
        print('load weights')
        model.load_weights(weights_path)
    model.compile(loss='mse', optimizer=optimizers.Adam(lr=1e-04))
    history = model.fit_generator(generator=train_generator,
                                  steps_per_epoch=n_train_steps,
                                  validation_data=validation_generator,
                                  validation_steps=n_valid_steps,
                                  epochs=epochs,
                                  callbacks=cbks,
                                  workers=1)
    print('save weights and model ')
    model.save_weights(weights_path)
    model.save('model.h5')
    return history
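
# The `generator` helper used above is not shown in this snippet. A minimal
# sketch of the contract fit_generator expects (loop forever, yielding
# (inputs, targets) batches); `load_image` and `target_of` are hypothetical:
# def generator(samples, path, is_augment=False, batch=32):
#     while True:
#         for i in range(0, len(samples), batch):
#             chunk = samples[i:i + batch]
#             X = np.stack([load_image(path, s) for s in chunk])
#             y = np.array([target_of(s) for s in chunk])
#             yield X, y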
Example #8
model.add(Flatten())
# Dropout 40%
model.add(Dropout(0.40))
model.add(Dense(100))
#model.add(Dropout(0.40))
model.add(Dense(50))
#model.add(Dropout(0.20))
model.add(Dense(10))
# Output: directly predict the steering measurement, so 1 output
model.add(Dense(1))
model.summary()
adam = optimizers.Adam(lr=0.001)

# checkpoint
checkpoint = ModelCheckpoint("model-{epoch:02d}.h5",
                             monitor='loss',
                             verbose=1,
                             save_best_only=False,
                             mode='min')
callbacks_list = [checkpoint]
# Compile and train the model using the generator function
model.compile(loss='mse', optimizer=adam)
history_object = model.fit_generator(train_generator,
                                     samples_per_epoch=len(train_samples),
                                     validation_data=validation_generator,
                                     nb_val_samples=len(validation_samples),
                                     nb_epoch=5,
                                     callbacks=callbacks_list)
Example #9

if os.path.isfile(name_module_save):
    print()
    print("load model: ", name_module_save)
    print("continue training ...")
    checkpoint = ModelCheckpoint('model-{epoch:03d}.h5',
                                 monitor='val_loss',
                                 verbose=0,
                                 save_best_only=False,
                                 mode='auto')

    model = load_model(name_module_save)
    adam = optimizers.Adam(lr=0.001,
                           beta_1=0.9,
                           beta_2=0.999,
                           epsilon=0.001,
                           decay=0.0)
    model.compile(loss='mse', optimizer=adam)
    model.fit_generator(train_generator,
                        samples_per_epoch=len(train_samples),
                        validation_data=validation_generator,
                        nb_val_samples=len(validation_samples),
                        nb_epoch=5,
                        callbacks=[checkpoint])
    model.save(name_module_save)
    print("name module save: ", name_module_save)
else:
    print()
    print("Start training ...")
    model = Sequential()
Example #10
    def create_model(self, embedding_matrix, tag_index):
        # Build the BiLSTM+CRF model
        char_input = Input(shape=(None, ))
        # word_index comes from the tokenizer; embedding_matrix is the word-embedding matrix
        word_emb = Embedding(len(embedding_matrix),
                             emb_dim,
                             weights=[embedding_matrix],
                             mask_zero=True)(char_input)
        # If word features are needed, concatenate the word and character embeddings
        if seg_dim:
            seg_input = Input(shape=(None, ))
            seg_emb = Embedding(4, seg_dim)(seg_input)
            word_emb = concatenate([word_emb, seg_emb], axis=-1)
        bilstm = Bidirectional(
            LSTM(100, return_sequences=True, dropout=dropout))(word_emb)
        # tag_index maps tags to indices. TimeDistributed is a wrapper that applies
        # a layer to every time step of the input (one word per time step, so the
        # layer must run at each step to predict a tag for every word). The output
        # shape is (None, None, len(tag_index)); each step's output can go straight
        # through an activation, or be fed into a CRF layer for further decoding.
        # print("bilstm:", bilstm)
        dense = TimeDistributed(Dense(len(tag_index)))(bilstm)
        # print("dense:", dense)
        # model = Model(inputs=input, outputs=dense)
        crf_layer = CRF(
            len(tag_index),
            sparse_target=True)  # keras_contrib CRF; sparse_target=True means labels are integer tag indices rather than one-hot vectors
        # crf_layer = CRF(len(tag_index))  # keras_crf layer
        crf = crf_layer(dense)
        if seg_dim:
            model = Model(inputs=[char_input, seg_input], outputs=crf)
        else:
            model = Model(inputs=char_input, outputs=crf)
        model.summary()
        # 编译模型
        optmr = optimizers.Adam(lr=lr, beta_1=0.5)
        # model.compile(loss='categorical_crossentropy',
        #               optimizer=optmr,
        #               metrics=['accuracy'])
        # With a CRF as the last layer, the compile configuration changes:
        model.compile(
            loss=crf_layer.loss_function,  # crf_layer is the CRF() instance created above
            # loss=crf_layer.loss,    # keras_crf layer
            optimizer=optmr,
            metrics=[crf_layer.accuracy])

        # Standalone BiLSTM model
        # input = Input(shape=(None,))
        # word_emb = Embedding(len(embedding_matrix), emb_dim, weights=[embedding_matrix], mask_zero=True)(input)
        # bilstm = Bidirectional(LSTM(100, return_sequences=True, dropout=dropout))(word_emb)
        # dense = TimeDistributed(Dense(len(tag_index)))(bilstm)
        # model = Model(inputs=input, outputs=dense)
        # optmr = optimizers.Adam(lr=lr, beta_1=0.5)
        # model.compile(loss='categorical_crossentropy',
        #               optimizer=optmr,
        #               metrics=['accuracy'])

        # Standalone CRF model
        # input = Input(shape=(None,))
        # word_emb = Embedding(len(embedding_matrix), emb_dim, weights=[embedding_matrix], mask_zero=True)(input)
        # crf_layer = CRF(len(tag_index), sparse_target=True)  # keras_contrib CRF; sparse_target=True: integer-index labels
        # crf = crf_layer(word_emb)
        # model = Model(inputs=input, outputs=crf)
        # model.summary()
        # optmr = optimizers.Adam(lr=lr, beta_1=0.5)
        # model.compile(
        #               loss=crf_layer.loss_function,  # crf_layer is the CRF() instance
        #               optimizer=optmr,
        #               metrics=[crf_layer.accuracy])

        # Sequential-API version of the same model
        # model = Sequential()
        # model.add(Embedding(len(embedding_matrix), emb_dim, weights=[embedding_matrix], input_shape=(None,)))
        # model.add(Bidirectional(LSTM(100, return_sequences=True, dropout=dropout)))
        # model.add(TimeDistributed(Dense(len(tag_index))))
        # crf_layer = CRF(len(tag_index), sparse_target = True)
        # model.add(crf_layer)
        # model.summary()
        # optmr = optimizers.Adam(lr=lr, beta_1=0.5)
        # model.compile(
        #       loss=crf_layer.loss_function,  # crf_layer is the CRF() instance
        #       # loss=crf_layer.loss,    # keras_crf layer
        #       optimizer=optmr,
        #       metrics=[crf_layer.accuracy])

        return model
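
        # Note on sparse_target (keras_contrib CRF): with sparse_target=True the
        # labels are integer tag indices of shape (batch, timesteps, 1); with
        # False they are one-hot vectors of shape (batch, timesteps,
        # len(tag_index)). Hypothetical shapes for 32 sequences of length 50
        # with 7 tags:
        #   y_sparse = np.zeros((32, 50, 1), dtype='int32')
        #   y_onehot = np.zeros((32, 50, 7), dtype='float32')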
Example #11
lstm_out = LSTM(units=128, activation='tanh')(main_input)
#lstm_out = LSTM(units=32, activation='tanh')(lstm_out)
lstm_out = Dropout(0.2)(lstm_out)
#lstm_out = Dense(128, activation='relu')(lstm_out)
#lstm_out = Dense(128, activation='relu')(lstm_out)
lstm_out = Dense(action_count, activation='relu')(lstm_out)
main_output = Activation('softmax')(lstm_out)
model = Model(inputs=[main_input], outputs=main_output)
print(model.summary())

# model definition: hyperparameters and candidate optimizers
lr = 0.1
epochs = 30

sgd = optimizers.SGD(lr=lr, momentum=0.9, nesterov=True)
adam = optimizers.Adam(lr=lr, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
rmsprop = optimizers.RMSprop(lr=lr, rho=0.9, epsilon=None)
model.compile(optimizer=sgd, loss='categorical_crossentropy')

model_weights_file = '.tmp_nn_weights.{}'.format(uuid.uuid4())
model.save_weights(model_weights_file)

# Training / testing
seq_count = len(actions)
LOOCV = seq_count
Fold = 5
fold_size = int(math.ceil(float(seq_count) / Fold))
print('Training on {} samples in {} folds'.format(seq_count, Fold))

train_accs = []
test_accs = []
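
# A sketch (assumption, not shown in the source) of the k-fold loop this setup
# prepares for: reload the saved initial weights before each fold so every fold
# trains from the same starting point.
for f in range(Fold):
    model.load_weights(model_weights_file)   # reset to the initial weights
    test_range = range(f * fold_size, min((f + 1) * fold_size, seq_count))
    # train on the sequences outside test_range, evaluate on test_range,
    # then append the scores to train_accs / test_accs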
Example #12
model.add(
    Convolution2D(48,
                  5,
                  5,
                  border_mode="valid",
                  subsample=(2, 2),
                  activation="elu"))
model.add(Dropout(0.7))
model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="elu"))
model.add(Dropout(0.6))
model.add(Convolution2D(64, 3, 3, border_mode="valid", activation="elu"))
model.add(Dropout(0.6))

model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(100, activation="elu"))
model.add(Dense(50, activation="elu"))
model.add(Dense(10, activation="elu"))
model.add(Dropout(0.5))
model.add(Dense(1))

adam = optimizers.Adam(lr=0.001, epsilon=0.001)
model.compile(loss='mse', optimizer=adam)

model.fit_generator(train_generator,
                    samples_per_epoch=len(train_samples),
                    validation_data=validation_generator,
                    nb_val_samples=len(validation_samples),
                    nb_epoch=2)

model.save('./model1.h5')
Example #13
def keras_model_MLP_gen(trainx,
                        trainy,
                        valx,
                        valy,
                        voc_size,
                        idx2w,
                        idx2la,
                        upto=float('inf')):
    if upto and upto > 0:
        trainx = trainx[0:upto]
        trainy = trainy[0:upto]
    qs_input = None
    input_embed = None
    ent_input = None
    if USE_CONTEXT_WINDOW:
        if MEDIA_ENTITY_ONLY:
            qs_input = []
            input_real = []
        else:
            input_defs, input_real = context_window_embeddins(
                voc_size, atis_max_qlen, name_prefix='sent')
            qs_input = input_defs
        if USE_CONTEXT_ENTITY:
            ent_input, ents_real = context_window_embeddins(
                MEDIA_ENTITY_VOCAB_SIZE, atis_max_qlen, name_prefix='entity')
            input_embed = concatenate(input_real + ents_real)
        else:
            if input_real:
                input_embed = concatenate(input_real)
    elif USE_ENTITY_AS_SEQUENCE:
        all_qs_inputs = []
        all_ems = []
        qi = Input(shape=(atis_max_qlen, ), name="sent_input")
        ei = Input(shape=(atis_max_qlen, ), name="entity_input")
        all_qs_inputs.append(qi)
        all_qs_inputs.append(ei)
        qi_eb = get_word_embeddings(None,
                                    None,
                                    word_voc_size=voc_size,
                                    maxlen=atis_max_qlen,
                                    name='sent_embed')(qi)
        ent_eb = get_word_embeddings(None,
                                     None,
                                     word_voc_size=MEDIA_ENTITY_VOCAB_SIZE,
                                     maxlen=atis_max_qlen,
                                     name='ent_embed')(ei)

        all_ems.append(qi_eb)
        all_ems.append(ent_eb)
        qs_input = all_qs_inputs
        input_embed = concatenate(all_ems)
    else:
        qs_input = Input(shape=(atis_max_qlen, ), name="sent_input")
        input_embed = get_word_embeddings(None,
                                          qs_input,
                                          word_voc_size=voc_size,
                                          maxlen=atis_max_qlen)
    concat = input_embed
    drop1 = Dropout(0.2, name='dropout_1')(concat)
    hidden = TimeDistributed(
        Dense(units=200,
              activation='relu',
              name='dense_2',
              kernel_regularizer=regularizers.l2(1e-4)))(drop1)
    drop2 = Dropout(config.spacy_tag_dep['dropout_rate'],
                    name='dropout_2')(hidden)
    dense3 = TimeDistributed(
        Dense(units=num_output_classes, name='dense_3',
              activation='softmax'))(drop2)
    output = dense3
    last_dim = int(output.shape[2])
    lambda_layer = Lambda(lambda x: x, output_shape=(atis_max_qlen, last_dim))
    output_layer = lambda_layer(output)
    if USE_CONTEXT_WINDOW:
        multi_inputs_definition = qs_input
        if USE_CONTEXT_ENTITY:
            multi_inputs_definition.extend(ent_input)
    elif USE_ENTITY_AS_SEQUENCE:
        multi_inputs_definition = qs_input
    else:
        multi_inputs_definition = [qs_input]
    final_model = Model(inputs=multi_inputs_definition, outputs=output_layer)
    loss_func = 'categorical_crossentropy'
    met = ['categorical_accuracy']
    opt = optimizers.Adam(lr=5e-3,
                          beta_1=0.9,
                          beta_2=0.999,
                          epsilon=1e-08,
                          decay=0.0)
    final_model.compile(
        optimizer=opt,
        loss=loss_func,
        metrics=met,
        #sample_weight_mode='temporal',
    )
    final_model.summary(print_fn=logging.info)
    # monitor_quant = 'val_' + met[0]  # accuracy-based; keep 'val_loss' to monitor loss
    monitor_quant = 'val_loss'
    # the metric key must match the compiled metric ('categorical_accuracy'),
    # otherwise formatting the checkpoint filename raises a KeyError
    mlp_models = model_save_dir + 'MLP_' + now.strftime(
        '%m_%d_%H') + '_{epoch:03d}_ACC_{val_categorical_accuracy:.3f}.hdf5'
    checkpoint_val = ModelCheckpoint(mlp_models,
                                     monitor=monitor_quant,
                                     verbose=1,
                                     save_best_only=True,
                                     mode='auto')
    early_stop_val = EarlyStopping(monitor=monitor_quant,
                                   patience=3,
                                   mode='auto',
                                   min_delta=0)
    csvlogger = CSVLogger(model_save_dir + 'log.csv',
                          append=False,
                          separator=',')
    callbacks_list = [
        early_stop_val,
        checkpoint_val,
        csvlogger,
    ]
    batch_size = config.spacy_tag_dep['batch_size']
    gen_params = {
        'batch_size': batch_size,
        'shuffle': True,
        'idx2w': idx2w,
        'idx2la': idx2la,
    }
    # NOTE: the generator needs to produce weights if sample_weight_mode='temporal' is used
    train_gen = data_gen.DataFeeder(**gen_params).generate(trainx, trainy)
    val_gen = data_gen.DataFeeder(**gen_params).generate(valx, valy)
    hist = final_model.fit_generator(
        generator=train_gen,
        steps_per_epoch=1 + (len(trainx) // batch_size),
        verbose=config.spacy_tag_dep['verbose'],
        epochs=config.spacy_tag_dep['epochs'],
        callbacks=callbacks_list,
        validation_data=val_gen,
        validation_steps=1 + (len(valx) // batch_size),
        #sample_weight=train_sample_wts,
    )
    logging.info("training done, model saved. Now plotting..")
Example #14
                  5,
                  border_mode='valid',
                  subsample=(2, 2),
                  activation="relu"))
model.add(
    Convolution2D(64,
                  3,
                  3,
                  border_mode='valid',
                  subsample=(1, 1),
                  activation="relu"))
model.add(
    Convolution2D(64,
                  3,
                  3,
                  border_mode='valid',
                  subsample=(1, 1),
                  activation="relu"))
model.add(Flatten())
model.add(Dense(1164, activation='relu'))
model.add(Dropout(.2))
model.add(Dense(100, activation='relu'))
model.add(Dense(50, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dropout(.5))
model.add(Dense(1))
model.compile(optimizer=optimizers.Adam(lr=1e-04), loss='mse')
model.fit(x_train, y_train, validation_split=0.2, shuffle=True, nb_epoch=8)
model.save('model.h5')
model.summary()