Example #1
def get_nn_model(token_dict_size):
    _logger.info('Initializing NN model with the following params:')
    _logger.info('Input dimension: %s (token vector size)' % TOKEN_REPRESENTATION_SIZE)
    _logger.info('Hidden dimension: %s' % HIDDEN_LAYER_DIMENSION)
    _logger.info('Output dimension: %s (token dict size)' % token_dict_size)
    _logger.info('Input seq length: %s ' % INPUT_SEQUENCE_LENGTH)
    _logger.info('Output seq length: %s ' % ANSWER_MAX_TOKEN_LENGTH)
    _logger.info('Batch size: %s' % SAMPLES_BATCH_SIZE)

    model = Sequential()
    seq2seq = SimpleSeq2seq(
        input_dim=TOKEN_REPRESENTATION_SIZE,
        input_length=INPUT_SEQUENCE_LENGTH,
        hidden_dim=HIDDEN_LAYER_DIMENSION,
        output_dim=token_dict_size,
        output_length=ANSWER_MAX_TOKEN_LENGTH,
        depth=1
    )

    model.add(seq2seq)
    model.compile(loss='mse', optimizer='rmsprop')

    # use previously saved model if it exists
    _logger.info('Looking for a model %s' % NN_MODEL_PATH)

    if os.path.isfile(NN_MODEL_PATH):
        _logger.info('Loading previously calculated weights...')
        model.load_weights(NN_MODEL_PATH)

    _logger.info('Model is built')
    return model
Example #2
def lstm(trainData, trainMark, testData, embedding_dim, embedding_matrix, maxlen, output_len):
    # Pad the data so that every sequence has the same length:
    # pad_sequences returns a numpy array in which longer sequences are truncated
    # and shorter ones are zero-padded (index 0 maps to an all-zero row, so this is safe).
    trainData = list(sequence.pad_sequences(trainData, maxlen=maxlen, dtype='float64'))
    testData = list(sequence.pad_sequences(testData, maxlen=maxlen, dtype='float64'))

    # Build the LSTM model.
    model = Sequential()  # a linear stack of layers; pass a list of layers or add them one by one with .add()
    # model.add(Dense(256, input_shape=(train_total_vova_len,)))   # fully connected input layer
    model.add(Embedding(len(embedding_matrix), embedding_dim, weights=[embedding_matrix], mask_zero=False,
                        input_length=maxlen))  # input layer: maps one-hot token ids to dense embeddings;
                                               # first arg is max input index + 1, second is the embedding dimension
    # LSTM layer, the core of the model
    model.add(LSTM(256))
    model.add(Dropout(0.5))  # randomly drop connections during updates to reduce overfitting
    model.add(Dense(output_len))  # fully connected output layer
    model.add(Activation('softmax'))  # softmax activation on the output
    # Compile the model: categorical_crossentropy (a.k.a. logloss) with the SGD optimizer.
    model.compile(loss='categorical_crossentropy', optimizer='sgd')

    # Train the model. (Without the padding above, arrays of unequal length would raise an error here.)
    X = np.array(list(trainData))  # inputs
    print("X:", X)
    Y = np.array(list(trainMark))  # labels
    print("Y:", Y)
    # batch_size: number of samples per gradient-descent batch
    # nb_epoch: number of passes over the training data
    model.fit(X, Y, batch_size=200, nb_epoch=10)  # X and Y are numpy arrays; multiple inputs would be lists of arrays

    # Predict on the test data.
    A = np.array(list(testData))  # test inputs
    print("A:", A)
    classes = model.predict(A)
    return classes
Example #3
    def create(self):
        language_model = Sequential()
        self.textual_embedding(language_model, mask_zero=True)
        self.language_model = language_model

        visual_model_factory = \
                select_sequential_visual_model[self._config.trainable_perception_name](
                    self._config.visual_dim)
        visual_model = visual_model_factory.create()
        visual_dimensionality = visual_model_factory.get_dimensionality()
        self.visual_embedding(visual_model, visual_dimensionality)
        #visual_model = Sequential()
        #self.visual_embedding(visual_model)
        # the below should contain all zeros
        zero_model = Sequential()
        zero_model.add(RepeatVector(self._config.max_input_time_steps - 1))
        visual_model.add(Merge([visual_model, zero_model], mode='concat'))
        self.visual_model = visual_model

        if self._config.multimodal_merge_mode == 'dot':
            self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,),(1,)]))
        else:
            self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

        self.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=self._config.go_backwards))
        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
Example #4
def build_partial_cnn1(img_rows, img_cols):
    model = Sequential()
    #model.add(Convolution2D(nb_filter=100, nb_row=5, nb_col=5,
    model.add(Convolution2D(nb_filter=10, nb_row=2, nb_col=2,
                            init='glorot_uniform', activation='linear',
                            border_mode='valid',
                            input_shape=(1, img_rows, img_cols)))
    model.add(Activation('relu'))

    #model.add(MaxPooling2D(pool_size=(2, 2)))

    #model.add(Convolution2D(nb_filter=100, nb_row=5, nb_col=5,
    '''model.add(Convolution2D(nb_filter=512, nb_row=5, nb_col=5,
                            init='glorot_uniform', activation='linear',
                            border_mode='valid'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    #model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))'''

    return model
Example #5
    def create(self):
        language_model = Sequential()
        self.textual_embedding(language_model, mask_zero=True)
        self.stacked_RNN(language_model)
        language_model.add(self._config.recurrent_encoder(
            self._config.hidden_state_dim, 
            return_sequences=False,
            go_backwards=self._config.go_backwards))
        self.language_model = language_model

        visual_model_factory = \
                select_sequential_visual_model[self._config.trainable_perception_name](
                    self._config.visual_dim)
        visual_model = visual_model_factory.create()
        visual_dimensionality = visual_model_factory.get_dimensionality()
        self.visual_embedding(visual_model, visual_dimensionality)
        #visual_model = Sequential()
        #self.visual_embedding(visual_model)
        self.visual_model = visual_model

        if self._config.multimodal_merge_mode == 'dot':
            self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,),(1,)]))
        else:
            self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

        self.add(Dropout(0.5))
        self.add(Dense(self._config.output_dim))

        self.add(RepeatVector(self._config.max_output_time_steps))
        self.add(self._config.recurrent_decoder(
                self._config.hidden_state_dim, return_sequences=True))
        self.add(Dropout(0.5))
        self.add(TimeDistributedDense(self._config.output_dim))
        self.add(Activation('softmax'))
Example #6
def test_TensorBoard_with_ReduceLROnPlateau(tmpdir):
    import shutil
    np.random.seed(np.random.randint(1, 1e7))
    filepath = str(tmpdir / 'logs')

    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)

    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss='binary_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    cbks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.5,
            patience=4,
            verbose=1),
        callbacks.TensorBoard(
            log_dir=filepath)]

    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=2)

    assert os.path.isdir(filepath)
    shutil.rmtree(filepath)
    assert not tmpdir.listdir()
Example #7
    def create(self):
        language_model = Sequential()
        self.textual_embedding(language_model, mask_zero=True)
        self.temporal_pooling(language_model)
        language_model.add(DropMask())
        #language_model.add(BatchNormalization(mode=1))
        self.language_model = language_model

        visual_model_factory = \
                select_sequential_visual_model[self._config.trainable_perception_name](
                    self._config.visual_dim)
        visual_model = visual_model_factory.create()
        visual_dimensionality = visual_model_factory.get_dimensionality()
        self.visual_embedding(visual_model, visual_dimensionality)
        #visual_model.add(BatchNormalization(mode=1))
        self.visual_model = visual_model
        
        if self._config.multimodal_merge_mode == 'dot':
            self.add(Merge([language_model, visual_model], mode='dot', dot_axes=[(1,),(1,)]))
        else:
            self.add(Merge([language_model, visual_model], mode=self._config.multimodal_merge_mode))

        self.deep_mlp()
        self.add(Dense(self._config.output_dim))
        self.add(Activation('softmax'))
Example #8
def test_EarlyStopping():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    mode = 'max'
    monitor = 'val_acc'
    patience = 0
    cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)

    mode = 'auto'
    monitor = 'val_acc'
    patience = 2
    cbks = [callbacks.EarlyStopping(patience=patience, monitor=monitor, mode=mode)]
    history = model.fit(X_train, y_train, batch_size=batch_size,
                        validation_data=(X_test, y_test), callbacks=cbks, epochs=20)
Example #9
def test_LambdaCallback():
    np.random.seed(1337)
    (X_train, y_train), (X_test, y_test) = get_test_data(num_train=train_samples,
                                                         num_test=test_samples,
                                                         input_shape=(input_dim,),
                                                         classification=True,
                                                         num_classes=num_class)
    y_test = np_utils.to_categorical(y_test)
    y_train = np_utils.to_categorical(y_train)
    model = Sequential()
    model.add(Dense(num_hidden, input_dim=input_dim, activation='relu'))
    model.add(Dense(num_class, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='sgd',
                  metrics=['accuracy'])

    # Start an arbitrary process that should run during model training and be terminated after training has completed.
    def f():
        while True:
            pass

    p = multiprocessing.Process(target=f)
    p.start()
    cleanup_callback = callbacks.LambdaCallback(on_train_end=lambda logs: p.terminate())

    cbks = [cleanup_callback]
    model.fit(X_train, y_train, batch_size=batch_size,
              validation_data=(X_test, y_test), callbacks=cbks, epochs=5)
    p.join()
    assert not p.is_alive()
Example #10
def test_merge_overlap():
    left = Sequential()
    left.add(Dense(nb_hidden, input_shape=(input_dim,)))
    left.add(Activation('relu'))

    model = Sequential()
    model.add(Merge([left, left], mode='sum'))
    model.add(Dense(nb_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=1, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=2, validation_data=(X_test, y_test))
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=True, verbose=2, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, show_accuracy=False, verbose=1, validation_split=0.1)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=0)
    model.fit(X_train, y_train, batch_size=batch_size, nb_epoch=nb_epoch, verbose=1, shuffle=False)

    model.train_on_batch(X_train[:32], y_train[:32])

    loss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss < 0.7)
    model.predict(X_test, verbose=0)
    model.predict_classes(X_test, verbose=0)
    model.predict_proba(X_test, verbose=0)
    model.get_config(verbose=0)

    fname = 'test_merge_overlap_temp.h5'
    model.save_weights(fname, overwrite=True)
    model.load_weights(fname)
    os.remove(fname)

    nloss = model.evaluate(X_train, y_train, verbose=0)
    assert(loss == nloss)
Example #11
def define_model(lr, momentum):
    # CONFIG
    model = Sequential()

    # Create Layers
    # CONVNET
    layers = []
    #layers.append(GaussianNoise(0.02))
    layers.append(Convolution2D(8, 9, 9, activation = "relu", input_shape=(1,100,100)))
    layers.append(MaxPooling2D(pool_size=(2,2)))
    layers.append(Convolution2D(16, 7, 7, activation = "relu"))
    layers.append(MaxPooling2D(pool_size=(2,2)))
    layers.append(Convolution2D(32, 5, 5, activation = "relu"))
    layers.append(MaxPooling2D(pool_size=(2,2)))
    layers.append(Convolution2D(64, 3, 3, activation = "relu"))
    layers.append(MaxPooling2D(pool_size=(2,2)))
    layers.append(Convolution2D(250, 3, 3, activation= "relu"))
    # MLP
    layers.append(Flatten())
    layers.append(Dense(125, activation="relu"))
    layers.append(Dense(2, activation="softmax"))

    # Adding Layers
    for layer in layers:
        model.add(layer)

    # COMPILE (learning rate, momentum, objective...)
    sgd = SGD(lr=lr, momentum=momentum)

    model.compile(loss="categorical_crossentropy", optimizer=sgd)

    return model
Example #12
def test_sequential_model_saving():
    model = Sequential()
    model.add(Dense(2, input_dim=3))
    model.add(Dense(3))
    model.compile(loss='mse', optimizer='rmsprop', metrics=['acc'])

    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)

    out = model.predict(x)
    fname = 'tmp_' + str(np.random.randint(10000)) + '.h5'
    save_model(model, fname)

    new_model = load_model(fname)

    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test that new updates are the same with both models
    x = np.random.random((1, 3))
    y = np.random.random((1, 3))
    model.train_on_batch(x, y)
    new_model.train_on_batch(x, y)
    out = model.predict(x)
    out2 = new_model.predict(x)
    assert_allclose(out, out2, atol=1e-05)

    # test load_weights on model file
    model.load_weights(fname)
    os.remove(fname)
Example #13
def train_rnn(character_corpus, seq_len, train_test_split_ratio):
    model = Sequential()
    model.add(Embedding(character_corpus.char_num(), 256))
    model.add(LSTM(256, 5120, activation='sigmoid', inner_activation='hard_sigmoid', return_sequences=True))
    model.add(Dropout(0.5))
    model.add(TimeDistributedDense(5120, character_corpus.char_num()))
    model.add(Activation('time_distributed_softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    seq_X, seq_Y = character_corpus.make_sequences(seq_len)

    print "Sequences are made"

    train_seq_num = int(train_test_split_ratio * seq_X.shape[0])
    X_train = seq_X[:train_seq_num]
    Y_train = to_time_distributed_categorical(seq_Y[:train_seq_num], character_corpus.char_num())

    X_test = seq_X[train_seq_num:]
    Y_test = to_time_distributed_categorical(seq_Y[train_seq_num:], character_corpus.char_num())

    print "Begin train model"
    checkpointer = ModelCheckpoint(filepath="model.step", verbose=1, save_best_only=True)
    model.fit(X_train, Y_train, batch_size=256, nb_epoch=100, verbose=2, validation_data=(X_test, Y_test), callbacks=[checkpointer])

    print "Model is trained"

    score = model.evaluate(X_test, Y_test, batch_size=512)

    print "valid score = ", score

    return model
Example #14
def baseline_model():
  model = Sequential()
  model.add(Dense(4, input_dim=4, init='normal', activation='relu'))
  model.add(Dense(3, init='normal', activation='sigmoid'))
  model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
  
  return model
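# A hypothetical usage sketch (not part of the original snippet): wrapping
# baseline_model for scikit-learn cross-validation, assuming the legacy
# keras.wrappers.scikit_learn API and iris-style arrays X of shape (n, 4)
# and one-hot Y of shape (n, 3) defined elsewhere.
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import KFold, cross_val_score

estimator = KerasClassifier(build_fn=baseline_model, nb_epoch=200, batch_size=5, verbose=0)
kfold = KFold(n_splits=10, shuffle=True)
results = cross_val_score(estimator, X, Y, cv=kfold)  # X, Y assumed to exist
print("Accuracy: %.2f%% (%.2f%%)" % (results.mean() * 100, results.std() * 100))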
Example #15
def get_ts_model(trainX, trainY, look_back=1, nb_epochs=100):
    model = Sequential()
    # takes an input array of shape (*, 1); e.g. a (2, 1) array looks like [[23], [43]]
    model.add(LSTM(20, input_shape=(None , look_back) ))
    #model.add(LSTM(20,  batch_input_shape=(None, None, look_back), return_sequences= True ))
    #print(model.summary)
    model.add(Dense(1))
    model.add(Dense(1))
    model.add(Dense(1))
    model.add(Dense(1))
    model.add(Dense(1))
    model.add(Dense(1))
    #model.add(LSTM(1, return_sequences= True))
    #model.add(LSTM(1))
    # outputs array of shape (*,1)
    #model.add(Dense(1))
    #model.compile(loss='mean_absolute_error', optimizer='SGD')  # mape
    #model.compile(loss='poisson', optimizer='adam')  # mape
    model.compile(loss='mean_squared_error', optimizer='adam')  # values closer to zero are better.
    #model.compile(loss='mean_squared_error', optimizer='adagrad')
    # MSE is used for comparing two or more statistical methods. It weighs large
    # errors more heavily than small ones; where this is undesired, mean absolute error is used.
    # REF: available loss functions: https://keras.io/objectives
    print('Start : Training model')
    # default  configuration
    model.fit(trainX, trainY, nb_epoch=nb_epochs, batch_size=1, verbose=2)
    #model.fit(trainX, trainY, nb_epoch=100, batch_size=1, verbose=2)
    print('Ends : Training Model')
    return model
Example #16
def test_sequential_fit_generator():
    (x_train, y_train), (x_test, y_test) = _get_test_data()

    def data_generator(train):
        if train:
            max_batch_index = len(x_train) // batch_size
        else:
            max_batch_index = len(x_test) // batch_size
        i = 0
        while 1:
            if train:
                yield (x_train[i * batch_size: (i + 1) * batch_size], y_train[i * batch_size: (i + 1) * batch_size])
            else:
                yield (x_test[i * batch_size: (i + 1) * batch_size], y_test[i * batch_size: (i + 1) * batch_size])
            i += 1
            i = i % max_batch_index

    model = Sequential()
    model.add(Dense(num_hidden, input_shape=(input_dim,)))
    model.add(Activation('relu'))
    model.add(Dense(num_class))
    model.pop()
    model.add(Dense(num_class))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')

    model.fit_generator(data_generator(True), 5, epochs)
    model.fit_generator(data_generator(True), 5, epochs,
                        validation_data=(x_test, y_test))
    model.fit_generator(data_generator(True), 5, epochs,
                        validation_data=data_generator(False),
                        validation_steps=3)
    model.fit_generator(data_generator(True), 5, epochs, max_queue_size=2)
    model.evaluate(x_train, y_train)
Example #17
def getVggModel():
    """Pretrained VGG16 model with fine-tunable last two layers"""
    input_image = Input(shape = (160,320,3))
    
    model = Sequential()
    model.add(Lambda(lambda x: x/255.0 -0.5,input_shape=(160,320,3)))
    model.add(Cropping2D(cropping=((70,25),(0,0))))
    
    base_model = VGG16(input_tensor=input_image, include_top=False)
        
    for layer in base_model.layers[:-3]:
        layer.trainable = False

    W_regularizer = l2(0.01)

    x = base_model.get_layer("block5_conv3").output
    x = AveragePooling2D((2, 2))(x)
    x = Dropout(0.5)(x)
    x = BatchNormalization()(x)
    x = Dropout(0.5)(x)
    x = Flatten()(x)
    x = Dense(4096, activation="elu", W_regularizer=l2(0.01))(x)
    x = Dropout(0.5)(x)
    x = Dense(2048, activation="elu", W_regularizer=l2(0.01))(x)
    x = Dense(2048, activation="elu", W_regularizer=l2(0.01))(x)
    x = Dense(1, activation="linear")(x)
    return Model(input=input_image, output=x)
Example #18
    def make_fc_model(self):
        '''
        creates a fully convolutional model from self.model
        '''
        # get index of first dense layer in model
        behead_ix = self._get_behead_index(self.model_layer_names)
        model_layers = self.model.layers[:behead_ix]
        # shape of image entering FC layers
        inp_shape = self.model.layers[behead_ix - 1].get_output_shape_at(-1)

        # replace dense layers with convolutions
        model = Sequential()
        model_layers += [Convolution2D(2048, 1, 1)]
        model_layers += [Activation('relu')]
        model_layers += [Convolution2D(2048, 1, 1)]
        model_layers += [Activation('relu')]
        model_layers += [Convolution2D(self.nb_classes, inp_shape[-1], inp_shape[-1])]
        # must be same shape as target vector (None, num_classes, 1)
        model_layers += [Reshape((self.nb_classes, 1))]
        model_layers += [Activation('softmax')]

        print 'Compiling Fully Convolutional Model...'
        for process in model_layers:
            model.add(process)
        sgd = SGD(lr=self.lr_1, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=sgd)
        print 'Done.'
        return model
Example #19
    def __init__(self, restore=None, session=None, Dropout=Dropout, num_labels=10):
        self.num_channels = 1
        self.image_size = 28
        self.num_labels = num_labels

        model = Sequential()

        nb_filters = 64
        layers = [Conv2D(nb_filters, (5, 5), strides=(2, 2), padding="same",
                         input_shape=(28, 28, 1)),
                  Activation('relu'),
                  Conv2D(nb_filters, (3, 3), strides=(2, 2), padding="valid"),
                  Activation('relu'),
                  Conv2D(nb_filters, (3, 3), strides=(1, 1), padding="valid"),
                  Activation('relu'),
                  Flatten(),
                  Dense(32),
                  Activation('relu'),
                  Dropout(.5),
                  Dense(num_labels)]

        for layer in layers:
            model.add(layer)

        if restore is not None:
            model.load_weights(restore)
        
        self.model = model
Example #20
 def make_model(self):
     model = Sequential()
     model.add(Dense(units=200,input_shape=(6400,), activation="relu"))
     model.add(Dense(6, activation="softmax"))
     model.compile(loss='sparse_categorical_crossentropy', 
                    optimizer=RMSprop(lr=0.01))
     self.model = model
Example #21
class MotifScoreRNN(Model):

    def __init__(self, input_shape, gru_size=10, tdd_size=4):
        self.model = Sequential()
        self.model.add(GRU(gru_size, return_sequences=True,
                           input_shape=input_shape))
        if tdd_size is not None:
            self.model.add(TimeDistributedDense(tdd_size))
        self.model.add(Flatten())
        self.model.add(Dense(1))
        self.model.add(Activation('sigmoid'))
        print('Compiling model...')
        self.model.compile(optimizer='adam', loss='binary_crossentropy')

    def train(self, X, y, validation_data):
        print('Training model...')
        multitask = y.shape[1] > 1
        if not multitask:
            num_positives = y.sum()
            num_sequences = len(y)
            num_negatives = num_sequences - num_positives
        self.model.fit(
            X, y, batch_size=128, nb_epoch=100,
            validation_data=validation_data,
            class_weight={True: num_sequences / num_positives,
                          False: num_sequences / num_negatives}
            if not multitask else None,
            callbacks=[EarlyStopping(monitor='val_loss', patience=10)],
            verbose=True)

    def predict(self, X):
        return self.model.predict(X, batch_size=128, verbose=False)
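# A hypothetical usage sketch (not from the original source): scoring 100-step,
# 4-channel inputs such as one-hot DNA; the random arrays below are illustrative.
import numpy as np
X = np.random.random((32, 100, 4))
y = np.random.randint(0, 2, (32, 1))
rnn = MotifScoreRNN(input_shape=(100, 4), gru_size=10, tdd_size=4)
rnn.train(X, y, validation_data=(X, y))
print(rnn.predict(X).shape)  # -> (32, 1)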
Example #22
def model(X_train, X_test, y_train, y_test, max_features, maxlen):
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(LSTM(128))
    model.add(Dropout({{uniform(0, 1)}}))
    model.add(Dense(1))
    model.add(Activation('sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    early_stopping = EarlyStopping(monitor='val_loss', patience=4)
    checkpointer = ModelCheckpoint(filepath='keras_weights.hdf5',
                                   verbose=1,
                                   save_best_only=True)

    model.fit(X_train, y_train,
              batch_size={{choice([32, 64, 128])}},
              nb_epoch=1,
              validation_split=0.08,
              callbacks=[early_stopping, checkpointer])

    score, acc = model.evaluate(X_test, y_test, verbose=0)

    print('Test accuracy:', acc)
    return {'loss': -acc, 'status': STATUS_OK, 'model': model}
Example #23
    def test_img_clf(self):
        print('image classification data:')
        (X_train, y_train), (X_test, y_test) = get_test_data(nb_train=1000,
                                                             nb_test=200,
                                                             input_shape=(3, 8, 8),
                                                             classification=True,
                                                             nb_class=2)
        print('X_train:', X_train.shape)
        print('X_test:', X_test.shape)
        print('y_train:', y_train.shape)
        print('y_test:', y_test.shape)

        y_train = to_categorical(y_train)
        y_test = to_categorical(y_test)

        model = Sequential()
        model.add(Convolution2D(8, 8, 8, input_shape=(3, 8, 8)))
        model.add(Activation('sigmoid'))
        model.add(Flatten())
        model.add(Dense(y_test.shape[-1]))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy', optimizer='sgd')
        history = model.fit(X_train, y_train, nb_epoch=12, batch_size=16,
                            validation_data=(X_test, y_test),
                            show_accuracy=True, verbose=0)
        print(history.history['val_acc'][-1])
        self.assertTrue(history.history['val_acc'][-1] > 0.9)
Example #24
def test_multiprocessing_predict_error():

    batch_size = 32
    good_batches = 5

    def myGenerator():
        """Raises an exception after a few good batches"""
        for i in range(good_batches):
            yield (np.random.randint(batch_size, 256, (500, 2)),
                   np.random.randint(batch_size, 2, 500))
        raise RuntimeError

    model = Sequential()
    model.add(Dense(1, input_shape=(2, )))
    model.compile(loss='mse', optimizer='adadelta')

    samples = batch_size * (good_batches + 1)

    with pytest.raises(Exception):
        model.predict_generator(
            myGenerator(), samples, 1,
            nb_worker=4, pickle_safe=True,
        )

    with pytest.raises(Exception):
        model.predict_generator(
            myGenerator(), samples, 1,
            pickle_safe=False,
        )
Example #25
    def test_simple_keras_udf(self):
        """ Simple Keras sequential model """
        # Notice that the input layer for a image UDF model
        # must be of shape (width, height, numChannels)
        # The leading batch size is taken care of by Keras
        with IsolatedSession(using_keras=True) as issn:
            model = Sequential()
            model.add(Flatten(input_shape=(640,480,3)))
            model.add(Dense(units=64))
            model.add(Activation('relu'))
            model.add(Dense(units=10))
            model.add(Activation('softmax'))
            # Initialize the variables
            init_op = tf.global_variables_initializer()
            issn.run(init_op)
            makeGraphUDF(issn.graph,
                         'my_keras_model_udf',
                         model.outputs,
                         {tfx.op_name(issn.graph, model.inputs[0]): 'image_col'})
            # Run the training procedure
            # Export the graph in this IsolatedSession as a GraphFunction
            # gfn = issn.asGraphFunction(model.inputs, model.outputs)
            fh_name = "test_keras_simple_sequential_model"
            registerKerasImageUDF(fh_name, model)

        self._assert_function_exists(fh_name)
Example #26
def ae(data, feature_dim, train, test, learning_rate, lr_decay, reg_fn, l, momentum, evaluation):
    ''' Autoencoder '''
    
    batch_size=len(train)
    data_dim = data.shape[1]
    
    model = single_layer_autoencoder(data_dim, feature_dim, reg_fn(l), learning_rate, lr_decay, momentum)
    model.fit(data[train], data[train], batch_size=batch_size, nb_epoch=nb_epoch, verbose=verbose)
    
    output = model.predict(data)
    
    # Reconstruction
    model_rec = Sequential()
    model_rec.add(Dense(data_dim, input_dim=feature_dim, activation=activation, weights=model.layers[0].decoder.get_weights()[0:2]))
    model_rec.layers[0].get_input(False) # Get input from testing data
    model_rec.compile(loss='mse', optimizer='sgd')
    
    if evaluation:
        data_rec = model_rec.predict(output[test])
        loss = mean_squared_error(data[test], data_rec)
        return loss
    
    name = 'Autoencoder'
    
    return output, name, model_rec.predict
Example #27
def test_multiprocessing_predicting():

    reached_end = False

    arr_data = np.random.randint(0, 256, (500, 2))

    def myGenerator():

        batch_size = 32
        n_samples = 500

        while True:
            batch_index = np.random.randint(0, n_samples - batch_size)
            start = batch_index
            end = start + batch_size
            X = arr_data[start: end]
            yield X

    # Build a NN
    model = Sequential()
    model.add(Dense(1, input_shape=(2, )))
    model.compile(loss='mse', optimizer='adadelta')
    model.predict_generator(myGenerator(),
                            val_samples=320,
                            max_q_size=10,
                            nb_worker=2,
                            pickle_safe=True)
    model.predict_generator(myGenerator(),
                            val_samples=320,
                            max_q_size=10,
                            pickle_safe=False)
    reached_end = True

    assert reached_end
Example #28
 def __init__(self):
     model = Sequential()
     model.add(Embedding(115227, 50, input_length=75, weights=pre_weights))
     model.compile(loss=MCE, optimizer="adadelta")
     print "Build Network Completed..."
     self.model = model
     self.vocab = {"get_index":{}, "get_word":[]}
Example #29
class MLP(BaseEstimator):
    def __init__(self, verbose=0, model=None, final_activation='sigmoid'):
        self.verbose = verbose
        self.model = model
        self.final_activation = final_activation

    def fit(self, X, y):
        if not self.model:
            self.model = Sequential()
            self.model.add(Dense(1000, input_dim=X.shape[1]))
            self.model.add(Activation('relu'))
            self.model.add(Dropout(0.5))
            self.model.add(Dense(y.shape[1]))
            self.model.add(Activation(self.final_activation))
            self.model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=0.01))
        self.model.fit_generator(generator=_batch_generator(X, y, 256, True),
                                 samples_per_epoch=X.shape[0], nb_epoch=20, verbose=self.verbose)

    def predict(self, X):
        pred = self.predict_proba(X)
        return sparse.csr_matrix(pred > 0.2)

    def predict_proba(self, X):
        pred = self.model.predict_generator(generator=_batch_generatorp(X, 512), val_samples=X.shape[0])
        return pred
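# A hypothetical usage sketch (not part of the original class): the estimator
# expects a dense feature matrix and a one-hot label matrix; the names and
# shapes below are illustrative placeholders.
# clf = MLP(verbose=1)
# clf.fit(X_train, y_train)      # X_train: (n, d), y_train: (n, n_labels)
# y_pred = clf.predict(X_test)   # sparse boolean matrix, thresholded at 0.2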
Example #30
def Simple(layers, func, ipt):
    model = Sequential()
    #model.add(BatchNormalization(input_shape = [ipt]))
    model.add(Dense(layers[0], input_dim = ipt, activation = func[0]))
    for i in range(1, len(layers)):
        model.add(Dense(layers[i], activation = func[i]))
    return model
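# Hypothetical usage of Simple (not from the original snippet): a 10-input MLP
# with one hidden layer; the layer sizes and activations are illustrative only.
net = Simple(layers=[64, 1], func=['relu', 'sigmoid'], ipt=10)
net.compile(loss='binary_crossentropy', optimizer='adam')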
Example #31
## Reshaping ##
X_train = np.reshape(X_train, (1140, 1, 1))

###############################
## Part 2 - Building the RNN ##
###############################

## Importing the Keras libraries and packages ##
from keras.models import Sequential     # Initialises the RNN as a sequence of layers
from keras.layers import Dense          # Creates the output layer of the RNN
from keras.layers import LSTM           # Long Short-Term Memory, a type of RNN

## Initialising the RNN ##
regressor = Sequential()                # Create object for RNN model in a sequence of layers
## Adding the input layer and the LSTM layer ##
regressor.add(LSTM(units = 4, activation = 'sigmoid', input_shape = (None, 1)))
## Adding the output layer ##
regressor.add(Dense(units = 1))     # All default values & 1 input
## Compiling the RNN ##
regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
## Fitting the RNN to the Training set ##
regressor.fit(X_train, y_train, batch_size = 32, epochs = 200)

#################################################################
## Part 3 - Making the predictions and visualising the results ##
#################################################################

## Getting the real prices of the test set ##
test_set = pd.read_csv('BTC[10_2018]_test.csv')                               # Load the real price test dataset
real_stock_price = test_set.iloc[:,1:2].values                          # Get all the rows in column 1
Example #32
vocab_size = len(tokenizer.word_index) + 1

# load embedding from file
raw_embedding = load_embedding('glove.6B.100d.txt')
# get vectors in the right order
embedding_vectors = get_weight_matrix(raw_embedding, tokenizer.word_index)
# create the embedding layer
embedding_layer = Embedding(vocab_size,
                            100,
                            weights=[embedding_vectors],
                            input_length=max_length,
                            trainable=True)

# define model
model = Sequential()
model.add(embedding_layer)
model.add(Conv1D(filters=128, kernel_size=5, activation='relu'))
model.add(MaxPooling1D(pool_size=2))
model.add(Flatten())
model.add(Dense(10, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
print(model.summary())

# compile network
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
# fit network
model.fit(Xtrain, ytrain, epochs=10, verbose=2)

# evaluate
Example #33
# print('P.shape:', P.shape)
# P_train = P.reshape(X_train.shape[0], -1)
# print('P.shape:', P.shape)
# print('X_train.shape:', X_train.shape)
# print('Y_train.shape:', Y_train.shape)

# model.fit(X_train, P_train, validation_split=0.1, batch_size=batch_size, shuffle=False, nb_epoch=200, callbacks=[get_tb_cb('tsne-ae')])

# plot_model(model.predict(X_train), Y_train)
# plt.savefig('tsne-ae-train.png')
# plot_model(model.predict(X_test), Y_test)
# plt.savefig('tsne-ae-test.png')

encoder = Sequential()
encoder.add(Dense(500, activation='relu', input_shape=(X_train.shape[1], )))
encoder.add(Dense(500, activation='relu'))
encoder.add(Dense(2000, activation='relu'))
encoder.add(Dense(2))
encoder.compile(loss='mse', optimizer='rmsprop')

# plot(encoder, to_file='encoder.png')

encoder.fit(X_train,
            X_2d,
            nb_epoch=200,
            verbose=2,
            callbacks=[get_tb_cb('encoder')])

plot_model(encoder.predict(X_train), Y_train)
plt.savefig('encoder-train.png')
Example #34
def create_model(keep_prob=0.8):
    model = Sequential()

    # NVIDIA's model
    model.add(
        Conv2D(24,
               kernel_size=(5, 5),
               strides=(2, 2),
               activation='relu',
               input_shape=INPUT_SHAPE))
    model.add(Conv2D(36, kernel_size=(5, 5), strides=(2, 2),
                     activation='relu'))
    model.add(Conv2D(48, kernel_size=(5, 5), strides=(2, 2),
                     activation='relu'))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
    model.add(Flatten())
    model.add(Dense(1164, activation='relu'))
    drop_out = 1 - keep_prob
    model.add(Dropout(drop_out))
    model.add(Dense(100, activation='relu'))
    model.add(Dropout(drop_out))
    model.add(Dense(50, activation='relu'))
    model.add(Dropout(drop_out))
    model.add(Dense(10, activation='relu'))
    model.add(Dropout(drop_out))
    model.add(Dense(OUT_SHAPE, activation='softsign'))
    return model
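# Hypothetical usage, assuming the module-level constants follow NVIDIA's
# PilotNet setup (66x200 RGB input, a single steering output); both values
# are assumptions, not taken from the original snippet.
INPUT_SHAPE = (66, 200, 3)
OUT_SHAPE = 1
model = create_model(keep_prob=0.8)
model.compile(loss='mse', optimizer='adam')
model.summary()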
Example #35
    def genModel_CV_L6s2(self):
        """
        # GENERATOR-1
        # 6 Layers
        # Upsampling Factor 2 per layer (except first and last)
        # Final Layer Shape: img_rows X 1 X 1
        # Activation: 'relu'
        # bias_initializer = Constant value of 0.1

        :return: Returns a Keras model with 6 layers: 1 fully connected and 5 convolutional
        """
        'Convolutional Layer 1'
        'In: 50 X 1, depth =1'
        'Out: 15 X 400, depth = 25'
        generator = Sequential()
        generator.add(
            Dense(15 * self.genFilters,
                  input_dim=self.input_dim,
                  activation='relu',
                  bias_initializer=Constant(0.1)))
        generator.add(BatchNormalization(momentum=0.9))
        generator.add(Reshape((15, self.genFilters)))
        generator.add(Dropout(self.dropout))

        'LAYER -2'
        'In: 15 X 1 X1, depth = 400'
        'Out: 30 X 1 X 1, depth = 200'
        generator.add(UpSampling1D(size=2))
        generator.add(
            Conv1D(int(self.genFilters / 2),
                   self.filterSize,
                   activation='relu',
                   padding='same',
                   bias_initializer=Constant(0.1)))
        generator.add(BatchNormalization(momentum=0.9))
        generator.add(Dropout(self.dropout))

        'LAYER -3'
        'In: 30 X 1 X1, depth = 400'
        'Out: 60 X 1 X 1, depth = 200'
        generator.add(UpSampling1D(size=2))
        generator.add(
            Conv1D(int(self.genFilters / 4),
                   self.filterSize,
                   activation='relu',
                   padding='same',
                   bias_initializer=Constant(0.1)))
        generator.add(BatchNormalization(momentum=0.9))
        generator.add(Dropout(self.dropout))

        'LAYER -4'
        'In: 60 X 1 X1, depth = 400'
        'Out: 120 X 1 X 1, depth = 200'
        generator.add(UpSampling1D(size=2))
        generator.add(
            Conv1D(int(self.genFilters / 8),
                   self.filterSize,
                   activation='relu',
                   padding='same',
                   bias_initializer=Constant(0.1)))
        generator.add(BatchNormalization(momentum=0.9))
        generator.add(Dropout(self.dropout))

        'LAYER -5'
        'In: 120 X 1 X1, depth = 400'
        'Out: 240 X 1 X 1, depth = 200'
        generator.add(UpSampling1D(size=2))
        generator.add(
            Conv1D(int(self.genFilters / 16),
                   self.filterSize,
                   activation='relu',
                   padding='same',
                   bias_initializer=Constant(0.1)))
        generator.add(BatchNormalization(momentum=0.9))
        generator.add(Dropout(self.dropout))

        'OUTPUT LAYER'
        'In: 240 X 1 X 1, depth=25'
        'Out: 240 X 1 X 1, depth =1'
        generator.add(
            Conv1D(1,
                   self.filterSize,
                   padding='same',
                   bias_initializer=Constant(0.1)))
        # generator.add(Flatten())
        generator.add(Activation('sigmoid'))

        return generator
Example #36
    def disModel_CV_L6s2_rep(self, initModel=''):
        """
        This function creates a discriminator model which creates the final representation

        :param initModel: The model from which the weights are to be extracted if any
        :return:
        """

        if not initModel:
            model_l2 = Sequential()
            model_l2.add(
                Conv1D(filters=20,
                       kernel_size=11,
                       strides=2,
                       input_shape=(240, 1),
                       padding='same',
                       activation='relu'))
            model_l2.add(
                Conv1D(filters=40,
                       kernel_size=11,
                       strides=2,
                       padding='same',
                       activation='relu'))
            model_l2.add(
                Conv1D(filters=80,
                       kernel_size=11,
                       strides=2,
                       padding='same',
                       activation='relu'))
            model_l2.add(
                Conv1D(filters=160,
                       kernel_size=11,
                       strides=2,
                       padding='same',
                       activation='relu'))
            model_l2.add(Flatten())
            model_l2.compile(loss='binary_crossentropy', optimizer=Adam())
        else:
            model_l2 = Sequential()
            model_l2.add(
                Conv1D(filters=20,
                       kernel_size=11,
                       strides=2,
                       input_shape=(240, 1),
                       weights=initModel.layers[0].get_weights(),
                       padding='same',
                       activation='relu'))
            model_l2.add(
                Conv1D(filters=40,
                       kernel_size=11,
                       strides=2,
                       weights=initModel.layers[2].get_weights(),
                       padding='same',
                       activation='relu'))
            model_l2.add(
                Conv1D(filters=80,
                       kernel_size=11,
                       strides=2,
                       weights=initModel.layers[4].get_weights(),
                       padding='same',
                       activation='relu'))
            model_l2.add(
                Conv1D(filters=160,
                       kernel_size=11,
                       strides=2,
                       weights=initModel.layers[6].get_weights(),
                       padding='same',
                       activation='relu'))
            model_l2.add(Flatten())
            model_l2.compile(loss='binary_crossentropy', optimizer=Adam())

        return model_l2
Example #37
    def disModel_CV_L4s2(self):
        """
        # DISCRIMINATOR-2
        # 3 Layers
        # Downsampling Factor (Stride) 2 per layer (except last)
        # Output Size  = 1 X 1
        # Activation: 'relu'
        # bias_initializer = Constant value of 0.1

        :return:
        """

        discriminator = Sequential()
        'LAYER -1'
        'In: 240 X 1 X 1, depth =1'
        'Out: 120 X 1 X 1, depth = 25'
        discriminator.add(
            Conv1D(filters=self.disFilters,
                   kernel_size=self.filterSize,
                   strides=2,
                   input_shape=(self.img_rows, 1),
                   bias_initializer=Constant(0.1),
                   activation='relu',
                   padding='same'))
        discriminator.add(Dropout(self.dropout))

        'LAYER -2'
        'In: 120 X 1 X 1, depth =25'
        'Out: 60 X 1 X 1, depth = 50'
        discriminator.add(
            Conv1D(filters=self.disFilters * 2,
                   kernel_size=self.filterSize,
                   strides=2,
                   bias_initializer=Constant(0.1),
                   activation='relu',
                   padding='same'))
        discriminator.add(Dropout(self.dropout))

        'LAYER -3'
        'In: 60 X 1 X 1, depth =50'
        'Out: 30 X 1 X 1, depth = 75'
        discriminator.add(
            Conv1D(filters=self.disFilters * 3,
                   kernel_size=self.filterSize,
                   strides=2,
                   bias_initializer=Constant(0.1),
                   activation='relu',
                   padding='same'))
        discriminator.add(Dropout(self.dropout))

        'Output Layer'
        discriminator.add(Flatten())
        discriminator.add(Dense(1))
        discriminator.add(Activation('sigmoid'))

        return discriminator
Example #38
class_names = ['airplane','automobile','bird','cat','deer',
               'dog','frog','horse','ship','truck']
num_classes = 10


save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'keras_cifar10_trained_model.h5'

(x_train, y_train), (x_test, y_test) = cifar10.load_data()

# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=x_train.shape[1:], activation='relu'))
model.add(Conv2D(32, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.4))
model.add(BatchNormalization())

model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
#model.add(BatchNormalization())

model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
Example #39
train_labels=np_utils.to_categorical(train_labels,num_classes=10)

model = Sequential()
"""
        Dense(units=200,input_dim=784,activation='tanh',kernel_regularizer=l2(0.0003)),

        Dense(units=100,activation='tanh',kernel_regularizer=l2(0.0003)),

        Dense(units=10,activation='softmax',kernel_regularizer=l2(0.0003))
"""
       

model.add(Convolution2D(
        input_shape=(28,28,1),
        filters=32,
        kernel_size=5,
        strides=1,
        padding='same',
        activation='relu',
        ))

model.add(MaxPooling2D(
        pool_size=2,
        strides= 2,
        padding='same'
        ))
model.add(Convolution2D(64,5,strides=1,padding='same',activation='relu'))
model.add(MaxPooling2D(2,2,'same'))
model.add(Flatten())
model.add(Dense(1024,activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10,activation='softmax'))
Example #40
x_valid = pad_sequences(x_valid, maxlen=max_review_lenth, padding=pad_type, truncating=trunc_type)
# check your data preprocess
for x in x_train[0:10]:
    print(len(x))

no_to_text = ' '.join(index_word[id] for id in x_train[0])  # map each token id in the first review back to its word via index_word
print(no_to_text)

# Design NN architecture
# After this you can have keras, tensorflow, or pytorch framework for yor neural network


# KERAS
model = Sequential()
model.add(Embedding(n_unique_words, n_dim, input_length=max_review_lenth))
# The first argument (n_unique_words) is the number of distinct words in the training set,
# i.e. the size of the one-hot vocabulary the embedding maps from.
# The second argument (n_dim) is the size of the embedding vectors.
# The input_length argument determines the size of each input sequence.
# model.output_shape == (None, max_review_lenth, n_dim), where None is the batch dimension
model.add(Flatten())
model.add(Dense(n_dense, activation='relu'))
model.add(Dropout(dropout))
model.add(Dense(1, activation='sigmoid'))

print(model.summary())

# compiling model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
Example #41
def build_model():
    model = Sequential()
    model.add(
        Conv1D(filters=64,
               kernel_size=5,
               activation="relu",
               input_shape=(max_sequence_length, aa_size)))
    model.add(MaxPooling1D(pool_size=2, strides=2))
    model.add(Bidirectional(LSTM(128, return_sequences=True)))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dense(256, activation='relu'))
    model.add(Dense(1, activation='relu'))
    model.compile(loss="mean_absolute_error",
                  optimizer="adam",
                  metrics=["mean_absolute_percentage_error"])
    return model
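# Hypothetical globals for build_model above: a 500-residue window over the
# 20-letter amino-acid alphabet; both values are assumptions for this sketch.
max_sequence_length = 500
aa_size = 20
model = build_model()
model.summary()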
Example #42
    def __call__(self,
                 optimizer="rmsprop",
                 init="glorot_uniform",
                 activation="relu"):
        optimizer = BaseDeepLearning.get_optimizer(optimizer)

        model = Sequential()

        model.add(
            Convolution2D(32,
                          3,
                          3,
                          border_mode="same",
                          input_shape=self.input_shape))
        model.add(Activation("relu"))
        model.add(Convolution2D(32, 3, 3))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Convolution2D(64, 3, 3, border_mode="same"))
        model.add(Activation("relu"))
        model.add(Convolution2D(64, 3, 3))
        model.add(MaxPooling2D(pool_size=(2, 2)))

        model.add(Flatten())
        model.add(Dense(512))
        model.add(Activation("relu"))
        model.add(Dense(self.output_size))
        model.add(Activation("softmax"))

        model.compile(loss="categorical_crossentropy",
                      optimizer=optimizer,
                      metrics=["accuracy"])

        return model
Example #43
class CNN(LoadData):
    """
    __init__(self, parent_directory, true_class_name, output_folder, epoch_count)

    load_neural_network(): Loads a predefined network

    callback_set(output_folder): Sets call back. Using early termination on validation accuracy and saving model at each
    iteration in set output folder

    train_network(epoch_count, callbacks_list): Trains network for set epoch_count with all callbacks in callback_list

    evaluate_network(test_data, test_labels): Evaluates and prints accuracy of testing data to file
    """
    def __init__(self, parent_directory, true_class_name, output_folder,
                 epoch_count, augment_data_flag, augment_data_count):
        LoadData.__init__(self, parent_directory, true_class_name, epoch_count,
                          augment_data_flag, augment_data_count)
        self.model = Sequential()
        self.load_neural_network()
        callback_list = self.callback_set(output_folder)
        self.train_network(epoch_count, callback_list)

    def load_neural_network(self):
        # CNN Model construction
        self.model.add(Conv2D(64, (7, 7), input_shape=(128, 128, 1)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(128, (5, 5)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Conv2D(128, (3, 3)))
        self.model.add(Activation('relu'))
        self.model.add(MaxPooling2D(pool_size=(2, 2)))

        self.model.add(Flatten())
        self.model.add(Dense(70))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.55))
        self.model.add(Dense(25))
        self.model.add(Activation('relu'))
        self.model.add(Dropout(0.55))
        self.model.add(Dense(1))
        self.model.add(Activation('tanh'))

        adam = Adam()
        self.model.compile(loss='mse', optimizer=adam, metrics=['accuracy'])
        print(" CNN model compiled")

    @staticmethod
    def callback_set(output_folder):
        if not os.path.exists(output_folder):
            os.makedirs(output_folder)
        file_path = output_folder + "/weights - {val_acc:.2f}.hdf5"
        checkpoint = ModelCheckpoint(file_path,
                                     monitor='val_acc',
                                     verbose=1,
                                     save_best_only=False,
                                     save_weights_only=True,
                                     mode='auto',
                                     period=1)
        early_stop = EarlyStopping(monitor='val_acc',
                                   min_delta=0.01,
                                   patience=2,
                                   verbose=1,
                                   mode='auto')
        return [checkpoint, early_stop]

    def train_network(self, epoch_count, callbacks_list):
        for _ in range(0, epoch_count):
            train_data, train_labels, test_data, test_labels = super().dataset_generator()
            self.model.fit(train_data,
                           train_labels,
                           validation_split=0.2,
                           shuffle=True,
                           epochs=1,
                           callbacks=callbacks_list)
            self.evaluate_network(test_data, test_labels)

    def evaluate_network(self, test_data, test_labels):
        score = self.model.evaluate(test_data, test_labels)
        print('Accuracy :', score)
        try:
            with open("Accuracy_log.txt", mode="a") as file:
                file.write("\n" + super().t_class + str(score[1]))
        except PermissionError:
            print(
                " Accuracy log write failed ! Use a different path or change permissions"
            )
        finally:
            print(" Accuracy log updated !")
Example #44
			for image,measurement in zip(images,measurements):
				augmented_images.append(image)
				augmented_measurements.append(measurement)
				augmented_images.append(cv2.flip(image,1))
				augmented_measurements.append(measurement*-1.0)

			X_train = np.array(augmented_images)
			y_train = np.array(augmented_measurements)
			
			yield sklearn.utils.shuffle(X_train, y_train)

train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)

model = Sequential()
model.add(Lambda(lambda x: x / 127.5 - 1.0, 
		input_shape=(160,320,3)))
model.add(Cropping2D(cropping=((75,25),(0,0))))
model.add(Convolution2D(24,5,5,subsample=(2,2),activation="relu"))
model.add(Convolution2D(36,5,5,subsample=(2,2),activation="relu"))
model.add(Convolution2D(48,5,5,subsample=(2,2),activation="relu"))
model.add(Convolution2D(64,3,3,activation="relu"))
model.add(Convolution2D(64,2,2,activation="relu"))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(100))
model.add(Activation('relu'))
model.add(Dense(50))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('relu'))
model.add(Dense(3))
Example #45
def base_model():
    model = Sequential()
    model.add(Conv2D(32, (3, 3), padding='same',
                     input_shape=x_train.shape[1:]))
    model.add(Activation('relu'))
    model.add(Conv2D(32, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(num_classes))
    model.add(Activation('softmax'))

    sgd = SGD(lr=0.1, decay=1e-6, nesterov=True)

    # Train model

    model.compile(loss='categorical_crossentropy',
                  optimizer=sgd,
                  metrics=['accuracy'])
    return model
Example #46
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import SGD

X = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
Y = np.array([[0], [1], [1], [0]])

model = Sequential()

# input layer to hidden layer
model.add(Dense(input_dim=2, units=2))
model.add(Activation('sigmoid'))

# hidden layer to output layer
model.add(Dense(units=1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.1))

model.fit(X, Y, epochs=8000, batch_size=4)

classes = model.predict_classes(X, batch_size=4)
prob = model.predict_proba(X, batch_size=4)


print('classified:')
print(Y == classes)
print()

print('output probability:')
print(prob)
Example #47
def build_model(params):
    n_hidden_layers = int(np.round(params['n_hidden_layers']))
    n_neurons = int(np.round(params['n_neurons']))
    log_l1_weight_reg = params['log_l1_weight_reg']
    log_l2_weight_reg = params['log_l2_weight_reg']
    prob_drop_out = float(params['prob_drop_out'])
    log_l_rate = params['log_learning_rate']

    model = Sequential()
    model.add(
        Dense(n_neurons,
              input_shape=(784, ),
              W_regularizer=l1_l2(l1=np.exp(log_l1_weight_reg),
                                  l2=np.exp(log_l2_weight_reg))))
    model.add(Activation('relu'))
    model.add(Dropout(prob_drop_out))
    for i in range(n_hidden_layers - 1):
        model.add(
            Dense(n_neurons,
                  W_regularizer=l1_l2(l1=np.exp(log_l1_weight_reg),
                                      l2=np.exp(log_l2_weight_reg))))
        model.add(Activation('relu'))
        model.add(Dropout(prob_drop_out))
    n_classes = 10
    model.add(Dense(n_classes))
    model.add(Activation('softmax'))

    adam = Adam(lr=np.exp(log_l_rate), beta_1=0.9, beta_2=0.999, epsilon=1e-08)
    model.compile(loss='categorical_crossentropy', optimizer=adam)

    return model
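A hedged example of the params dictionary this builder expects (as used in hyperparameter search); note the regularization and learning-rate entries are in log space, matching the np.exp calls above:

params = {
    'n_hidden_layers': 2,
    'n_neurons': 256,
    'log_l1_weight_reg': np.log(1e-5),
    'log_l2_weight_reg': np.log(1e-5),
    'prob_drop_out': 0.25,
    'log_learning_rate': np.log(1e-3),
}
model = build_model(params)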
Example #48
input_shape = (img_rows, img_cols, 1)

# Normalize the data
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255

# Convert the labels to categorical (one-hot) form
Y_train = np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)

# Create a sequential model
model = Sequential()

model.add(
    Conv2D(75, kernel_size=(5, 5), activation='relu', input_shape=input_shape))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(100, (5, 5), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))

# Compile the model
model.compile(loss="categorical_crossentropy",
              optimizer="adam",
              metrics=["accuracy"])
Example #49
def makeCNN():
    model = Sequential()

    model.add(
        Convolution2D(32,
                      3,
                      3,
                      border_mode='same',
                      input_shape=(3, helpers.IMAGE_HEIGHT,
                                   helpers.IMAGE_WIDTH)))
    model.add(Activation('relu'))

    model.add(Convolution2D(32, 3, 3, border_mode='same'))
    model.add(Activation('relu'))

    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(
        Convolution2D(64,
                      3,
                      3,
                      border_mode='same',
                      input_shape=(3, helpers.IMAGE_HEIGHT,
                                   helpers.IMAGE_WIDTH)))
    model.add(Activation('relu'))

    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))

    model.add(Flatten())
    model.add(Dense(input_dim=(64 * 8 * 8), output_dim=512))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))

    model.add(Dense(input_dim=512, output_dim=2))
    model.add(Activation('softmax'))

    return model
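makeCNN returns an uncompiled model (the original also called itself recursively before returning, which is removed above); a hedged compile step in the same Keras 1 style:

model = makeCNN()
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])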
Example #50
    # Get the tokens from the 9 training folds.
    training_batches = LoopBatchesEndlessly\
        .for_training(filename, FOLD,
                      batch_size=BATCH_SIZE,
                      sentence_length=SENTENCE_LENGTH,
                      backwards=True)
    eval_batches = LoopBatchesEndlessly\
        .for_evaluation(filename, FOLD,
                        batch_size=BATCH_SIZE,
                        sentence_length=SENTENCE_LENGTH,
                        backwards=True)
    print("Will train on", training_batches.samples_per_epoch, "samples")

    # Defining the model:
    model = Sequential()
    model.add(LSTM(SIGMOID_ACTIVATIONS,
                   input_shape=(SENTENCE_LENGTH, len(vocabulary))))
    model.add(Dense(len(vocabulary)))
    model.add(Activation('softmax'))

    print("Compiling the model...")
    model.compile(loss='categorical_crossentropy',
                  optimizer=RMSprop(lr=0.001),
                  metrics=['categorical_accuracy'])
    print("Done")

    #model.load_weights("javascript.2.h5")

    print("Training for one epoch...")
    model.fit_generator(iter(training_batches),
                        steps_per_epoch=training_batches.samples_per_epoch // BATCH_SIZE,
                        validation_data=iter(eval_batches),
                        validation_steps=eval_batches.samples_per_epoch // BATCH_SIZE,
                        epochs=1)  # trailing arguments reconstructed; the snippet was truncated here
Example #51
validation_generator = validation_datagen.flow_from_directory(  # opening line reconstructed (validation_datagen assumed); the snippet was truncated above
    '../DataSetNew/Validation',
    target_size=(256, 256),
    color_mode="rgb",
    class_mode="categorical",
    batch_size=BATCHSIZE)

test_generator = test_datagen.flow_from_directory(
    '../DataSetNew/Test',
    target_size=(256, 256),
    color_mode="rgb",
    class_mode="categorical",
    batch_size=BATCHSIZE)

model = Sequential()

model.add(Conv2D(32, (3, 3), activation='relu', padding='same', name='conv1.1', input_shape=(256, 256, 3)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding='same', name='conv2.1'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu', padding='same', name='conv3.1'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(256, (3, 3), activation='relu', padding='same', name='conv4.1'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(512, (3, 3), activation='relu', padding='same', name='conv5.1'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(1024, (3, 3), activation='relu', padding='same', name='conv6.1'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu', name='fc1', ))
model.add(Dense(16, activation='softmax'))  # one output unit per label
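The excerpt stops before compilation; a minimal hedged continuation, assuming a train_generator built like the two generators above:

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit_generator(train_generator,
                    steps_per_epoch=train_generator.samples // BATCHSIZE,
                    validation_data=validation_generator,
                    validation_steps=validation_generator.samples // BATCHSIZE,
                    epochs=10)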
Example #52
x_train /= 255  # normalize to the [0, 1] range
x_test /= 255

x_train = x_train.reshape(x_train.shape[0], 48, 48, 1)
x_train = x_train.astype('float32')
x_test = x_test.reshape(x_test.shape[0], 48, 48, 1)
x_test = x_test.astype('float32')

print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')

# Build the convolutional neural network architecture
model = Sequential()

# 1st convolution layer
model.add(Conv2D(64, (5, 5), activation='relu', input_shape=(48,48,1)))
model.add(MaxPooling2D(pool_size=(5,5), strides=(2, 2)))

# 2nd convolution layer
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(AveragePooling2D(pool_size=(3,3), strides=(2, 2)))

# 3rd convolution layer
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(Conv2D(128, (3, 3), activation='relu'))
model.add(AveragePooling2D(pool_size=(3,3), strides=(2, 2)))

model.add(Flatten())

# Fully connected layer
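The snippet is truncated here; a hedged completion of the fully connected head, with num_classes standing in for the label count (not given in the excerpt):

model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))  # num_classes: assumed label count

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])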
Example #53
def return_VGG_ARCH(returnLayer='softmax'):
    '''
    returnLayer='softmax'/'featureVector': for model training / a Siamese
    network. NOTE: the argument is currently unused; this builder always
    returns the full softmax head (see return_VGG_ARCH_FE below for the
    feature-extractor variant).
    '''
    model = Sequential()
    model.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
    model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu"))
    model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
    model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
    model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
    model.add(Flatten())
    model.add(Dense(units=4096,activation="relu"))
    model.add(Dense(units=4096,activation="relu"))
    model.add(Dense(units=17, activation="softmax"))
    #model.summary()
    return model
Example #54
def makeNormal():
    model = Sequential()
    # The original passed both input_dim and a 3-D input_shape to the first
    # Dense layer, which conflict; flatten the (3, 25, 40) input first.
    model.add(Flatten(input_shape=(3, 25, 40)))
    model.add(Dense(output_dim=200))
    model.add(Activation("tanh"))

    model.add(Dense(output_dim=50))  # input_dim args on non-first layers are ignored by Keras, so they are dropped here
    model.add(Activation("tanh"))

    model.add(Dense(output_dim=20))
    model.add(Activation("tanh"))

    model.add(Dense(output_dim=1))
    model.add(Activation("sigmoid"))
    return model
Example #55
    def createRegularizedModel(self, inputs, outputs, hiddenLayers, activationType, learningRate):
        bias = True
        dropout = 0
        regularizationFactor = 0.01
        model = Sequential()
        if len(hiddenLayers) == 0: 
            model.add(Dense(self.output_size, input_shape=(self.input_size,), init='lecun_uniform', bias=bias))
            model.add(Activation("linear"))
        else :
            if regularizationFactor > 0:
                model.add(Dense(hiddenLayers[0], input_shape=(self.input_size,), init='lecun_uniform', W_regularizer=l2(regularizationFactor),  bias=bias))
            else:
                model.add(Dense(hiddenLayers[0], input_shape=(self.input_size,), init='lecun_uniform', bias=bias))

            if (activationType == "LeakyReLU") :
                model.add(LeakyReLU(alpha=0.01))
            else :
                model.add(Activation(activationType))
            
            for index in range(1, len(hiddenLayers)):  # include the last hidden layer (the original '-1' skipped it)
                layerSize = hiddenLayers[index]
                if regularizationFactor > 0:
                    model.add(Dense(layerSize, init='lecun_uniform', W_regularizer=l2(regularizationFactor), bias=bias))
                else:
                    model.add(Dense(layerSize, init='lecun_uniform', bias=bias))
                if (activationType == "LeakyReLU") :
                    model.add(LeakyReLU(alpha=0.01))
                else :
                    model.add(Activation(activationType))
                if dropout > 0:
                    model.add(Dropout(dropout))
            model.add(Dense(self.output_size, init='lecun_uniform', bias=bias))
            model.add(Activation("linear"))
        optimizer = optimizers.RMSprop(lr=learningRate, rho=0.9, epsilon=1e-06)
        model.compile(loss="mse", optimizer=optimizer)
        return model
Example #56
def return_VGG_ARCH_FE():
    '''
    Feature-extractor variant of return_VGG_ARCH: the Flatten layer is named
    'featureVector' so a Siamese network can tap its output.
    '''
    model = Sequential()
    model.add(Conv2D(input_shape=(224,224,3),filters=64,kernel_size=(3,3),padding="same", activation="relu"))
    model.add(Conv2D(filters=64,kernel_size=(3,3),padding="same", activation="relu"))
    model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
    model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(Conv2D(filters=128, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(MaxPool2D(name='db', pool_size=(2,2),strides=(2,2)))
    model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(Conv2D(filters=256, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
    
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(Conv2D(filters=512, kernel_size=(3,3), padding="same", activation="relu"))
    model.add(MaxPool2D(pool_size=(2,2),strides=(2,2)))
    model.add(Flatten( name='featureVector'))
    model.add(Dense(17, activation='sigmoid', name='featureVector_sigmoid',
                    kernel_regularizer=l2(1e-3),
                    kernel_initializer=initialize_weights,bias_initializer=initialize_bias))
    
    #model.summary()
    return model
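A hedged usage sketch for the named layer: tapping 'featureVector' as a Siamese branch (the initialize_weights/initialize_bias helpers referenced above are assumed to exist in scope, and images is a hypothetical input array):

from keras.models import Model

base = return_VGG_ARCH_FE()
feature_branch = Model(inputs=base.input,
                       outputs=base.get_layer('featureVector').output)
features = feature_branch.predict(images)  # images: assumed (n, 224, 224, 3) array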
Example #57
# Reconstructed head of a truncated snippet: fill an embedding matrix from a
# pretrained word-vector index (embeddings_index) using the tokenizer vocabulary.
embedding_matrix = np.zeros((vocabulary_size, 100))
for word, index in tokenizer.word_index.items():
    if index > vocabulary_size - 1:
        break
    else:
        embedding_vector = embeddings_index.get(word)
        if embedding_vector is not None:
            embedding_matrix[index] = embedding_vector


# pad sentences
encoded_sent = tokenizer.texts_to_sequences(df['Title'])
padded_sent = pad_sequences(encoded_sent, sentence_length, padding='post')


# model
model = Sequential()
model.add(Embedding(vocabulary_size, 100, input_length=sentence_length, weights=[embedding_matrix], trainable=True))
model.add(Dropout(0.2))
model.add(Conv1D(64, 5, activation='relu'))
model.add(MaxPooling1D(pool_size=4))
model.add(LSTM(lstm_size))
model.add(Dense(number_of_classes, activation='sigmoid'))

# at least one class match
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# perfect match of all classes
#model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['categorical_accuracy'])


history = model.fit(padded_sent, Y, validation_split=0.1, batch_size=512, epochs=10, verbose =1)
Example #58
 def createModel(self, inputs, outputs, hiddenLayers, activationType, learningRate):
     model = Sequential()
     if len(hiddenLayers) == 0: 
         model.add(Dense(self.output_size, input_shape=(self.input_size,), init='lecun_uniform'))
         model.add(Activation("linear"))
     else :
         model.add(Dense(hiddenLayers[0], input_shape=(self.input_size,), init='lecun_uniform'))
         if (activationType == "LeakyReLU") :
             model.add(LeakyReLU(alpha=0.01))
         else :
             model.add(Activation(activationType))
         
         for index in range(1, len(hiddenLayers)):  # include the last hidden layer (the original '-1' skipped it)
             layerSize = hiddenLayers[index]
             model.add(Dense(layerSize, init='lecun_uniform'))
             if (activationType == "LeakyReLU") :
                 model.add(LeakyReLU(alpha=0.01))
             else :
                 model.add(Activation(activationType))
         model.add(Dense(self.output_size, init='lecun_uniform'))
         model.add(Activation("linear"))
     optimizer = optimizers.RMSprop(lr=learningRate, rho=0.9, epsilon=1e-06)
     model.compile(loss="mse", optimizer=optimizer)
     return model
Example #59
def pretrained_finetune(weights_path, freezeAndStack):
    model = Sequential()
    if freezeAndStack == True:
        model.trainLayersIndividually = 1
    #conv-spatial batch norm - relu #1
    model.add(ZeroPadding2D((2, 2), input_shape=(3, 64, 64)))
    model.add(
        Convolution2D(64,
                      5,
                      5,
                      subsample=(2, 2),
                      W_regularizer=WeightRegularizer(l1=1e-7, l2=1e-7)))
    model.add(BatchNormalization(epsilon=1e-06, mode=0, axis=1, momentum=0.9))
    model.add(Activation('relu'))
    print "added conv1"

    #conv-spatial batch norm - relu #2
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(64, 3, 3, subsample=(1, 1)))
    model.add(BatchNormalization(epsilon=1e-06, mode=0, axis=1, momentum=0.9))
    model.add(Activation('relu'))
    print "added conv2"

    #conv-spatial batch norm - relu #3
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, subsample=(2, 2)))
    model.add(BatchNormalization(epsilon=1e-06, mode=0, axis=1, momentum=0.9))
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    print "added conv3"

    #conv-spatial batch norm - relu #4
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(128, 3, 3, subsample=(1, 1)))
    model.add(BatchNormalization(epsilon=1e-06, mode=0, axis=1, momentum=0.9))
    model.add(Activation('relu'))
    print "added conv4"

    #conv-spatial batch norm - relu #5
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, subsample=(2, 2)))
    model.add(BatchNormalization(epsilon=1e-06, mode=0, axis=1, momentum=0.9))
    model.add(Activation('relu'))
    print "added conv5"

    #conv-spatial batch norm - relu #6
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(256, 3, 3, subsample=(1, 1)))
    model.add(BatchNormalization(epsilon=1e-06, mode=0, axis=1, momentum=0.9))
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    print "added conv6"

    #conv-spatial batch norm - relu #7
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, subsample=(2, 2)))
    model.add(BatchNormalization(epsilon=1e-06, mode=0, axis=1, momentum=0.9))
    model.add(Activation('relu'))
    print "added conv7"

    #conv-spatial batch norm - relu #8
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(512, 3, 3, subsample=(1, 1)))
    model.add(BatchNormalization(epsilon=1e-06, mode=0, axis=1, momentum=0.9))
    model.add(Activation('relu'))
    print "added conv8"

    #conv-spatial batch norm - relu #9
    model.add(ZeroPadding2D((1, 1)))
    model.add(Convolution2D(1024, 3, 3, subsample=(2, 2)))
    model.add(BatchNormalization(epsilon=1e-06, mode=0, axis=1, momentum=0.9))
    model.add(Activation('relu'))
    print "added conv9"
    model.add(Dropout(0.25))

    #Affine-spatial batch norm -relu #10
    model.add(Flatten())
    model.add(Dense(512, W_regularizer=WeightRegularizer(l1=1e-5, l2=1e-5)))
    model.add(BatchNormalization(epsilon=1e-06, mode=0, axis=1, momentum=0.9))
    model.add(Activation('relu'))
    print "added affine!"
    model.add(Dropout(0.5))

    #affine layer w/ softmax activation added
    model.add(
        Dense(200,
              activation='softmax',
              W_regularizer=WeightRegularizer(l1=1e-5, l2=1e-5))
    )  # pretrained weights assume only 100 outputs; we need to train this layer from scratch
    print("added final affine")

    if freezeAndStack == True:
        for layer in model.layers:
            layer.trainable = False
        model.layers[1].trainable = True  # NOTE: unfreezes the first conv layer, not the new final Dense layer the comment above describes

    model.load_weights(weights_path)
    return model
Example #60
# sequences_to_matrix would turn the data into bag-of-words count vectors,
# which the Embedding layer below cannot consume; keep the token-id
# sequences and just pad them to a common length.
x_train = pad_sequences(x_train)
x_test = pad_sequences(x_test, maxlen=x_train.shape[1])

print(x_train)

Y_train = to_categorical(Y_train, num_classes)
Y_test = to_categorical(Y_test, num_classes)

# print(x_train[0])

model = Sequential()

model.add(Embedding(vocab_size, 256, input_length=x_train.shape[1]))  # vocab_size: the tokeniser's vocabulary size (assumed in scope); the original passed len(x_train), the sample count
model.add(Dropout(0.2))

model.add(LSTM(256, activation='relu', return_sequences=True, dropout=0.3, recurrent_dropout=0.2))

model.add(LSTM(256, activation='relu', dropout=0.3, recurrent_dropout=0.2))

model.add(Dense(num_classes, activation='softmax'))

optimiser = keras.optimizers.Adam(lr=1e-3, decay=1e-20)
model.compile(optimizer=optimiser,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(x_train, Y_train, batch_size=32, epochs=3, validation_split=0.3)
# score = model.evaluate(x_test, Y_test, batch_size=32)