Example #1
from keras import Sequential
from keras.layers import Dense, Dropout


def build_model(in_dim=20, drate=0.5, out=64):
    mdl = Sequential()
    mdl.add(Dense(out, input_dim=in_dim, activation='relu'))
    if drate:
        mdl.add(Dropout(drate))
    mdl.add(Dense(out, activation='relu'))
    if drate:
        mdl.add(Dropout(drate))
    mdl.add(Dense(1, activation='sigmoid'))

    return mdl
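A minimal usage sketch, assuming the imports above; the dummy data and hyperparameters here are illustrative, not from the original:

import numpy as np

model = build_model(in_dim=20, drate=0.5, out=64)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# illustrative dummy data: 100 samples, 20 features, binary labels
X = np.random.rand(100, 20)
y = np.random.randint(0, 2, size=(100, 1))
model.fit(X, y, epochs=5, batch_size=16, verbose=0)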
Example #2
    def create(cls, tokenizer: Tokenizer, hidden: int, dropout: float) -> "LanguageModel":
        from keras import Sequential
        from keras.layers import LSTM, Dropout, Dense

        if tokenizer.vocabulary_size == 0:
            logging.warning("Creating a model using a codec with an empty vocabulary.")
        model = Sequential()
        model.add(LSTM(hidden, input_shape=(tokenizer.context_size, 1)))
        model.add(Dropout(dropout))
        model.add(Dense(tokenizer.vocabulary_size, activation="softmax"))
        model.compile(loss="categorical_crossentropy", optimizer="adam")
        return cls(model, tokenizer)
Example #3
def create_lstm_model(num_features):
    model = Sequential()
    # "Encode" the input sequence using an RNN, producing an output of HIDDEN_SIZE.
    # Note: in a situation where your input sequences have a variable length,
    # use input_shape=(None, num_features).
    # By setting return_sequences to True, return not only the last output but
    # all the outputs so far in the form of (num_samples, timesteps,
    # output_dim). This is necessary because TimeDistributed below expects
    # the first dimension to be the timesteps.
    model.add(RNN(HIDDEN_SIZE, input_shape=(None, num_features), return_sequences=True))

    # Apply a dense layer to every temporal slice of the input. For each step
    # of the output sequence, decide which character should be chosen.
    model.add(layers.TimeDistributed(layers.Dense(len(LABEL_CLASS_MAPPING) + 1)))
    model.add(layers.Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()
    return model
Example #4
def make_model(n_in, n_out):
    print(n_in, n_out)
    mode = "ML_SOFTMAX"

    model = Sequential()

    if mode == "MLBIN":
        model.add(Dropout(0.5, input_shape=(n_in, )))
        model.add(Dense(4800, activation='relu', input_dim=n_in))
        model.add(Dropout(0.5))
        model.add(Dense(2400, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(1200, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(n_out, activation='sigmoid'))
        # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        # optimiser = Adam(0.0002, 0.5)
        optimiser = Adam()
        model.compile(loss="binary_crossentropy", optimizer=optimiser)
    elif mode == "MLBIN_SMALL":
        model.add(Dropout(0.5, input_shape=(n_in, )))
        model.add(Dense(8 * n_out, activation='relu', input_dim=n_in))
        model.add(Dropout(0.5))
        model.add(Dense(8 * n_out, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(8 * n_out, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(8 * n_out, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(n_out, activation='sigmoid'))
        # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        # optimiser = Adam(0.0002, 0.5)
        optimiser = Adam()
        model.compile(loss="binary_crossentropy", optimizer=optimiser)
    elif mode == "ML_SOFTMAX":
        model.add(Dropout(0.5, input_shape=(n_in, )))
        model.add(Dense(8 * n_out, activation='relu', input_dim=n_in))
        model.add(Dropout(0.5))
        model.add(Dense(8 * n_out, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(8 * n_out, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(4 * n_out, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(n_out, activation='softmax'))
        # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        # optimiser = Adam(0.0002, 0.5)
        optimiser = Adam()
        model.compile(loss="categorical_crossentropy", optimizer=optimiser)
    return model
Example #5
def build_model(spec, X_train):
    model = Sequential()
    # create first layer
    layer = spec[0]
    num_posts, bow_dim = X_train[0].shape
    model.add(InputLayer(input_shape=(num_posts, bow_dim)))
    model.add(Flatten())
    if 'none' in layer:
        model.add(Dense(  # input_dim=1,
            units=int(layer.split('none')[1]),
            activation=None
        ))
    elif 'relu' in layer:
        model.add(Dense(  # input_shape=train_X[0].shape,
            units=int(layer.split('relu')[1]),
            activation='relu'
        ))
    elif 'sig' in layer:
        model.add(Dense(  # input_shape=train_X[0].shape,
            units=int(layer.split('sig')[1]),
            activation='sigmoid'
        ))
    else:
        return None

    for layer in spec[1:]:
        if 'none' in layer:
            model.add(Dense(int(layer.split('none')[1]), activation=None))
        elif 'relu' in layer:
            model.add(Dense(int(layer.split('relu')[1]), activation='relu'))
        elif 'sig' in layer:
            model.add(Dense(int(layer.split('sig')[1]), activation='sigmoid'))
        elif 'drop' in layer:
            model.add(Dropout(float(layer.split('drop')[1]), seed=None))
        elif 'l1' in layer:
            model.add(ActivityRegularization(l1=float(layer.split('l1')[1])))
        elif 'l2' in layer:
            model.add(ActivityRegularization(l2=float(layer.split('l2')[1])))
        else:
            return None

    # add sigmoid output layer
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

    return model
Example #6
def build_model():
    model = Sequential()
    # Reshape In:[Batch_size,28,28] Out:[Batch_size,28,28,1]
    model.add(
        Reshape(name="Reshape", input_shape=(28, 28),
                target_shape=(28, 28, 1)))
    # Conv2D In:[Batch_size,28,28,1] Out:[Batch_size,28,28,32]
    model.add(
        Conv2D(name="Conv_1",
               filters=32,
               input_shape=(28, 28, 1),
               kernel_size=(5, 5),
               padding="same",
               activation="relu",
               kernel_initializer="glorot_uniform",
               kernel_regularizer=l2(0.0001)))
    # Max_pool2D In:[Batch_size,28,28,32] Out:[Batch_size,14,14,32]
    model.add(MaxPooling2D(name="MaxPool_1", pool_size=(2, 2), strides=(2, 2)))
    # Conv2D In:[Batch_size,14,14,32] Out:[Batch_size,14,14,64]
    model.add(
        Conv2D(name="Conv_2",
               filters=64,
               kernel_size=(5, 5),
               padding="same",
               activation="relu",
               kernel_regularizer=l2(0.0001)))
    # Max_pool2D In:[Batch_size,14,14,64] Out:[Batch_size,7,7,64]
    model.add(MaxPooling2D(name="MaxPool_2", pool_size=(2, 2), strides=(2, 2)))
    # Flatten In:[Batch_size,7,7,64] Out:[Batch_size,7*7*64]
    model.add(Flatten(name="Flatten"))
    # Dense In:[Batch_size,7*7*64] Out:[Batch_size,1024]
    model.add(
        Dense(name="Dense_1",
              units=1024,
              activation="relu",
              kernel_regularizer=l2(0.0001)))
    # Dropout In:[Batch_size,1024] Out:[Batch_size,1024]
    model.add(Dropout(name="DropOut_1", rate=0.4))
    # Dense In:[Batch_size,1024] Out:[Batch_size,10]
    model.add(Dense(name="Soft_max_1", activation="softmax", units=10))
    optim = optimizers.Adadelta()
    model.compile(optimizer=optim,
                  loss=keras.losses.categorical_crossentropy,
                  metrics=['accuracy'])
    return model
Example #7
def train_lstm(train_data, test_data, val_data, word_vectors, emb_len):
    backend.clear_session()
    # print("LSTM Training")

    x_train, y_train, x_test, y_test, x_val, y_val = split_train_test(
        train_data, test_data, val_data)

    tokenizer = Tokenizer(num_words=MAX_NUMBER_WORDS,
                          filters='!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n\'',
                          lower=True)

    tokenizer.fit_on_texts(x_train)
    word_index = tokenizer.word_index
    # print('\tFound %s unique tokens.' % len(word_index))

    x_train = tokenizer.texts_to_sequences(x_train)
    x_train = pad_sequences(x_train, maxlen=EMBEDDINGS_MAX_LEN_SEQ)
    x_val = tokenizer.texts_to_sequences(x_val)
    x_val = pad_sequences(x_val, maxlen=EMBEDDINGS_MAX_LEN_SEQ)
    # print('\tShape of data tensor:', x_train.shape)

    y_train = pd.get_dummies(y_train).values
    y_val = pd.get_dummies(y_val).values

    # if embeddings.find("glove") != -1:
    #     glove_file = datapath(embeddings)
    #     embeddings = get_tmpfile("glove2word2vec.txt")
    #     glove2word2vec(glove_file, embeddings)
    #
    # print("Loading embeddings", embeddings)
    # word_vectors = KeyedVectors.load_word2vec_format(embeddings, binary=False)

    vocabulary_size = min(len(word_index) + 1, MAX_NUMBER_WORDS)
    embedding_matrix = np.zeros((vocabulary_size, emb_len))

    vec = np.random.rand(emb_len)
    for word, i in word_index.items():
        if i >= MAX_NUMBER_WORDS:
            continue
        try:
            embedding_vector = word_vectors[word]
            embedding_matrix[i] = embedding_vector
        except KeyError:
            embedding_matrix[i] = vec

    del word_vectors

    model = Sequential()
    model.add(
        Embedding(MAX_NUMBER_WORDS,
                  emb_len,
                  input_length=x_train.shape[1],
                  weights=[embedding_matrix],
                  trainable=False))
    model.add(SpatialDropout1D(0.2))
    model.add(LSTM(200, dropout=0.2, recurrent_dropout=0.2))
    model.add(Dense(4, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=[metrics.categorical_accuracy])

    epochs = 25
    batch_size = 32
    print("\tTraining...")
    history = model.fit(x_train,
                        y_train,
                        epochs=epochs,
                        batch_size=batch_size,
                        verbose=0,
                        validation_data=(x_val, y_val),
                        shuffle=True)
    print("\tEvaluating....")
    sequences_test = tokenizer.texts_to_sequences(x_test)
    x_test = pad_sequences(sequences_test, maxlen=EMBEDDINGS_MAX_LEN_SEQ)
    y_pred = model.predict(x_test)
    y_pred = [np.argmax(y) for y in y_pred]
    print(np.array(y_test))
    print(np.array(y_pred))

    _acc = classifier_evaluation.evaluate_accuracy(y_test, y_pred)
    _f1 = classifier_evaluation.evaluate_f_score(y_test, y_pred)
    _auc_roc = classifier_evaluation.evaluate_roc_auc(y_test, y_pred)
    _precision = classifier_evaluation.evaluate_precision(y_test, y_pred)
    _recall = classifier_evaluation.evaluate_recall(y_test, y_pred)
    classifier_evaluation.full_evaluation(y_test, y_pred)

    # embedding_vector may be unbound if no word was found in word_vectors
    del embedding_matrix, x_train, y_train, x_test, y_test, y_pred, history

    return _acc, _f1, _auc_roc, _precision, _recall
Example #8
from keras import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense


def LeNet(input_shape):
    model = Sequential()
    model.add(
        Conv2D(6, (5, 5),
               padding='valid',
               activation='relu',
               kernel_initializer='he_normal',
               input_shape=input_shape))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(
        Conv2D(16, (5, 5),
               padding='valid',
               activation='relu',
               kernel_initializer='he_normal'))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))
    model.add(Flatten())
    model.add(Dense(120, activation='relu', kernel_initializer='he_normal'))
    model.add(Dense(84, activation='relu', kernel_initializer='he_normal'))

    return model
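LeNet as written stops at the 84-unit layer and is never compiled; a hedged completion sketch (the 10-class softmax head and compile settings are assumptions, not part of the original):

model = LeNet(input_shape=(32, 32, 1))
# assumed classification head and compile settings
model.add(Dense(10, activation='softmax', kernel_initializer='he_normal'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()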
Example #9
from keras import Sequential
from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout
from keras.preprocessing.image import ImageDataGenerator

dimensions = [64, 64, 3]
batch_size = 16

model = Sequential()
model.add(
    Conv2D(32, (3, 3),
           input_shape=(dimensions[0], dimensions[1], dimensions[2]),
           data_format="channels_last"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())

model.add(Dense(32))
model.add(Activation("relu"))
model.add(Dropout(0.5))

model.add(Dense(4))
model.add(Activation("softmax"))
Example #10
X_train, X_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=seed)
print(y_train[3])
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

print(len(X_train))
print(len(y_train))

print(y_train[1])


cls = Sequential()

cls.add(Dense(7, input_dim=7, activation='relu', kernel_initializer='random_uniform'))
cls.add(Dense(30, activation='relu', kernel_initializer='random_uniform'))
cls.add(Dense(3, activation='softmax', kernel_initializer='random_uniform'))

opt = Adam(lr=INIT_LR)

cls.compile(loss="categorical_crossentropy", optimizer=opt,
            metrics=["accuracy"])





print ("TRAINING MODEL")

history = cls.fit(X_train,y_train,epochs=EPOCHS,steps_per_epoch=10,validation_split=0.2,validation_steps=50)
Example #11
import numpy as np
from keras import Sequential
from keras.layers import Dense
from keras.initializers import RandomUniform
from keras.optimizers import SGD


class NeuralNetwork(object):
    def __init__(self, input_nodes, hidden_nodes, output_nodes, lr=None):
        self.input_nodes = input_nodes
        self.hidden_nodes = hidden_nodes
        self.output_nodes = output_nodes
        self.lr = lr
        self.scales_x = []
        self.scales_y = []

        input_kernel_range = np.sqrt(6) / (np.sqrt(input_nodes) + np.sqrt(hidden_nodes))
        input_kernel_initializer = RandomUniform(minval=-input_kernel_range, maxval=input_kernel_range)
        input_layer = Dense(input_nodes,
                            kernel_initializer=input_kernel_initializer,
                            name='input')

        hidden_kernel_range = np.sqrt(6) / (np.sqrt(hidden_nodes) + np.sqrt(output_nodes))
        hidden_kernel_initializer = RandomUniform(minval=-hidden_kernel_range, maxval=hidden_kernel_range)
        hidden_layer = Dense(hidden_nodes,
                             kernel_initializer=hidden_kernel_initializer,
                             name='hidden')

        output_layer = Dense(output_nodes,
                             name='output')

        self.model = Sequential()
        self.model.add(input_layer)
        self.model.add(hidden_layer)
        self.model.add(output_layer)

    def train(self, x_train, y_train):
        self.set_normalize_scales(x_train, y_train)
        x_train = self.normalize(x_train, self.scales_x)
        y_train = self.normalize(y_train, self.scales_y)

        optimizer = SGD(lr=self.lr)
        self.model.compile(loss='mse', optimizer=optimizer)
        self.model.fit(x_train, y_train, batch_size=20, epochs=500)

    def evaluate(self, x_test, y_test):
        x_test = self.normalize(x_test, self.scales_x)
        y_test = self.normalize(y_test, self.scales_y)
        return self.model.evaluate(x_test, y_test)

    def predict(self, x):
        x = self.normalize(x, self.scales_x)
        y = self.model.predict(x)
        return self.unnormalize(y, self.scales_y)

    def set_normalize_scales(self, x, y):
        for i in range(x.shape[1]):
            mean, std = x[:, i].mean(), x[:, i].std()
            self.scales_x.append([mean, std])
        for i in range(y.shape[1]):
            mean, std = y[:, i].mean(), y[:, i].std()
            self.scales_y.append([mean, std])

    @staticmethod
    def normalize(data, scales):
        for i in range(0, len(scales)):
            mean, std = scales[i]
            data[:, i] = (data[:, i] - mean) / std
        return data

    @staticmethod
    def unnormalize(data, scales):
        for i in range(0, len(scales)):
            mean, std = scales[i]
            data[:, i] = data[:, i] * std + mean
        return data
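A usage sketch for the class, assuming the imports above; the sizes and data here are illustrative, and note that train() runs 500 epochs by default:

net = NeuralNetwork(input_nodes=3, hidden_nodes=8, output_nodes=1, lr=0.01)

# illustrative synthetic regression data
x_train = np.random.rand(200, 3)
y_train = np.random.rand(200, 1)
net.train(x_train, y_train)

x_test = np.random.rand(50, 3)
y_test = np.random.rand(50, 1)
print(net.evaluate(x_test, y_test))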
Example #12
from keras import Sequential
from keras.layers import Dense


def keras_model_provider(optimizer='adam'):
    model = Sequential()
    model.add(Dense(1, input_dim=1, activation='sigmoid'))
    model.compile(optimizer, loss='mse')
    return model
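The provider pattern hands a fresh, compiled model to whoever calls it; a short sketch with dummy 1-D data (illustrative only):

import numpy as np

model = keras_model_provider(optimizer='sgd')
x = np.random.rand(64, 1)
y = np.random.rand(64, 1)
model.fit(x, y, epochs=3, verbose=0)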
Example #13
def keras_model_provider():
    model = Sequential()
    model.add(Dense(1, input_dim=1, activation='sigmoid'))

    model.compile(optimizer='Adam', loss=loss_provider("bar"))
    return model, loss_provider("bar")
Example #14
    X_TRAIN.append(sentence)
X_TRAIN = sequence.pad_sequences(X_TRAIN,
                                 maxlen=30,
                                 value=0.0,
                                 padding='post',
                                 dtype='float32')

X_TRAIN = np.asarray(X_TRAIN)
from keras import Sequential
from keras.layers import Dense, Dropout, LSTM
from keras.layers.embeddings import Embedding

model = Sequential()
model.add(Dense(30, input_shape=(30, 100)))
model.add(LSTM(30))
model.add(Dense(1, activation='sigmoid'))  # sigmoid (not relu) pairs with binary cross-entropy

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
batch_size = 128
model.fit(X_TRAIN, ytrain, batch_size=batch_size, epochs=30)
model.save('abusive.h5')
X_TEST = []
for i in xtest:
    sentence = []
    for words in i.split():
        temp = model1[words]
Example #15
    def init(self, num_words, embedding_matrix):
        print("Creating RNN model")
        model_glove = Sequential()
        model_glove.add(
            Embedding(num_words,
                      EMBEDDING_DIMENSION,
                      input_length=MAX_SEQUENCE_LENGTH,
                      weights=[embedding_matrix],
                      trainable=False))
        model_glove.add(Dropout(0.2))
        model_glove.add(Conv1D(64, 5, activation='relu'))
        model_glove.add(MaxPooling1D(pool_size=4))
        model_glove.add(LSTM(100))
        model_glove.add(Dense(1, activation='sigmoid'))
        model_glove.compile(loss='binary_crossentropy',
                            optimizer='adam',
                            metrics=['accuracy'])

        self.model = model_glove
Example #16
#create multiexample
X1_test = np.tile(th, (100, 1))
Y1_test = np.tile(test3, (100, 1))

X_test = np.tile(tg, (100, 1))
Y_test = np.tile(test, (100, 1))
p_test = np.tile(test3, (100, 1))
Xf_test = np.append(X_test, X1_test, axis=0)
Yf_test = np.append(Y_test, Y1_test, axis=0)
Xf_test = np.append(Xf_test, Xf_test, axis=0)
Yf_test = np.append(Yf_test, Yf_test, axis=0)

t1 = time.time()
#model
model = Sequential()
model.add(Dense(units=64, activation='relu', input_dim=5))
model.add(Dense(units=64, activation='relu'))
model.add(Dense(units=4))
model.add(Activation('linear'))

model.compile(loss='mse', optimizer='adam', metrics=['mae'])
#model fit
model.fit(Yf_test, Xf_test, epochs=180, validation_split=0.2, verbose=0)

t2 = time.time()
model.save('stupid_model.h5')  #save model

print(t2 - t1, 'seconds')
# model predictions
score = model.predict(test)
print(score)
Example #17
    test_tweets = []
    for test in X_test:
        tweet = clean_tweet(test)
        words = [
            token for grp, token, token_num, (start_index,
                                              end_index) in ht.tokenize(tweet)
        ]
        vector = [dict.token2id[word] for word in words]
        test_tweets.append(vector)

    test_tweets = sequence.pad_sequences(np.array(test_tweets), maxlen=150)

    # LSTM
    model = Sequential()
    model.add(Embedding(len(dict.keys()), 256, dropout=0.2))
    model.add(LSTM(100, dropout_W=0.2, dropout_U=0.2))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    # model.load_weights("lstm_model.hdf5")
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    checkpointer = callbacks.ModelCheckpoint(
        filepath="checkpoint-{epoch:02d}.hdf5",
        verbose=1,
        save_best_only=True,
        monitor='loss')
    csv_logger = CSVLogger('training_set_iranalysis1.csv',
                           separator=',',
Example #18
import pandas as pd
from keras import Sequential
from keras.layers.core import Dense, Activation, Dropout
from sklearn.model_selection import train_test_split
# from matplotlib.pyplot import plt
train_data = pd.read_csv(r'D:\sufe\A\contest_basic_train.tsv', sep='\t')
train_data = train_data.drop(['REPORT_ID', "ID_CARD", 'LOAN_DATE'], 1)
train_data = train_data.dropna()
# print(train_data.info())
X = train_data.drop(['Y'], 1).values  # 7 feature columns
y = train_data['Y'].values  # target column
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)

model = Sequential()
model.add(Dense(14, input_shape=(7,)))
model.add(Activation('relu'))
model.add(Dropout(0.3))  # dropout before the output layer, not after it
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.summary()

model.fit(X_train,y_train,epochs=10000,batch_size=16)
t=model.predict(X_test)

rate=0

for i in range(len(t)):
    if t[i]==y_test[i]:
        rate+=1
    else:
        pass
Example #19
def get_model(model_name, input_shape):
    """
    Generate the required model and return it
    :return: Model created
    """
    # Models are inspired from
    # CNN - https://yashk2810.github.io/Applying-Convolutional-Neural-Network-on-the-MNIST-dataset/
    # LSTM - https://github.com/harry-7/Deep-Sentiment-Analysis/blob/master/code/generatePureLSTM.py
    model = Sequential()
    if model_name == 'CNN':
        model.add(Conv2D(8, (13, 13),
                         input_shape=(input_shape[0], input_shape[1], 1)))
        model.add(BatchNormalization(axis=-1))
        model.add(Activation('relu'))
        model.add(Conv2D(8, (13, 13)))
        model.add(BatchNormalization(axis=-1))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 1)))
        model.add(Conv2D(8, (13, 13)))
        model.add(BatchNormalization(axis=-1))
        model.add(Activation('relu'))
        model.add(Conv2D(8, (2, 2)))
        model.add(BatchNormalization(axis=-1))
        model.add(Activation('relu'))
        model.add(MaxPooling2D(pool_size=(2, 1)))
        model.add(Flatten())
        model.add(Dense(64))
        model.add(BatchNormalization())
        model.add(Activation('relu'))
        model.add(Dropout(0.2))
    elif model_name == 'LSTM':
        model.add(LSTM(128, input_shape=(input_shape[0], input_shape[1])))
        model.add(Dropout(0.5))
        model.add(Dense(32, activation='relu'))
        model.add(Dense(16, activation='tanh'))
    model.add(Dense(len(class_labels), activation='softmax'))
    #model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    print(model.summary())
    return model
Example #20
def VGG(X, Y):
    model = Sequential()
    #layer_1
    model.add(
        Conv2D(64, (3, 3),
               strides=(1, 1),
               input_shape=X.shape[1:],
               padding='same',
               data_format='channels_last',
               activation='relu',
               kernel_initializer='uniform'))
    model.add(
        Conv2D(64, (3, 3),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               kernel_initializer='uniform',
               activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    #layer_2
    model.add(
        Conv2D(128, (3, 3),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation='relu',
               kernel_initializer='uniform'))
    model.add(
        Conv2D(128, (2, 2),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation='relu',
               kernel_initializer='uniform'))
    model.add(MaxPooling2D((2, 2)))
    #layer_3
    model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation='relu'))
    model.add(
        Conv2D(256, (3, 3),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation='relu'))
    model.add(
        Conv2D(256, (1, 1),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    #layer_4
    model.add(
        Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation='relu'))
    model.add(
        Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation='relu'))
    model.add(
        Conv2D(512, (1, 1),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    #layer_5
    model.add(
        Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation='relu'))
    model.add(
        Conv2D(512, (3, 3),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation='relu'))
    model.add(
        Conv2D(512, (1, 1),
               strides=(1, 1),
               padding='same',
               data_format='channels_last',
               activation='relu'))
    model.add(MaxPooling2D((2, 2)))
    # fully connected layers
    model.add(Flatten())  # flatten to 1-D
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(4096, activation='relu'))
    model.add(Dense(1000, activation='relu'))
    model.add(Dense(10, activation='softmax'))

    model.summary()
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
Example #21
import sys
sys.path.append("I:/New Folder/utils")
import regression_utils as rutils
from keras.layers import Dense
from keras import Sequential, metrics
import keras_utils as kutils
from sklearn import model_selection

#linear pattern in 2d
X, y = rutils.generate_linear_synthetic_data_regression(n_samples=200, n_features=1, 
                                                 n_informative=1,
                                                 noise = 100)
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, test_size=0.1, random_state=1)
rutils.plot_data_2d_regression(X_train, y_train)

model = Sequential()
model.add(Dense(units=1, input_shape=(1,), activation='linear'))

model.compile(optimizer='sgd', loss='mean_squared_error', metrics=[metrics.mean_squared_error])

history = model.fit(x=X_train, y=y_train, verbose=3, epochs=100,  batch_size=10, validation_split=0.1)
print(model.summary())
print(model.get_weights())
kutils.plot_loss(history)
rutils.plot_model_2d_regression(model, X_train, y_train)

y_pred = model.predict(X_test)
rutils.regression_performance(model, X_test, y_test)
Example #22
def get_model(n, model_type, dropout_value, lr_rate):
    model = Sequential()
    model.add(
        LSTM(types[model_type][0],
             kernel_initializer=tf.keras.initializers.Identity(),
             return_sequences=True,
             input_shape=(1, n)))
    model.add(Activation(custom1))
    for i in types[model_type][1:len(types[model_type]) - 1]:
        model.add(LSTM(i, dropout=dropout_value, return_sequences=True))
        model.add(Activation(custom1))
    model.add(LSTM(types[model_type][-1]))
    model.add(Activation(custom1))
    model.add(Dense(1))
    model.add(Activation(custom1))

    model.compile(loss=tf.keras.losses.MeanAbsoluteError(),
                  optimizer=keras.optimizers.Adam(lr_rate),
                  metrics=[tf.keras.metrics.MeanAbsoluteError()])

    return model
Example #23
def neural_network_regression():
    n_dim = 368
    data = pd.read_csv('./data/train_data_salary.csv')
    # data = data.iloc[:10000, :]

    # print(data)
    train_data_x = data.iloc[:, 1:-1]
    train_data_y = data.iloc[:, n_dim:]
    train_data_x = train_data_x.values

    train_data_y[str(n_dim)] = (train_data_y - np.min(train_data_y)) / (np.max(train_data_y) - np.min(train_data_y))
    train_data_y2 = np.array(train_data_y[str(n_dim)].tolist())

    max_val = np.max(train_data_y[str(n_dim - 1)])
    min_val = np.min(train_data_y[str(n_dim - 1)])
    print(max_val)
    print(min_val)

    x_train, x_test, y_train, y_test = train_test_split(train_data_x, train_data_y2, test_size=0.2)
    # standardize the feature values
    # try:
    #     std_x = joblib.load('./data/model/std2.model')
    # except Exception as e:
    #     std_x = StandardScaler()
    std_x = StandardScaler()
    x_train = std_x.fit_transform(x_train)
    x_test = std_x.transform(x_test)

    joblib.dump(std_x, './data/model/std2.model')

    # create the model; Sequential() is a linear stack of layers
    model = Sequential()

    # add layers; input_dim is the input dimensionality, units the number of neurons

    model.add(Dense(input_dim=n_dim-1, units=256, activation='relu'))
    model.add(Dropout(0.1))
    # model.add(Dense(input_dim=n_dim - 1, units=256))
    model.add(Dense(units=128, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(units=1, ))
    rmsprop = RMSprop(lr=0.0001, rho=0.9, epsilon=1e-08, decay=0.0)
    # choose the loss function and optimizer
    model.compile(optimizer=rmsprop, loss='mse')  # optimizer='sgd'

    model.fit(x_train, y_train, epochs=50, batch_size=100)
    print('testing...')
    # evaluate on the test set
    cost = model.evaluate(x_test, y_test, batch_size=400)
    tail_line()
    print('test cost is: ', cost)
    y_predict = model.predict(x_test)
    y_predict = np.array([i[0] for i in y_predict])
    y_rd_predict = y_predict * (np.max(train_data_y[str(n_dim - 1)]) - np.min(train_data_y[str(n_dim - 1)])) + np.min(
        train_data_y[str(n_dim - 1)])
    y_test = y_test * (np.max(train_data_y[str(n_dim - 1)]) - np.min(train_data_y[str(n_dim - 1)])) + np.min(
        train_data_y[str(n_dim - 1)])
    print(y_rd_predict)
    print(y_test)

    percent = [(y_rd_predict[a] - b) / y_rd_predict[a] for a, b in enumerate(y_test)]
    di = defaultdict(int)
    for i in percent:
        # if -200 < i < 200:
        di[round(i, 2)] += 1
    dis = {a: b for a, b in sorted(di.items(), key=lambda x: x[1], reverse=True)}
    dis2 = {a: b for a, b in sorted(di.items(), key=lambda x: x[1], reverse=True) if a <= 0.3}
    per = sum(dis2.values()) / sum(dis.values())
    print(di)
    tail_line()
    print("概率为:", per)
    t = sorted(di.items(), key=lambda x: x[0], reverse=True)
    t = [(a, b) for a, b in t if -5 <= a <= 5]
    plt.plot([a for a, b in t], [b for a, b in t])
    plt.show()
    tail_line()
    model.save('./data/model/neural_network_regression.model')
Example #24
X_test_ones = X_test_ones.reshape((X_test_ones.shape[0], 300, 100))
X_test_zeros = X_test_zeros.reshape((X_test_zeros.shape[0], 300, 100))
#classifier = LogisticRegression()
#classifier.fit(X_train, y_train)
#score = classifier.score(X_test, y_test)
print(X_test_ones.shape)
print(X_test_zeros.shape)
print(Y_test_ones)
print(Y_test_zeros)
input_dim = X_train.shape[1]
model = Sequential()
#model.add(layers.Embedding(300,100,input_length=30000))
#model.add(layers.Conv1D(10,100,activation='relu'))
#model.add(layers.MaxPool1D(pool_size=2, strides=1))
#model.add(layers.Flatten())
model.add(layers.Conv1D(40, 100, activation='relu'))
model.add(layers.GlobalMaxPool1D())
#model.add(layers.Flatten())
model.add(layers.Dense(10, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.1, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])

history = model.fit(X_train,
                    y_train,
                    epochs=50,
                    verbose=True,
                    validation_data=(X_test, y_test),
                    batch_size=batch_size)
model.summary()
loss, accuracy = model.evaluate(X_train, y_train, verbose=False)
Example #25
def define_model_snn_cifar10():
    model = Sequential()

    model.add(Conv1D(16, 3, strides=2, padding='same', input_shape=[128, 1],
                     kernel_initializer='lecun_normal', bias_initializer='zeros'))
    model.add(Activation('selu'))
    model.add(Conv1D(16, 3, kernel_initializer='lecun_normal', bias_initializer='zeros'))
    model.add(Activation('selu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(AlphaDropout(0.1))

    model.add(Conv1D(32, 3, padding='same', kernel_initializer='lecun_normal', bias_initializer='zeros'))
    model.add(Activation('selu'))
    model.add(Conv1D(32, 3, kernel_initializer='lecun_normal', bias_initializer='zeros'))
    model.add(Activation('selu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(AlphaDropout(0.1))

    model.add(Flatten())
    model.add(Dense(41, kernel_initializer='lecun_normal', bias_initializer='zeros'))
    model.add(Activation('selu'))
    model.add(AlphaDropout(0.2))
    model.add(Dense(41, kernel_initializer='lecun_normal', bias_initializer='zeros'))
    model.add(Activation('softmax'))

    # initiate RMSprop optimizer
    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

    # Let's train the model using RMSprop
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy', 'top_k_categorical_accuracy'])
    print(model.summary())
    return model
Example #26
train_data = train_images.reshape(train_images.shape[0],dimData)
test_data = test_images.reshape(test_images.shape[0],dimData)

#convert data to float and scale values between 0 and 1
train_data = train_data.astype('float')
test_data = test_data.astype('float')
#scale data
train_data /=255.0
test_data /=255.0
#change the labels from integer to one-hot encoding
train_labels_one_hot = to_categorical(train_labels)
test_labels_one_hot = to_categorical(test_labels)

#creating network
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(dimData,)))
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation='softmax'))

model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
history = model.fit(train_data, train_labels_one_hot, batch_size=256, epochs=20, verbose=1,
                   validation_data=(test_data, test_labels_one_hot))
# Accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'val'], loc='upper left')
plt.show() 
Example #27
# Now build the Training / Test data sets by splitting off this month.
# The last 20 rows of the dataframe represent Nov, 2017. We'll use that to test with and the
# rest will be used for training
spxTestData = spxData[-20:]
spxData = spxData[0:-20]

# Convert them into NumPy arrays for processing
train_input_data_ = np.asarray(spxData[input_cols])
train_output_data = np.asarray(spxData["next10DaysPrice"])

test_input_data = np.asarray(spxTestData[input_cols])
test_output_data = np.asarray(spxTestData["next10DaysPrice"])

# We build a sequential NN 
model = Sequential()
model.add(Dense(units=20, input_shape=(len(input_cols),), kernel_initializer="uniform", activation="tanh"))
# If we overfit, we can regularize by randomly dropping some units
model.add(Dropout(0.1))
model.add(Dense(units=300, kernel_initializer="uniform", activation="tanh"))
model.add(Dense(units=1, kernel_initializer="uniform", activation="tanh"))

# Stochastic gradient descent optimizer with some sensible defaults.
sgd = optimizers.SGD(lr=0.01, momentum=0.9)
model.compile(loss='mean_squared_error',
              optimizer=sgd)

# Uncomment this to view the model summary
# model.summary()

# Train the model.
hist = model.fit(train_input_data_, train_output_data, epochs=15)
Example #28
def model_builder(model_input, model_output):
    model = Sequential()

    # 2*[Convolution operation > Nonlinear activation (relu)] > Pooling operation
    model.add(Conv2D(32, (3, 3), activation='relu', padding='valid', input_shape=tuple(model_input.shape[1:])))
    model.add(Conv2D(16, (3, 3), activation='relu', padding='valid'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Conv2D(64, (3, 3), activation='relu', padding='valid'))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='valid'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.5))

    model.add(Flatten())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.5))

    model.add(Dense(model_output.shape[1], activation='softmax'))

    # Nesterov momentum included for parameters update, makes correction to parameters update values
    # by taking into account the approximated future value of the objective function
    # However does not account for the importance for each parameter when performing updates
    # op = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)

    # Nesterov Momentum Adaptive Moment Estimation
    # op = optimizers.Nadam()

    # RMSProp
    op = optimizers.RMSprop()

    model.compile(optimizer=op, metrics=['accuracy'], loss='categorical_crossentropy')

    return model
Example #29
    height = len(cm[0])

    for x in range(width):
        for y in range(height):
            ax.annotate(str(cm[x][y]), xy=(y, x), horizontalalignment='center',
                        verticalalignment='center', color=getFontColor(cm[x][y]))

    # add genres as ticks
    alphabet = mods
    plt.xticks(range(width), alphabet[:width], rotation=30)
    plt.yticks(range(height), alphabet[:height])
    return plt
from keras import Sequential, layers

model = Sequential()
model.add(layers.LSTM(128, dropout=0.7, return_sequences=True, input_shape=(128, 2)))
model.add(layers.LSTM(128, dropout=0.7))
model.add(layers.Dense(len(mods), activation="softmax"))  # softmax to match categorical cross-entropy
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
nb_epoch = 80    # number of epochs to train on
batch_size = 256  # training batch size
###################################################train the network#################################################

history = model.fit(X_train,
    Y_train,
    batch_size=batch_size,
    epochs=nb_epoch,
    verbose=1,
    validation_data=(X_test, Y_test)
   )
# we re-load the best weights once training is finished
Example #30
class ChatDNN:
    def __init__(self, input_dim, embedding_dim=None, embedding_weights=None):
        self.input_dim = input_dim
        self.embedding_dim = embedding_dim
        self.embedding_weights = embedding_weights
        self.model = None

    def dnn_create(self, n_classes, input_shape):
        self.model = Sequential()
        if self.embedding_weights is not None:
            self.model.add(
                Embedding(input_dim=self.input_dim,
                          output_dim=self.embedding_dim,
                          input_shape=input_shape,  # fixes the input length
                          weights=[self.embedding_weights]))
        else:
            self.model.add(
                Embedding(input_dim=self.input_dim,
                          output_dim=self.embedding_dim,
                          input_shape=input_shape))  # fixes the input length
        self.model.add(Dense(512, activation='relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Flatten())
        self.model.add(Dense(512, activation='relu'))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(n_classes, activation='softmax'))
        print('Compiling the Model...')
        self.model.compile(loss='categorical_crossentropy',
                           optimizer='adam',
                           metrics=['accuracy'])
        self.model.summary()
        return self

    def train(self, x_train, y_train, x_test, y_test):
        print('Defining a Simple Keras Model...')
        n_classes = len(set(y_train.tolist() + y_test.tolist()))
        print(n_classes)
        y_train = np_utils.to_categorical(y_train, n_classes)
        y_test = np_utils.to_categorical(y_test, n_classes)
        print((x_train.shape[1], ))
        self.dnn_create(n_classes, (x_train.shape[1], ))
        print("Train...")
        self.model.fit(x_train,
                       y_train,
                       batch_size=setting.BATCH_SIZE,
                       epochs=setting.N_EPOCH,
                       verbose=1,
                       validation_data=(x_test, y_test))
        print("Evaluate...")
        score = self.model.evaluate(x_test,
                                    y_test,
                                    batch_size=setting.BATCH_SIZE)
        print('Test score:', score)
        return self

    def predict(self):
        pass
Example #31
class Network(object):
    def __init__(self, input_size, hidden_size, output_size, lr):
        ''' Build the network used for notMNIST classification.
        Args:
            input_size: size of input feature
            hidden_size: size of hidden layer
            output_size: size of output layer
            lr: learning rate

        Returns:
            nothing
        '''
        self.model = Sequential()
        self.model.add(
            Dense(hidden_size,
                  kernel_initializer=TruncatedNormal(stddev=input_size**-0.5),
                  bias_initializer=Zeros(),
                  activation='relu',
                  input_shape=(input_size, )))
        self.model.add(
            Dense(output_size,
                  kernel_initializer=TruncatedNormal(stddev=hidden_size**-0.5),
                  bias_initializer=Zeros(),
                  activation='softmax'))
        self.model.compile(loss='sparse_categorical_crossentropy',
                           optimizer=SGD(lr=lr),
                           metrics=['accuracy'])

    def train(self, num_epochs, batch_size, train_features, train_labels,
              valid_features, valid_labels, checkpoint_fn):
        ''' Train and validate network using notMNIST data.
        Args:
            num_epochs: number of epochs to train
            batch_size: batch_size
            train_features: training features with shape [batch_size, feature_size]
            train_labels: training labels with shape [batch_size, label_size]
            valid_features: validation features with shape [batch_size, feature_size]
            valid_labels: validation labels with shape [batch_size, label_size]
            checkpoint_fn: checkpoint filename to save after training

        Returns:
            train_losses: training losses
            val_losses: validation losses
        '''

        num_of_batches = len(train_features) // batch_size

        history = self.model.fit(x=train_features,
                                 y=train_labels,
                                 validation_data=(valid_features,
                                                  valid_labels),
                                 epochs=num_epochs,
                                 batch_size=batch_size)
        self.model.save(checkpoint_fn)
        print('[INFO] Training weights have been '
              'successfully saved to {}.'.format(checkpoint_fn))
        return history.history['loss'], history.history['val_loss']

    def inference(self, test_features, checkpoint_fn):
        ''' Test network using notMNIST test data.
        Args:
            test_features: test features with shape [batch_size, feature_size]
            checkpoint_fn: trained checkpoint filename

        Returns:
            predictions: predictions made by the network
        '''
        model = models.load_model(checkpoint_fn)
        print('[INFO] Training weights have been '
              'successfully loaded from {}.'.format(checkpoint_fn))
        predictions = model.predict(test_features).argmax(axis=1)
        return predictions
Example #32
def create_model():
    model = Sequential()
    model.add(Conv2D(LAYER1_SIZE, activation="relu", kernel_size=(3, 3),
                     input_shape=(2, BOARD_SIZE, BOARD_SIZE),
                     data_format="channels_first",
                     kernel_regularizer=l2(L2_REGULARISATION),
                     padding='same'))
    model.add(Conv2D(LAYER1_SIZE, activation="relu", kernel_size=(3, 3),
                     data_format="channels_first",
                     kernel_regularizer=l2(L2_REGULARISATION),
                     padding='same'))
    model.add(MaxPooling2D((2, 2), data_format="channels_first"))
    model.add(Conv2D(LAYER1_SIZE * 2, activation="relu", kernel_size=(3, 3),
                     data_format="channels_first",
                     kernel_regularizer=l2(L2_REGULARISATION),
                     padding='same'))
    model.add(Conv2D(LAYER1_SIZE * 2, activation="relu", kernel_size=(3, 3),
                     data_format="channels_first",
                     kernel_regularizer=l2(L2_REGULARISATION),
                     padding='same'))
    model.add(MaxPooling2D((2, 2), data_format="channels_first"))
    model.add(Flatten())
    model.add(Dense(LAYER2_SIZE, activation='relu', kernel_regularizer=l2(L2_REGULARISATION)))
    model.add(Dense(1, activation='tanh'))

    optimizer = Adam(decay=DECAY, lr=LR)
    model.compile(loss='mse', optimizer=optimizer, metrics=['accuracy', 'mae'])
    model.summary()

    return model
Example #33
import keras
from keras import Sequential
from keras.datasets import mnist
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense

(x_train, y_train), (x_test, y_test) = mnist.load_data()

x_train = x_train.reshape(x_train.shape[0], 28, 28, 1).astype("float32")
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1).astype("float32")

x_train /= 255
x_test /= 255

y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)

model = Sequential()
conv = Conv2D(32, (3, 3), activation="relu", input_shape=(28, 28, 1))
model.add(conv)

conv_2 = Conv2D(64, (3, 3), activation="relu")
model.add(conv_2)

pool = MaxPooling2D((2, 2))
model.add(pool)

dropout = Dropout(0.25)
model.add(dropout)

flatten = Flatten()
model.add(flatten)

dense = Dense(128, activation="relu")
model.add(dense)
Example #34
from keras import applications, optimizers
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, Flatten, Dropout
from keras import Model, Sequential

base_model = applications.VGG16(include_top=False, weights='imagenet', 
                           input_shape=(150, 150, 3))


top_model = Sequential()
top_model.add(Flatten(input_shape=base_model.output_shape[1:]))
top_model.add(Dense(512, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.2))
top_model.add(Dense(2, activation='softmax'))

model = Model(inputs = base_model.input, outputs = top_model(base_model.output))
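The snippet stops before training; a common next step, sketched here as an assumption rather than part of the original, is to freeze the convolutional base so only the new top trains, then compile:

# freeze the pre-trained VGG16 base; only the top classifier will be updated
for layer in base_model.layers:
    layer.trainable = False

model.compile(optimizer=optimizers.SGD(lr=1e-4, momentum=0.9),
              loss='categorical_crossentropy',
              metrics=['accuracy'])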
Example #35
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)

x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255

# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)

model = Sequential()
model.add(
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
#in case of overfitting
model.add(Dropout(0.25))
model.add(Flatten())
#fully connected layer with 128 outputs
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
#compile with loss and optimizer
model.compile(loss=tf.keras.losses.categorical_crossentropy,
              optimizer=tf.keras.optimizers.Adadelta(),
              metrics=['accuracy'])

model.fit(x_train,
Example #36
from keras import Sequential
from keras.layers import CuDNNLSTM, Dense, Dropout


def create_model(num_frame, num_joint):
    model = Sequential()
    model.add(
        CuDNNLSTM(50,
                  input_shape=(num_frame, num_joint),
                  return_sequences=False))
    model.add(Dropout(0.4))  # Dropout gives the model more chances to learn multiple independent representations
    model.add(Dense(256))
    model.add(Dropout(0.4))
    model.add(Dense(64))
    model.add(Dropout(0.4))
    model.add(Dense(4, activation='softmax'))
    return model
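create_model returns an uncompiled network; a hedged sketch of compiling it (the frame and joint counts are made-up examples):

model = create_model(num_frame=32, num_joint=36)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# note: CuDNNLSTM layers only run on a CUDA-enabled GPU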
Example #37
from keras import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Activation, LeakyReLU


def build_discriminator():
    dis_model = Sequential()
    dis_model.add(Conv2D(128, (5, 5), padding="same", input_shape=(64, 64, 3)))
    dis_model.add(LeakyReLU(alpha=0.2))
    dis_model.add(MaxPooling2D(pool_size=(2, 2)))

    dis_model.add(Conv2D(256, (3, 3)))
    dis_model.add(LeakyReLU(alpha=0.2))
    dis_model.add(MaxPooling2D(pool_size=(2, 2)))

    dis_model.add(Conv2D(512, (3, 3)))
    dis_model.add(LeakyReLU(alpha=0.2))
    dis_model.add(MaxPooling2D(pool_size=(2, 2)))

    dis_model.add(Flatten())
    dis_model.add(Dense(1024))
    dis_model.add(LeakyReLU(alpha=0.2))

    dis_model.add(Dense(1))
    dis_model.add(Activation("sigmoid"))

    return dis_model
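build_discriminator leaves compilation to the caller, as GAN code usually does; a hedged sketch of the typical next step (the Adam settings are common DCGAN defaults, not from the original):

from keras.optimizers import Adam

discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy',
                      optimizer=Adam(lr=0.0002, beta_1=0.5),
                      metrics=['accuracy'])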
Example #38
import numpy as np
from keras import Sequential
from keras.layers import Dense

data = np.random.random((1000, 32))
label = np.random.random((1000, 10))

model = Sequential()
model.add(Dense(64, activation='relu', input_shape=(32, )))
model.add(Dense(64, activation='relu'))
model.add(Dense(10, activation='softmax'))

model.compile('adam', 'categorical_crossentropy')

model.fit(data, label, epochs=100)

model.save('my_model.h5')
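The saved HDF5 file can be restored with load_model, which brings back the architecture, weights, and optimizer state; a short sketch:

from keras.models import load_model

restored = load_model('my_model.h5')
print(restored.predict(data[:5]))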

Example #39
def build_model():
    _model = Sequential()
    _model.add(
        Conv2D(32,
               kernel_size=(3, 3),
               activation='relu',
               input_shape=tuple(map(sum, zip(TARGET_SIZE, (0, 0, 2))))))
    _model.add(Conv2D(64, (3, 3), activation='relu'))
    _model.add(MaxPooling2D(pool_size=(2, 2)))
    _model.add(Dropout(0.25))
    _model.add(Flatten())
    _model.add(Dense(128, activation='relu'))
    _model.add(Dropout(0.5))
    _model.add(Dense(len(dic), activation='softmax'))
    _model.compile(loss='categorical_crossentropy',
                   optimizer='Adam',
                   metrics=['accuracy'])
    return _model
Example #40
print('Maximum review length: {}'.format(len(max((X_train + X_test), key=len))))

print('Minimum review length: {}'.format(len(min((X_train + X_test), key=len))))


from keras.preprocessing import sequence
max_words = 500
X_train = sequence.pad_sequences(X_train, maxlen=max_words)
X_test = sequence.pad_sequences(X_test, maxlen=max_words)

from keras import Sequential
from keras.layers import Embedding, LSTM, Dense, Dropout
embedding_size=32
model=Sequential()
model.add(Embedding(vocabulary_size, embedding_size, input_length=max_words))
model.add(LSTM(100))
model.add(Dense(1, activation='sigmoid'))
print(model.summary())


model.compile(loss='binary_crossentropy',
             optimizer='adam',
             metrics=['accuracy'])

batch_size = 64
num_epochs = 3
X_valid, y_valid = X_train[:batch_size], y_train[:batch_size]
X_train2, y_train2 = X_train[batch_size:], y_train[batch_size:]
model.fit(X_train2, y_train2, validation_data=(X_valid, y_valid), batch_size=batch_size, epochs=num_epochs)
Example #41
def train_model(train_test_path):
    """
    Creates a model and performs training.
    """
    # Load train/test data
    train_test_data = np.load(train_test_path)
    x_train = train_test_data['X_train']
    y_train = train_test_data['y_train']

    print("x_train:", x_train.shape)
    print("y_train:", y_train.shape)

    del train_test_data

    x_train = np.expand_dims(x_train, axis=3)

    # Create network
    model = Sequential()
    model.add(Conv1D(128, 5, input_shape=x_train.shape[1:], padding='same', activation='relu'))
    model.add(MaxPooling1D(5))
    model.add(Conv1D(128, 5, padding='same', activation='relu'))
    model.add(MaxPooling1D(5))
    model.add(Dropout(0.5))

    model.add(Flatten())

    model.add(Dense(1024, kernel_initializer='glorot_uniform', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(512, kernel_initializer='glorot_uniform', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(256, kernel_initializer='glorot_uniform', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(128, kernel_initializer='glorot_uniform', activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(len(language_codes), kernel_initializer='glorot_uniform', activation='softmax'))

    model_optimizer = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(loss='categorical_crossentropy', optimizer=model_optimizer, metrics=['accuracy'])

    # Train
    model.fit(x_train, y_train,
              epochs=10,
              validation_split=0.10,
              batch_size=64,
              verbose=2,
              shuffle=True)

    model.save(model_path)