Code Example #1
File: model.py Project: CoderBinGe/user-portrait2
    def get_model(self):
        input_current = Input((self.maxlen, ))
        input_left = Input((self.maxlen, ))
        input_right = Input((self.maxlen, ))

        embedder = Embedding(self.max_features,
                             self.embedding_dims,
                             input_length=self.maxlen)
        embedding_current = embedder(input_current)
        embedding_left = embedder(input_left)
        embedding_right = embedder(input_right)

        # Forward RNN over the left context.
        x_left = SimpleRNN(128, return_sequences=True)(embedding_left)
        # Backward RNN over the right context; reverse its output so the
        # timesteps align with the other two branches before concatenation.
        x_right = SimpleRNN(128, return_sequences=True,
                            go_backwards=True)(embedding_right)
        x_right = Lambda(lambda x: K.reverse(x, axes=1))(x_right)
        x = Concatenate(axis=2)([x_left, embedding_current, x_right])

        # Per-timestep projection (kernel_size=1), then max-over-time pooling.
        x = Conv1D(64, kernel_size=1, activation='tanh')(x)
        x = GlobalMaxPooling1D()(x)

        output = Dense(self.class_num, activation=self.last_activation)(x)
        model = Model(inputs=[input_current, input_left, input_right],
                      outputs=output)
        return model
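A hypothetical instantiation of this RCNN-style model (the owning class is not shown; TextRCNN and every hyperparameter value below are assumptions for illustration):

rcnn = TextRCNN(maxlen=100, max_features=50000, embedding_dims=128,
                class_num=2, last_activation='softmax')
model = rcnn.get_model()
# training needs three aligned inputs: the current tokens plus left- and
# right-shifted copies of the same sequences
model.compile(optimizer='adam', loss='categorical_crossentropy')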
Code Example #2
File: simpleCNN.py Project: sorokin5578/my_diplom
def call_network(text):
    model = Sequential()
    model.add(Embedding(num_words, 2, input_length=max_news_len))
    model.add(SimpleRNN(8))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    model_save_path = 'C:\\Users\\Illia\\PycharmProjects\\my_diplom\\Test_network\\best_model\\best_mode_77.h5'
    model.load_weights(model_save_path)
    with open('C:\\Users\\Illia\\PycharmProjects\\my_diplom\\Test_network\\best_model\\tokenizer.pickle', 'rb') as handle:
        tokenizer = pickle.load(handle)
    new_text = [preprocess_text(t) for t in text]
    sequence = tokenizer.texts_to_sequences(new_text)
    data = pad_sequences(sequence, maxlen=max_news_len)
    result = model.predict(data)
    return result


# text = ["Director of the company was fired", "Apple to Start Reopening Stores in Japan This Week", "TikTok's In-App Revenue Skyrockets During Lockdowns", "Coronavirus is propelling Netflix to new heights but is a crash inevitable?"]
#
#
# text2=["May-26-20 01:49AM  	Dow Jones Futures Jump; Five Titans Near Buy Points In Coronavirus Stock Market Rally Investor's Business Daily",
#        "May-25-20 01:02PM  	Gates Foundation Buys Up Amazon, Apple, Twitter Stock; Trims Berkshire Hathaway Stake SmarterAnalyst",
#        "12:45PM  	Dow Jones Stocks To Buy And Watch In May 2020; Apple, Microsoft Approach New Buy Points Investor's Business Daily",
#        "07:58AM  	Apples Key Weaknesses Investopedia"]
# print(call_network(text2))
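call_network relies on imports and module-level names defined elsewhere in the project; a plausible minimal setup (an assumption, not recovered from the source; preprocess_text in particular is project-specific):

import pickle
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, SimpleRNN, Dense
from tensorflow.keras.preprocessing.sequence import pad_sequences

num_words = 10000     # assumed vocabulary size used when training
max_news_len = 30     # assumed padded headline length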
Code Example #3
def build_simple_rnn_model(max_features=10000):
    model = Sequential()
    model.add(Embedding(max_features, 32))
    model.add(SimpleRNN(32))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['acc'])
    return model
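A short training sketch for this model (the sigmoid/binary-crossentropy head suggests IMDB-style sentiment data; the dataset and lengths below are illustrative assumptions):

from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing import sequence

(x_tr, y_tr), _ = imdb.load_data(num_words=10000)
x_tr = sequence.pad_sequences(x_tr, maxlen=500)  # assumed sequence length
model = build_simple_rnn_model()
model.fit(x_tr, y_tr, epochs=2, batch_size=128, validation_split=0.2)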
Code Example #4
def simple_rnn():
    model = Sequential()
    model.add(Embedding(dict_len, 32, input_length=pad_len))
    model.add(SimpleRNN(10))
    model.add(Dropout(0.1))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=metrics)
    return model
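simple_rnn reads module-level names (dict_len, pad_len, optimizer, metrics); a plausible setup, with all values assumed for illustration:

dict_len = 10000          # assumed vocabulary size
pad_len = 100             # assumed padded sequence length
optimizer = 'rmsprop'     # assumed optimizer
metrics = ['accuracy']    # assumed metrics list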
Code Example #5
File: training_RWMA.py Project: cortdog/Trialuse
def made_model():
    model = Sequential()

    model.add(InputLayer(input_shape=(4, 50, 600, 800, 3)))
    model.add(
        TimeDistributed(TimeDistributed(MaxPooling2D(pool_size=2, strides=2))))
    model.add(
        TimeDistributed(
            TimeDistributed(
                Conv2D(kernel_size=3,
                       strides=1,
                       filters=5,
                       padding='same',
                       activation='relu',
                       name='layer_conv1'))))
    model.add(
        TimeDistributed(TimeDistributed(MaxPooling2D(pool_size=2, strides=2))))
    model.add(
        TimeDistributed(
            TimeDistributed(
                Conv2D(kernel_size=5,
                       strides=1,
                       filters=20,
                       padding='same',
                       activation='relu',
                       name='layer_conv2'))))
    model.add(
        TimeDistributed(TimeDistributed(MaxPooling2D(pool_size=2, strides=2))))
    model.add(TimeDistributed(TimeDistributed(Flatten())))
    model.add(TimeDistributed(TimeDistributed(Dense(128, activation='relu'))))

    model.add(
        TimeDistributed(SimpleRNN(64, return_sequences=False, stateful=False)))
    model.add(SimpleRNN(64, return_sequences=False, stateful=False))
    model.add(Dense(6, activation='softmax'))

    optimizer = Adam(lr=1e-4)
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    return model
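The doubled TimeDistributed wrappers apply each 2-D layer across two leading time axes (4 clips of 50 frames), so every frame passes through the same CNN before the two-level recurrence collapses frames, then clips. A quick sanity check (a sketch; no data is needed to build the graph):

model = made_model()
model.summary()  # the final Dense should report an output shape of (None, 6)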
Code Example #6
    def add(self,
            units,
            activation='tanh',
            use_bias=True,
            kernel_initializer='glorot_uniform',
            recurrent_initializer='orthogonal',
            bias_initializer='zeros',
            kernel_regularizer=None,
            recurrent_regularizer=None,
            bias_regularizer=None,
            activity_regularizer=None,
            kernel_constraint=None,
            recurrent_constraint=None,
            bias_constraint=None,
            dropout=0.,
            recurrent_dropout=0.,
            return_sequences=False,
            return_state=False,
            go_backwards=False,
            stateful=False,
            unroll=False,
            **kwargs):
        return self._add_layer(
            SimpleRNN(units=units,
                      activation=activation,
                      use_bias=use_bias,
                      kernel_initializer=kernel_initializer,
                      recurrent_initializer=recurrent_initializer,
                      bias_initializer=bias_initializer,
                      kernel_regularizer=kernel_regularizer,
                      recurrent_regularizer=recurrent_regularizer,
                      bias_regularizer=bias_regularizer,
                      activity_regularizer=activity_regularizer,
                      kernel_constraint=kernel_constraint,
                      recurrent_constraint=recurrent_constraint,
                      bias_constraint=bias_constraint,
                      dropout=dropout,
                      recurrent_dropout=recurrent_dropout,
                      return_sequences=return_sequences,
                      return_state=return_state,
                      go_backwards=go_backwards,
                      stateful=stateful,
                      unroll=unroll,
                      **kwargs))
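A hypothetical call chain (SequentialBuilder and _add_layer's behavior are assumptions; the snippet only shows that add() forwards every SimpleRNN argument):

builder = SequentialBuilder()           # hypothetical class owning this add()
builder.add(64, return_sequences=True)  # stacked, sequence-returning SimpleRNN
builder.add(32)                         # final SimpleRNN emitting one vector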
Code Example #7
def test_delete_channels_simplernn(channel_index):
    layer = SimpleRNN(9, return_sequences=True)
    recursive_test_helper(layer, channel_index)
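In the original suite channel_index is presumably injected by a pytest parametrization; a sketch of how that could look (the value list is an assumption):

import pytest

@pytest.mark.parametrize('channel_index', [0, [1, 3], [0, 8]])
def test_delete_channels_simplernn(channel_index):
    layer = SimpleRNN(9, return_sequences=True)
    recursive_test_helper(layer, channel_index)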
Code Example #8
    def __init__(self, units=1,
                 name=None,
                 rnn_type='SimpleRNN',
                 activation=linear,
                 kernel_initializer=default_kernel_initializer(),
                 recurrent_initializer=default_kernel_initializer(),
                 bias_initializer=default_bias_initializer(),
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 trainable=True,
                 dtype=None,):
        if not dtype:
            dtype = floatx()
        elif dtype != floatx():
            set_floatx(dtype)

        assert isinstance(name, str), \
            "Please provide a string for field name. "
        assert callable(activation), \
            "Please provide a function handle for the activation. "

        # prepare initializers.
        if isinstance(kernel_initializer, (float, int)):
            kernel_initializer = default_constant_initializer(kernel_initializer)
        if isinstance(bias_initializer, (float, int)):
            bias_initializer = default_constant_initializer(bias_initializer)
        # prepare regularizers.
        kernel_regularizer = default_regularizer(kernel_regularizer)
        bias_regularizer = default_regularizer(bias_regularizer)

        if rnn_type == 'SimpleRNN':
            super(RNNField, self).__init__(
                SimpleRNN(
                    units=units,
                    activation=activation,
                    return_sequences=True,
                    kernel_initializer=kernel_initializer,
                    recurrent_initializer=recurrent_initializer,
                    bias_initializer=bias_initializer,
                    kernel_regularizer=kernel_regularizer,
                    recurrent_regularizer=recurrent_regularizer,
                    bias_regularizer=bias_regularizer,
                    trainable=trainable,
                    dtype=dtype,
                    unroll=True,
                    name=name
                )
            )
        elif rnn_type == 'LSTM':
            super(RNNField, self).__init__(
                LSTM(
                    units=units,
                    activation=activation,
                    return_sequences=True,
                    kernel_initializer=kernel_initializer,
                    recurrent_initializer=recurrent_initializer,
                    bias_initializer=bias_initializer,
                    kernel_regularizer=kernel_regularizer,
                    recurrent_regularizer=recurrent_regularizer,
                    bias_regularizer=bias_regularizer,
                    trainable=trainable,
                    dtype=dtype,
                    unroll=True,
                    name=name
                )
            )
        elif rnn_type == 'Dense':
            super(RNNField, self).__init__(
                Dense(
                    units=units,
                    activation=activation,
                    kernel_initializer=kernel_initializer,
                    bias_initializer=bias_initializer,
                    kernel_regularizer=kernel_regularizer,
                    bias_regularizer=bias_regularizer,
                    trainable=trainable,
                    dtype=dtype,
                    name=name
                )
            )
        else:
            raise NotImplementedError('Supported RNNType: (SimpleRNN, LSTM, Dense)')
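A hedged usage sketch (values illustrative; assumes the module's default initializer helpers resolve as in the constructor above):

field = RNNField(units=1, name='u', rnn_type='LSTM', dtype='float32')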
Code Example #9
xtrain = traindata.drop(columns=['Close'])
ytrain = traindata[['Close']]
xtest = testdata.drop(columns=['Close'])
ytest = testdata[['Close']]

print(xtrain.head(), ytrain.head(), xtest.head(), ytest.head(), sep=sp)
print(xtrain.shape, ytrain.shape, xtest.shape, ytest.shape, sep=sp)

x = np.array(xtrain).reshape(xtrain.shape[0], xtrain.shape[1], 1)
y = np.array(ytrain)

xt = np.array(xtest).reshape(xtest.shape[0], xtest.shape[1], 1)
yt = np.array(ytest)

modelRNN = Sequential()
modelRNN.add(
    SimpleRNN(50, return_sequences=True, input_shape=(xtrain.shape[1], 1)))
modelRNN.add(Dropout(0.2))

modelRNN.add(SimpleRNN(50, return_sequences=True))
modelRNN.add(Dropout(0.2))

modelRNN.add(SimpleRNN(50))

modelRNN.add(Dense(1, activation='linear'))
modelRNN.compile(loss='mse', optimizer='Adam', metrics=['accuracy'])

# saving my models
savedir = os.path.join(os.getcwd(), 'models')
modelname = 'Best.{epoch:03d}.h5'

if not os.path.isdir(savedir):
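The snippet is cut off at the directory check; a plausible continuation (an assumption following the common Keras checkpointing pattern, not recovered from the source):

    os.makedirs(savedir)
filepath = os.path.join(savedir, modelname)
# assumes ModelCheckpoint is imported from keras.callbacks
checkpoint = ModelCheckpoint(filepath=filepath, monitor='val_loss',
                             save_best_only=True)
modelRNN.fit(x, y, epochs=30, batch_size=64,
             validation_data=(xt, yt), callbacks=[checkpoint])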
Code Example #10
File: rnn_functional.py Project: wangcj05/sciann
    def __init__(self,
                 fields=None,
                 variables=None,
                 hidden_layers=None,
                 activation="tanh",
                 output_activation="linear",
                 rnn_type="SimpleRNN",
                 recurrent_activation="tanh",
                 kernel_initializer=None,
                 bias_initializer=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 dtype=None,
                 trainable=True,
                 **kwargs):
        # check data-type.
        if dtype is None:
            dtype = K.floatx()
        elif dtype != K.floatx():
            K.set_floatx(dtype)
        # prepare hidden layers.
        if hidden_layers is None:
            hidden_layers = []
        else:
            hidden_layers = to_list(hidden_layers)
        # check for copy constructor.
        if all([x in kwargs for x in ('inputs', 'outputs', 'layers')]):
            self._inputs = kwargs['inputs'].copy()
            self._outputs = kwargs['outputs'].copy()
            self._layers = kwargs['layers'].copy()
            self._set_model()
            return
        # prepare kernel initializers.
        activations, def_biasinit, def_kerinit = \
            prepare_default_activations_and_initializers(
                len(hidden_layers) * [activation] + [output_activation]
            )
        if kernel_initializer is None:
            kernel_initializer = def_kerinit
        elif isinstance(kernel_initializer, (float, int)):
            kernel_initializer = default_weight_initializer(
                len(hidden_layers) * [activation] + [output_activation],
                'constant',
                scale=kernel_initializer)
        else:
            kernel_initializer = [
                kernel_initializer for l in len(hidden_layers) * [activation] +
                [output_activation]
            ]
        # prepare bias initializers.
        if bias_initializer is None:
            bias_initializer = def_biasinit
        elif isinstance(bias_initializer, (float, int)):
            bias_initializer = default_weight_initializer(
                len(hidden_layers) * [activation] + [output_activation],
                'constant',
                scale=bias_initializer)
        else:
            bias_initializer = [
                bias_initializer for l in len(hidden_layers) * [activation] +
                [output_activation]
            ]
        # prepare regularizers.
        kernel_regularizer = default_regularizer(kernel_regularizer)
        bias_regularizer = default_regularizer(bias_regularizer)
        # prepares fields.
        fields = to_list(fields)
        if all([isinstance(fld, str) for fld in fields]):
            output_fields = [
                RNNField(
                    name=fld,
                    dtype=dtype,
                    kernel_initializer=kernel_initializer[-1],
                    bias_initializer=bias_initializer[-1],
                    kernel_regularizer=kernel_regularizer,
                    bias_regularizer=bias_regularizer,
                    trainable=trainable,
                ) for fld in fields
            ]
        elif all([validations.is_field(fld) for fld in fields]):
            output_fields = fields
        else:
            raise TypeError('Please provide a "list" of field names of' +
                            ' type "String" or "Field" objects.')
        # prepare inputs/outputs/layers.
        inputs = []
        layers = []
        variables = to_list(variables)
        if all([isinstance(var, RNNFunctional) for var in variables]):
            for var in variables:
                inputs += var.outputs
            # for var in variables:
            #     for lay in var.layers:
            #         layers.append(lay)
        else:
            raise TypeError(
                "Input error: Please provide a `list` of `Functional`s. \n"
                "Provided - {}".format(variables))
        # prepare hidden layers (hidden_layers was already normalized to a
        # list above, so this repeated block is effectively a no-op).
        if hidden_layers is None:
            hidden_layers = []
        else:
            hidden_layers = to_list(hidden_layers)
        # Check and convert activation functions to proper format.
        assert not isinstance(activation, list), \
            'Expected an activation function name not a "list". '
        afunc = get_activation(activation)

        # Input layers.
        if len(inputs) == 1:
            net_input = inputs[0]
        else:
            layer = Concatenate(name=graph_unique_name('conct'))
            net_input = layer(inputs)

        # Define the networks.
        net = [net_input]
        assert len(
            hidden_layers) > 0, 'Minimum of 1 RNN hidden layer is needed.'

        # Adding hidden layers
        for nLay, nNeuron in enumerate(hidden_layers):
            if nLay < 1000:
                # Effectively always true, so every hidden layer is recurrent;
                # the Dense branch below is dead code in practice.
                if rnn_type == 'LSTM':
                    layer = LSTM(nNeuron,
                                 return_sequences=True,
                                 recurrent_activation=recurrent_activation,
                                 kernel_initializer=kernel_initializer[nLay],
                                 bias_initializer=bias_initializer[nLay],
                                 kernel_regularizer=kernel_regularizer,
                                 bias_regularizer=bias_regularizer,
                                 trainable=trainable,
                                 dtype=dtype,
                                 unroll=True,
                                 name=graph_unique_name(
                                     "LSTM{:d}b_".format(nNeuron)))
                elif rnn_type == 'SimpleRNN':
                    layer = SimpleRNN(
                        nNeuron,
                        return_sequences=True,
                        kernel_initializer=kernel_initializer[nLay],
                        bias_initializer=bias_initializer[nLay],
                        kernel_regularizer=kernel_regularizer,
                        bias_regularizer=bias_regularizer,
                        trainable=trainable,
                        dtype=dtype,
                        unroll=True,
                        name=graph_unique_name("SRNN{:d}b_".format(nNeuron)))
                else:
                    raise ValueError('Invalid entry for `rnn_type` -- '
                                     'accepts from (`SimpleRNN`, `LSTM`).')
            else:
                # Add the dense layer.
                layer = Dense(nNeuron,
                              kernel_initializer=kernel_initializer[nLay],
                              bias_initializer=bias_initializer[nLay],
                              kernel_regularizer=kernel_regularizer,
                              bias_regularizer=bias_regularizer,
                              trainable=trainable,
                              dtype=dtype,
                              name=graph_unique_name("D{:d}b".format(nNeuron)))
            layers.append(layer)
            net[-1] = layer(net[-1])
            # Apply the activation.
            if afunc.__name__ != 'linear':
                layer = activations[nLay]
                layers.append(layer)
                net[-1] = layer(net[-1])

        # store output layers.
        for out in output_fields:
            layers.append(out)

        # Assign to the output variable
        if len(net) == 1:
            net_output = net[0]
        else:
            raise ValueError("Legacy for Enrichment: Must be updated. ")
            layer = Concatenate(name=graph_unique_name("{}_".format("conct")))
            net_output = layer(net)

        # check output activation functions.
        output_func = get_activation(output_activation)
        # Define the final outputs of each network
        outputs = []
        for out in output_fields:
            # add the activation on the output.
            if output_func.__name__ != 'linear':
                layer = activations[-1]
                layers.append(layer)
                outputs.append(layer(out(net_output)))
            else:
                outputs.append(out(net_output))

        self._inputs = inputs
        self._outputs = outputs
        self._layers = layers
        self._set_model()
Code Example #11
(input_train, y_train), (input_test, y_test) = imdb.load_data(
    path=imdb_path,
    num_words=max_features
)
print(len(input_train), 'train sequences')
print(len(input_test), 'test sequences')

print('pad sequences (samples x time)')
input_train = sequence.pad_sequences(input_train, maxlen=maxlen)
input_test = sequence.pad_sequences(input_test, maxlen=maxlen)

# Train with an Embedding layer and a SimpleRNN layer
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.layers import Embedding, SimpleRNN, Dense
model = Sequential()
model.add(Embedding(max_features, 32))
model.add(SimpleRNN(32))
model.add(Dense(1, activation='sigmoid'))
model.summary()
model.compile(
    optimizer='rmsprop',
    loss='binary_crossentropy',
    metrics=['acc']
)
history = model.fit(
    input_train,
    y_train,
    epochs=10,
    batch_size=128,
    validation_split=0.2
)
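A typical follow-up (assumed; not part of the snippet) evaluates on the held-out split:

loss, acc = model.evaluate(input_test, y_test, batch_size=128)
print('test acc:', acc)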
Code Example #12
from tensorflow.python.keras.models import Model

palabra = open('texto.txt', 'r').read()
palabra = palabra.lower()
alfabeto = list(set(palabra))
tam_datos, tam_alfabeto = len(palabra), len(alfabeto)
# print(nombres)  # the whole text
# print(tam_alfabeto, tam_datos)  # how many distinct characters appear in the text, then the total character count
car_a_ind = {car: ind for ind, car in enumerate(sorted(alfabeto))}
ind_a_car = {ind: car for ind, car in enumerate(sorted(alfabeto))}
print(car_a_ind)
n_a = 20  # number of units in the hidden layer
entrada = Input(shape=(None, tam_alfabeto))  # input: name characters
a0 = Input(shape=(n_a, ))  # previous hidden state a(t-1)
print(a0)
celda_recurrente = SimpleRNN(n_a, activation='tanh',
                             return_state=True)  # recurrent layer with n_a units

capa_salida = Dense(tam_alfabeto, activation='softmax')

print(capa_salida)

hs, _ = celda_recurrente(entrada, initial_state=a0)
salida = []
salida.append(capa_salida(hs))

modelo = Model([entrada, a0], salida)
opt = SGD(lr=0.03)
modelo.compile(optimizer=opt, loss='categorical_crossentropy')

with open("texto.txt") as f:
    ejemplos = f.readlines()
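The snippet stops after reading the training examples; a minimal continuation (assumed) that one-hot encodes a single example with car_a_ind and runs the model:

import numpy as np

ejemplo = ejemplos[0].strip().lower()
x = np.zeros((1, len(ejemplo), tam_alfabeto))  # one-hot characters
for t, car in enumerate(ejemplo):
    x[0, t, car_a_ind[car]] = 1.0
a_init = np.zeros((1, n_a))                    # initial hidden state
print(modelo.predict([x, a_init]).shape)       # (1, tam_alfabeto)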
Code Example #13
from tensorflow.python.keras.layers import SimpleRNN, Embedding
from tensorflow.python.keras.models import Sequential

# Return output at last cell
print('\n\n\nOutput at last rnn cell only')
mdl = Sequential()
mdl.add(Embedding(10000, 32))
mdl.add(SimpleRNN(32))
mdl.summary()

# Return all values at intermediate cells
print('\n\n\nOutput for each and every rnn cell')
mdl2 = Sequential()
mdl2.add(Embedding(10000, 32))
mdl2.add(SimpleRNN(32, return_sequences=True))
mdl2.summary()

# Stacked RNN
print('\n\n\nOutput at each and every rnn cell (stacked network)')
mdl3 = Sequential()
mdl3.add(Embedding(10000, 32))
mdl3.add(SimpleRNN(32, return_sequences=True))
mdl3.add(SimpleRNN(32, return_sequences=True))
mdl3.add(SimpleRNN(32, return_sequences=True))
mdl3.add(SimpleRNN(32))
mdl3.summary()
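The difference shows up directly in the output shapes: without return_sequences the layer emits only its final state, with it the full sequence, which is what lets the stacked layers in mdl3 feed one another.

print(mdl.output_shape)   # (None, 32): last timestep only
print(mdl2.output_shape)  # (None, None, 32): one 32-vector per timestep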
Code Example #14
(input_train, y_train), (input_test,
                         y_test) = imdb.load_data(num_words=max_features)
print(len(input_train), 'train sequences')
print(len(input_test), 'test sequences')

print('Pad sequences (samples x time)')
input_train = sequence.pad_sequences(input_train, maxlen=max_len)
input_test = sequence.pad_sequences(input_test, maxlen=max_len)
print('input_train shape:', input_train.shape)
print('input_test shape:', input_test.shape)

# Training model
mdl = Sequential()
mdl.add(Embedding(max_features, 32))
mdl.add(SimpleRNN(32))
mdl.add(Dense(1, activation='sigmoid'))

mdl.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
hist = mdl.fit(input_train,
               y_train,
               epochs=num_epochs,
               batch_size=batch_size,
               validation_split=0.2)

# Plot results
acc = hist.history['acc']
val_acc = hist.history['val_acc']
loss = hist.history['loss']
val_loss = hist.history['val_loss']
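The snippet ends before the actual plotting; a typical continuation (assumed, following the usual matplotlib pattern, not recovered from the source):

import matplotlib.pyplot as plt

epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.legend()
plt.show()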
Code Example #15
File: r11.py Project: jocoder22/PythonDataScience
print(traindata.head(),
      traindata.shape,
      traindata.shape,
      sep=sp)

print(x_train_new.shape,
      y_train_new.shape,
      x_test_new.shape,
      y_test_new.shape,
      sep=sp)

print(x_train_new[0], y_train_new[0], sep=sp)

# build the model
modelRNN = Sequential()
modelRNN.add(SimpleRNN(50, return_sequences=True, input_shape=(windows, 1)))
modelRNN.add(Dropout(0.2))

modelRNN.add(SimpleRNN(50, return_sequences=True))
modelRNN.add(Dropout(0.2))

modelRNN.add(SimpleRNN(50))

modelRNN.add(Dense(1, activation='linear'))
modelRNN.compile(loss='mse', optimizer='Adam', metrics=['accuracy'])

model_history = modelRNN.fit(x_train_new,
                             y_train_new,
                             epochs=30,
                             batch_size=64,
                             verbose=1,