def build_discriminator(img_shape, loss, optimizer):
    """Build and compile the GAN discriminator.

    A fully connected binary classifier: flattens the input image and runs
    it through two 2048-unit hidden layers before a sigmoid output.

    :param img_shape: shape of the input images, e.g. (rows, cols, channels).
    :param loss: loss passed to ``compile`` (e.g. 'binary_crossentropy').
    :param optimizer: Keras optimizer instance or name.
    :return: the compiled ``Sequential`` discriminator model.
    """
    discriminator = Sequential()
    discriminator.add(Input(shape=img_shape))
    discriminator.add(Flatten())
    discriminator.add(Dense(2048))
    # LeakyReLU avoids the dead-unit problem a plain ReLU discriminator can hit.
    discriminator.add(LeakyReLU(alpha=0.2))
    discriminator.add(Dense(2048))
    discriminator.add(LeakyReLU(alpha=0.2))
    # Sigmoid output: probability that the input image is real.
    discriminator.add(Dense(1, activation='sigmoid'))

    print('Модель дискриминатора')
    discriminator.summary()
    discriminator.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
    return discriminator
def build_generator(img_shape, latent_dim):
    """Build the GAN generator.

    Maps a latent noise vector to a single-channel image through three
    widening dense layers, then reshapes the tanh output onto the image grid.

    :param img_shape: target image shape; ``img_shape[0] * img_shape[1]``
        pixels are produced (single channel).
    :param latent_dim: dimensionality of the input noise vector.
    :return: the (uncompiled) ``Sequential`` generator model.
    """
    generator = Sequential()
    generator.add(Input(shape=(latent_dim,)))
    generator.add(Dense(units=512))
    generator.add(LeakyReLU(alpha=0.2))
    # BatchNorm with momentum 0.8 is a common choice to stabilise GAN training.
    generator.add(BatchNormalization(momentum=0.8))
    generator.add(Dense(units=3072))
    generator.add(LeakyReLU(alpha=0.2))
    generator.add(BatchNormalization(momentum=0.8))
    generator.add(Dense(units=6144))
    generator.add(LeakyReLU(alpha=0.2))
    generator.add(BatchNormalization(momentum=0.8))
    # tanh keeps pixel values in [-1, 1].  The unit count must equal the
    # number of pixels consumed by the Reshape below; it was hard-coded to
    # 4096 (i.e. 64x64 only) — computing it from img_shape generalizes the
    # function to any image size.
    generator.add(Dense(units=img_shape[0] * img_shape[1], activation='tanh'))
    generator.add(Reshape(target_shape=(img_shape[0], img_shape[1], 1)))

    print('Модель генератора')
    generator.summary()
    return generator
# --- Esempio n. 3 (Example 3 — scraped-snippet separator; stray score line removed) ---
def create_model(step: Tensorflow2ModelStep):
    """
    Create a TensorFlow v2 Multi-Layer-Perceptron Model.

    :param step: The base Neuraxle step for TensorFlow v2 (Tensorflow2ModelStep)
    :return: TensorFlow v2 Keras model
    """
    # shape: (batch_size, input_dim).  Note the trailing comma: without it
    # the original passed a bare int instead of a 1-tuple to Input().
    inputs = Input(
        shape=(step.hyperparams['input_dim'],),
        batch_size=None,
        dtype=tf.dtypes.float32,
        name='inputs',
    )

    # First hidden layer.  (The functional API gets its input shape from
    # Input() above, so no input_shape kwarg is needed on the Dense layer.)
    dense_layers = [
        Dense(units=step.hyperparams['hidden_dim'],
              kernel_initializer=step.hyperparams['kernel_initializer'],
              activation=step.hyperparams['activation'])
    ]

    # Each subsequent hidden layer's width is scaled by the configured multiplier.
    hidden_dim = step.hyperparams['hidden_dim']
    for _ in range(step.hyperparams['n_dense_layers'] - 1):
        hidden_dim *= step.hyperparams['hidden_dim_layer_multiplier']
        dense_layers.append(
            Dense(units=int(hidden_dim),
                  activation=step.hyperparams['activation'],
                  kernel_initializer=step.hyperparams['kernel_initializer']))

    # BUG FIX: chain each layer on the previous layer's OUTPUT.  The original
    # applied every layer directly to `inputs`, so only the last hidden layer
    # ever ended up in the computation graph.
    outputs = inputs
    for layer in dense_layers:
        outputs = layer(outputs)

    softmax_layer = Dense(step.hyperparams['n_classes'], activation='softmax')
    outputs = softmax_layer(outputs)

    return Model(inputs=inputs, outputs=outputs)
# --- Esempio n. 4 (Example 4 — scraped-snippet separator; stray score line removed) ---
# Load (target, context) index pairs and their 0/1 labels produced by the
# skip-gram preprocessing step.  A context manager closes the file handle,
# which the original one-liner leaked.
with open("data/dane.csv_%s.pickle" % algorithm, 'rb') as pickle_file:
    pairs, labels = pickle.load(pickle_file)

pairs = np.asarray(pairs)
labels = np.asarray(labels)
targets = pairs[:, 0]
contexts = pairs[:, 1]

# Shuffle all three arrays consistently, then keep the first train_size examples.
labels, targets, contexts = shuffle(labels, targets, contexts, random_state=42)
labels = labels[:train_size]
targets = targets[:train_size]
contexts = contexts[:train_size]

# Word indices are contiguous, so the largest index + 1 is the vocabulary size.
vocab_size = np.amax(pairs) + 1

# Word2vec-style model: one embedding table shared by target and context words.
input_target = Input((1, ))
input_context = Input((1, ))
embedding = Embedding(vocab_size, vector_dim, input_length=1, name='embedding')
target = embedding(input_target)
target = Reshape((vector_dim, 1))(target)
context = embedding(input_context)
context = Reshape((vector_dim, 1))(context)
# now perform the dot product operation to get a similarity measure
dot_product = dot([target, context], axes=1)
dot_product = Reshape((1, ))(dot_product)
# add the sigmoid output layer
output = Dense(1, activation='sigmoid')(dot_product)

model = Model(inputs=[input_target, input_context], outputs=output)
model.compile(loss='binary_crossentropy', optimizer='rmsprop')
model.summary()
# --- Esempio n. 5 (Example 5 — scraped-snippet separator; stray score line removed) ---
# Per-stage configuration: index k configures the k-th optional block below.
rate = [0, 0, 0.3]  # 0 means: no Dropout layer at this position
bn = [False, False, False]  # False means: no BatchNormalization layer

use_conv = 2  # 0 - no Conv layers; 1 - Conv1D; 2 - Conv2D
filters = [0, 0, 32]  # 0 means: no Conv1D (Conv2D) layer at this position
rate_conv = [0, 0, 0.3]  # 0 means: no Dropout before the Conv layer
bn_conv = [False, False, False]  # False means: no BatchNormalization after the Conv layer

# Conv1D consumes a flat (pixels, 1) sequence; Conv2D a (rows, cols, 1) image.
input_shape = (img_rows * img_cols, 1)
if use_conv == 2:
    input_shape = (img_rows, img_cols, 1)

model = Sequential()
model.add(Input(shape=input_shape))

if use_conv == 1:
    # Flatten the images into the 1-D sequences Conv1D expects.
    # NOTE(review): no corresponding reshape is visible for use_conv == 2 —
    # presumably x_train already has a channel axis; confirm upstream.
    x_train = x_train.reshape(len(x_train), img_rows * img_cols, 1)
    x_test = x_test.reshape(len(x_test), img_rows * img_cols, 1)

# Add up to three optional Dropout + Conv + MaxPooling blocks.
for k in range(3):
    if filters[k] > 0:
        if rate_conv[k] > 0:
            model.add(Dropout(rate_conv[k]))
        if use_conv == 1:
            model.add(Conv1D(filters[k], kernel_size=4, activation='relu'))
            model.add(MaxPooling1D(pool_size=3, strides=1, padding='same'))
        else:
            model.add(Conv2D(filters[k], kernel_size=(4, 4), activation='relu'))
            model.add(MaxPooling2D(pool_size=(3, 3), strides=1, padding='same'))
# The latent noise vector can be viewed as a 10*10 "image".
latent_dim = 100
epochs = 30001  # number of training epochs (30001)
batch_size = 30  # training batch size (number of images generated per step)
sample_interval = 3000  # interval between saves of generated images to file

# Build the generator
generator = build_generator(img_shape, latent_dim)
# Build and compile the discriminator
discriminator = build_discriminator(img_shape, loss, optimizer)

# Combined model: noise -> generator -> (frozen) discriminator -> validity.
# The generator takes noise and produces batch_size images; the discriminator
# scores each as real (1) or fake (0).  Freezing the discriminator here means
# only the generator is trained through this stacked model — the
# discriminator's own, already-compiled training graph is unaffected.
combined = Sequential()
combined.add(Input(shape=(latent_dim,)))
combined.add(generator)
discriminator.trainable = False
combined.add(discriminator)

combined.compile(loss=loss_g, optimizer=optimizer)
# --- Esempio n. 7 (Example 7 — scraped-snippet separator; stray score line removed) ---
# (truncated fragment from the original scrape — the start of this call is
# missing: "path_to_data, img_rows, img_cols, num_classes)")
# Processed data for the model.
x_train, y_train, x_test, y_test = load_mnist_data(path_to_data, img_rows,
                                                   img_cols, num_classes)

# Restore a previously trained model from disk.
model = load_model(path_to_history + model_filename)
model.summary()

model.evaluate(x_train, y_train, batch_size=200, verbose=2)

# NOTE(review): predictions use x_test but plotting uses test_images —
# presumably the un-normalised originals of x_test; confirm against the caller.
predicted_labels = model.predict(x_test)
plot_wrong_predicted(test_images, y_test, predicted_labels)

# Rebuild the model with an extra Dropout layer inserted just before the
# final (output) layer, reusing all existing trained layers.
layers = model.layers
inp = Input(shape=input_shape)
x = inp
for layer in layers[1:len(layers) - 1]:
    # L.trainable = False
    x = layer(x)
x = Dropout(0.2, name='dropout_2')(x)
output = layers[-1](x)

model = Model(inputs=inp, outputs=output)
model.summary()
# NOTE(review): binary_crossentropy combined with categorical_accuracy looks
# inconsistent for a num_classes-way problem — categorical_crossentropy may
# have been intended; confirm against the original training script.
model.compile(optimizer='Adam',
              loss='binary_crossentropy',
              metrics=['categorical_accuracy'])

callbacks = []
# Checkpoint filename template: epoch number + validation accuracy.
filesToSave = 'weights.{epoch:03d}-{val_categorical_accuracy:.4f}.hdf5'
# --- Esempio n. 8 (Example 8 — scraped-snippet separator; stray score line removed) ---
# One-hot encode the character sequences.  Positions past the end of each
# text are filled with the space character, which acts as padding.
for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
    for t, char in enumerate(input_text):
        encoder_input_data[i, t, input_token_index[char]] = 1.
    # `t` deliberately leaks out of the loop above: pad the remainder.
    # NOTE(review): assumes every input_text / target_text is non-empty,
    # otherwise `t` is unbound here — confirm against the data loader.
    encoder_input_data[i, t + 1:, input_token_index[' ']] = 1.
    for t, char in enumerate(target_text):
        # decoder_target_data is ahead of decoder_input_data by one timestep
        decoder_input_data[i, t, target_token_index[char]] = 1.
        if t > 0:
            # decoder_target_data will be ahead by one timestep
            # and will not include the start character.
            decoder_target_data[i, t - 1, target_token_index[char]] = 1.
    # Pad the decoder sequences; targets start one step earlier (shifted left).
    decoder_input_data[i, t + 1:, target_token_index[' ']] = 1.
    decoder_target_data[i, t:, target_token_index[' ']] = 1.
# Define an input sequence and process it.
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]

# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)
# Per-timestep softmax over the target vocabulary.
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)