Example #1
    out_frame_count=1,
    shuffle=False,
)

# Imports assumed by this snippet (Keras 2.x-era API, where lr= and
# fit_generator are still current).
from keras.layers import Input, Flatten, Dropout, Dense, BatchNormalization, PReLU
from keras.models import Model
from keras.optimizers import Adam

inputs = Input(shape=(11, 771))

fc1 = Flatten()(inputs)
fc1 = Dropout(0.3)(fc1)
fc1 = Dense(2048)(fc1)
fc1 = BatchNormalization(momentum=0.999)(fc1)
fc1 = PReLU()(fc1)
fc1 = Dropout(0.3)(fc1)

fc2 = Dense(2048)(fc1)
fc2 = BatchNormalization(momentum=0.999)(fc2)
fc2 = PReLU()(fc2)
fc2 = Dropout(0.3)(fc2)

out = Dense(257)(fc2)
model = Model(inputs=inputs, outputs=out)

adam = Adam(lr=0.0001, decay=1e-8)
model.compile(optimizer=adam, loss='mse')

for epoch in range(100):
    # fit_generator returns a History object, not a loss value, so pull
    # the epoch's mean training loss out of history.history.
    history = model.fit_generator(train_loader.batchify(), 5299)
    test_loss = model.evaluate_generator(test_loader.batchify(), 1600)

    print("Epoch", epoch)
    print("Train loss:", history.history['loss'][-1])
    print("Test loss:", test_loss)
Example #2
            layer = tf.layers.dropout(layer, rate=0.3, training=training)


    out = tf.layers.dense(layer, 257, kernel_initializer=tf.random_normal_initializer(0, 0.02))

    loss = tf.losses.mean_squared_error(out_frames, out)
    global_step = tf.Variable(0, trainable=False)
    # Decay the 1e-4 base learning rate by a factor of 0.95 every 10,000 steps.
    learning_rate = tf.train.exponential_decay(0.0001, global_step, 1e4, 0.95)
    train = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for epoch in range(100):
            print("Epoch", epoch)

            train_loss = 0
            for in_frame_batch, out_frame_batch in train_loader.batchify():
                fd = {in_frames: in_frame_batch, out_frames: out_frame_batch, training: True}
                batch_loss, _ = sess.run([loss, train], fd)
                train_loss += batch_loss

            print("Train loss:", train_loss / 5300)

            test_loss = 0
            for in_frame_batch, out_frame_batch in dev_loader.batchify():
                fd = {in_frames: in_frame_batch, out_frames: out_frame_batch, training: False}
                test_loss += sess.run(loss, fd)

            print("Test loss:", test_loss / 1612)
Example #3
    buffer_size=100,
    context=5,
    out_frame_count=1,
    shuffle=False,
)

# Imports assumed by this snippet (Keras 2.x-era API).
from keras.models import Sequential
from keras.layers import Reshape, Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.optimizers import Adam

model = Sequential([
    Reshape(target_shape=(11, 257, 1), input_shape=(11, 257)),
    Conv2D(64, 7, padding='same', activation='relu'),
    MaxPooling2D((2, 3), padding='same'),
    Conv2D(96, 3, padding='same', activation='relu'),
    MaxPooling2D((2, 3), padding='same'),
    Conv2D(128, 3, padding='same', activation='relu'),
    Flatten(),
    Dense(2048, activation='relu'),
    Dropout(0.3),
    Dense(2048, activation='relu'),
    Dropout(0.3),
    Dense(257),
])

adam = Adam(lr=0.0001)
model.compile(optimizer=adam, loss='mse')

for epoch in range(100):
    model.fit_generator(train_loader.batchify(), 21200)
    test_loss = model.evaluate_generator(test_loader.batchify(), 6410)

    print("Epoch", epoch)
    print("Test loss:", test_loss)
Example #4
    global_step = tf.Variable(0, trainable=False)
    learning_rate = tf.train.exponential_decay(0.0001, global_step, 1e4, 0.95)
    train = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        min_loss = 1  # best (lowest) dev loss seen so far
        for epoch in range(100):
            print("Epoch", epoch)

            train_loss = 0
            count = 0
            for in_frame_batch, out_frame_batch in train_loader.batchify(shuffle_batches=True, include_deltas=False):
                fd = {in_frames: in_frame_batch, out_frames: out_frame_batch, training: True}
                batch_loss, _ = sess.run([loss, train], fd)
                train_loss += batch_loss
                count += 1

            print("Train loss:", train_loss / count)

            test_loss = 0
            count = 0
            for in_frame_batch, out_frame_batch in dev_loader.batchify(include_deltas=False):
                fd = {in_frames: in_frame_batch, out_frames: out_frame_batch, training: False}
                test_loss += sess.run(loss, fd)
                count += 1

            test_loss /= count
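
The excerpt above ends right after averaging the dev loss. Given the
min_loss variable and the Saver created earlier, the omitted tail presumably
checkpoints when the dev loss improves; the continuation below is an
illustrative guess, and the checkpoint path is invented:

            # Hypothetical continuation -- not part of the source excerpt.
            print("Test loss:", test_loss)
            if test_loss < min_loss:
                min_loss = test_loss
                saver.save(sess, 'checkpoints/model', global_step=global_step)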
Example #5
        loss, global_step=global_step)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver()
        if args.load_dir:
            saver.restore(sess, tf.train.latest_checkpoint(args.load_dir))

        min_loss = 1
        for epoch in range(100):
            print("Epoch", epoch)

            train_loss = 0
            count = 0
            for noisy, clean in train_loader.batchify():
                noisy = noisy[:, :, :257]  # keep only the first 257 features of each frame
                fd = {
                    noisy_frames: noisy,
                    clean_frames: clean,
                    resnet.training: True
                }
                batch_loss, _ = sess.run([loss, train], fd)
                train_loss += batch_loss
                count += 1

            print("Train loss:", train_loss / count)

            test_loss = 0
            count = 0
            for noisy, clean in dev_loader.batchify():