# Example 1
def train(model_type, epochs, batch_size, logdir):
    """Train a CNN variant on CIFAR-10 and log to TensorBoard.

    Args:
        model_type: 'normal' for a plain CNN, 'reparam' for the
            reparameterization-trick Bayesian CNN; any other value
            selects the flipout variant.
        epochs: number of training epochs.
        batch_size: per-step batch size.
        logdir: directory for TensorBoard event files.

    Returns:
        None.
    """
    # CIFAR-10 split sizes (fixed by the dataset).
    num_samples, num_samples_test = 50000, 10000
    autotune = tf.data.experimental.AUTOTUNE

    ds_train, ds_test = tfds.load('cifar10',
                                  split=['train', 'test'],
                                  as_supervised=True)

    def build_pipeline(ds, shuffle_buffer=None):
        # Shared input pipeline: (shuffle) -> batch -> preprocess ->
        # repeat -> prefetch. Only the training split is shuffled.
        if shuffle_buffer is not None:
            ds = ds.shuffle(shuffle_buffer)
        ds = ds.batch(batch_size)
        ds = ds.map(preprocess, num_parallel_calls=autotune)
        return ds.repeat(epochs).prefetch(buffer_size=autotune)

    ds_train = build_pipeline(ds_train, shuffle_buffer=num_samples)
    ds_test = build_pipeline(ds_test)

    # Scale the per-layer KL divergence by the training-set size so the
    # summed KL term matches the per-example likelihood scale.
    kl_div_fn = lambda q, p, _: tfd.kl_divergence(q, p) / num_samples

    if model_type == 'normal':
        model = CNN(num_classes=10)
    elif model_type == 'reparam':
        model = ReparamCNN(num_classes=10, kernel_divergence_fn=kl_div_fn)
    else:
        model = FlipOutCNN(num_classes=10, kernel_divergence_fn=kl_div_fn)
    # Set input_shape explicitly (before compile) to instantiate model.losses
    model.build(input_shape=[None, 32, 32, 3])

    model.compile(
        optimizers.Adam(),
        loss=losses.SparseCategoricalCrossentropy(from_logits=True),
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy')])

    # Datasets repeat for `epochs` passes, so steps_per_epoch /
    # validation_steps bound each epoch to exactly one pass over the data.
    model.fit(ds_train,
              epochs=epochs,
              callbacks=[tf.keras.callbacks.TensorBoard(log_dir=logdir)],
              validation_data=ds_test,
              steps_per_epoch=num_samples // batch_size,
              validation_steps=num_samples_test // batch_size)

    return None
# Example 2
elif net.lower() == 'vgg':
    model = VGG8(input_shape=x_train.shape[1:], num_classes=num_classes)
else:
    model = WideResidualNetwork(depth=28, width=8, dropout_rate=0.5,
                                classes=num_classes, include_top=True,
                                weights=None)

# Optionally resume from a saved checkpoint; this replaces the model built
# above wholesale (architecture, weights, and any compile state).
if args.checkpoint:
    model = load_model(args.checkpoint)

model.summary()


# Let's train the model using RMSprop
# NOTE(review): `opt` is defined earlier in the file — presumably an RMSprop
# instance per the comment above; confirm against the optimizer setup.
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

# Cast pixel arrays to float32 before the in-place normalization below
# (integer arrays would fail / truncate under -= and /=).
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')


def normalize(x):
    """Subtract the per-channel mean and divide by the per-channel std.

    Uses fixed RGB channel statistics on the 0-255 pixel scale —
    presumably the CIFAR-10 training-set mean/std; confirm against the
    dataset in use.

    Note: `x` is normalized *in place* (via -= / /=) and also returned,
    so pass a float array you are willing to overwrite.

    Args:
        x: float ndarray whose last axis holds the 3 RGB channels.

    Returns:
        The normalized array (the same object as `x`).
    """
    x -= np.array([125.3, 123.0, 113.9])
    x /= np.array([63.0, 62.1, 66.7])
    return x


# Standardize both splits with the same (training-set) channel statistics;
# normalize() mutates in place, so the reassignment is for readability.
x_train = normalize(x_train)
x_test = normalize(x_test)
# Example 3
    0.01,
]
optimizers = [SGD, Adam, RMSprop, Adadelta]
activations = ['relu', 'linear', 'sigmoid', 'tanh']

for act, lr, opt in product(activations, learning_rates, optimizers):

    # Train CNN
    fname = '_'.join(['cnn', opt.__name__.lower(), str(lr), act])
    fname = path + fname + '.wav'
    print(fname)
    model = CNN(input_shape=(28, 28, 1), activation=act)
    model.summary()

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt(lr=lr),
                  metrics=['accuracy'])

    grad_son = GradientSonification(path=fname,
                                    model=model,
                                    fs=fs,
                                    duration=duration,
                                    freq=freq)

    model.compile(loss='categorical_crossentropy',
                  optimizer=opt(lr=lr),
                  metrics=['accuracy'] + grad_son.metrics)

    model.fit(X_train,
              y_train,
              batch_size=batch_size,