# Exemplo n.º 1  (page marker from the scraped source site; commented out so it
# does not break parsing)
# 0  (page artifact accompanying the marker)
# MNIST preprocessing and baseline setup: cast to float32, standardise both
# splits with pooled mean/std, build tf.data pipelines, and record the
# untrained model's loss on train and test.
x_train = x_train.astype(np.float32)
x_test = x_test.astype(np.float32)

# Pooled statistics, weighting train:test as 6:1 (the 60k/10k MNIST split).
w_tr = 6. / 7.
w_te = 1. / 7.
mean = w_tr * x_train.mean() + w_te * x_test.mean()
sd = (w_tr * x_train.var() + w_te * x_test.var()) ** 0.5

# Standardise both splits with the shared statistics.
x_train = (x_train - mean) / sd
x_test = (x_test - mean) / sd

train_dataset_base = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(128)

loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# L2-penalised softmax classifier; keep a copy of its initial weights.
model_base = Softmax_Model(input_shape=(28, 28), n_class=10,
                           bias_pen=1e-4, kernel_pen=1e-4)
w_base = model_base.get_weights()

# Loss at initialisation on each split.
predictions = model_base(x_train, training=False)
start_loss_train = loss_object(y_train, predictions).numpy()

predictions = model_base(x_test)
start_loss_val = loss_object(y_test, predictions).numpy()

# NOTE(review): truncated fragment from the scraped source.  'AdamLR' opens a
# nested results dict (learning rate -> method -> metric lists) but the literal
# is never closed, and the two metric-update calls below belong to a
# training-step function whose 'def' line is not visible here.  This span
# cannot parse as-is; restore the missing middle before using it.
AdamLR = {
    '0.1': {
        'HD': {
            'loss': [],
            'val_loss': [],
            'alpha_it': [],
    train_loss_tf(loss)
    train_accuracy_tf(y_train, predictions)


# Iris experiment setup: load the dataset, build one full-batch tf.data
# pipeline, and create a softmax model with the test Adam optimizer plus
# running loss/accuracy metrics.
iris_data = load_iris()

learning_rate = 1e-1
beta = 1e-4

# Features as-is; targets reshaped to a column vector for sparse CE.
x_tf = iris_data.data
y_tf = iris_data.target.reshape(-1, 1)

# All 150 samples shuffled into a single batch (full-batch training).
train_dataset = (tf.data.Dataset
                 .from_tensor_slices((x_tf, y_tf))
                 .shuffle(150)
                 .batch(150))

model_keras = Softmax_Model(input_shape=(4, ), n_class=3)

loss_object_tf = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True)
optimizer_tf = MyOpts.Adam_Test(learning_rate=learning_rate,
                                beta1=0.9,
                                beta2=0.999,
                                epsilon=1e-8)

# Running metrics, reset/accumulated by the training loop.
train_loss_tf = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
train_accuracy_tf = tf.keras.metrics.SparseCategoricalAccuracy(
    'train_accuracy')

## Check initial conditions

n_iterations = 1000

# Cast to float32 and standardise both splits with pooled statistics
# (train and test weighted 6:1, matching the 60k/10k MNIST split).
x_train = x_train.astype(np.float32)
x_test = x_test.astype(np.float32)

train_w = 6. / 7.
test_w = 1. / 7.
mean = train_w * x_train.mean() + test_w * x_test.mean()
sd = (train_w * x_train.var() + test_w * x_test.var()) ** 0.5

x_train = (x_train - mean) / sd
x_test = (x_test - mean) / sd

train_dataset_base = tf.data.Dataset.from_tensor_slices((x_train, y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(128)

loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

# L2-penalised softmax classifier over 28x28 inputs; keep its initial weights.
model_base = Softmax(input_shape=(28, 28), n_class=10,
                     bias_pen=1e-4, kernel_pen=1e-4)

w_base = model_base.get_weights()

# Loss at initialisation on each split.
predictions = model_base(x_train, training=False)
start_loss_train = loss_object(y_train, predictions).numpy()

predictions = model_base(x_test)
start_loss_val = loss_object(y_test, predictions).numpy()

EPOCHS = 25
First = True
iter_for_epoch = 60000 // 128 + 1  # batches per epoch at batch size 128
count = 0
# NOTE(review): truncated fragment — this loop over 'Opts' has no visible body;
# the 'Exemplo n.º 4' / '0' lines are page markers from the scraped source, and
# the trailing keyword arguments are the tail of an optimizer constructor
# (presumably a torch SGD-with-hypergradient variant, given hypergrad_lr and
# the 'model_torch' used just below) whose opening call was lost.  This span
# cannot parse as-is.
for Opt in Opts:
Exemplo n.º 4
0
                               nesterov= True, hypergrad_lr=beta, momentum=0.9)

# Mirror the torch model's linear-layer parameters into a fresh Keras softmax
# model so both frameworks start from identical weights, build the matching TF
# loss/optimizer/metrics, then print both parameter sets for a visual check.
loss_function_torch = torch.nn.CrossEntropyLoss()

# Snapshot the torch parameters; transpose so the layout matches Keras
# (torch stores the kernel as (out, in), Keras expects (in, out)).
w_copy = deepcopy(model_torch.state_dict()["0.weight"])
b_copy = deepcopy(model_torch.state_dict()["0.bias"])
weight = w_copy.numpy().T
bias = b_copy.numpy().T

### Tensorflow
x_tf = iris_data.data
y_tf = iris_data.target.reshape(-1, 1)

# One shuffled full batch of all 150 iris samples.
train_dataset = (tf.data.Dataset
                 .from_tensor_slices((x_tf, y_tf))
                 .shuffle(150)
                 .batch(150))

model_keras = Softmax_Model(input_shape=(4,), n_class=3)

# Start the Keras model from the torch model's exact parameters.
model_keras.set_weights([weight,bias])

loss_object_tf = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer_tf = MyOpts.SGDN_HD(alpha_0=learning_rate, beta=beta, mu=0.9)

train_loss_tf = tf.keras.metrics.Mean('train_loss', dtype=tf.float32)
train_accuracy_tf = tf.keras.metrics.SparseCategoricalAccuracy('train_accuracy')

## Check initial conditions: the printed parameters should agree across
## frameworks (up to the transpose applied above).
print(model_keras.trainable_weights[0].numpy())
print(model_keras.trainable_weights[1].numpy())
print(model_torch[0].bias)
print(model_torch[0].weight)
            # NOTE(review): truncated fragment — tail of a nested list of NaN
            # placeholders (apparently a results grid, plausibly matching the
            # 6x7 alpha/beta sweep below — TODO confirm); its opening brackets
            # are not visible, so this span cannot parse as-is.
            float('nan'),
            float('nan'),
            float('nan'),
            float('nan')
        ],
        [
            float('nan'),
            float('nan'),
            float('nan'),
            float('nan'),
            float('nan'),
            float('nan'),
            float('nan')
        ]]

# Grid search over step size alpha and a second hyperparameter beta
# (presumably the hypergradient learning rate, as in the hypergrad_lr /
# SGDN_HD usage earlier in this file — TODO confirm).  For each (alpha, beta)
# pair the base training set is reshuffled deterministically and a fresh
# optimizer is built; a NaN beta selects plain keras Adam as the baseline.
model_base = Softmax_Model((28, 28), 10, kernel_pen=1e-4)
weight = model_base.get_weights()

alphas = [1e-1, 1e-2, 1e-3, 1e-4, 1e-5, 1e-6]
betas = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 1e-1, float('nan')]
for i in range(6):
    for j in range(7):

        # Deterministic reshuffle per configuration (seed=0, batch size 128).
        train_dataset = train_dataset_base.shuffle(60000, seed=0).batch(128)
        Not_done = True
        alpha = alphas[i]
        beta = betas[j]
        print_ = True

        # NOTE(review): the chunk ends here mid-call — the Adam constructor's
        # remaining arguments and the rest of the training loop continue past
        # the visible source, so this span cannot parse as-is.
        if isnan(beta):
            opt = keras.optimizers.Adam(lr=alpha,