Example #1
import numpy as np
import utils

def test(model, data_test, label_test):
    hit, miss = 0, 0
    for i, sample in enumerate(data_test):
        # Forward pass through every layer of the model.
        output = sample
        for layer in model:
            output = utils.run_layer(layer, output)
        if np.argmax(output) == np.argmax(label_test[i]):
            hit += 1
        else:
            miss += 1
    print("ACERTOS: ", hit)
    print("ERROS: ", miss)
    print("TAXA DE ACERTOS: ", hit / (hit + miss) * 100)
Example #2
def encoder_bloc(h, layer_spec, noise_std, update_BN, activation):
    # Run the layer
    z_pre = run_layer(h, layer_spec, output_name="z_pre")

    # Compute mean and variance of z_pre (to be used in the decoder)
    dim = len(z_pre.get_shape().as_list())
    mean, var = tf.nn.moments(z_pre, axes=list(range(0, dim - 1)))
    mean = tf.identity(mean, name="mean")
    var = tf.identity(var, name="var")

    # tf.identity inserts a send/recv node pair into the graph to pass the
    # tensor along: if the two tensors live on different devices (e.g. CPU
    # and GPU) the value is copied; on the same device it is just a
    # reference. It returns a tensor with the same shape and values as its
    # input, essentially a y = x operation (used here to attach a name).

    # Batch normalization
    def training_batch_norm():
        if update_BN:
            z = update_batch_normalization(z_pre)
        else:
            z = batch_normalization(z_pre)
        return z

    def eval_batch_norm():
        with tf.variable_scope("BN", reuse=tf.AUTO_REUSE):
            mean = ewma.average(
                tf.get_variable("running_mean", shape=z_pre.shape[-1]))
            var = ewma.average(
                tf.get_variable("running_var", shape=z_pre.shape[-1]))
        z = batch_normalization(z_pre, mean, var)
        return z

    # Perform batch norm depending on the phase (training or evaluation)
    z = tf.cond(training, training_batch_norm, eval_batch_norm)
    z += tf.random_normal(tf.shape(z)) * noise_std
    z = tf.identity(z, name="z")

    # Center and scale plus activation
    size = z.get_shape().as_list()[-1]
    beta = tf.get_variable("beta", [size],
                           initializer=tf.constant_initializer(0))
    gamma = tf.get_variable("gamma", [size],
                            initializer=tf.constant_initializer(1))

    h = activation(z * gamma + beta)
    return tf.identity(h, name="h")
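encoder_bloc depends on module-level state defined elsewhere in the original file: run_layer, batch_normalization, update_batch_normalization, the ewma averager (presumably a tf.train.ExponentialMovingAverage), and the boolean training placeholder. A rough sketch of what the two batch-norm helpers could look like under those assumptions, not the original implementation:

import tensorflow as tf

def batch_normalization(batch, mean=None, var=None):
    # Normalize with the given statistics, or with the batch's own
    # statistics when none are supplied.
    if mean is None or var is None:
        mean, var = tf.nn.moments(batch, axes=[0])
    return (batch - mean) / tf.sqrt(var + 1e-10)

def update_batch_normalization(batch):
    # Normalize with the batch statistics and refresh the running
    # variables that eval_batch_norm reads back through ewma.
    mean, var = tf.nn.moments(batch, axes=[0])
    with tf.variable_scope("BN", reuse=tf.AUTO_REUSE):
        running_mean = tf.get_variable("running_mean",
                                       shape=batch.shape[-1],
                                       initializer=tf.zeros_initializer(),
                                       trainable=False)
        running_var = tf.get_variable("running_var",
                                      shape=batch.shape[-1],
                                      initializer=tf.ones_initializer(),
                                      trainable=False)
    assign_mean = tf.assign(running_mean, mean)
    assign_var = tf.assign(running_var, var)
    with tf.control_dependencies([assign_mean, assign_var]):
        return (batch - mean) / tf.sqrt(var + 1e-10)

For eval_batch_norm to read meaningful values, the original presumably also registers ewma.apply([running_mean, running_var]) as a training-time update op so that ewma.average(...) has shadow variables to return.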
Example #3
import random

import numpy as np
import utils

def train(dataset):
    # n_layers and learning_rate are module-level hyperparameters.
    global n_layers, learning_rate

    model = utils.initialize_model(n_layers)
    data_test, label_test, data_train, label_train = [], [], [], []
    test_size = 0.1
    epochs = 3

    # Randomly hold out roughly 10% of the rows for testing.
    for i in range(len(dataset['data'])):
        sample = utils.get_row(dataset, i)
        if random.random() < test_size:
            data_test.append(sample[0])
            label_test.append(sample[1])
        else:
            data_train.append(sample[0])
            label_train.append(sample[1])
    for epoch in range(epochs):
        for k in range(len(data_train)):
            sample, label = data_train[k], label_train[k]
            # Forward pass, keeping every layer's output for backprop.
            x_values, outputs = sample, [sample]
            for layer in model:
                x_values = utils.run_layer(layer, x_values)
                outputs.append(x_values)
            predicted = x_values
            # Backpropagate from the output layer to the input layer.
            for i in range(len(model) - 1, -1, -1):
                layer = model[i]
                if i == len(model) - 1:
                    # Squared-error loss with a sigmoid output:
                    # dL/dz = 2 * (y_hat - y) * y_hat * (1 - y_hat)
                    predicted = np.array(predicted)
                    gradient = (2 * (predicted - np.array(label))
                                * predicted * (1 - predicted))
                else:
                    # Chain rule through the next layer's weights and the
                    # sigmoid derivative of this layer's output.
                    out = np.array(outputs[i + 1])
                    gradient = np.matmul(np.transpose(model[i + 1]['W']),
                                         gradient) * out * (1 - out)
                # Write the updated layer back so the step takes effect.
                model[i] = utils.update_layer(layer, gradient, learning_rate,
                                              outputs[i] if i > 0 else sample)
    test(model, data_test, label_test)

    return model
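utils.update_layer is likewise external. Assuming each layer is a dict {'W': ..., 'b': ...} and plain SGD, an update step consistent with the gradients computed above might look like the following sketch:

import numpy as np

def update_layer(layer, gradient, learning_rate, layer_input):
    # dL/dW is the outer product of the backpropagated error and the
    # activations fed into this layer; dL/db is the error itself.
    grad = np.asarray(gradient, dtype=float).reshape(-1, 1)
    inp = np.asarray(layer_input, dtype=float).reshape(1, -1)
    layer['W'] = layer['W'] - learning_rate * np.matmul(grad, inp)
    if 'b' in layer:
        layer['b'] = layer['b'] - learning_rate * grad.ravel()
    return layer

Returning the layer keeps the model[i] = utils.update_layer(...) write-back in train working whether or not the helper mutates the dict in place.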