Code example #1
0
def hidden_maxout():
    """Build a one-hidden-layer maxout classifier: 784 -> 100 -> maxout(50) -> 10 softmax.

    Relies on module-level `x` (input placeholder), `create_weight_variable`,
    `create_bias_variable`, and `max_out`. Returns the softmax prediction tensor.
    """
    hidden_weights = create_weight_variable('weight_1', [784, 100])
    hidden_bias = create_bias_variable('bias_1', [100])
    output_weights = create_weight_variable('weight_2', [50, 10])
    output_bias = create_bias_variable('bias_2', [10])
    # Maxout pools the 100 pre-activations down to 50 units, matching [50, 10].
    pooled = max_out(tf.matmul(x, hidden_weights) + hidden_bias, 50)
    logits = tf.matmul(pooled, output_weights) + output_bias
    return tf.nn.softmax(logits)
Code example #2
0
def hidden_maxout():
    """One hidden layer with a maxout activation (784 -> 100 -> maxout(50) -> 10).

    Uses the module-level input placeholder `x` and the weight/bias factory
    helpers. Returns the softmax prediction tensor.
    """
    hidden_w = create_weight_variable('Weights', [784, 100])
    hidden_b = create_bias_variable('Bias', [100])

    out_w = create_weight_variable('Weights2', [50, 10])
    out_b = create_bias_variable('Bias2', [10])

    from maxout import max_out
    # 100 pre-activations maxout-pooled to 50 units, matching out_w's [50, 10].
    pooled = max_out(tf.matmul(x, hidden_w) + hidden_b, 50)
    logits = tf.matmul(pooled, out_w) + out_b
    return tf.nn.softmax(logits)
Code example #3
0
def hidden_maxout(dropout):
    """Maxout classifier (784 -> 100 -> maxout(50) -> 10) with optional dropout.

    Args:
        dropout: if truthy, apply dropout (rate=0.2) to the maxout layer output.

    Returns:
        Tuple (W1, b1, W2, b2, pred) — the layer parameters and the softmax
        prediction tensor, so the caller can reuse the weights (e.g. to build
        an adversarial prediction graph).
    """
    layer1_weights = create_weight_variable('Weights', [784, 100])
    layer1_bias = create_bias_variable('Bias', [100])

    layer2_weights = create_weight_variable('Weights2', [50, 10])
    layer2_bias = create_bias_variable('Bias2', [10])

    activation = max_out(tf.matmul(x, layer1_weights) + layer1_bias, 50)
    if dropout:
        activation = tf.nn.dropout(activation, rate=0.2)

    prediction = tf.nn.softmax(tf.matmul(activation, layer2_weights) + layer2_bias)
    return layer1_weights, layer1_bias, layer2_weights, layer2_bias, prediction
Code example #4
0
else:
    with tf.name_scope('Model'):
        # Model
        print(params)
        W1, b1, W2, b2, pred = params
    with tf.name_scope('Loss'):
        # Minimize error using cross entropy
        if declared_cost == 'CE':
            cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))
        elif declared_cost == 'CE_ADVERSARIAL':
            J = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))
            grad = tf.gradients(J, x)
            fast_gradient = tf.squeeze(tf.sign(grad), [0])
            fast_signed_gradient = epsilon * fast_gradient
            if activation_type == 'MAXOUT':
                pred_adversarial = tf.nn.softmax(tf.matmul(max_out(tf.matmul(x + fast_signed_gradient, W1) + b1, 50), W2) + b2)
            elif activation_type == 'RELU':
                pred_adversarial = tf.nn.softmax(tf.matmul(tf.nn.relu(tf.matmul(x, W1) + b1), W2) + b2)
            J_adversarial = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred_adversarial), reduction_indices=1))
            cost = adversarial_alpha * J + (1 - adversarial_alpha) * J_adversarial

    with tf.name_scope('SGD'):
        # Gradient Descent
        optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    with tf.name_scope('Accuracy'):
        # Accuracy
        acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
        acc = tf.reduce_mean(tf.cast(acc, tf.float32))
    saver = tf.train.Saver(max_to_keep=None)

    if is_train: