Example #1
    def __init__(self, pkeep=0.75):
        # layers
        self.E_W1 = model.weight_variable([4096, 2048], name='E_W1')
        self.E_b1 = model.bias_variable([2048], name='E_b1')

        self.E_W2 = model.weight_variable([2048, 1024], name='E_W2')
        self.E_b2 = model.bias_variable([1024], name='E_b2')

        self.E_W3 = model.weight_variable([1024, 512], name='E_W3')
        self.E_b3 = model.bias_variable([512], name='E_b3')

        self.E_W4 = model.weight_variable([512, 256], name='E_W4')
        self.E_b4 = model.bias_variable([256], name='E_b4')

        self.E_W5 = model.weight_variable([256, 128], name='E_W5')
        self.E_b5 = model.bias_variable([128], name='E_b5')

        self.theta = [
            self.E_W1,
            self.E_b1,
            self.E_W2,
            self.E_b2,
            self.E_W3,
            self.E_b3,
            self.E_W4,
            self.E_b4,
            self.E_W5,
            self.E_b5,
        ]

        self.pkeep = pkeep
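
Every example on this page calls model.weight_variable and model.bias_variable from a project-specific model module that is not shown here. As a point of reference only, a minimal TF1-style sketch of what such helpers commonly look like (an assumption, not the actual module used by these examples):

    import tensorflow as tf

    def weight_variable(shape, name=None):
        # truncated-normal initialization is the usual choice in TF1 example code
        initial = tf.truncated_normal(shape, stddev=0.1)
        return tf.Variable(initial, name=name)

    def bias_variable(shape, name=None):
        # small positive constant to keep ReLU units active at the start of training
        initial = tf.constant(0.1, shape=shape)
        return tf.Variable(initial, name=name)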
Example #2
    def __init__(self):
        # layers
        self.D_W1 = model.weight_variable([128, 256], name='D_W1')
        self.D_b1 = model.bias_variable([256], name='D_b1')

        self.D_W2 = model.weight_variable([256, 784], name='D_W2')
        self.D_b2 = model.bias_variable([784], name='D_b2')

        self.theta = [
            self.D_W1,
            self.D_b1,
            self.D_W2,
            self.D_b2,
        ]
Example #3
def get_detect_model_8():
    x, conv_layer, conv_vars = model.convolutional_layers()

    # Fourth layer: fully connected weights reshaped into a convolution kernel,
    # so the layer can be applied convolutionally across larger inputs
    W_fc1 = model.weight_variable([8 * 32 * 64, 1024])
    W_conv1 = tf.reshape(W_fc1, [8, 32, 64, 1024])
    b_fc1 = model.bias_variable([1024])
    h_conv1 = tf.nn.relu(
        model.conv2d(conv_layer, W_conv1, stride=(1, 1), padding="VALID") +
        b_fc1)
    # Fifth layer
    W_fc2 = model.weight_variable([1024, 1 + 8 * len(common.CHARS)])
    W_conv2 = tf.reshape(W_fc2, [1, 1, 1024, 1 + 8 * len(common.CHARS)])
    b_fc2 = model.bias_variable([1 + 8 * len(common.CHARS)])
    h_conv2 = model.conv2d(h_conv1, W_conv2) + b_fc2

    return (x, h_conv2, conv_vars + [W_fc1, b_fc1, W_fc2, b_fc2])
Example #4
    def __init__(self, pkeep=0.75):
        # layers
        self.E_W1 = model.weight_variable([784, 512], name='E_W1')
        self.E_b1 = model.bias_variable([512], name='E_b1')

        self.E_W2 = model.weight_variable([512, 256], name='E_W2')
        self.E_b2 = model.bias_variable([256], name='E_b2')

        self.E_W3 = model.weight_variable([256, 128], name='E_W3')
        self.E_b3 = model.bias_variable([128], name='E_b3')

        self.theta = [
            self.E_W1, self.E_b1,
            self.E_W2, self.E_b2,
            self.E_W3, self.E_b3,
        ]

        self.pkeep = pkeep
Example #5
    def __init__(self):
        # convolutional layers (5x5 filters)
        self.D_W1 = model.weight_variable([5, 5, 1, 64], name='D_W1')
        self.D_b1 = model.bias_variable([64], name='D_b1')

        self.D_W2 = model.weight_variable([5, 5, 64, 128], name='D_W2')
        self.D_b2 = model.bias_variable([128], name='D_b2')

        # fully connected layers (7x7x128 feature map flattened)
        self.D_W3 = model.weight_variable([7*7*128, 1024], name='D_W3')
        self.D_b3 = model.bias_variable([1024], name='D_b3')

        self.D_W4 = model.weight_variable([1024, 1], name='D_W4')
        self.D_b4 = model.bias_variable([1], name='D_b4')

        self.theta = [
            self.D_W1, self.D_b1,
            self.D_W2, self.D_b2,
            self.D_W3, self.D_b3,
            self.D_W4, self.D_b4,
        ]
Example #6
    def __init__(self):
        # layers
        self.G_W1 = model.weight_variable([100, 1024], name='G_W1')
        self.G_b1 = model.bias_variable([1024], name='G_b1')

        self.G_W2 = model.weight_variable([1024, 128], name='G_W2')
        self.G_b2 = model.bias_variable([128], name='G_b2')

        self.G_W3 = model.weight_variable([128, 64], name='G_W3')
        self.G_b3 = model.bias_variable([64], name='G_b3')

        self.G_W4 = model.weight_variable([64, 1], name='G_W4')
        self.G_b4 = model.bias_variable([1], name='G_b4')

        self.theta = [
            self.G_W1,
            self.G_b1,
            self.G_W2,
            self.G_b2,
            self.G_W3,
            self.G_b3,
            self.G_W4,
            self.G_b4,
        ]
Example #7
    n_classes = 2                   # output classes: space or no space
    vocab_size = n_input
    
    x = tf.placeholder(tf.float32, [None, n_steps, n_input])
    y_ = tf.placeholder(tf.int32, [None, n_steps])
    early_stop = tf.placeholder(tf.int32)

    # LSTM layer
    # 2 x n_hidden = state_size = (hidden state & cell state)
    istate = tf.placeholder(tf.float32, [None, 2*n_hidden])
    weights = {
        'hidden' : model.weight_variable([n_input, n_hidden]),
        'out' : model.weight_variable([n_hidden, n_classes])
    }
    biases = {
        'hidden' : model.bias_variable([n_hidden]),
        'out': model.bias_variable([n_classes])
    }

    y = model.RNN(x, istate, weights, biases, n_hidden, n_steps, n_input, early_stop)

    batch_size = 1
    logits = tf.reshape(tf.concat(y, 1), [-1, n_classes])

    NUM_THREADS = 1
    config = tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS,
            inter_op_parallelism_threads=NUM_THREADS,
            log_device_placement=False)
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()
    sess.run(init)
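
The snippet above ends once the session is initialized. A usage sketch for feeding this graph, assuming n_steps, n_input and n_hidden are defined earlier in the original source and that early_stop carries the effective sequence length:

    import numpy as np

    # purely illustrative dummy batch (batch_size = 1, as above)
    dummy_x = np.zeros((batch_size, n_steps, n_input), dtype=np.float32)
    zero_state = np.zeros((batch_size, 2 * n_hidden), dtype=np.float32)

    out = sess.run(logits, feed_dict={
        x: dummy_x,
        istate: zero_state,
        early_stop: n_steps,
    })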