Example #1
        def build_net(x, sizes):
            lrelu = nn.lrelu_gen(0.1)
            # Pair consecutive layer widths: [a, b, c] -> [[a, b], [b, c]]
            size_pairs = [[sizes[i], sizes[i+1]] for i in range(len(sizes)-1)]

            def build_block(x, _in, _out):
                z = x

                # res_depth width-preserving FC layers with skip connections
                for i in range(self.res_depth):
                    with tf.variable_scope('fc_layer_{}'.format(i)):
                        f = tf.nn.dropout(
                            nn.build_fc_layer(
                                z, lrelu, _in, _in, self.reg_param
                            ),
                            self.keep_prob
                        )

                        # Residual (skip) connection
                        z = f + z

                # Final FC layer projects the block from _in to _out
                return tf.nn.dropout(
                    nn.build_fc_layer(
                        z, lrelu, _in, _out, self.reg_param
                    ),
                    self.keep_prob
                )

            z = x

            for i, (_in, _out) in enumerate(size_pairs):
                with tf.variable_scope('res_block_{}'.format(i)):
                    z = build_block(z, _in, _out)

            return z
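
All five examples lean on a project-local helper module `nn` whose source is not shown. The sketch below is a guess at what `nn.lrelu_gen` and `nn.build_fc_layer` might look like in TF 1.x, inferred only from the call sites: `lrelu_gen(alpha)` presumably returns a leaky-ReLU activation, and `build_fc_layer` presumably builds one fully connected layer with optional L2 regularization. Names and signatures are assumptions, not the repo's actual code.

    import tensorflow as tf

    def lrelu_gen(alpha):
        # Assumed helper: returns a leaky ReLU with negative slope `alpha`
        def lrelu(x):
            return tf.maximum(alpha * x, x)
        return lrelu

    def build_fc_layer(x, act, in_dim, out_dim, reg_param=None):
        # Assumed helper: one fully connected layer, act(x @ W + b),
        # with optional L2 weight regularization
        reg = tf.contrib.layers.l2_regularizer(reg_param) if reg_param else None
        W = tf.get_variable(
            'weights', shape=[in_dim, out_dim],
            initializer=tf.contrib.layers.xavier_initializer(),
            regularizer=reg
        )
        b = tf.get_variable('bias', shape=[out_dim],
                            initializer=tf.zeros_initializer())
        return act(tf.matmul(x, W) + b)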
Example #2
        def build_net(x, sizes):
            lrelu = nn.lrelu_gen(0.1)

            def block(x, in_dim, out_dim, i):
                with tf.variable_scope('block_{}'.format(i)):
                    z = x
                    # res_depth width-preserving residual layers with dropout
                    for j in range(self.res_depth):
                        with tf.variable_scope('res_block_{}'.format(j)):
                            z = nn.build_residual_block(
                                z, lrelu, in_dim, self.reg_param
                            )
                            # Record the residual embedding for inspection
                            self.embedding_ops.append(z)
                            z = tf.nn.dropout(z, self.keep_prob)

                    # Final FC layer projects the block from in_dim to out_dim
                    z = nn.build_fc_layer(
                        z, lrelu, in_dim, out_dim, self.reg_param
                    )

                    # Record the projected embedding as well
                    self.embedding_ops.append(z)

                    # No dropout after the last (output) block
                    if i < len(sizes) - 2:
                        z = tf.nn.dropout(z, self.keep_prob)

                    return z

            z = x

            for i in range(1, len(sizes)):
                z = block(z, sizes[i-1], sizes[i], i-1)

            return z
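
The examples call `nn.build_residual_block` with slightly different signatures: Example 2 passes an activation and a regularization parameter, Example 4 just an activation, and Examples 3 and 5 only a width. A minimal sketch of the fullest variant (Example 2's), assuming a residual block here means two width-preserving FC layers plus a skip connection; the layer count and internals are assumptions:

    def build_residual_block(x, act, dim, reg_param=None):
        # Assumed helper: f(x) = two width-preserving FC layers; output x + f(x)
        with tf.variable_scope('fc_0'):
            z = build_fc_layer(x, act, dim, dim, reg_param)
        with tf.variable_scope('fc_1'):
            z = build_fc_layer(z, act, dim, dim, reg_param)
        return x + z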
Example #3
        def build_net(X, sizes, scope, reuse=False):
            lrelu = nn.lrelu_gen(0.1)

            def block(x, in_dim, out_dim, i):
                with tf.variable_scope('block_{}'.format(i)):
                    # Width-preserving residual layer, then an FC projection
                    z = nn.build_residual_block(x, in_dim)
                    tmp = nn.build_fc_layer(z, lrelu, in_dim, out_dim)

                    # Record both intermediate embeddings for inspection
                    self.embedding_ops.append(z)
                    self.embedding_ops.append(tmp)

                    return tf.nn.dropout(tmp, self.keep_prob)

            z = X

            with tf.variable_scope(scope, reuse=reuse):
                for i in range(1, len(sizes)):
                    z = block(z, sizes[i - 1], sizes[i], i - 1)

            return z
Example #4
    def build_net(X, keep_prob=tf.constant(1.0)):
        lrelu = nn.lrelu_gen(0.1)

        def block(x, in_dim, out_dim, i):
            with tf.variable_scope('block_{}'.format(i)):
                z = x
                # Two width-preserving residual layers with dropout
                for j in range(2):
                    with tf.variable_scope('res_block_{}'.format(j)):
                        z = nn.build_residual_block(z, lrelu, in_dim)
                        z = tf.nn.dropout(z, keep_prob)

                # Final FC layer projects the block from in_dim to out_dim
                z = nn.build_fc_layer(z, lrelu, in_dim, out_dim)

                return tf.nn.dropout(z, keep_prob)

        z = X

        # `sizes` is a free variable captured from the enclosing scope
        for i in range(1, len(sizes)):
            z = block(z, sizes[i - 1], sizes[i], i - 1)

        return z
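
Since `sizes` in Example 4 is captured from an enclosing scope, a caller has to define it before building the graph. A minimal usage sketch, treating the builder as a module-level function for illustration; the layer widths and placeholder shapes below are illustrative, not from the source:

    sizes = [128, 64, 32, 1]  # illustrative widths; sizes[0] is the input dim

    X = tf.placeholder('float32', [None, sizes[0]], name='X')
    keep_prob = tf.placeholder('float32', name='keep_prob')
    out = build_net(X, keep_prob)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # feed X and keep_prob to evaluate `out`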
Example #5
    def __init__(self, num_features, batch_size=100, num_epochs=10,
                 debug=False, normalize=False, display_step=1,
                 std_param=5, embedding_size=100, num_steps=3):

        ########################################
        # Network Parameters                   #
        ########################################

        l_rate                  = 0.001
        # reg_param               = 0.01
        self.std_param          = std_param
        self.training_epochs    = num_epochs
        self.display_step       = display_step
        self.batch_size         = batch_size
        self.debug              = debug
        self.normalize          = normalize
        self.num_steps          = num_steps
        self.num_features       = num_features

        ########################################
        # TensorFlow Variables                 #
        ########################################

        # Shape: batch, time steps, features
        self.X = tf.placeholder('float32', [None, None, num_features],
                                name='X')
        self.Y = tf.placeholder('int32', [None], name='Y')
        self.keep_prob = tf.placeholder('float32')

        # Benign score bounds for anomaly detection
        self.score_upper    = tf.Variable(0, dtype=tf.float32)
        self.score_lower    = tf.Variable(0, dtype=tf.float32)

        # Feature min/max for input normalization
        self.feature_min = tf.Variable(np.zeros(num_features),
                                       dtype=tf.float32)
        self.feature_max = tf.Variable(np.zeros(num_features),
                                       dtype=tf.float32)

        ########################################
        # READY Model                          #
        ########################################

        # Create the TEA
        self.tea = TEA(num_features=num_features,
                       embedding_size=embedding_size)

        # Create the classifier
        # with tf.variable_scope('classifier'):
        #     weights = tf.get_variable(
        #         'weights',
        #         shape=[embedding_size, 1],
        #         initializer=tf.contrib.layers.xavier_initializer()
        #     )
        #     bias = tf.get_variable(
        #         'bias',
        #         shape=[1],
        #         initializer=tf.contrib.layers.xavier_initializer()
        #     )
        # sizes   = [embedding_size + num_features + 1, 25, 1]
        # acts    = [tf.nn.relu, tf.identity]

        # with tf.variable_scope('classifier'):
        #     self.classifier = NeuralNet(sizes, acts)

        lrelu = nn.lrelu_gen(0.1)

        def build_network(X):
            # Input width: embedding + previous score (the +1) + features
            sizes = [num_features + embedding_size + 1, 100, 50, 25, 12, 6, 1]

            def block(x, in_dim, out_dim, i):
                with tf.variable_scope('block_{}'.format(i)):
                    z = nn.build_residual_block(x, in_dim)
                    return nn.build_fc_layer(z, lrelu, in_dim, out_dim)

            z = X

            with tf.variable_scope('classifier'):
                for i in range(1, len(sizes)):
                    z = block(z, sizes[i-1], sizes[i], i-1)

            return z

        # Connect the full model: loop state is (time step, score, prev input)
        t               = tf.constant(0)
        score_init      = tf.zeros([tf.shape(self.X)[0]])
        x_init          = tf.zeros([tf.shape(self.X)[0], num_features])

        # One step of the recurrence over the time axis of self.X
        def constructor(t, score_prev, X_prev):
            # Slice out time step t: shape [batch, num_features]
            X = tf.squeeze(
                tf.slice(self.X, [0, t, 0], [-1, 1, -1]),
                axis=1
            )

            # Mix in the previous step's predicted features
            X = tf.add(X, X_prev)

            # Score the embedding together with the previous score and input
            emb = self.tea.embedding(X)
            score = build_network(
                tf.concat([emb, tf.expand_dims(score_prev, 1), X_prev], axis=1)
            )

            t       = tf.add(t, 1)
            # score   = tf.add(score_prev, tf.squeeze(score, axis=1))
            score   = tf.squeeze(score, axis=1)

            # Predict the next time step's features with the TEA
            X_next = self.tea.create_network(X)

            return t, score, X_next

        # Run the step function across every time step in the sequence
        _, net_scores, _ = tf.while_loop(
            lambda t, s, x: t < tf.shape(self.X)[1],
            constructor,
            loop_vars=[t, score_init, x_init],
            shape_invariants=[
                t.get_shape(),
                score_init.get_shape(),
                x_init.get_shape()
            ]
        )

        # self.scores = tf.divide(net_scores, tf.to_float(tf.shape(self.X)[1]))
        self.scores = net_scores

        # Push scores toward 1.0; only the classifier's variables are trained
        self.loss = tf.reduce_mean(tf.square(1.0 - self.scores))
        self.opt = tf.train.AdamOptimizer(learning_rate=l_rate).minimize(
            self.loss,
            var_list=tf.get_collection(
                tf.GraphKeys.TRAINABLE_VARIABLES,
                scope='classifier'
            )
        )

        ########################################
        # Evaluation Metrics                   #
        ########################################

        # Predict label 1 when the score falls within the benign bounds,
        # otherwise label 0
        negative_labels = tf.fill(tf.shape(self.Y), 0)
        positive_labels = tf.fill(tf.shape(self.Y), 1)
        lower_threshold = tf.fill(tf.shape(self.Y), self.score_lower)
        upper_threshold = tf.fill(tf.shape(self.Y), self.score_upper)

        pred_labels = tf.where(
            tf.logical_and(
                tf.less(self.scores, upper_threshold),
                tf.greater(self.scores, lower_threshold)
            ),
            positive_labels,
            negative_labels
        )

        self.confusion_matrix = tf.confusion_matrix(
            self.Y,
            pred_labels,
            num_classes=2
        )

        self.accuracy = tf.reduce_mean(
            tf.to_float(tf.equal(pred_labels, self.Y))
        )

        # Variable ops
        self.init_op = tf.global_variables_initializer()
        self.saver = tf.train.Saver()
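
The constructor above builds the whole graph, so driving it only takes a session. A minimal sketch, assuming the enclosing class is called `Model` and that `X_batch` is a [batch, time steps, features] array of benign training data (both names are assumptions):

    model = Model(num_features=10)

    with tf.Session() as sess:
        sess.run(model.init_op)

        for epoch in range(model.training_epochs):
            # Each step nudges the classifier's scores toward 1.0
            _, loss = sess.run(
                [model.opt, model.loss],
                feed_dict={model.X: X_batch, model.keep_prob: 0.5}
            )

        model.saver.save(sess, './model.ckpt')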