Example #1
    def test_lifecycle(
        self,
        modifier_lambda: Callable[[], GMPruningModifier],
        graph_lambda: Callable[[], tf_compat.Graph],
        steps_per_epoch: int,
    ):
        modifier = modifier_lambda()
        graph = graph_lambda()
        with graph.as_default():
            global_step = tf_compat.train.get_or_create_global_step()
            step_placeholder = tf_compat.placeholder(dtype=tf_compat.int64,
                                                     name="step")
            global_assign = global_step.assign(step_placeholder)

            inp = graph.get_tensor_by_name("inp:0")
            out = graph.get_tensor_by_name("out:0")

            mod_ops, mod_extras = modifier.create_ops(steps_per_epoch,
                                                      global_step, graph)
            assert len(mod_ops) == 1
            assert mod_ops[0] is not None
            assert len(mod_extras) == 1
            assert EXTRAS_KEY_SUMMARIES in mod_extras
            assert modifier.prune_op_vars
            assert len(modifier.prune_op_vars) > 0
            last_sparsities = [0.0 for _ in range(len(modifier.prune_op_vars))]

            with tf_compat.Session(graph=graph) as sess:
                sess.run(tf_compat.global_variables_initializer())
                modifier.initialize_session(sess)
                step_counter = 0
                inp_arr = numpy.random.random((1, *inp.shape[1:]))

                for epoch in range(int(modifier.end_epoch + 5.0)):
                    for step in range(steps_per_epoch):
                        res = sess.run(out, feed_dict={inp: inp_arr})
                        assert res.sum() > 0

                        step_counter += 1
                        sess.run(global_assign,
                                 feed_dict={step_placeholder: step_counter})
                        sess.run(mod_ops)

                        for index, op_vars in enumerate(
                                modifier.prune_op_vars):
                            mask_sparsity = eval_tensor_sparsity(op_vars.mask)
                            masked_sparsity = eval_tensor_sparsity(
                                op_vars.masked)

                            assert abs(mask_sparsity - masked_sparsity) < 1e-5

                            if epoch < modifier.start_epoch:
                                assert masked_sparsity < 1e-2
                            else:
                                assert masked_sparsity >= last_sparsities[index]
                                last_sparsities[index] = masked_sparsity

                modifier.complete_graph(graph, sess)
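
All of these examples measure sparsity with an eval_tensor_sparsity helper. A minimal sketch of what such a helper could look like, assuming it simply evaluates the tensor in the default session and returns the fraction of exactly-zero elements (the real helper may differ):

import numpy

def eval_tensor_sparsity(tensor, sess=None):
    # Evaluate the tensor and return the fraction of elements equal to zero
    sess = sess if sess is not None else tf_compat.get_default_session()
    val = sess.run(tensor)
    return float(numpy.sum(val == 0.0)) / float(val.size)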
Example #2
def test_apply_op_vars_masks(
    sparsity_val: float,
    net_const: Callable,
    inp_arr: numpy.ndarray,
    var_names: List[str],
):
    group = "test-group"

    with tf_compat.Graph().as_default() as graph:
        out, inp = net_const()
        sparsity = tf_compat.placeholder(dtype=tf_compat.float32,
                                         name="sparsity_placeholder")
        update_ready = tf_compat.placeholder(dtype=tf_compat.bool,
                                             name="update_ready")
        pruning_op_vars = get_or_create_graph_ops_pruning(
            graph,
            var_names,
            sparsity,
            update_ready,
            True,
            None,
            group,
            UnstructuredPruningMaskCreator(),
        )

        with tf_compat.Session() as sess:
            sess.run(tf_compat.global_variables_initializer())

            for op_vars in pruning_op_vars:
                sess.run(
                    op_vars.update,
                    feed_dict={
                        sparsity: sparsity_val,
                        update_ready: True
                    },
                )

            apply_op_vars_masks(pruning_op_vars, group, sess)

            for op_vars in pruning_op_vars:
                var_sparsity = eval_tensor_sparsity(op_vars.op_input)
                assert abs(var_sparsity - sparsity_val) < 1e-2
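
The apply_op_vars_masks call is what makes the pruning permanent: after it runs, the weight variable itself is sparse, not just the masked view the op reads. A plausible sketch, assuming op_input resolves to the underlying weight variable (not the library's actual implementation):

def apply_op_vars_masks(pruning_op_vars, group, sess):
    # Write mask * weight back into the weight variable so the zeros
    # persist even after the masking ops are removed from the graph
    for op_vars in pruning_op_vars:
        with tf_compat.name_scope(group):
            masked = tf_compat.math.multiply(op_vars.op_input, op_vars.mask)
            sess.run(tf_compat.assign(op_vars.op_input, masked))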
Example #3
def test_gm_pruning_training_with_manager():
    modifier = GMPruningModifier(
        params=["mlp_net/fc1/weight", "mlp_net/fc3/weight"],
        init_sparsity=0.05,
        final_sparsity=0.8,
        start_epoch=2.0,
        end_epoch=7.0,
        update_frequency=1.0,
    )
    sec_modifier = GMPruningModifier(
        params=["mlp_net/fc2/weight"],
        init_sparsity=0.05,
        final_sparsity=0.8,
        start_epoch=2.0,
        end_epoch=7.0,
        update_frequency=1.0,
    )
    manager = ScheduledModifierManager([modifier, sec_modifier])
    steps_per_epoch = 5
    batch_size = 2

    with tf_compat.Graph().as_default() as graph:
        logits, inputs = mlp_net()
        labels = tf_compat.placeholder(tf_compat.float32,
                                       [None, *logits.shape[1:]])
        loss = batch_cross_entropy_loss(logits, labels)

        global_step = tf_compat.train.get_or_create_global_step()
        train_op = tf_compat.train.AdamOptimizer(learning_rate=1e-4).minimize(
            loss, global_step=global_step)

        mod_ops, mod_extras = manager.create_ops(steps_per_epoch)
        last_sparsities = [0.0 for _ in range(len(modifier.prune_op_vars))]

        with tf_compat.Session(graph=graph) as sess:
            sess.run(tf_compat.global_variables_initializer())
            manager.initialize_session(sess)
            batch_lab = numpy.random.random((batch_size, *logits.shape[1:]))
            batch_inp = numpy.random.random((batch_size, *inputs.shape[1:]))

            for epoch in range(int(modifier.end_epoch + 2.0)):
                for step in range(steps_per_epoch):
                    sess.run(train_op,
                             feed_dict={
                                 inputs: batch_inp,
                                 labels: batch_lab
                             })
                    sess.run(global_step)

                    sess.run(mod_ops)
                    update_ready_val = sess.run(modifier.update_ready)
                    sess.run(modifier.sparsity)

                    for index, op_vars in enumerate(modifier.prune_op_vars):
                        mask_sparsity = eval_tensor_sparsity(op_vars.mask)
                        masked_sparsity = eval_tensor_sparsity(op_vars.masked)

                        assert abs(mask_sparsity - masked_sparsity) < 1e-5

                        if epoch < modifier.start_epoch:
                            assert masked_sparsity < 1e-2
                            assert not update_ready_val
                        elif epoch >= modifier.end_epoch:
                            assert abs(masked_sparsity -
                                       modifier.final_sparsity) < 1e-2
                            assert not update_ready_val
                        else:
                            assert masked_sparsity >= last_sparsities[
                                index] - 1e-2
                            last_sparsities[index] = masked_sparsity

            manager.complete_graph()

            for op_vars in modifier.prune_op_vars:
                assert (abs(modifier.final_sparsity -
                            eval_tensor_sparsity(op_vars.op_input)) < 1e-2)
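
mlp_net and batch_cross_entropy_loss come from the test helpers. The loss is presumably ordinary softmax cross entropy reduced over the batch; a sketch under that assumption:

def batch_cross_entropy_loss(logits, labels):
    # Per-sample softmax cross entropy, averaged into a scalar loss
    return tf_compat.reduce_mean(
        tf_compat.nn.softmax_cross_entropy_with_logits_v2(
            labels=labels, logits=logits))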
Example #4
def test_get_or_create_ks_scheduled_graph_ops(
    begin_step: int,
    end_step: int,
    update_step_freq: int,
    init_sparsity: float,
    final_sparsity: float,
    exponent: float,
    net_const: Callable,
    inp_arr: numpy.ndarray,
    var_names: List[str],
):
    group = "test-group"

    with tf_compat.Graph().as_default() as graph:
        global_step = tf_compat.train.get_or_create_global_step()
        step_placeholder = tf_compat.placeholder(dtype=tf_compat.int64,
                                                 name="step")
        global_assign = global_step.assign(step_placeholder)

        out, inp = net_const()

        (
            update_op,
            pruning_op_vars,
            update_ready,
            sparsity,
        ) = get_or_create_ks_scheduled_graph_ops(
            graph,
            global_step,
            var_names,
            begin_step,
            end_step,
            update_step_freq,
            init_sparsity,
            final_sparsity,
            exponent,
            True,
            group,
            UnstructuredPruningMaskCreator(),
        )
        (
            update_op_sec,
            pruning_op_vars_sec,
            update_ready_sec,
            sparsity_sec,
        ) = get_or_create_ks_scheduled_graph_ops(
            graph,
            global_step,
            var_names,
            begin_step,
            end_step,
            update_step_freq,
            init_sparsity,
            final_sparsity,
            exponent,
            True,
            group,
            UnstructuredPruningMaskCreator(),
        )

        assert update_op == update_op_sec
        assert update_ready == update_ready_sec
        assert sparsity == sparsity_sec
        assert len(pruning_op_vars) == 3
        assert len(pruning_op_vars) >= len(
            var_names)  # at least 1 regex match per name

        for op_vars, op_vars_sec in zip(pruning_op_vars, pruning_op_vars_sec):
            assert op_vars.op == op_vars_sec.op
            assert op_vars.update == op_vars_sec.update
            assert op_vars.mask == op_vars_sec.mask
            assert op_vars.masked == op_vars_sec.masked

        with tf_compat.Session() as sess:
            sess.run(tf_compat.global_variables_initializer())
            last_update_sparsity = None

            for step in range(end_step + 10):
                sess.run(global_assign, feed_dict={step_placeholder: step})
                update_ready_val = sess.run(update_ready)
                sparsity_val = sess.run(sparsity)
                sess.run(update_op)

                for op_var in pruning_op_vars:
                    mask_sparsity = eval_tensor_sparsity(op_var.mask)
                    masked_sparsity = eval_tensor_sparsity(op_var.masked)
                    weight_sparsity = eval_tensor_sparsity(op_var.op_input)

                    assert abs(mask_sparsity - masked_sparsity) < 1e-5

                    if step < begin_step:
                        assert abs(masked_sparsity) < 1e-2
                        assert not update_ready_val
                    elif step == begin_step:
                        assert abs(masked_sparsity - init_sparsity) < 1e-2
                        assert abs(sparsity_val - init_sparsity) < 1e-5
                        assert update_ready_val
                        last_update_sparsity = masked_sparsity
                    elif step == end_step:
                        assert update_ready_val
                        assert abs(masked_sparsity - final_sparsity) < 1e-2
                        assert abs(sparsity_val - final_sparsity) < 1e-5
                        last_update_sparsity = masked_sparsity
                    elif step > end_step:
                        assert not update_ready_val
                        assert abs(masked_sparsity - final_sparsity) < 1e-2
                    else:
                        assert masked_sparsity >= last_update_sparsity - 1e-2
                        assert sparsity_val >= last_update_sparsity - 1e-2
                        assert abs(weight_sparsity - masked_sparsity) <= 1e-2
                        last_update_sparsity = masked_sparsity
                        if step < end_step and update_ready_val:
                            steps_count = sess.run(global_step) - begin_step
                            steps_range = end_step - begin_step
                            expected = _expected_sparsity(
                                steps_count,
                                steps_range,
                                init_sparsity,
                                final_sparsity,
                                exponent,
                            )
                            assert abs(sparsity_val - expected) < 1e-5

                res = sess.run(out, feed_dict={inp: inp_arr})
                assert res.sum() >= 0.0
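
The _expected_sparsity helper encodes the interpolation the schedule is asserted against. A sketch consistent with the assertions above (init sparsity at begin_step, final sparsity at end_step, polynomial easing in between); the exact formula is an assumption:

def _expected_sparsity(steps_count, steps_range, init_sparsity,
                       final_sparsity, exponent):
    # Fraction of the schedule completed, clamped to [0, 1]
    percentage = min(max(steps_count / float(steps_range), 0.0), 1.0)
    # Polynomial interpolation: fast early progress, flattening near end_step
    return final_sparsity + (init_sparsity - final_sparsity) * (
        (1.0 - percentage) ** exponent)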
Example #5
def test_create_op_pruning_fc(sparsity_val):
    group = "test-group"

    with tf_compat.Graph().as_default() as graph:
        inp = tf_compat.placeholder(tf_compat.float32, [None, 64])

        with tf_compat.name_scope("fc"):
            weights = tf_compat.Variable(tf_compat.random_normal([64, 64]),
                                         name="weights")
            bias = tf_compat.Variable(tf_compat.random_normal([64]),
                                      name="bias")
            matmul = tf_compat.matmul(inp, weights, name="matmul")
            add = tf_compat.add(matmul, bias, name="bias_add")
            relu = tf_compat.nn.relu(add, name="relu")

        sparsity = tf_compat.placeholder(dtype=tf_compat.float32,
                                         name="sparsity_placeholder")
        update_ready = tf_compat.placeholder(dtype=tf_compat.bool,
                                             name="update_ready")

        matmul_op = graph.get_operation_by_name("fc/matmul")
        matmul_op_input = get_op_input_var(matmul_op, VAR_INDEX_FROM_TRAINABLE)
        pruning_op_vars = create_op_pruning(
            matmul_op,
            matmul_op_input,
            sparsity,
            update_ready,
            True,
            None,
            group,
            UnstructuredPruningMaskCreator(),
        )

        with tf_compat.Session() as sess:
            sess.run(tf_compat.global_variables_initializer())
            sess.run(
                pruning_op_vars.update,
                feed_dict={
                    sparsity: sparsity_val,
                    update_ready: False
                },
            )

            mask_sparsity = eval_tensor_sparsity(pruning_op_vars.mask)
            weight_sparsity = eval_tensor_sparsity(pruning_op_vars.op_input)
            assert mask_sparsity < 1e-3
            assert mask_sparsity == weight_sparsity

            masked_sparsity = eval_tensor_sparsity(pruning_op_vars.masked)
            assert masked_sparsity < 1e-3

            sess.run(
                pruning_op_vars.update,
                feed_dict={
                    sparsity: sparsity_val,
                    update_ready: True
                },
            )

            mask_sparsity = eval_tensor_sparsity(pruning_op_vars.mask)
            assert abs(mask_sparsity - sparsity_val) < 1e-3

            masked_sparsity = eval_tensor_sparsity(pruning_op_vars.masked)
            assert abs(masked_sparsity - sparsity_val) < 1e-3

            res = sess.run(relu, feed_dict={inp: numpy.random.random((4, 64))})
            assert res.sum() > 0.0
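
Conceptually, create_op_pruning with an UnstructuredPruningMaskCreator builds a 0/1 mask over the weight and routes the op through weight * mask. A hypothetical illustration of how such a magnitude-based mask could be computed (not the library's implementation; edge cases like sparsity == 1.0 are omitted):

def magnitude_mask(weight, sparsity):
    # Keep the (1 - sparsity) fraction of weights with largest magnitude
    abs_w = tf_compat.reshape(tf_compat.abs(weight), [-1])
    num_elems = tf_compat.cast(tf_compat.size(abs_w), tf_compat.float32)
    k = tf_compat.cast(num_elems * (1.0 - sparsity), tf_compat.int32)
    top_k_vals, _ = tf_compat.nn.top_k(abs_w, k=k)
    threshold = top_k_vals[-1]  # smallest magnitude that survives
    return tf_compat.cast(tf_compat.abs(weight) >= threshold, weight.dtype)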
Example #6
def test_get_or_create_graph_ops_pruning(
    sparsity_val: float,
    net_const: Callable,
    inp_arr: numpy.ndarray,
    var_names: List[str],
    mask_creator: PruningMaskCreator,
):
    group = "test-group"
    is_grouped_mask = isinstance(mask_creator, GroupedPruningMaskCreator)

    with tf_compat.Graph().as_default() as graph:
        out, inp = net_const()
        sparsity = tf_compat.placeholder(dtype=tf_compat.float32,
                                         name="sparsity_placeholder")
        update_ready = tf_compat.placeholder(dtype=tf_compat.bool,
                                             name="update_ready")
        pruning_op_vars = get_or_create_graph_ops_pruning(
            graph,
            var_names,
            sparsity,
            update_ready,
            True,
            None,
            group,
            mask_creator,
        )
        pruning_op_vars_sec = get_or_create_graph_ops_pruning(
            graph,
            var_names,
            sparsity,
            update_ready,
            True,
            None,
            group,
            mask_creator,
        )

        assert len(pruning_op_vars) >= len(
            var_names)  # get at least 1 match per regex
        assert len(pruning_op_vars) == len(pruning_op_vars_sec)

        for op_vars, op_vars_sec in zip(pruning_op_vars, pruning_op_vars_sec):
            assert op_vars.op == op_vars_sec.op
            assert op_vars.update == op_vars_sec.update
            assert op_vars.mask == op_vars_sec.mask
            assert op_vars.masked == op_vars_sec.masked

        with tf_compat.Session() as sess:
            sess.run(tf_compat.global_variables_initializer())
            for op_vars in pruning_op_vars:
                sess.run(
                    op_vars.update,
                    feed_dict={
                        sparsity: sparsity_val,
                        update_ready: False
                    },
                )
                # Grouped masks restrict the values the mask can take,
                # so allow a larger error threshold for grouped creators
                err_threshold = 1e-2 if not is_grouped_mask else 1e-1
                mask_sparsity = eval_tensor_sparsity(op_vars.mask)
                weight_sparsity = eval_tensor_sparsity(op_vars.op_input)
                assert mask_sparsity < err_threshold
                assert weight_sparsity == mask_sparsity

                masked_sparsity = eval_tensor_sparsity(op_vars.masked)
                assert masked_sparsity < err_threshold

                sess.run(
                    op_vars.update,
                    feed_dict={
                        sparsity: sparsity_val,
                        update_ready: True
                    },
                )

                mask_sparsity = eval_tensor_sparsity(op_vars.mask)
                assert abs(mask_sparsity - sparsity_val) < err_threshold

                masked_sparsity = eval_tensor_sparsity(op_vars.masked)
                assert abs(masked_sparsity - sparsity_val) < err_threshold

                res = sess.run(out, feed_dict={inp: inp_arr})
                assert res.sum() > 0.0

                if is_grouped_mask:
                    # Check that every value in the mask_creator grouping
                    # is the same within the mask. Assumes the grouping
                    # applies an absolute mean to each group
                    grouped_mask = mask_creator.group_tensor(op_vars.mask)
                    mask_vals_are_grouped = tf_compat.reduce_all(
                        tf_compat.logical_or(
                            tf_compat.equal(grouped_mask, 0.0),
                            tf_compat.equal(grouped_mask, 1.0),
                        ))
                    assert sess.run(mask_vals_are_grouped)
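
The grouped check relies on mask_creator.group_tensor collapsing each group of the mask to a single value, so a correctly grouped 0/1 mask reduces to exactly 0.0 or 1.0 per group. A hypothetical reduction for a channel-grouped creator (the axis choice is illustrative, not the library's):

def group_tensor(tensor):
    # Reduce every group (here: all dims except the last, channel, dim)
    # to its mean; a valid grouped 0/1 mask yields exactly 0.0 or 1.0
    ndims = len(tensor.shape)
    return tf_compat.reduce_mean(tensor,
                                 axis=list(range(ndims - 1)),
                                 keepdims=True)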
Example #7
def test_create_op_pruning_conv(sparsity_val: float,
                                mask_creator: PruningMaskCreator):
    group = "test-group"
    is_grouped_mask = isinstance(mask_creator, GroupedPruningMaskCreator)
    with tf_compat.Graph().as_default() as graph:
        inp = tf_compat.placeholder(tf_compat.float32, [None, 8, 8, 64])

        with tf_compat.name_scope("conv"):
            weights = tf_compat.Variable(tf_compat.random_normal(
                [3, 3, 64, 64]),
                                         name="weights")
            bias = tf_compat.Variable(tf_compat.random_normal([64]),
                                      name="bias")
            conv = tf_compat.nn.conv2d(inp,
                                       weights,
                                       strides=[1, 1, 1, 1],
                                       padding="SAME",
                                       name="conv")
            add = tf_compat.add(conv, bias, name="bias_add")
            relu = tf_compat.nn.relu(add, name="relu")

        sparsity = tf_compat.placeholder(dtype=tf_compat.float32,
                                         name="sparsity_placeholder")
        update_ready = tf_compat.placeholder(dtype=tf_compat.bool,
                                             name="update_ready")

        conv_op = graph.get_operation_by_name("conv/conv")
        conv_op_input = get_op_input_var(conv_op, VAR_INDEX_FROM_TRAINABLE)
        pruning_op_vars = create_op_pruning(
            conv_op,
            conv_op_input,
            sparsity,
            update_ready,
            True,
            None,
            group,
            mask_creator=mask_creator,
        )

        with tf_compat.Session() as sess:
            sess.run(tf_compat.global_variables_initializer())
            sess.run(
                pruning_op_vars.update,
                feed_dict={
                    sparsity: sparsity_val,
                    update_ready: False
                },
            )

            err_threshold = 1e-3 if not is_grouped_mask else 0.05

            mask_sparsity = eval_tensor_sparsity(pruning_op_vars.mask)
            weight_sparsity = eval_tensor_sparsity(pruning_op_vars.op_input)
            assert mask_sparsity < err_threshold
            assert abs(mask_sparsity - weight_sparsity) <= 1e-4

            masked_sparsity = eval_tensor_sparsity(pruning_op_vars.masked)
            assert masked_sparsity < err_threshold

            sess.run(
                pruning_op_vars.update,
                feed_dict={
                    sparsity: sparsity_val,
                    update_ready: True
                },
            )

            mask_sparsity = eval_tensor_sparsity(pruning_op_vars.mask)
            assert abs(mask_sparsity - sparsity_val) < err_threshold

            masked_sparsity = eval_tensor_sparsity(pruning_op_vars.masked)
            assert abs(masked_sparsity - sparsity_val) < err_threshold

            res = sess.run(relu,
                           feed_dict={inp: numpy.random.random((4, 8, 8, 64))})
            assert res.sum() > 0.0

            if is_grouped_mask:
                # Check that every value in the mask_creator grouping
                # is the same within the mask. Assumes the grouping
                # applies an absolute mean to each group
                grouped_mask = mask_creator.group_tensor(pruning_op_vars.mask)
                mask_vals_are_grouped = tf_compat.reduce_all(
                    tf_compat.logical_or(
                        tf_compat.equal(grouped_mask, 0.0),
                        tf_compat.equal(grouped_mask, 1.0),
                    ))
                assert sess.run(mask_vals_are_grouped)
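
Arguments such as sparsity_val, mask_creator, net_const, and inp_arr are supplied by pytest fixtures and parametrization rather than defined in the tests themselves. A sketch of how a test like this might be driven (the values are illustrative):

import pytest

@pytest.mark.parametrize("sparsity_val", [0.25, 0.5, 0.9])
@pytest.mark.parametrize("mask_creator", [UnstructuredPruningMaskCreator()])
def test_create_op_pruning_conv(sparsity_val, mask_creator):
    ...  # body as in Example #7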