Example No. 1
def test_mnist_registry(key: str, pretrained: Union[bool, str],
                        test_input: bool):
    with tf_compat.Graph().as_default():
        inputs = tf_compat.placeholder(tf_compat.float32, [None, 28, 28, 1],
                                       name="inputs")
        logits = ModelRegistry.create(key, inputs)

        with tf_compat.Session() as sess:
            if test_input:
                sess.run(tf_compat.global_variables_initializer())
                out = sess.run(
                    logits,
                    feed_dict={inputs: numpy.random.random((1, 28, 28, 1))})
                assert out.sum() != 0

            if pretrained:
                ModelRegistry.load_pretrained(key, pretrained)

                if test_input:
                    out = sess.run(logits,
                                   feed_dict={
                                       inputs: numpy.random.random(
                                           (1, 28, 28, 1))
                                   })
                    assert out.sum() != 0
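These test functions receive their arguments from pytest parametrization; the decorators were presumably stripped when the snippets were extracted. A minimal sketch of how Example No. 1 might be driven, with hypothetical parameter values:

import pytest

@pytest.mark.parametrize(
    "key,pretrained,test_input",
    [
        ("mnistnet", False, True),  # hypothetical model key and flags
        ("mnistnet", True, True),
    ],
)
def test_mnist_registry(key, pretrained, test_input):
    ...  # body as above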
Example No. 2
def test_trainable_params_modifier_with_training():
    modifier = TrainableParamsModifier(
        params=["mlp_net/fc1/weight"],
        trainable=False,
        params_strict=False,
    )
    manager = ScheduledModifierManager([modifier])
    steps_per_epoch = 5
    batch_size = 2

    with tf_compat.Graph().as_default() as graph:
        logits, inputs = mlp_net()
        labels = tf_compat.placeholder(tf_compat.float32,
                                       [None, *logits.shape[1:]])
        loss = batch_cross_entropy_loss(logits, labels)

        global_step = tf_compat.train.get_or_create_global_step()
        num_trainable_variables_init = len(tf_compat.trainable_variables())

        mod_ops, mod_extras = manager.create_ops(steps_per_epoch)
        assert len(
            tf_compat.trainable_variables()) < num_trainable_variables_init
        # Get the variables returned by the trainable_params modifier
        non_trainable_vars = mod_extras[EXTRAS_KEY_VAR_LIST]
        trainable_vars = tf_compat.trainable_variables()
        train_op = tf_compat.train.AdamOptimizer(learning_rate=1e-4).minimize(
            loss, global_step=global_step)

        with tf_compat.Session(graph=graph) as sess:
            sess.run(tf_compat.global_variables_initializer())
            manager.initialize_session(sess)
            init_non_trainable_vars = [
                var.eval(session=sess) for var in non_trainable_vars
            ]
            init_trainable_vars = [
                var.eval(session=sess) for var in trainable_vars
            ]
            batch_lab = numpy.random.random((batch_size, *logits.shape[1:]))
            batch_inp = numpy.random.random((batch_size, *inputs.shape[1:]))

            for epoch in range(10):
                for step in range(steps_per_epoch):
                    sess.run(train_op,
                             feed_dict={
                                 inputs: batch_inp,
                                 labels: batch_lab
                             })
                    sess.run(global_step)
            # Compare initial and final variable values
            for idx, init_non_trainable_var in enumerate(
                    init_non_trainable_vars):
                final_non_trainable_var = non_trainable_vars[idx].eval(
                    session=sess)
                assert numpy.array_equal(init_non_trainable_var,
                                         final_non_trainable_var)
            for idx, init_trainable_var in enumerate(init_trainable_vars):
                final_trainable_var = trainable_vars[idx].eval(session=sess)
                assert not numpy.array_equal(init_trainable_var,
                                             final_trainable_var)
            manager.complete_graph()
Example No. 3
def mlp_graph_lambda():
    graph = tf_compat.Graph()

    with graph.as_default():
        mlp_net()

    return graph
Example No. 4
def test_loss_sensitivity(net_const: Callable, inp_arr: numpy.ndarray,
                          labs_arr: numpy.ndarray):
    with tf_compat.Graph().as_default():
        out, inp = net_const()
        labels = tf_compat.placeholder(tf_compat.float32,
                                       [None, *labs_arr.shape[1:]],
                                       name="logits")
        loss = batch_cross_entropy_loss(out, labels)
        op_vars = pruning_loss_sens_op_vars()

        with tf_compat.Session() as sess:
            sess.run(tf_compat.global_variables_initializer())

            def add_ops_creator(step: int):
                return []

            def feed_dict_creator(step: int):
                return {inp: inp_arr, labels: labs_arr}

            analysis = pruning_loss_sens_one_shot(op_vars, loss, 5,
                                                  add_ops_creator,
                                                  feed_dict_creator)

            for res in analysis.results:
                assert res.name
                assert isinstance(res.index, int)
                assert len(res.sparse_measurements) > 0
                assert len(res.averages) > 0
                assert res.sparse_average > 0
                assert res.sparse_integral > 0
Example No. 5
def conv_graph_lambda():
    graph = tf_compat.Graph()

    with graph.as_default():
        conv_net()

    return graph
Example No. 6
def test_get_prunable_ops(net_const, expected_ops: List[str]):
    with tf_compat.Graph().as_default():
        net_const()
        ops = get_prunable_ops()
        assert len(ops) == len(expected_ops)

        for op in ops:
            assert op[0] in expected_ops
Example No. 7
def test_op_var_name():
    graph = tf_compat.Graph()

    with graph.as_default():
        var = tf_compat.Variable(tf_compat.random_normal([64]),
                                 dtype=tf_compat.float32,
                                 name="test_var_name")
        name = clean_tensor_name(var)
        assert name == "test_var_name"
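Note: a TF1 Variable's .name carries an output-index suffix, so var.name here is actually "test_var_name:0" (or "test_var_name_1:0" if the name was already taken in the graph); clean_tensor_name strips that suffix, which is what the assertion checks. For illustration, under the same graph setup:

        assert var.name == "test_var_name:0"  # raw name keeps the ":0" suffix
        assert clean_tensor_name(var) == "test_var_name"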
Example No. 8
def test_op_input_var():
    with tf_compat.Graph().as_default() as graph:
        mlp_net()
        ops = get_prunable_ops(graph)

        for op in ops:
            inp = get_op_input_var(op[1])
            assert inp is not None
            assert isinstance(inp, tf_compat.Tensor)
Example No. 9
def test_resnets(key: str, pretrained: Union[bool, str], test_input: bool,
                 const: Callable):
    input_shape = ModelRegistry.input_shape(key)
    # test out the stand alone constructor
    with tf_compat.Graph().as_default():
        inputs = tf_compat.placeholder(tf_compat.float32, [None, *input_shape],
                                       name="inputs")
        logits = const(inputs, training=False)

        if test_input:
            with tf_compat.Session() as sess:
                sess.run(tf_compat.global_variables_initializer())
                out = sess.run(
                    logits,
                    feed_dict={inputs: numpy.random.random((1, *input_shape))})
                assert out.sum() != 0

    # test out the registry
    with tf_compat.Graph().as_default():
        inputs = tf_compat.placeholder(tf_compat.float32, [None, *input_shape],
                                       name="inputs")
        logits = ModelRegistry.create(key, inputs, training=False)

        with tf_compat.Session() as sess:
            if test_input:
                sess.run(tf_compat.global_variables_initializer())
                out = sess.run(
                    logits,
                    feed_dict={inputs: numpy.random.random((1, *input_shape))})
                assert out.sum() != 0

            if pretrained:
                ModelRegistry.load_pretrained(key, pretrained)

                if test_input:
                    out = sess.run(
                        logits,
                        feed_dict={
                            inputs: numpy.random.random((1, *input_shape))
                        },
                    )
                    assert out.sum() != 0
Example No. 10
def test_mnist():
    with tf_compat.Graph().as_default():
        inputs = tf_compat.placeholder(tf_compat.float32, [None, 28, 28, 1],
                                       name="inputs")
        logits = mnist_net(inputs)

        with tf_compat.Session() as sess:
            sess.run(tf_compat.global_variables_initializer())
            out = sess.run(
                logits,
                feed_dict={inputs: numpy.random.random((1, 28, 28, 1))})
            assert out.sum() != 0
Example No. 11
def export(
    args,
    save_dir,
    checkpoint_path=None,
    skip_samples=False,
    num_classes=None,
    opset=None,
):
    assert not skip_samples or num_classes
    # dataset creation
    if not skip_samples:
        val_dataset, num_classes = _create_dataset(args, train=False)

    with tf_compat.Graph().as_default():
        input_shape = ModelRegistry.input_shape(args.arch_key)
        inputs = tf_compat.placeholder(tf_compat.float32,
                                       [None] + list(input_shape),
                                       name="inputs")
        outputs = _create_model(args, num_classes, inputs)

        with tf_compat.Session() as sess:
            _load_model(args,
                        sess,
                        checkpoint_path=checkpoint_path
                        or args.checkpoint_path)

            exporter = GraphExporter(save_dir)

            if not skip_samples:
                # Export a batch of samples and expected outputs
                tf_dataset = val_dataset.build(args.num_samples,
                                               repeat_count=1,
                                               num_parallel_calls=1)
                tf_iter = tf_compat.data.make_one_shot_iterator(tf_dataset)
                features, _ = tf_iter.get_next()
                inputs_val = sess.run(features)
                exporter.export_samples([inputs], [inputs_val], [outputs],
                                        sess)

            # Export model to tensorflow checkpoint format
            LOGGER.info("exporting tensorflow in {}".format(save_dir))
            exporter.export_checkpoint(sess=sess)

            # Export model to pb format
            LOGGER.info("exporting pb in {}".format(exporter.pb_path))
            exporter.export_pb(outputs=[outputs])

    # Export model to onnx format
    LOGGER.info("exporting onnx in {}".format(exporter.onnx_path))
    exporter.export_onnx([inputs], [outputs], opset=opset or args.onnx_opset)
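Note that inputs, outputs, and exporter are created inside the graph and session contexts but used after both have closed: Python's scoping keeps the references alive, and the final ONNX export presumably works from the already-exported checkpoint/pb files rather than the live session.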
Example No. 12
def test_get_ops_and_inputs_by_name_or_regex(
    net_const,
    var_names,
    expected_ops,
    expected_tens,
):
    with tf_compat.Graph().as_default() as graph:
        net_const()
        ops_and_inputs = get_ops_and_inputs_by_name_or_regex(var_names, graph)
        assert len(ops_and_inputs) == len(expected_ops)

        for op, inp in ops_and_inputs:
            assert op.name in expected_ops
            assert clean_tensor_name(inp.name) in expected_tens
Example No. 13
def test_approx_ks_loss_sensitivity(net_const: Callable):
    with tf_compat.Graph().as_default() as graph:
        out, inp = net_const()

        with tf_compat.Session() as sess:
            sess.run(tf_compat.global_variables_initializer())

            analysis = pruning_loss_sens_magnitude(graph)

            for res in analysis.results:
                assert res.name
                assert isinstance(res.index, int)
                assert len(res.sparse_measurements) > 0
                assert len(res.averages) > 0
                assert res.sparse_average > 0
                assert res.sparse_integral > 0
Example No. 14
def test_multi_step_lr_schedule(start_step: int, milestone_steps: List[int],
                                init_lr: float, gamma: float):
    with tf_compat.Graph().as_default():
        global_step = tf_compat.placeholder(dtype=tf_compat.int64, shape=[])
        learning_rate = multi_step_lr_schedule(global_step, start_step,
                                               milestone_steps, init_lr, gamma)

        with tf_compat.Session() as sess:
            for step in range(start_step + milestone_steps[-1] + 10):
                measured = sess.run(learning_rate,
                                    feed_dict={global_step: step})

                gammas = sum(1 for mile in milestone_steps
                             if step >= mile + start_step)
                expected = init_lr * gamma**gammas

                assert abs(measured - expected) < 1e-5
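The reference value mirrors multi-step decay: each milestone the step has passed multiplies the rate by gamma. A small worked sketch with assumed values (start_step=2, milestones [4, 8], init_lr=0.1, gamma=0.5):

init_lr, gamma, start_step, milestones = 0.1, 0.5, 2, [4, 8]
for step in (0, 5, 9, 12):
    gammas = sum(1 for mile in milestones if step >= mile + start_step)
    print(step, init_lr * gamma ** gammas)
# steps 0 and 5 -> 0.1 (no milestone passed yet), step 9 -> 0.05, step 12 -> 0.025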
Example No. 15
def test_step_lr_schedule(start_step: int, end_step: int, init_lr: float,
                          step_size: int, gamma: float):
    with tf_compat.Graph().as_default():
        global_step = tf_compat.placeholder(dtype=tf_compat.int64, shape=[])
        learning_rate = step_lr_schedule(global_step, start_step, end_step,
                                         step_size, init_lr, gamma)

        with tf_compat.Session() as sess:
            expected = init_lr

            for step in range(end_step + 10):
                measured = sess.run(learning_rate,
                                    feed_dict={global_step: step})

                if ((step - start_step) % step_size == 0
                        and start_step < step <= end_step):
                    expected = expected * gamma

                assert abs(measured - expected) < 1e-5
Example No. 16
def test_apply_op_vars_masks(
    sparsity_val: float,
    net_const: Callable,
    inp_arr: numpy.ndarray,
    var_names: List[str],
):
    group = "test-group"

    with tf_compat.Graph().as_default() as graph:
        out, inp = net_const()
        sparsity = tf_compat.placeholder(dtype=tf_compat.float32,
                                         name="sparsity_placeholder")
        update_ready = tf_compat.placeholder(dtype=tf_compat.bool,
                                             name="update_ready")
        pruning_op_vars = get_or_create_graph_ops_pruning(
            graph,
            var_names,
            sparsity,
            update_ready,
            True,
            None,
            group,
            UnstructuredPruningMaskCreator(),
        )

        with tf_compat.Session() as sess:
            sess.run(tf_compat.global_variables_initializer())

            for op_vars in pruning_op_vars:
                sess.run(
                    op_vars.update,
                    feed_dict={
                        sparsity: sparsity_val,
                        update_ready: True
                    },
                )

            apply_op_vars_masks(pruning_op_vars, group, sess)

            for op_vars in pruning_op_vars:
                var_sparsity = eval_tensor_sparsity(op_vars.op_input)
                assert abs(var_sparsity - sparsity_val) < 1e-2
Example No. 17
def _validate(dataset: ImageFolderDataset, size: int):
    with tf_compat.Graph().as_default():
        batch_size = 16

        with tf_compat.device("/cpu:0"):
            print("loading datasets")
            dataset_len = len(dataset)
            assert dataset_len > 0
            tf_dataset = dataset.build(
                batch_size,
                repeat_count=2,
                shuffle_buffer_size=10,
                prefetch_buffer_size=batch_size,
                num_parallel_calls=4,
            )

        handle, iterator, (tf_iter, ) = create_split_iterators_handle(
            [tf_dataset])
        images, labels = iterator.get_next()

        with tf_compat.Session() as sess:
            sess.run([
                tf_compat.global_variables_initializer(),
                tf_compat.local_variables_initializer(),
            ])
            iter_handle = sess.run(tf_iter.string_handle())
            sess.run(tf_iter.initializer)

            for _ in range(5):
                batch_x, batch_lab = sess.run([images, labels],
                                              feed_dict={handle: iter_handle})
                assert batch_x.shape[0] == batch_size
                assert batch_x.shape[1] == size
                assert batch_x.shape[2] == size
                assert batch_x.shape[3] == 3
                assert batch_lab.shape[0] == batch_size
                assert batch_lab.shape[1] == 10
Example No. 18
def test_get_or_create_ks_schedule_ops(
    begin_step: int,
    end_step: int,
    update_step_freq: int,
    init_sparsity: float,
    final_sparsity: float,
    exponent: float,
):
    group = "test-group"

    with tf_compat.Graph().as_default():
        global_step = tf_compat.train.get_or_create_global_step()
        step_placeholder = tf_compat.placeholder(dtype=tf_compat.int64,
                                                 name="step")
        global_assign = global_step.assign(step_placeholder)

        update_ready, sparsity = get_or_create_ks_schedule_ops(
            global_step,
            begin_step,
            end_step,
            update_step_freq,
            init_sparsity,
            final_sparsity,
            exponent,
            group,
        )
        update_ready_sec, sparsity_sec = get_or_create_ks_schedule_ops(
            global_step,
            begin_step,
            end_step,
            update_step_freq,
            init_sparsity,
            final_sparsity,
            exponent,
            group,
        )

        assert update_ready == update_ready_sec
        assert sparsity == sparsity_sec

        with tf_compat.Session() as sess:
            sess.run(tf_compat.global_variables_initializer())
            last_update_step = None
            last_update_sparsity = None

            for step in range(end_step + 10):
                sess.run(global_assign, feed_dict={step_placeholder: step})
                update_ready_val = sess.run(update_ready)
                sparsity_val = sess.run(sparsity)

                if step < begin_step:
                    assert not update_ready_val
                    assert abs(sparsity_val) < 1e-5
                elif step == begin_step:
                    assert update_ready_val
                    assert abs(sparsity_val - init_sparsity) < 1e-5
                    last_update_step = step
                    last_update_sparsity = sparsity_val
                elif step == end_step:
                    assert update_ready_val
                    assert abs(sparsity_val - final_sparsity) < 1e-5
                    last_update_step = step
                    last_update_sparsity = sparsity_val
                elif step > end_step:
                    assert not update_ready_val
                    assert abs(sparsity_val - final_sparsity) < 1e-5
                else:
                    # check if update should be ready
                    check_ready = (last_update_step is None or
                                   step >= last_update_step + update_step_freq)
                    assert sparsity_val > last_update_sparsity

                    if check_ready:
                        assert update_ready_val
                        last_update_step = step
                        last_update_sparsity = sparsity_val
                    else:
                        assert not update_ready_val
Example No. 19
def test_create_op_pruning_fc(sparsity_val):
    group = "test-group"

    with tf_compat.Graph().as_default() as graph:
        inp = tf_compat.placeholder(tf_compat.float32, [None, 64])

        with tf_compat.name_scope("fc"):
            weights = tf_compat.Variable(tf_compat.random_normal([64, 64]),
                                         name="weights")
            bias = tf_compat.Variable(tf_compat.random_normal([64]),
                                      name="bias")
            matmul = tf_compat.matmul(inp, weights, name="matmul")
            add = tf_compat.add(matmul, bias, name="bias_add")
            relu = tf_compat.nn.relu(add, name="relu")

        sparsity = tf_compat.placeholder(dtype=tf_compat.float32,
                                         name="sparsity_placeholder")
        update_ready = tf_compat.placeholder(dtype=tf_compat.bool,
                                             name="update_ready")

        matmul_op = graph.get_operation_by_name("fc/matmul")
        matmul_op_input = get_op_input_var(matmul_op, VAR_INDEX_FROM_TRAINABLE)
        pruning_op_vars = create_op_pruning(
            matmul_op,
            matmul_op_input,
            sparsity,
            update_ready,
            True,
            None,
            group,
            UnstructuredPruningMaskCreator(),
        )

        with tf_compat.Session() as sess:
            sess.run(tf_compat.global_variables_initializer())
            sess.run(
                pruning_op_vars.update,
                feed_dict={
                    sparsity: sparsity_val,
                    update_ready: False
                },
            )

            mask_sparsity = eval_tensor_sparsity(pruning_op_vars.mask)
            weight_sparsity = eval_tensor_sparsity(pruning_op_vars.op_input)
            assert mask_sparsity < 1e-3
            assert mask_sparsity == weight_sparsity

            masked_sparsity = eval_tensor_sparsity(pruning_op_vars.masked)
            assert masked_sparsity < 1e-3

            sess.run(
                pruning_op_vars.update,
                feed_dict={
                    sparsity: sparsity_val,
                    update_ready: True
                },
            )

            mask_sparsity = eval_tensor_sparsity(pruning_op_vars.mask)
            assert abs(mask_sparsity - sparsity_val) < 1e-3

            masked_sparsity = eval_tensor_sparsity(pruning_op_vars.masked)
            assert abs(masked_sparsity - sparsity_val) < 1e-3

            res = sess.run(relu, feed_dict={inp: numpy.random.random((4, 64))})
            assert res.sum() > 0.0
Example No. 20
def pruning_loss_sensitivity(args, save_dir):
    input_shape = ModelRegistry.input_shape(args.arch_key)
    train_dataset, num_classes = _create_dataset(args,
                                                 train=True,
                                                 image_size=input_shape[1])
    with tf_compat.Graph().as_default() as graph:
        # create model graph
        inputs = tf_compat.placeholder(tf_compat.float32,
                                       [None] + list(input_shape),
                                       name="inputs")
        outputs = _create_model(args, num_classes, inputs)

        with tf_compat.Session() as sess:
            _load_model(args, sess, checkpoint_path=args.checkpoint_path)
            if args.approximate:
                LOGGER.info(
                    "Running weight magnitude loss sensitivity analysis...")
                analysis = pruning_loss_sens_magnitude(graph, sess)
            else:
                op_vars = pruning_loss_sens_op_vars(graph)
                train_steps = math.ceil(len(train_dataset) / args.batch_size)
                train_dataset = _build_dataset(args, train_dataset,
                                               args.batch_size)
                handle, iterator, dataset_iter = create_split_iterators_handle(
                    [train_dataset])
                dataset_iter = dataset_iter[0]
                images, labels = iterator.get_next()
                loss = batch_cross_entropy_loss(outputs, labels)
                tensor_names = ["inputs:0", labels.name]
                sess.run(dataset_iter.initializer)

                def feed_dict_creator(
                        step: int) -> Dict[str, tf_compat.Tensor]:
                    assert step < train_steps
                    batch_data = [
                        tens.eval(session=sess)
                        for tens in dataset_iter.get_next()
                    ]
                    return dict(zip(tensor_names, batch_data))

                LOGGER.info("Running one shot loss sensitivity analysis...")
                analysis = pruning_loss_sens_one_shot(
                    op_vars=op_vars,
                    loss_tensor=loss,
                    steps_per_measurement=args.steps_per_measurement,
                    feed_dict_creator=feed_dict_creator,
                    sess=sess,
                )
    # saving and printing results
    LOGGER.info("completed...")
    LOGGER.info("Saving results in {}".format(save_dir))
    analysis.save_json(
        os.path.join(
            save_dir,
            "ks_approx_sensitivity.json"
            if args.approximate else "ks_one_shot_sensitivity.json",
        ))
    analysis.plot(
        os.path.join(
            save_dir,
            "ks_approx_sensitivity.png"
            if args.approximate else "ks_one_shot_sensitivity.png",
        ),
        plot_integral=True,
    )
    analysis.print_res()
Example No. 21
def test_get_or_create_ks_scheduled_graph_ops(
    begin_step: int,
    end_step: int,
    update_step_freq: int,
    init_sparsity: float,
    final_sparsity: float,
    exponent: float,
    net_const: Callable,
    inp_arr: numpy.ndarray,
    var_names: List[str],
):
    group = "test-group"

    with tf_compat.Graph().as_default() as graph:
        global_step = tf_compat.train.get_or_create_global_step()
        step_placeholder = tf_compat.placeholder(dtype=tf_compat.int64,
                                                 name="step")
        global_assign = global_step.assign(step_placeholder)

        out, inp = net_const()

        (
            update_op,
            pruning_op_vars,
            update_ready,
            sparsity,
        ) = get_or_create_ks_scheduled_graph_ops(
            graph,
            global_step,
            var_names,
            begin_step,
            end_step,
            update_step_freq,
            init_sparsity,
            final_sparsity,
            exponent,
            True,
            group,
            UnstructuredPruningMaskCreator(),
        )
        (
            update_op_sec,
            pruning_op_vars_sec,
            update_ready_sec,
            sparsity_sec,
        ) = get_or_create_ks_scheduled_graph_ops(
            graph,
            global_step,
            var_names,
            begin_step,
            end_step,
            update_step_freq,
            init_sparsity,
            final_sparsity,
            exponent,
            True,
            group,
            UnstructuredPruningMaskCreator(),
        )

        assert update_op == update_op_sec
        assert update_ready == update_ready_sec
        assert sparsity == sparsity_sec
        assert len(pruning_op_vars) == 3
        assert len(pruning_op_vars) >= len(
            var_names)  # at least 1 regex match per name

        for op_vars, op_vars_sec in zip(pruning_op_vars, pruning_op_vars_sec):
            assert op_vars.op == op_vars_sec.op
            assert op_vars.update == op_vars_sec.update
            assert op_vars.mask == op_vars_sec.mask
            assert op_vars.masked == op_vars_sec.masked

        with tf_compat.Session() as sess:
            sess.run(tf_compat.global_variables_initializer())
            last_update_sparsity = None

            for step in range(end_step + 10):
                sess.run(global_assign, feed_dict={step_placeholder: step})
                update_ready_val = sess.run(update_ready)
                sparsity_val = sess.run(sparsity)
                sess.run(update_op)

                for op_var in pruning_op_vars:
                    mask_sparsity = eval_tensor_sparsity(op_var.mask)
                    masked_sparsity = eval_tensor_sparsity(op_var.masked)
                    weight_sparsity = eval_tensor_sparsity(op_var.op_input)

                    assert abs(mask_sparsity - masked_sparsity) < 1e-5

                    if step < begin_step:
                        assert abs(masked_sparsity) < 1e-2
                        assert not update_ready_val
                    elif step == begin_step:
                        assert abs(masked_sparsity - init_sparsity) < 1e-2
                        assert abs(sparsity_val - init_sparsity) < 1e-5
                        assert update_ready_val
                        last_update_sparsity = masked_sparsity
                    elif step == end_step:
                        assert update_ready_val
                        assert abs(masked_sparsity - final_sparsity) < 1e-2
                        assert abs(sparsity_val - final_sparsity) < 1e-5
                        last_update_sparsity = masked_sparsity
                    elif step > end_step:
                        assert not update_ready_val
                        assert abs(masked_sparsity - final_sparsity) < 1e-2
                    else:
                        assert masked_sparsity >= last_update_sparsity - 1e-2
                        assert sparsity_val >= last_update_sparsity - 1e-2
                        assert abs(weight_sparsity - masked_sparsity) <= 1e-2
                        last_update_sparsity = masked_sparsity
                        if step < end_step and update_ready_val:
                            steps_count = sess.run(global_step) - begin_step
                            steps_range = end_step - begin_step
                            expected = _expected_sparsity(
                                steps_count,
                                steps_range,
                                init_sparsity,
                                final_sparsity,
                                exponent,
                            )
                            assert abs(sparsity_val - expected) < 1e-5

                res = sess.run(out, feed_dict={inp: inp_arr})
                assert res.sum() >= 0.0
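_expected_sparsity is not shown in this snippet; for a schedule parameterized by an exponent like this, it is presumably the standard gradual-pruning interpolation (Zhu & Gupta, 2017), along these lines:

def _expected_sparsity(steps_count, steps_range, init_sparsity,
                       final_sparsity, exponent):
    # fraction of the pruning window completed, clamped to [0, 1]
    progress = min(max(steps_count / steps_range, 0.0), 1.0)
    # ramp from init_sparsity at progress 0 to final_sparsity at progress 1
    return final_sparsity + (init_sparsity - final_sparsity) * (
        1.0 - progress) ** exponent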
Example No. 22
def test_gm_pruning_training_with_manager():
    modifier = GMPruningModifier(
        params=["mlp_net/fc1/weight", "mlp_net/fc3/weight"],
        init_sparsity=0.05,
        final_sparsity=0.8,
        start_epoch=2.0,
        end_epoch=7.0,
        update_frequency=1.0,
    )
    sec_modifier = GMPruningModifier(
        params=["mlp_net/fc2/weight"],
        init_sparsity=0.05,
        final_sparsity=0.8,
        start_epoch=2.0,
        end_epoch=7.0,
        update_frequency=1.0,
    )
    manager = ScheduledModifierManager([modifier, sec_modifier])
    steps_per_epoch = 5
    batch_size = 2

    with tf_compat.Graph().as_default() as graph:
        logits, inputs = mlp_net()
        labels = tf_compat.placeholder(tf_compat.float32,
                                       [None, *logits.shape[1:]])
        loss = batch_cross_entropy_loss(logits, labels)

        global_step = tf_compat.train.get_or_create_global_step()
        train_op = tf_compat.train.AdamOptimizer(learning_rate=1e-4).minimize(
            loss, global_step=global_step)

        mod_ops, mod_extras = manager.create_ops(steps_per_epoch)
        last_sparsities = [0.0 for _ in range(len(modifier.prune_op_vars))]

        with tf_compat.Session(graph=graph) as sess:
            sess.run(tf_compat.global_variables_initializer())
            manager.initialize_session(sess)
            batch_lab = numpy.random.random((batch_size, *logits.shape[1:]))
            batch_inp = numpy.random.random((batch_size, *inputs.shape[1:]))

            for epoch in range(int(modifier.end_epoch + 2.0)):
                for step in range(steps_per_epoch):
                    sess.run(train_op,
                             feed_dict={
                                 inputs: batch_inp,
                                 labels: batch_lab
                             })
                    sess.run(global_step)

                    sess.run(mod_ops)
                    update_ready_val = sess.run(modifier.update_ready)
                    sess.run(modifier.sparsity)

                    for index, op_vars in enumerate(modifier.prune_op_vars):
                        mask_sparsity = eval_tensor_sparsity(op_vars.mask)
                        masked_sparsity = eval_tensor_sparsity(op_vars.masked)

                        assert abs(mask_sparsity - masked_sparsity) < 1e-5

                        if epoch < modifier.start_epoch:
                            assert masked_sparsity < 1e-2
                            assert not update_ready_val
                        elif epoch >= modifier.end_epoch:
                            assert abs(masked_sparsity -
                                       modifier.final_sparsity) < 1e-2
                            assert not update_ready_val
                        else:
                            assert masked_sparsity >= last_sparsities[
                                index] - 1e-2
                            last_sparsities[index] = masked_sparsity

            manager.complete_graph()

            for op_vars in modifier.prune_op_vars:
                assert (abs(modifier.final_sparsity -
                            eval_tensor_sparsity(op_vars.op_input)) < 1e-2)
Example No. 23
def test_get_or_create_graph_ops_pruning(
    sparsity_val: float,
    net_const: Callable,
    inp_arr: numpy.ndarray,
    var_names: List[str],
    mask_creator: PruningMaskCreator,
):
    group = "test-group"
    is_grouped_mask = isinstance(mask_creator, GroupedPruningMaskCreator)

    with tf_compat.Graph().as_default() as graph:
        out, inp = net_const()
        sparsity = tf_compat.placeholder(dtype=tf_compat.float32,
                                         name="sparsity_placeholder")
        update_ready = tf_compat.placeholder(dtype=tf_compat.bool,
                                             name="update_ready")
        pruning_op_vars = get_or_create_graph_ops_pruning(
            graph,
            var_names,
            sparsity,
            update_ready,
            True,
            None,
            group,
            mask_creator,
        )
        pruning_op_vars_sec = get_or_create_graph_ops_pruning(
            graph,
            var_names,
            sparsity,
            update_ready,
            True,
            None,
            group,
            mask_creator,
        )

        assert len(pruning_op_vars) >= len(
            var_names)  # get at least 1 match per regex
        assert len(pruning_op_vars) == len(pruning_op_vars_sec)

        for op_vars, op_vars_sec in zip(pruning_op_vars, pruning_op_vars_sec):
            assert op_vars.op == op_vars_sec.op
            assert op_vars.update == op_vars_sec.update
            assert op_vars.mask == op_vars_sec.mask
            assert op_vars.masked == op_vars_sec.masked

        with tf_compat.Session() as sess:
            sess.run(tf_compat.global_variables_initializer())
            for op_vars in pruning_op_vars:
                sess.run(
                    op_vars.update,
                    feed_dict={
                        sparsity: sparsity_val,
                        update_ready: False
                    },
                )
                # grouped masks restrict the values the mask can take,
                # so allow a larger error threshold
                err_threshold = 1e-2 if not is_grouped_mask else 1e-1
                mask_sparsity = eval_tensor_sparsity(op_vars.mask)
                weight_sparsity = eval_tensor_sparsity(op_vars.op_input)
                assert mask_sparsity < err_threshold
                assert weight_sparsity == mask_sparsity

                masked_sparsity = eval_tensor_sparsity(op_vars.masked)
                assert masked_sparsity < err_threshold

                sess.run(
                    op_vars.update,
                    feed_dict={
                        sparsity: sparsity_val,
                        update_ready: True
                    },
                )

                mask_sparsity = eval_tensor_sparsity(op_vars.mask)
                assert abs(mask_sparsity - sparsity_val) < err_threshold

                masked_sparsity = eval_tensor_sparsity(op_vars.masked)
                assert abs(masked_sparsity - sparsity_val) < err_threshold

                res = sess.run(out, feed_dict={inp: inp_arr})
                assert res.sum() > 0.0

                if is_grouped_mask:
                    # Check that every value in the mask_creator grouping
                    # is the same within the mask.  Assumes grouping applies
                    # an absolute mean to each grouping
                    grouped_mask = mask_creator.group_tensor(op_vars.mask)
                    mask_vals_are_grouped = tf_compat.reduce_all(
                        tf_compat.logical_or(
                            tf_compat.equal(grouped_mask, 0.0),
                            tf_compat.equal(grouped_mask, 1.0),
                        ))
                    assert sess.run(mask_vals_are_grouped)
Example No. 24

import math

from sparseml.tensorflow_v1.optim import (
    EXTRAS_KEY_LEARNING_RATE,
    EXTRAS_KEY_SUMMARIES,
    ScheduledModifierManager,
)
from sparseml.tensorflow_v1.utils import tf_compat

with tf_compat.Graph().as_default() as graph:
    # placeholder: build the model graph and training op here
    CREATE_MODEL_GRAPH = None

    global_step = tf_compat.train.get_or_create_global_step()
    manager = ScheduledModifierManager.from_yaml("/PATH/TO/config.yaml")
    steps_per_epoch = math.ceil(len(TRAIN_DATASET) / TRAIN_BATCH_SIZE)
    mod_ops, mod_extras = manager.create_ops(steps_per_epoch=steps_per_epoch)
    summary_ops = mod_extras[EXTRAS_KEY_SUMMARIES]
    learning_rate = mod_extras[EXTRAS_KEY_LEARNING_RATE]

    with tf_compat.Session() as sess:
        sess.run(tf_compat.global_variables_initializer())

        for epoch in range(manager.max_epochs):
            for step in range(steps_per_epoch):
                sess.run(TRAIN_OP)
                # run the modifier ops each step so the schedule advances
                sess.run(mod_ops)
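TRAIN_DATASET, TRAIN_BATCH_SIZE, and TRAIN_OP are placeholders for a real dataset, batch size, and training op, and CREATE_MODEL_GRAPH marks where the model graph would be built. Running mod_ops every step, as sketched above, matches the pattern in the manager tests in the earlier examples.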
Example No. 25
def test_create_op_pruning_conv(sparsity_val: float,
                                mask_creator: PruningMaskCreator):
    group = "test-group"
    is_grouped_mask = isinstance(mask_creator, GroupedPruningMaskCreator)
    with tf_compat.Graph().as_default() as graph:
        inp = tf_compat.placeholder(tf_compat.float32, [None, 8, 8, 64])

        with tf_compat.name_scope("conv"):
            weights = tf_compat.Variable(tf_compat.random_normal(
                [3, 3, 64, 64]),
                                         name="weights")
            bias = tf_compat.Variable(tf_compat.random_normal([64]),
                                      name="bias")
            conv = tf_compat.nn.conv2d(inp,
                                       weights,
                                       strides=[1, 1, 1, 1],
                                       padding="SAME",
                                       name="conv")
            add = tf_compat.add(conv, bias, name="bias_add")
            relu = tf_compat.nn.relu(add, name="relu")

        sparsity = tf_compat.placeholder(dtype=tf_compat.float32,
                                         name="sparsity_placeholder")
        update_ready = tf_compat.placeholder(dtype=tf_compat.bool,
                                             name="update_ready")

        conv_op = graph.get_operation_by_name("conv/conv")
        conv_op_input = get_op_input_var(conv_op, VAR_INDEX_FROM_TRAINABLE)
        pruning_op_vars = create_op_pruning(
            conv_op,
            conv_op_input,
            sparsity,
            update_ready,
            True,
            None,
            group,
            mask_creator=mask_creator,
        )

        with tf_compat.Session() as sess:
            sess.run(tf_compat.global_variables_initializer())
            sess.run(
                pruning_op_vars.update,
                feed_dict={
                    sparsity: sparsity_val,
                    update_ready: False
                },
            )

            err_threshold = 1e-3 if not is_grouped_mask else 0.05

            mask_sparsity = eval_tensor_sparsity(pruning_op_vars.mask)
            weight_sparsity = eval_tensor_sparsity(pruning_op_vars.op_input)
            assert mask_sparsity < err_threshold
            assert abs(mask_sparsity - weight_sparsity) <= 1e-4

            masked_sparsity = eval_tensor_sparsity(pruning_op_vars.masked)
            assert masked_sparsity < err_threshold

            sess.run(
                pruning_op_vars.update,
                feed_dict={
                    sparsity: sparsity_val,
                    update_ready: True
                },
            )

            mask_sparsity = eval_tensor_sparsity(pruning_op_vars.mask)
            assert abs(mask_sparsity - sparsity_val) < err_threshold

            masked_sparsity = eval_tensor_sparsity(pruning_op_vars.masked)
            assert abs(masked_sparsity - sparsity_val) < err_threshold

            res = sess.run(relu,
                           feed_dict={inp: numpy.random.random((4, 8, 8, 64))})
            assert res.sum() > 0.0

            if is_grouped_mask:
                # Check that every value in the mask_creator grouping
                # is the same within the mask.  Assumes grouping applies
                # an absolute mean to each grouping
                grouped_mask = mask_creator.group_tensor(pruning_op_vars.mask)
                mask_vals_are_grouped = tf_compat.reduce_all(
                    tf_compat.logical_or(
                        tf_compat.equal(grouped_mask, 0.0),
                        tf_compat.equal(grouped_mask, 1.0),
                    ))
                assert sess.run(mask_vals_are_grouped)