Example #1
def main(_):
    """Configures and runs the fuzzer."""

    # Log more: set how verbose the logging output will be
    tf.logging.set_verbosity(tf.logging.INFO)

    coverage_function = raw_logit_coverage_function  # change function name
    target_seed = np.random.uniform(low=0.0, high=1.0, size=(1, ))  # corpus seed: a random value
    numpy_arrays = [[target_seed]]

    targets_tensor = tf.placeholder(tf.float32, [64, 1])
    coverage_tensor = tf.identity(targets_tensor)
    loss_batch_tensor, _ = binary_cross_entropy_with_logits(
        tf.zeros_like(targets_tensor), tf.nn.sigmoid(targets_tensor))
    grads_tensor = tf.gradients(loss_batch_tensor, targets_tensor)[0]
    tensor_map = {
        "input": [targets_tensor],
        "coverage": [coverage_tensor],
        "metadata": [loss_batch_tensor, grads_tensor],
    }

    with tf.Session() as sess:

        fetch_function = build_fetch_function(sess, tensor_map)
        size = FLAGS.mutations_per_corpus_item
        mutation_function = lambda elt: do_basic_mutations(
            elt, size, a_min=-1000, a_max=1000)
        # choose seeds for the corpus
        seed_corpus = seed_corpus_from_numpy_arrays(numpy_arrays,
                                                    coverage_function,
                                                    metadata_function,
                                                    fetch_function)
        # corpus: mutations_processed, corpus, time, sample_function, updater
        corpus = InputCorpus(seed_corpus, uniform_sample_function,
                             FLAGS.ann_threshold, "kdtree")
        fuzzer = Fuzzer(
            corpus,
            coverage_function,
            metadata_function,
            objective_function,
            mutation_function,
            fetch_function,
        )
        # run the fuzzing loop
        result = fuzzer.loop(FLAGS.total_inputs_to_fuzz)
        if result is not None:
            tf.logging.info("Fuzzing succeeded.")
            tf.logging.info(
                "Generations to make satisfying element: %s.",
                result.oldest_ancestor()[1],
            )
        else:
            tf.logging.info("Fuzzing failed to satisfy objective function.")
Example #2
 def generate_mutated_elements(new_elements):
     mutated_inter_elements = []
     for i in range(len(new_elements)):
         mutated_batches = do_basic_mutations(new_elements[i], 1)
         mutated_numpy_arrays = [[mutated_batches[0][0].tolist(), mutated_batches[1].tolist()[0]]]
         mutated_seed_corpus = seed_corpus_from_numpy_arrays_1(
             # build the seed corpus: the input numpy_array is one image, and the
             # returned seed corpus holds a single element with metadata and coverage info
             mutated_numpy_arrays, coverage_function, metadata_function, fetch_function
         )  # the mutated CorpusElement
         mutated_seed_corpus[0].parent = new_elements[i]
         mutated_inter_elements.append(mutated_seed_corpus[0])
     return mutated_inter_elements
Example #3
def main(_):
    """Constructs the fuzzer and performs fuzzing."""

    # Log more
    tf.logging.set_verbosity(tf.logging.INFO)  # set the threshold for which log messages will be recorded
    # Set the seeds!
    if FLAGS.seed:
        random.seed(FLAGS.seed)
        np.random.seed(FLAGS.seed)

    coverage_function = all_logit_coverage_function  # coverage function: the sum of the absolute values of all logits
    image, label = fuzz_utils.basic_mnist_input_corpus(
        choose_randomly=FLAGS.random_seed_corpus  # False here: return the first image and label; the image is 28*28*1
    )
    numpy_arrays = [[image, label]]

    with tf.Session() as sess:

        tensor_map = fuzz_utils.get_tensors_from_checkpoint(  # load the checkpoint
            sess, FLAGS.checkpoint_dir)
        fetch_function = fuzz_utils.build_fetch_function(
            sess, tensor_map)

        size = FLAGS.mutations_per_corpus_item  # number of mutations per corpus item
        mutation_function = lambda elt: do_basic_mutations(elt, size)  # mutation function
        # Build the seed corpus: numpy_arrays holds one image, and the returned
        # corpus contains a single element carrying metadata and coverage info.
        seed_corpus = seed_corpus_from_numpy_arrays(
            numpy_arrays, coverage_function, metadata_function, fetch_function)
        corpus = InputCorpus(  # build the input corpus
            seed_corpus,
            recent_sample_function,  # recent_sample_function picks the next element to mutate
            FLAGS.ann_threshold,
            "kdtree"
        )
        fuzzer = Fuzzer(
            corpus,
            coverage_function,
            metadata_function,
            objective_function,
            mutation_function,
            fetch_function,
        )
        result = fuzzer.loop(FLAGS.total_inputs_to_fuzz)
        if result is not None:
            tf.logging.info("Fuzzing succeeded.")
            tf.logging.info(
                "Generations to make satisfying element: %s.",
                result.oldest_ancestor()[1],
            )
        else:
            tf.logging.info("Fuzzing failed to satisfy objective function.")
Example #4
def main(_):
    """Constructs the fuzzer and performs fuzzing."""

    # Log more
    tf.logging.set_verbosity(tf.logging.INFO)
    # Set the seeds!
    if FLAGS.seed:
        random.seed(FLAGS.seed)
        np.random.seed(FLAGS.seed)

    coverage_function = all_logit_coverage_function  # a function to compute coverage
    image, label = fuzz_utils.basic_mnist_input_corpus(
        choose_randomly=FLAGS.random_seed_corpus)
    numpy_arrays = [[image, label]]

    with tf.Graph().as_default() as g:  # changed: build the session under an explicit graph
        sess = tf.Session()  # session is created here
        tensor_map = fuzz_utils.get_tensors_from_checkpoint(
            sess, FLAGS.checkpoint_dir)

        fetch_function = fuzz_utils.build_fetch_function(sess, tensor_map)

        size = FLAGS.mutations_per_corpus_item
        mutation_function = lambda elt: do_basic_mutations(elt, size)  # param 1
        seed_corpus = seed_corpus_from_numpy_arrays(numpy_arrays,
                                                    coverage_function,
                                                    metadata_function,
                                                    fetch_function)
        corpus = InputCorpus(seed_corpus, recent_sample_function,
                             FLAGS.ann_threshold, "kdtree")  # pram 2
        fuzzer = Fuzzer(
            corpus,
            coverage_function,
            metadata_function,
            objective_function,
            mutation_function,
            fetch_function,
        )
        result = fuzzer.loop(FLAGS.total_inputs_to_fuzz)  # iterations
        if result is not None:
            tf.logging.info("Fuzzing succeeded.")
            tf.logging.info(
                "Generations to make satisfying element: %s.",
                result.oldest_ancestor()[1],
            )
            tf.logging.info("Elements for Crashes: {0}".format(result))
        else:
            tf.logging.info("Fuzzing failed to satisfy objective function.")
Example #5
 def mutation_function(elt):
     """Mutates the element in question."""
     return do_basic_mutations(elt, size, FLAGS.perturbation_constraint)
Example #6
def mutation_function(elt):
    """Mutates the element in question."""
    return do_basic_mutations(elt,
                              FLAGS.mutations_per_corpus_item,
                              a_min=None,
                              a_max=None)
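
All of these snippets read command-line flags (FLAGS.mutations_per_corpus_item, FLAGS.ann_threshold, FLAGS.total_inputs_to_fuzz, and others). A plausible way to declare them in a TF 1.x script is shown below; the flag names are copied from the examples, but the defaults and help strings here are placeholders, not the projects' actual values:

import tensorflow as tf

tf.flags.DEFINE_integer("mutations_per_corpus_item", 100,
                        "How many mutated children to generate per corpus element.")
tf.flags.DEFINE_float("ann_threshold", 1.0,
                      "Nearest-neighbor distance below which coverage counts as already seen.")
tf.flags.DEFINE_integer("total_inputs_to_fuzz", 1000,
                        "How many fuzzing iterations to run.")
tf.flags.DEFINE_integer("seed", None, "Optional random seed for reproducibility.")
tf.flags.DEFINE_string("checkpoint_dir", None, "Directory holding the model checkpoint.")
tf.flags.DEFINE_boolean("random_seed_corpus", False,
                        "Whether to pick the seed image at random.")
tf.flags.DEFINE_float("perturbation_constraint", None,
                      "Optional bound on how far a mutation may move from its ancestor.")
FLAGS = tf.flags.FLAGS
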
Example #7
def mutation_function(elt):
    """Mutates the element in question."""
    return do_basic_mutations(elt, FLAGS.mutations_per_corpus_item,
                              FLAGS.perturbation_constraint)
def main(_):
    """Constructs the fuzzer and performs fuzzing."""

    # Log more
    tf.logging.set_verbosity(tf.logging.INFO)  # set the threshold for which log messages will be recorded
    # Set the seeds!
    if FLAGS.seed:
        random.seed(FLAGS.seed)
        np.random.seed(FLAGS.seed)

    coverage_function = all_logit_coverage_function  # coverage function: the sum of the absolute values of all logits
    # image, label = fuzz_utils.basic_mnist_input_corpus(
    #     choose_randomly=FLAGS.random_seed_corpus  # False here: return the first image and label; the image is 28*28*1
    # )
    # image, label = fuzz_utils.mnist_input_corpus_by_index(index=0)
    # print(len(image))
    # numpy_arrays = [[image, label]]
    #
    # print(len(numpy_arrays))
    # print(len(numpy_arrays[0]))
    with tf.Session() as sess:

        tensor_map = fuzz_utils.get_tensors_from_checkpoint(  # load the checkpoint
            sess, FLAGS.checkpoint_dir)  # returns a dict with input, coverage, and metadata tensors

        # fetch_function = fuzz_utils.build_fetch_function(sess, tensor_map)  # the original fetch function
        fetch_function = fuzz_utils.build_fetch_function_1(sess, tensor_map)

        size = FLAGS.mutations_per_corpus_item  # number of mutations per corpus item
        mutation_function = lambda elt: do_basic_mutations(elt, size)  # mutation function
        images, labels = fuzz_utils.mnist_test_input_corpus()
        # numpy_arrays = [[images, labels]]
        elements = []
        mutated_elements = []
        for idx in range(10000):
            image, label = images[idx], labels[idx]
            # print(image)
            # print(label)
            # print(len(image))
            numpy_arrays = [[image, label]]
            # print(type(numpy_arrays))
            # print(numpy_arrays)
            # print(len(numpy_arrays))
            # print(len(numpy_arrays[0]))
            # Build the seed corpus: numpy_arrays holds one image; the returned
            # corpus contains a single CorpusElement with metadata and coverage info.
            seed_corpus = seed_corpus_from_numpy_arrays_1(
                numpy_arrays, coverage_function, metadata_function,
                fetch_function)

            mutated_batches = do_basic_mutations(seed_corpus[0], 1)
            mutated_numpy_arrays = [[
                mutated_batches[0][0].tolist(), mutated_batches[1].tolist()[0]
            ]]
            # print(type(mutated_numpy_arrays))
            mutated_seed_corpus = seed_corpus_from_numpy_arrays_1(  # same as above, for the mutated input
                mutated_numpy_arrays, coverage_function, metadata_function,
                fetch_function)  # the mutated CorpusElement
            mutated_seed_corpus[0].parent = seed_corpus[0]
            elements.append(seed_corpus[0])
            mutated_elements.append(mutated_seed_corpus[0])
            # print(type(elements[0].data))
            # print(type(mutated_elements[0].data))
            # print(elements[0].data)
            # print(elements[0].data)

        # print(len(mutated_elements))
        hidden_1_list = []
        hidden_2_list = []
        output_list = []
        mutated_hidden_1_list = []
        mutated_hidden_2_list = []
        mutated_output_list = []

        for index in range(len(elements)):

            # hidden_1_list.append(elements[index].hidden_layer_1_before_activation[0][0].tolist())
            # hidden_2_list.append(elements[index].hidden_layer_2_before_activation[0][0].tolist())
            # output_list.append(elements[index].output_layer_before_activation[0][0].tolist())
            hidden_1_list.append(
                elements[index].hidden_layer_1_after_activation[0][0].tolist())
            hidden_2_list.append(
                elements[index].hidden_layer_2_after_activation[0][0].tolist())
            output_list.append(elements[index].bad_softmax[0].tolist())

        input_list = []
        for size in range(len(hidden_1_list)):
            data_size_input_list = []
            dense1_sub_list = hidden_1_list[size]
            dense2_sub_list = hidden_2_list[size]
            dense3_sub_list = output_list[size]

            dense1_sub_input_list = []
            dense2_sub_input_list = []
            dense3_sub_input_list = []
            for neuron_sum in range(len(dense1_sub_list)):
                dense1_sub_input_list.append([dense1_sub_list[neuron_sum]])
            data_size_input_list.append(dense1_sub_input_list)

            for neuron_sum in range(len(dense2_sub_list)):
                dense2_sub_input_list.append([dense2_sub_list[neuron_sum]])
            data_size_input_list.append(dense2_sub_input_list)

            for neuron_sum in range(len(dense3_sub_list)):
                dense3_sub_input_list.append([dense3_sub_list[neuron_sum]])
            data_size_input_list.append(dense3_sub_input_list)

            input_list.append(data_size_input_list)

        output_layer_input_list = []
        for size in range(len(output_list)):
            data_size_input_list = []
            # dense1_sub_list = mutated_hidden_1_list[size]
            # dense2_sub_list = mutated_hidden_2_list[size]
            dense3_sub_list = output_list[size]

            # dense1_sub_input_list = []
            # dense2_sub_input_list = []
            dense3_sub_input_list = []
            # for neuron_sum in range(len(dense1_sub_list)):
            #     dense1_sub_input_list.append([dense1_sub_list[neuron_sum]])
            # data_size_input_list.append(dense1_sub_input_list)
            #
            # for neuron_sum in range(len(dense2_sub_list)):
            #     dense2_sub_input_list.append([dense2_sub_list[neuron_sum]])
            # data_size_input_list.append(dense2_sub_input_list)

            for neuron_sum in range(len(dense3_sub_list)):
                dense3_sub_input_list.append([dense3_sub_list[neuron_sum]])
            data_size_input_list.append(dense3_sub_input_list)

            output_layer_input_list.append(data_size_input_list)

        time6 = time.time()
        coverage6 = coverage_functions_1.k_multisection_neuron_coverage(
            10, ['output_boundary.csv'], output_layer_input_list)
        print("耗时", time.time() - time6)
        print("output_layer k_multisection coverage:", coverage6)

        time7 = time.time()
        coverage7 = coverage_functions_1.neuron_boundary_coverage(
            ['output_boundary.csv'], output_layer_input_list)
        print("耗时", time.time() - time7)
        print("output_layer neuron_boundary coverage:", coverage7)

        time1 = time.time()
        coverage1 = coverage_functions_1.k_multisection_neuron_coverage(
            10, [
                'hidden_1_boundary.csv', 'hidden_2_boundary.csv',
                'output_boundary.csv'
            ], input_list)
        print("耗时", time.time() - time1)
        print("k_multisection coverage:", coverage1)

        time2 = time.time()
        coverage2 = coverage_functions_1.neuron_boundary_coverage([
            'hidden_1_boundary.csv', 'hidden_2_boundary.csv',
            'output_boundary.csv'
        ], input_list)
        print("耗时", time.time() - time2)
        print("neuron boundary coverage:", coverage2)

        time3 = time.time()
        coverage3 = coverage_functions_1.strong_neuron_activation_coverage([
            'hidden_1_boundary.csv', 'hidden_2_boundary.csv',
            'output_boundary.csv'
        ], input_list)
        print("耗时", time.time() - time3)
        print("strong neuron activation coverage:", coverage3)

        time4 = time.time()
        coverage4 = coverage_functions_1.top_k_neuron_coverage(2, input_list)
        print("耗时", time.time() - time4)
        print("top-k neuron coverage:", coverage4)

        time5 = time.time()
        coverage5 = coverage_functions_1.top_neuron_patterns(2, input_list)
        print("耗时", time.time() - time5)
        print("top-k patterns:", coverage5)
 def mutation_function(elt):  # elt is a single corpus element
     """Mutates the element in question."""
     return do_basic_mutations(
         elt, size,
         FLAGS.perturbation_constraint)  # mutate the element several times, returning a batch of mutated data
def main(_):
    """Constructs the fuzzer and performs fuzzing."""

    # Log more
    tf.logging.set_verbosity(tf.logging.INFO)  # set the threshold for which log messages will be recorded
    # Set the seeds!
    if FLAGS.seed:
        random.seed(FLAGS.seed)
        np.random.seed(FLAGS.seed)

    coverage_function = all_logit_coverage_function  # coverage function: the sum of the absolute values of all logits
    # image, label = fuzz_utils.basic_mnist_input_corpus(
    #     choose_randomly=FLAGS.random_seed_corpus  # False here: return the first image and label; the image is 28*28*1
    # )
    # image, label = fuzz_utils.mnist_input_corpus_by_index(index=0)
    # print(len(image))
    # numpy_arrays = [[image, label]]
    #
    # print(len(numpy_arrays))
    # print(len(numpy_arrays[0]))
    with tf.Session() as sess:

        tensor_map = fuzz_utils.get_tensors_from_checkpoint(  # load the checkpoint
            sess, FLAGS.checkpoint_dir)  # returns a dict with input, coverage, and metadata tensors

        # fetch_function = fuzz_utils.build_fetch_function(sess, tensor_map)  # the original fetch function
        fetch_function = fuzz_utils.build_fetch_function_1(sess, tensor_map)

        size = FLAGS.mutations_per_corpus_item  # number of mutations per corpus item
        mutation_function = lambda elt: do_basic_mutations(elt, size)  # mutation function
        images, labels = fuzz_utils.all_mnist_input_corpus()
        # numpy_arrays = [[images, labels]]
        elements = []
        for idx in range(50000):
            image, label = images[idx], labels[idx]
            # print(len(image))
            numpy_arrays = [[image, label]]
            # print(len(numpy_arrays))
            # print(len(numpy_arrays[0]))
            # Build the seed corpus: numpy_arrays holds one image; the returned
            # corpus contains a single CorpusElement with metadata and coverage info.
            seed_corpus = seed_corpus_from_numpy_arrays_1(
                numpy_arrays, coverage_function, metadata_function,
                fetch_function)
            elements.append(seed_corpus[0])
        hidden_1_list = []
        hidden_2_list = []
        output_list = []
        # hidden_1_list.append(elements[0].hidden_layer_1_before_activation[0][0].tolist())
        # hidden_1_list.append(elements[1].hidden_layer_1_before_activation[0][0].tolist())
        # # print(elements[0].hidden_layer_1_before_activation[0][0].tolist())
        # print(hidden_1_list)
        # print(len(hidden_1_list))
        # print(len(hidden_1_list[0]))
        # reverse_list = utils.reverse_list(hidden_1_list)
        # print(reverse_list)
        # print(len(reverse_list))
        # print(len(reverse_list[0]))

        # for index in range(1):
        for index in range(len(elements)):
            element = elements[index]
            # hidden_1_before_activation = element.hidden_layer_1_before_activation[0][0].tolist()
            # hidden_2_before_activation = element.hidden_layer_2_before_activation[0][0].tolist()
            # output_before_activation = element.output_layer_before_activation[0][0].tolist()
            hidden_1_before_activation = element.hidden_layer_1_after_activation[
                0][0].tolist()
            hidden_2_before_activation = element.hidden_layer_2_after_activation[
                0][0].tolist()
            # print(element.hidden_layer_2_after_activation[0][0])
            # print(len(element.hidden_layer_2_after_activation[0][0]))
            # print(type(element.hidden_layer_2_after_activation[0][0]))
            # print('==============================================')
            # print('==========================', type(element.output_layer_before_activation))
            # print(len(element.output_layer_before_activation))
            # print(len(element.output_layer_before_activation[0]))
            # output_before_activation = element.bad_softmax[0].tolist()
            output_before_activation = element.output_layer_before_activation[
                0][0].tolist()
            # print(output_before_activation[0][0])
            # print(type(output_before_activation[0][0]))

            hidden_1_list.append(hidden_1_before_activation)
            hidden_2_list.append(hidden_2_before_activation)
            output_list.append(output_before_activation)

        # print(hidden_1_list)
        # print(len(hidden_1_list))
        # print(len(hidden_1_list[0]))
        #
        # # print(hidden_2_list)
        # print(len(hidden_2_list))
        # print(hidden_2_list[0])
        #
        # print(len(output_list))
        # print(len(hidden_2_list[0]))

        reverse_hidden_1_list = utils.reverse_list(hidden_1_list)
        reverse_hidden_2_list = utils.reverse_list(hidden_2_list)
        reverse_output_list = utils.reverse_list(output_list)

        # Get the maximum and minimum value of every neuron in every layer
        hidden_1_boundary = utils.get_boundary(reverse_hidden_1_list)
        hidden_2_boundary = utils.get_boundary(reverse_hidden_2_list)
        output_boundary = utils.get_boundary(reverse_output_list)
        #
        # # Save the per-neuron min/max values as CSV files
        utils.save_boundary_list(hidden_1_boundary, 'hidden_1_boundary.csv')
        utils.save_boundary_list(hidden_2_boundary, 'hidden_2_boundary.csv')
        utils.save_boundary_list(output_boundary, 'output_boundary.csv')
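
Per the comments above, utils.reverse_list transposes the per-input activation lists into per-neuron lists, utils.get_boundary collects each neuron's minimum and maximum, and utils.save_boundary_list writes them to CSV. The utils module is not shown in these examples, so the following is only a guess at its behavior:

import csv

def reverse_list_sketch(per_input_activations):
    """Sketch: transpose [inputs][neurons] into [neurons][inputs]."""
    return [list(neuron_values) for neuron_values in zip(*per_input_activations)]

def get_boundary_sketch(per_neuron_activations):
    """Sketch: [min, max] observed for every neuron over the whole dataset."""
    return [[min(values), max(values)] for values in per_neuron_activations]

def save_boundary_list_sketch(boundary_list, filename):
    """Sketch: write one CSV row per neuron (columns: min, max)."""
    with open(filename, "w", newline="") as f:
        csv.writer(f).writerows(boundary_list)
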
def main(_):
    """Constructs the fuzzer and performs fuzzing."""

    # Log more
    tf.logging.set_verbosity(tf.logging.INFO)  # set the threshold for which log messages will be recorded
    # Set the seeds!
    # if FLAGS.seed:
    #     random.seed(FLAGS.seed)
    #     np.random.seed(FLAGS.seed)

    coverage_function = all_logit_coverage_function  # coverage function: the sum of the absolute values of all logits
    # coverage_function = raw_logit_coverage_function
    # image, label = fuzz_utils.basic_mnist_input_corpus(
    #     choose_randomly=FLAGS.random_seed_corpus  # False here: return the first image and label; the image is 28*28*1
    # )
    # numpy_arrays = [[image, label]]
    # print(numpy_arrays)
    # print(type(numpy_arrays))
    # print(image)
    # print(type(image))
    # print(label)
    # print(type(label))
    with tf.Session() as sess:

        tensor_map = fuzz_utils.get_tensors_from_checkpoint(  # load the checkpoint
            sess, FLAGS.checkpoint_dir)  # returns a dict with input, coverage, and metadata tensors

        fetch_function = fuzz_utils.build_fetch_function(sess, tensor_map)

        size = FLAGS.mutations_per_corpus_item  # number of mutations per corpus item
        mutation_function = lambda elt: do_basic_mutations(elt, size)  # mutation function
        generation_function = generate_mnist_input(low=FLAGS.low,
                                                   high=FLAGS.high,
                                                   size=size)  # data generation function
        # seed_corpus = seed_corpus_from_numpy_arrays(  # build the seed corpus: the input numpy_array is one image
        #     numpy_arrays, coverage_function, metadata_function, fetch_function
        # )  # returns a seed corpus containing a single CorpusElement with metadata and coverage info

        # corpus = InputCorpus(  # build the input corpus; it starts with a single element
        #     seed_corpus, recent_sample_function, FLAGS.ann_threshold, "kdtree"   # recent_sample_function selects the next element
        # )

        fuzzer = generation_based_Fuzzer(
            # corpus,
            coverage_function,
            metadata_function,
            objective_function,
            mutation_function,
            fetch_function,
            generation_function)
        start_time = time.time()
        result = fuzzer.loop(FLAGS.total_inputs_to_fuzz)

        if result is not None:
            tf.logging.info("Fuzzing succeeded.")
            tf.logging.info("Generations to make satisfying element: %s.",
                            # result.oldest_ancestor()[1],
                            )
        else:
            tf.logging.info("Fuzzing failed to satisfy objective function.")
        print('sum_time:', time.time() - start_time)
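
generate_mnist_input above supplies fresh inputs to the generation-based fuzzer instead of mutating corpus elements. Its implementation is not part of these examples; one guess, assuming it returns a callable that samples `size` uniform-random MNIST-shaped images in [low, high] with arbitrary labels:

import numpy as np

def generate_mnist_input_sketch(low, high, size):
    """Sketch: build a generator of random MNIST-shaped input batches."""
    def generate():
        images = np.random.uniform(low=low, high=high, size=(size, 28, 28, 1))
        labels = np.random.randint(0, 10, size=(size,))
        return images, labels
    return generate
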
Example #12
def main(_):
    """Constructs the fuzzer and performs fuzzing."""

    # Log more
    tf.logging.set_verbosity(tf.logging.INFO)  # set the threshold for which log messages will be recorded
    # Set the seeds!
    if FLAGS.seed:
        random.seed(FLAGS.seed)
        np.random.seed(FLAGS.seed)

    coverage_function = all_logit_coverage_function  # coverage function: the sum of the absolute values of all logits

    with tf.Session() as sess:

        tensor_map = fuzz_utils.get_tensors_from_checkpoint(  # load the checkpoint
            sess, FLAGS.checkpoint_dir
        )  # returns a dict with input, coverage, and metadata tensors

        # fetch_function = fuzz_utils.build_fetch_function(sess, tensor_map)  # the original fetch function
        fetch_function = fuzz_utils.build_fetch_function_1(sess, tensor_map)

        size = FLAGS.mutations_per_corpus_item  # number of mutations per corpus item
        mutation_function = lambda elt: do_basic_mutations(elt, size)  # mutation function
        images, labels = fuzz_utils.mnist_test_input_corpus()
        # numpy_arrays = [[images, labels]]
        elements = []
        mutated_elements = []
        for idx in range(10000):
            image, label = images[idx], labels[idx]
            numpy_arrays = [[image, label]]
            # Build the seed corpus: numpy_arrays holds one image; the returned
            # corpus contains a single CorpusElement with metadata and coverage info.
            seed_corpus = seed_corpus_from_numpy_arrays_1(
                numpy_arrays, coverage_function, metadata_function, fetch_function
            )
            elements.append(seed_corpus[0])
            # print(elements[0].output_layer_before_activation[0][0].tolist())
            # print(elements[0].bad_softmax[0].tolist())

        def generate_mutated_elements(new_elements):
            mutated_inter_elements = []
            for i in range(len(new_elements)):
                mutated_batches = do_basic_mutations(new_elements[i], 1)
                mutated_numpy_arrays = [[mutated_batches[0][0].tolist(), mutated_batches[1].tolist()[0]]]
                mutated_seed_corpus = seed_corpus_from_numpy_arrays_1(
                    # build the seed corpus: the input numpy_array is one image, and the
                    # returned seed corpus holds a single element with metadata and coverage info
                    mutated_numpy_arrays, coverage_function, metadata_function, fetch_function
                )  # the mutated CorpusElement
                mutated_seed_corpus[0].parent = new_elements[i]
                mutated_inter_elements.append(mutated_seed_corpus[0])
            return mutated_inter_elements

        mutated_elements = generate_mutated_elements(elements)
        iteration = 0
        coverage_functions_1.compute_coverage(mutated_elements)

        # while True:
        #     if iteration % 100 == 0:
        #         print("iteration: ", iteration)
        #     # print(mutated_elements[0].output_layer_before_activation[0][0].tolist())
        #     for index in range(len(mutated_elements[0].output_layer_before_activation[0][0].tolist())):
        #         logit = mutated_elements[0].output_layer_before_activation[0][0].tolist()[index]
        #         if logit >= 88:
        #             print("fuzzed succeed")
        #             break
        #     iteration += 1
        #     mutated_elements = generate_mutated_elements(mutated_elements)

        hidden_1_list = []
        hidden_2_list = []
        output_list = []