Example #1
def create_dataset(annotation_lines: np.ndarray, batch_size: int,
                   input_shape: list, anchors: np.ndarray, num_classes: int, random=True) -> tf.data.Dataset:
    """Build a tf.data pipeline that yields ((images, *y_true), dummy_y) batches from annotation lines."""
    num = len(annotation_lines)
    if num == 0 or batch_size <= 0:
        raise ValueError('annotation_lines must be non-empty and batch_size must be positive')

    def parser(lines):
        image_data = []
        box_data = []
        for line in lines:
            image, box = get_random_data(line.numpy().decode(), input_shape, random=random)
            image_data.append(image)
            box_data.append(box)

        image_data = np.array(image_data)
        box_data = np.array(box_data)

        y_true = [tf.convert_to_tensor(y, tf.float32) for y in preprocess_true_boxes(box_data, input_shape, anchors, num_classes)]
        image_data = tf.convert_to_tensor(image_data, tf.float32)
        return (image_data, *y_true)

    x_set = (tf.data.Dataset.from_tensor_slices(annotation_lines).
             apply(tf.data.experimental.shuffle_and_repeat(batch_size * 100, seed=66)).
             batch(batch_size, drop_remainder=True).
             map(lambda lines: py_function(parser, [lines], [tf.float32] * (1 + len(anchors) // 3)),
                 num_parallel_calls=tf.data.experimental.AUTOTUNE))
    y_set = tf.data.Dataset.from_tensors(tf.zeros(batch_size, tf.float32)).repeat()
    dataset = tf.data.Dataset.zip((x_set, y_set))
    return dataset
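The pattern that makes this pipeline work is wrapping a plain-Python parser in py_function inside Dataset.map, so NumPy-based augmentation can run from the input pipeline. Below is a minimal, self-contained sketch of that pattern; the dummy annotation strings and the _parse helper are illustrative only and not part of the original project.

import tensorflow as tf

def _parse(line):
    # Runs as ordinary eager Python: decode the byte string and do any
    # NumPy-style work here, then return tensors.
    text = line.numpy().decode()
    return tf.constant(float(len(text)), tf.float32)

lines = tf.data.Dataset.from_tensor_slices([b'img1.jpg 10,20,30,40,0',
                                            b'img2.jpg 15,25,35,45,1'])
ds = lines.map(lambda l: tf.py_function(_parse, [l], tf.float32),
               num_parallel_calls=tf.data.experimental.AUTOTUNE)
for value in ds:
    print(value)  # one float32 scalar per annotation line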
Example #2
    def train(self):
        """
            1、构造tensorflow的基本算子、算法。注意这一步都是在“定义”和“构造”,不是真正的模型训练和计算
        """
        # First build a dataflow graph
        temp_graph = tf.Graph()
        with temp_graph.as_default():
            # Define placeholders for the training data. Using this approach, running train always fails with
            # the following error, which I have not yet been able to resolve:
            # You must feed a value for placeholder tensor 'x' with dtype float and shape [?,?]
            # x = tf.placeholder(dtype=tf.float32, shape=[None, None], name='x')
            # y = tf.placeholder(dtype=tf.float32, shape=[None], name='y')

            # Define the trainable parameters w and b: weight is given random values between -1 and 1,
            # bias is a variable initialized to 0
            weight = tf.Variable(tf.random_uniform([1, self.__x_train.shape[1]], -1.0, 1.0))
            bias = tf.Variable(tf.zeros([1]))

            # Define the binary-classification sigmoid model y = 1/(1 + exp(-(w*x + b)))
            # y_pre = tf.div(1.0,
            #                tf.add(1.0,
            #                       tf.exp(tf.neg(tf.reduce_sum(tf.multiply(weight, self.__x_train),
            #                                                   1
            #                                                  ) + bias)
            #                             )
            #                      )
            #               )
            # Alternatively, use tf.sigmoid directly
            y_pre = tf.sigmoid(tf.reduce_sum(tf.multiply(weight, self.__x_train), 1) + bias)

            # Define the loss as the negative log-likelihood: (-y*log(y_pre) - (1-y)*log(1-y_pre)) / num_samples
            # Why this form? It comes down to the difference between least squares in linear regression and
            # maximum likelihood in logistic regression.
            # Least squares tries to make the "error" between predictions and true values as small as possible,
            # while maximum likelihood tries to make the probability of the observed training samples as large as possible.
            # The log-likelihood above follows from that (see the literature for the derivation); for gradient
            # descent we simply negate it so that it can be minimized. (A small NumPy sketch after this example
            # makes the formula concrete.)
            loss0 = self.__y_train * tf.log(y_pre)
            loss1 = (1 - self.__y_train) * tf.log(1 - y_pre)
            loss = tf.reduce_sum(- loss0 - loss1) / self.__x_train.shape[0]
            # Define the optimization algorithm (gradient descent); the goal is to minimize the loss
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
            train = optimizer.minimize(loss)
            # Initialize the variables
            init = tf.global_variables_initializer()

        """
            2.正式训练
        """
        # Create a session
        with tf.Session(graph=temp_graph) as sess:
            # Only now does the real computation start
            sess.run(init)
            print('Initial parameters: weight=', sess.run(weight), ', bias=', sess.run(bias))
            # Fit the model: run 1000 steps of gradient descent to obtain the best w and b
            for step in range(1000):
                sess.run(train)
                if step % 100 == 0:
                    print("第%u步:权重:%s,偏置:%f,损失:%f" %
                          (step, weight.eval(), bias.eval(), loss.eval()))
                self.__weight = weight.eval()
                self.__bias = bias.eval()
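The loss above is the mean binary cross-entropy. A tiny NumPy sketch with made-up numbers, just to make the commented formula concrete:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

# Made-up toy labels and logits.
y_true = np.array([1.0, 0.0, 1.0])
y_pre = sigmoid(np.array([2.0, -1.0, 0.5]))

# Mean negative log-likelihood: (-y*log(y_pre) - (1-y)*log(1-y_pre)) / n
loss = np.mean(-y_true * np.log(y_pre) - (1 - y_true) * np.log(1 - y_pre))
print(loss)  # roughly 0.30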
Example #3
def test_zip_dataset():
    """ 尝试zip dataset,但还是失败了 """
    annotation_path = 'train.txt'
    classes_path = 'model_data/voc_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    val_split = 0.1
    with open(annotation_path) as f:
        annotation_lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(annotation_lines)
    np.random.seed(None)
    num_val = int(len(annotation_lines) * val_split)
    num_train = len(annotation_lines) - num_val

    batch_size = 32
    input_shape = (416, 416)

    num = len(annotation_lines)
    if num == 0 or batch_size <= 0:
        raise ValueError('annotation_lines must be non-empty and batch_size must be positive')

    def parser(lines):
        image_data = []
        box_data = []
        for line in lines:
            image, box = get_random_data(line.numpy().decode(),
                                         input_shape,
                                         random=True)
            image_data.append(image)
            box_data.append(box)

        image_data = np.array(image_data)
        box_data = np.array(box_data)

        y_true = [
            tf.convert_to_tensor(y, tf.float32) for y in preprocess_true_boxes(
                box_data, input_shape, anchors, num_classes)
        ]
        image_data = tf.convert_to_tensor(image_data, tf.float32)
        return (image_data, *y_true)

    x_set = (tf.data.Dataset.from_tensor_slices(annotation_lines).
             apply(tf.data.experimental.shuffle_and_repeat(batch_size * 300, seed=66)).
             batch(batch_size, drop_remainder=True).
             map(lambda lines: py_function(parser, [lines], [tf.float32] * (1 + len(anchors) // 3))))
    y_set = tf.data.Dataset.from_tensors(tf.zeros(batch_size, tf.float32)).repeat()
    dataset = tf.data.Dataset.zip((x_set, y_set))

    sample = next(iter(dataset))
Example #4
def _calculate_supervised_similarities(y_true) -> Tensor:
    """
    Calculates the target supervised similarities.
    Runs a nested TensorFlow while loop that compares every pair of y_true values in the batch
    (a vectorized sketch of the same idea follows this example).

    :param y_true: the y_true value.
    :return: Tensor containing the target supervised similarities.
    """
    # Get the batch size.
    batch_size = shape(y_true)[0]
    # Initialize outer loop index.
    i = constant(0)
    # Initialize symmetric supervised similarity matrix targets.
    target_similarity = zeros((batch_size, batch_size))

    def outer_loop_condition(_i, _batch_size, _y_true, _target_similarity):
        """Define outer loop condition."""
        return less(_i, _batch_size)

    def outer_loop_body(_i, _batch_size, _y_true, _target_similarity):
        """Define outer loop body."""
        # Initialize inner loop index.
        j = constant(0)

        def inner_loop_condition(_i, _j, _y_true, _target_similarity):
            """Define inner loop condition."""
            return less(_j, _batch_size)

        def inner_loop_body(_i, _j, _y_true, _target_similarity):
            """Define inner loop body."""
            # Tensors do not support item assignment, so when the two labels match, write a 1 at [i, j]
            # via a conditional scatter update (cond, reduce_all, equal and tensor_scatter_nd_update
            # are the TensorFlow ops of the same names).
            _target_similarity = cond(
                reduce_all(equal(_y_true[_i], _y_true[_j])),
                lambda: tensor_scatter_nd_update(_target_similarity, [[_i, _j]], [1.0]),
                lambda: _target_similarity)
            return _i, _j + 1, _y_true, _target_similarity

        # Begin inner while loop.
        _, j, _, _target_similarity = while_loop(
            inner_loop_condition, inner_loop_body,
            [_i, j, _y_true, _target_similarity])
        return _i + 1, _batch_size, _y_true, _target_similarity

    # Begin outer while loop.
    i, _, _, target_similarity = while_loop(
        outer_loop_condition, outer_loop_body,
        [i, batch_size, y_true, target_similarity])
    return target_similarity
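For reference, the same target matrix can be built without explicit loops by broadcasting an elementwise comparison over the batch. A short sketch, assuming y_true is a 1-D tensor of integer class labels (the original function may expect a different shape):

import tensorflow as tf

def supervised_similarities_vectorized(y_true):
    # Compare every label against every other label via broadcasting,
    # then cast the boolean matrix to float targets.
    pairwise_equal = tf.equal(tf.expand_dims(y_true, 1), tf.expand_dims(y_true, 0))
    return tf.cast(pairwise_equal, tf.float32)

print(supervised_similarities_vectorized(tf.constant([0, 1, 0, 2])))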
Example #5
    def train(self):
        """
            1、构造tensorflow的基本算子、算法。注意这一步都是在“定义”和“构造”,不是真正的模型训练和计算
        """
        # First build a linear model y = w*x + b, where w is given random values between -1 and 1
        # and b is a variable initialized to 0
        w = tf.Variable(tf.random_uniform([1], -1.0, 1.0))
        b = tf.Variable(tf.zeros([1]))
        y = tf.multiply(w, self.__x_data) + b
        # Define the loss (mean squared error) and the optimizer (gradient descent); the goal is to minimize the loss
        loss = tf.reduce_mean(tf.square(y - self.__y_data))
        optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.05)
        train = optimizer.minimize(loss)
        # Initialize the variables
        init = tf.global_variables_initializer()

        """
            2、正式训练
        """
        # Create a session
        sess = tf.Session()
        # Only now does the real computation start
        sess.run(init)
        print('Initial parameters: w=', sess.run(w), ', b=', sess.run(b))
        # Fit the line: run about 100 steps of gradient descent to obtain the best w and b
        for step in numpy.arange(0, 101):
            sess.run(train)
            if step % 10 == 0:
                print(step, sess.run(w), sess.run(b))

        """
            3、画图
        """
        plt.scatter(self.__x_data, self.__y_data, marker='.', color='red', s=40, label='First')
        plt.plot([numpy.min(self.__x_data), numpy.max(self.__x_data)],
                 [sess.run(w)*numpy.min(self.__x_data)+sess.run(b),
                  sess.run(w)*numpy.max(self.__x_data)+sess.run(b)],
                 'b')
        plt.show()

        """
            4、任务完成, 关闭会话.
        """
        sess.close()
Example #6
    def train(self):
        """
            1. Build the basic TensorFlow ops and the algorithm. Note that this step only "defines" and
            "constructs" the graph; it is not the actual model training or computation.
            Pay particular attention to the relationship between the training data and the learning rate:
            in this case the training data X all lies between 0 and 1, so a learning rate of 0.5 works well.
            The larger the training data values, the smaller the learning rate has to be; for example, with X
            between 0 and 5, a learning rate of 0.05 is more appropriate.
            My intuition: if the inputs grow but the learning rate is not reduced, each gradient step overshoots
            (the so-called "exploding gradient"), the coefficients keep growing until they overflow, and training
            never converges. (The small NumPy sketch after this example illustrates the effect.)
        """
        # First build a dataflow graph defining the linear model y = w*x + b, where w is given random values
        # between -1 and 1 and b is a variable initialized to 0
        temp_graph = tf.Graph()
        with temp_graph.as_default():
            tf_v_w = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
            tf_v_b = tf.Variable(tf.zeros([1]))
            tf_v_y = tf.matmul(tf_v_w, self.__x_data) + tf_v_b
            # Define the loss (mean squared error) and the optimizer (gradient descent); the goal is to minimize the loss
            loss = tf.reduce_mean(tf.square(tf_v_y - self.__y_data))
            optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.05)
            train = optimizer.minimize(loss)
            # Initialize the variables
            init = tf.global_variables_initializer()
        """
            2. The actual training
        """
        # Create a session
        with tf.Session(graph=temp_graph) as sess:
            # Only now does the real computation start
            sess.run(init)
            print('Initial parameters: w=', sess.run(tf_v_w), ', b=', sess.run(tf_v_b))
            # Fit the plane: run about 100 steps of gradient descent to obtain the best w and b
            for step in numpy.arange(0, 101):
                sess.run(train)
                if step % 10 == 0:
                    print("Step %u: weight: %s, bias: %f, loss: %f" %
                          (step, tf_v_w.eval(), tf_v_b.eval(), loss.eval()))
                # Save the trained parameters
                self.__w_data = tf_v_w.eval()
                self.__b_data = tf_v_b.eval()
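The learning-rate remark in the docstring above can be reproduced with a few lines of NumPy: the same step size that converges on inputs between 0 and 1 diverges once the inputs are scaled up. All numbers below are made up for illustration.

import numpy as np

def fit(x, y, lr, steps=100):
    # Plain gradient descent on the squared error of the model y = w*x.
    w = 0.0
    for _ in range(steps):
        grad = np.mean(2.0 * (w * x - y) * x)
        w -= lr * grad
    return w

x_small = np.linspace(0.0, 1.0, 50)
x_large = 5.0 * x_small
print(fit(x_small, 0.3 * x_small, lr=0.5))   # converges close to 0.3
print(fit(x_large, 0.3 * x_large, lr=0.5))   # diverges to a huge value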
Example #7
def test_constant():
    t = tf.ones((16, 10))
    # Build the zero tensor directly rather than wrapping tf.zeros in tf.constant,
    # which fails for tensor inputs in graph mode.
    b = tf.zeros((t.shape[0], t.shape[1]), name='b')  # b [in_caps, out_caps]