Code Example #1
# requires numpy as np; read_xml comes from the project's annotation parser
def txt2clusters(self):
    # collect the width and height of every annotated box
    chunks = read_xml(self.txt_name, self.pick)
    all_boxes = list()
    for chunk in chunks:
        box = list()
        box.append(chunk[1][2][0]['xmax'] - chunk[1][2][0]['xmin'])
        box.append(chunk[1][2][0]['ymax'] - chunk[1][2][0]['ymin'])
        all_boxes.append(box)
    all_boxes = np.array(all_boxes)
    # all_boxes = self.txt2boxes()
    # cluster the box sizes into k anchors, sort them by width, and report the average IoU
    result = self.kmeans(all_boxes, k=self.cluster_number)
    result = result[np.lexsort(result.T[0, None])]
    self.result2txt(result)
    print("K anchors:\n {}".format(result))
    print("Accuracy: {:.2f}%".format(
        self.avg_iou(all_boxes, result) * 100))
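txt2clusters above belongs to an anchor-clustering helper class that also provides kmeans, result2txt and avg_iou. A minimal sketch of how such a class might be driven is shown below; the class name AnchorKmeans and its constructor arguments are assumptions for illustration, not part of the original example.

if __name__ == "__main__":
    # hypothetical driver: 9 anchors, a VOC-style annotation list, two picked labels
    kmeans_runner = AnchorKmeans(cluster_number=9,
                                 txt_name="train.txt",
                                 pick=["person", "car"])
    # writes the sorted anchors to disk and prints them with their average IoU
    kmeans_runner.txt2clusters()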
Code Example #2
# TensorFlow 1.x API; read_xml, inference, yolo3_loss, training, data_generator
# and the Gb_* settings come from the project's own modules
import os
import time

import tensorflow as tf


def main():
    n_class = len(Gb_label)
    log_dir = Gb_ckpt_dir
    final_dir = Gb_ckpt_dir
    save_frequency = Gb_save_frequency
    batch_size = Gb_batch_size
    pick = Gb_label
    learning_rate = Gb_learning_rate
    chunks = read_xml('train.txt', pick)
    n_epoch = Gb_epoch
    n_step_epoch = int(len(chunks) / batch_size)

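    # placeholders: the input image plus y_true targets at the three YOLOv3 output scales (52, 26, 13)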
    input_pb = tf.placeholder(tf.float32, [None, 416, 416, 3])
    y_true_pb_1 = tf.placeholder(tf.float32, [None, 52, 52, 3, 5 + n_class])
    y_true_pb_2 = tf.placeholder(tf.float32, [None, 26, 26, 3, 5 + n_class])
    y_true_pb_3 = tf.placeholder(tf.float32, [None, 13, 13, 3, 5 + n_class])
    net_out = inference(input_pb, n_class)
    loss_op = yolo3_loss(net_out, [y_true_pb_1, y_true_pb_2, y_true_pb_3])
    train_op = training(loss_op, learning_rate)

    # varis = tf.global_variables()
    # var_to_restore = [val for val in varis if 'Adam' not in val.name and 'optimizer' not in val.name]
    # saver = tf.train.Saver(var_to_restore)
    saver = tf.train.Saver()
    summary_op = tf.summary.merge_all()
    temp = ''  # name of the last saved non-best checkpoint (cleaned up in the loop below)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # if tf.train.get_checkpoint_state('./ckpt3/'):  # check whether a checkpoint exists
        #     saver.restore(sess, './ckpt3/' + "test.ckpt")
        #     print("load ok!")
        # else:
        #     print("checkpoint file does not exist")

        # tensor = tf.global_variables('layer_0_conv')
        # b = sess.run(tensor)

        train_writer = tf.summary.FileWriter(log_dir, sess.graph)
        step = 0
        min_loss = 10000000
        for epoch in range(n_epoch):
            step_epoch = 0
            # TODO shuffle chunks
            data_yield = data_generator(chunks)

            for img, label_box in data_yield:
                step += 1
                step_epoch += 1
                start_time = time.time()

                loss, _, summary_str = sess.run(
                    [loss_op, train_op, summary_op],
                    feed_dict={
                        input_pb: img,
                        y_true_pb_1: label_box[0],
                        y_true_pb_2: label_box[1],
                        y_true_pb_3: label_box[2]
                    })
                train_writer.add_summary(summary_str, step)

                # print the loss for each training step
                print("Loss %f : Epoch %d  %d/%d: Step %d  took %fs" %
                      (loss, epoch, step_epoch, n_step_epoch, step,
                       time.time() - start_time))

                if step % save_frequency == 0:
                    print("Save model " + "!" * 10)
                    save_path = saver.save(
                        sess,
                        final_dir + 'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(
                            epoch, step, loss))
                    if loss < min_loss:
                        # keep this checkpoint: it improves on the best loss so far
                        min_loss = loss
                    else:
                        # otherwise delete the previously remembered non-best checkpoint
                        # and remember this one as the next candidate for cleanup
                        try:
                            os.remove(final_dir + temp +
                                      '.data-00000-of-00001')
                            os.remove(final_dir + temp + '.index')
                            os.remove(final_dir + temp + '.meta')
                        except OSError:
                            pass
                        temp = 'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(
                            epoch, step, loss)
Code Example #3
# TensorFlow 1.x API; read_xml, infenence, yolo_loss, training, data_generator
# and the Gb_* settings come from the project's own modules
import os
import time

import tensorflow as tf


def main():
    # n_class = len(Gb_label)
    # model_name = Gb_model_name
    log_dir = Gb_ckpt_dir
    final_dir = Gb_ckpt_dir
    save_frequency = Gb_save_frequency
    batch_size = Gb_batch_size
    learning_rate = Gb_learning_rate

    annotations_path = Gb_ann_path
    pick = Gb_labels
    chunks = read_xml(annotations_path, pick)

    n_epoch = Gb_epoch
    n_step_epoch = int(len(chunks) / batch_size)
    # n_step = n_epoch * n_step_epoch

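    # this variant uses a single 52x52 target map per image (fed in as segment_datas below)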
    input_pb = tf.placeholder(tf.float32, [None, 416, 416, 3])
    y_true_pb = tf.placeholder(tf.float32, [None, 52, 52])
    net_out = infenence(input_pb)  # sic: the project defines its inference function with this spelling
    loss_op = yolo_loss(net_out, y_true_pb)
    train_op = training(loss_op, learning_rate)

    # varis = tf.global_variables()
    # var_to_restore = [val for val in varis if 'Adam' not in val.name and 'optimizer' not in val.name]
    # saver = tf.train.Saver(var_to_restore)
    saver = tf.train.Saver()
    summary_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # if tf.train.get_checkpoint_state('./ckpt2/'):  # check whether a checkpoint exists
        #     saver.restore(sess, './ckpt2/' + "ep094-step17000-loss61286.484")
        #     print("load ok!")
        # else:
        #     print("checkpoint file does not exist")

        # tensor = tf.global_variables('layer_0_conv')
        # b = sess.run(tensor)

        train_writer = tf.summary.FileWriter(log_dir, sess.graph)
        step = 0
        min_loss = 10000000
        temp = ''  # name of the most recent rolling checkpoint (used for cleanup below)
        for epoch in range(n_epoch):
            step_epoch = 0
            # TODO shuffle chunks
            data_yield = data_generator(chunks)

            # train_loss, n_batch = 0, 0
            for origin_img_sizeds, segment_datas in data_yield:
                step += 1
                step_epoch += 1
                start_time = time.time()

                summary_str, loss, _ = sess.run(
                    [summary_op, loss_op, train_op],
                    feed_dict={
                        input_pb: origin_img_sizeds,
                        y_true_pb: segment_datas
                    })
                train_writer.add_summary(summary_str, step)

                # print the loss for each training step
                print("Loss %f : Epoch %d  %d/%d: Step %d  took %fs" %
                      (loss, epoch, step_epoch, n_step_epoch, step,
                       time.time() - start_time))

                # every 1000 steps, additionally keep a checkpoint whenever the loss improves
                if step % 1000 == 0 and loss < min_loss:
                    print("Save model " + "!" * 10)
                    save_path = saver.save(
                        sess,
                        final_dir + 'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(
                            epoch, step, loss))
                    min_loss = loss

                # every save_frequency steps, save a rolling checkpoint and delete the previous one
                if step % save_frequency == 0:
                    if step != save_frequency:
                        os.remove(final_dir + temp + '.data-00000-of-00001')
                        os.remove(final_dir + temp + '.index')
                        os.remove(final_dir + temp + '.meta')

                    print("Save model " + "!" * 10)
                    save_path = saver.save(
                        sess,
                        final_dir + 'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(
                            epoch, step, loss))
                    temp = 'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(
                        epoch, step, loss)
Code Example #4
# TensorFlow 1.x API; read_xml, infenence, model_loss, training, data_generator
# and the Gb_* settings come from the project's own modules
import time

import tensorflow as tf


def main():
    n_class = len(Gb_label)
    # model_name = Gb_model_name
    log_dir = Gb_ckpt_dir
    final_dir = Gb_ckpt_dir
    save_frequency = Gb_save_frequency
    label_dir = Gb_label_dir
    batch_size = Gb_batch_size
    pick = Gb_label
    learning_rate = Gb_learning_rate
    chunks = read_xml(
        Gb_img_dir,
        label_dir,
        pick,
    )
    n_epoch = Gb_epoch
    n_step_epoch = int(len(chunks) / batch_size)
    # n_step = n_epoch * n_step_epoch

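    # single-scale target: a Gb_cell x Gb_cell grid with 9 anchors per cell, each holding 5 box values plus class scores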
    input_pb = tf.placeholder(tf.float32, [None, 416, 416, 3])
    y_true_pb = tf.placeholder(tf.float32,
                               [None, Gb_cell, Gb_cell, 9, 5 + n_class])
    net_out = infenence(input_pb)
    # net_out = squeezenet(input_pb)
    loss_op = model_loss(net_out, y_true_pb)
    train_op = training(loss_op, learning_rate)

    # varis = tf.global_variables()
    # var_to_restore = [val for val in varis if 'Adam' not in val.name and 'optimizer' not in val.name]
    # saver = tf.train.Saver(var_to_restore)
    saver = tf.train.Saver(max_to_keep=100)
    summary_op = tf.summary.merge_all()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # if tf.train.get_checkpoint_state('./ckpt3/'):  # check whether a checkpoint exists
        #     saver.restore(sess, './ckpt3/' + "test.ckpt")
        #     print("load ok!")
        # else:
        #     print("checkpoint file does not exist")

        train_writer = tf.summary.FileWriter(log_dir, sess.graph)
        step = 0
        min_loss = 10000000
        for epoch in range(n_epoch):
            step_epoch = 0
            # TODO shuffle chunks
            data_yield = data_generator(chunks)

            # train_loss, n_batch = 0, 0
            for img, label_box in data_yield:
                step += 1
                step_epoch += 1
                start_time = time.time()

                # a = sess.run(tf.trainable_variables()[0])
                summary_str, loss, _ = sess.run(
                    [summary_op, loss_op, train_op],
                    feed_dict={
                        input_pb: img,
                        y_true_pb: label_box
                    })
                train_writer.add_summary(summary_str, step)

                # print the loss for each training step
                print("Loss %f : Epoch %d  %d/%d: Step %d  took %fs" %
                      (loss, epoch, step_epoch, n_step_epoch, step,
                       time.time() - start_time))

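                # save a checkpoint only when this step's loss beats the best seen so far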
                if step % save_frequency == 0 and loss < min_loss:
                    print("Save model " + "!" * 10)
                    save_path = saver.save(
                        sess,
                        final_dir + 'ep{0:03d}-step{1:d}-loss{2:.3f}'.format(
                            epoch, step, loss))
                    min_loss = loss
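Each of the main() functions above is a complete training entry point. Assuming it sits at module level, a minimal launcher would be the usual guard:

if __name__ == '__main__':
    main()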