Example no. 1
def evaluate():
    x = tf.placeholder(
        dtype=tf.float32,
        shape=[None, NANOPORE_HEIGHT, NANOPORE_WIDTH, NANOPORE_CHANNEL],
        name="x")

    cnd = CellflowNanoporeDataset(CELLFLOW_DATASET, batch_size=BATCH_SIZE)
    data = cnd.get_data
    features, chrom, position = data.get_next()

    # Forward pass for evaluation
    cellflow_pred = inference(input_tensor=x, train=False, regularizer=None)
    y_logit = tf.nn.softmax(cellflow_pred, name="prediction")

    variable_average = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_average.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()
        sess.run(data.initializer)

        # get_checkpoint_state finds the latest model checkpoint in the directory via the "checkpoint" file
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            chrom_location = []
            positions = []
            predict_results = []
            while True:
                try:
                    feats, chrs, pos = sess.run([features, chrom, position])
                    eval_feed = {x: feats}
                    pred = sess.run(y_logit, feed_dict=eval_feed)
                    chrom_location.extend(chrs)
                    positions.extend(pos)
                    predict_results.extend([i[0] for i in pred])

                except tf.errors.OutOfRangeError:
                    break
            chrom_location = [loc.decode() for loc in chrom_location]  # decode byte strings to str
            cnn_result = pd.DataFrame(
                data={
                    "chr": chrom_location,
                    "position": positions,
                    "logit": predict_results
                })
            del chrom_location, positions, predict_results
            cnn_result.to_csv(CELLFLOW_EVAL,
                              sep="\t",
                              header=True,
                              index=False,
                              mode="w",
                              encoding="utf-8")
        else:
            print("No checkpoint file found")

    return None
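
Example no. 1 restores the moving-average ("shadow") copies of the weights for evaluation. A minimal, self-contained sketch of what variables_to_restore() produces, with a hypothetical variable named "weights":

import tensorflow as tf

w = tf.Variable(tf.zeros([3]), name="weights")
ema = tf.train.ExponentialMovingAverage(0.99)
restore_map = ema.variables_to_restore()
# restore_map == {"weights/ExponentialMovingAverage": w}: the Saver loads the
# checkpointed moving average into w, so evaluation runs on the averaged weights.
saver = tf.train.Saver(restore_map)
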
Example no. 2
def main(_):
    for i in range(len(TEST_LENGTH) * 10):
        tf.reset_default_graph()
        print('File number: %d' % i)
        train_samples, train_labels = cnn_data.read_and_decode(
            trainfile[i % 6], cnninf.BATCH_SIZE, True, TEST_LENGTH[i % 6])
        #test_samples, test_labels = cnn_data.read_and_decode_var(tfrecords_test, cnninf.BATCH_SIZE, False, 1024)
        X = tf.placeholder(dtype=tf.float32, shape=train_samples.get_shape())
        Y = tf.placeholder(dtype=tf.float32, shape=[cnninf.BATCH_SIZE])

        global_step = tf.Variable(0, trainable=False)

        ## The structure of cnn model

        features, logits_cnn = cnninf.inference(X, cnninf.BATCH_SIZE)

        loss_cnn = cnninf.loss(logits_cnn, Y)
        train_cnn = cnninf.train(loss_cnn, global_step, 1000,
                                 cnninf.BATCH_SIZE)
        correct_cnn = cnninf.evaluation(logits_cnn, Y)

        epoch_train = (TRAIN_NUMBER[i % 6] // 10000 + 1) * 10

        init = tf.global_variables_initializer()
        saver = tf.train.Saver()

        test_accuracy = []
        test_loss = []

        with tf.Session() as sess:
            sess.run(init)

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            # Note: TF V2 checkpoints are sharded (.index/.data files), so this
            # exact-path check may never succeed; see the sketch after this example.
            if os.path.exists('logfile/chcom.ckpt'):
                saver.restore(sess, 'logfile/chcom.ckpt')


#                tf.get_variable_scope().reuse_variables()

            for _ in xrange(epoch_train):  # xrange is Python 2; use range under Python 3
                sam_batch, label_batch = sess.run(
                    [train_samples, train_labels])
                sess.run(train_cnn, feed_dict={X: sam_batch, Y: label_batch})
                err, acc = sess.run([loss_cnn, correct_cnn],
                                    feed_dict={
                                        X: sam_batch,
                                        Y: label_batch
                                    })
                print(
                    "File %d, training CNN, loss: %f, accuracy: %f"
                    % (i, err, acc))

            saver.save(sess, 'logfile/chcom.ckpt')
            coord.request_stop()
            coord.join(threads)
        sess.close()
        del sess
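
Example no. 2 tests os.path.exists on the bare checkpoint prefix, but TF1 V2 checkpoints are written as sharded files (prefix.index, prefix.data-*), so that exact path usually does not exist. A minimal sketch, assuming the same prefix, of a check that matches the sharded files:

import tensorflow as tf

def maybe_restore(sess, saver, ckpt_prefix='logfile/chcom.ckpt'):
    # checkpoint_exists looks for prefix.index / prefix.data-* shards,
    # not the bare prefix path itself.
    if tf.train.checkpoint_exists(ckpt_prefix):
        saver.restore(sess, ckpt_prefix)
        return True
    return False
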
Example no. 3
def evaluate(mnist):
    with tf.Graph().as_default() as g:
        # Define the input and output formats
        x = tf.placeholder(
            tf.float32,
            [
                mnist.test.num_examples,  # first dimension: number of examples
                cnn_inference.IMAGE_SIZE,  # second and third dimensions: image size
                cnn_inference.IMAGE_SIZE,
                cnn_inference.NUM_CHANNELS
            ],  # fourth dimension: image depth; 3 for RGB images
            name='x-input')
        y_ = tf.placeholder(tf.float32, [None, cnn_inference.OUTPUT_NODE],
                            name='y-input')

        validate_feed = {
            x:
            np.reshape(mnist.test.images,
                       (mnist.test.num_examples, cnn_inference.IMAGE_SIZE,
                        cnn_inference.IMAGE_SIZE, cnn_inference.NUM_CHANNELS)),
            y_:
            mnist.test.labels
        }
        # Compute the forward pass by calling the shared inference function.
        # The regularization loss does not matter at test time, so the regularizer argument is set to None.
        y = cnn_inference.inference(x, False, None)

        # Use the forward-pass result to compute the accuracy.
        # To classify unseen examples, tf.argmax(y, 1) gives the predicted class of each input.
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Load the model via variable renaming, so the forward pass does not need to call the
        # moving-average ops to obtain averaged values; the forward pass in cnn_inference.py is reused unchanged.
        variable_averages = tf.train.ExponentialMovingAverage(
            cnn_train.MOVING_AVERAGE_DECAY)
        variable_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variable_to_restore)

        # Compute the accuracy every EVAL_INTERVAL_SECS seconds to track how it changes during training
        while True:
            with tf.Session() as sess:
                # tf.train.get_checkpoint_state finds the latest model file in the directory via the checkpoint file
                ckpt = tf.train.get_checkpoint_state(cnn_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    # Load the model
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover the number of training steps from the checkpoint filename
                    global_step = ckpt.model_checkpoint_path.split(
                        '/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy,
                                              feed_dict=validate_feed)
                    print(
                        "After %s training step(s), validation accuracy = %f" %
                        (global_step, accuracy_score))
                else:
                    print("No checkpoint file found")
                    return
            time.sleep(EVAL_INTERVAL_SECS)
Example no. 4
def evaluate():
    with tf.Graph().as_default() as g:
        # x = tf.placeholder(tf.float32, [
        #    6321,
        #    cnn_inference.IMAGE_SIZE1,
        #    cnn_inference.IMAGE_SIZE2,
        #    cnn_inference.NUM_CHANNELS],
        #                   name="x-input")
        x = tf.placeholder(tf.float32, [None, cnn_inference.INPUT_NODE],
                           name='x-input')
        y_ = tf.placeholder(tf.float32, [None, cnn_inference.OUTPUT_NODE],
                            name='y-input')
        # validate_feed = {x: valx, y_: valy}
        logits = cnn_inference.inference(x, 0, None)
        y = tf.nn.softmax(logits)
        # percentage = tf.constant(0.1)
        # correct_prediction = tf.equal(tf.where(y > 0.1), tf.where(valy > percentage))
        # accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        variable_averages = tf.train.ExponentialMovingAverage(
            cnn_train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_averages.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        while True:
            config = tf.ConfigProto()
            config.gpu_options.allow_growth = True

            with tf.Session(config=config) as sess:
                aaa = 0  # correct predictions with one active label
                bbb = 0  # correct predictions with two active labels
                ccc = 0  # correct predictions with three active labels
                pred_label = []
                val_label = []
                ckpt = tf.train.get_checkpoint_state(cnn_train.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    global_step = ckpt.model_checkpoint_path\
                        .split('/')[-1].split('-')[-1]
                    # accuracy_score = sess.run(accuracy,
                    #                           feed_dict=validate_feed)
                    # i = 0
                    # while True:
                    # start = (i * BATCH_SIZE) % len(valx)
                    # end = min(start + BATCH_SIZE, len(valx))
                    Y_ = sess.run(y, feed_dict={x: valx})  # predict

                    Y_pred = Y_
                    # print(len(Y_))
                    # print(len(Y_pred))

                    for i in range(len(valx)):
                        # Y_pred[i] = (Y_[i] - min(Y_[i])) / (max(Y_[i]) - min(Y_[i]))
                        pred_eff_idx = np.where(Y_pred[i] > 0.1)
                        y_pred_idx = (pred_eff_idx[0] + 1).tolist()
                        pred_label.append(y_pred_idx)

                        val_eff_idx = np.where(valy[i] > 0.8)
                        y_val_idx = (val_eff_idx[0] + 1).tolist()
                        # y_val_idx = np.add(y_val_idx + 1)
                        val_label.append(y_val_idx)
                    # print(Y_pred[0])
                    # print(Y_pred[321])
                    print(Y_pred[1921][np.where(Y_pred[1921] > 0.1)[0]])

                    # print(pred_label)
                    print(pred_label[1921])
                    # print(pred_label[6320])
                    for j in range(len(valx)):
                        if val_label[j] == pred_label[j]:
                            if len(val_label[j]) == 1:
                                aaa += 1
                            if len(val_label[j]) == 2:
                                bbb += 1
                            if len(val_label[j]) == 3:
                                ccc += 1
                        else:
                            print(j)

                    accuracy_1 = aaa / RUN_COUNT1
                    accuracy_2 = bbb / RUN_COUNT2
                    accuracy_3 = ccc / RUN_COUNT3
                    print("After %s training step(s), validation "
                          "accuracy = %g" % (global_step, accuracy_1))
                    print("After %s training step(s), validation "
                          "accuracy = %g" % (global_step, accuracy_2))
                    print("After %s training step(s), validation "
                          "accuracy = %g" % (global_step, accuracy_3))
                else:
                    print('No checkpoint file found')
            time.sleep(EVAL_INTERVAL_SECS)
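
The loop above turns each softmax row into the 1-based indices of the entries above the threshold. A minimal sketch with toy values (not data from the example):

import numpy as np

row = np.array([0.05, 0.30, 0.02, 0.15])
label_ids = (np.where(row > 0.1)[0] + 1).tolist()   # -> [2, 4]
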
def ROC_and_AUC():
    x = tf.placeholder(
        dtype=tf.float32,
        shape=[None, NANOPORE_HEIGHT, NANOPORE_WIDTH, NANOPORE_CHANNEL],
        name="x")
    nd = CnnNanoporeDataset(DATASET, batch_size=BATCH_SIZE)
    test_set = nd.get_test_samples
    test_features, test_labels = test_set.get_next()
    # Forward pass for evaluation
    test_pred = inference(input_tensor=x, train=False, regularizer=None)
    y_logit = tf.nn.softmax(test_pred, name="prediction")

    variable_average = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_average.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()
        sess.run(test_set.initializer)
        # get_checkpoint_state finds the latest model checkpoint in the directory via the "checkpoint" file
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            test_labs = []
            test_results = []
            while True:
                try:
                    feats, labs = sess.run([test_features, test_labels])
                    test_feed = {x: feats}
                    test_prediction = sess.run(y_logit, feed_dict=test_feed)
                    test_labs.extend([i for i in labs])
                    test_results.extend([i for i in test_prediction])
                except tf.errors.OutOfRangeError:
                    test_labs = np.array(test_labs)
                    test_results = np.array(test_results)

                    # Compute the ROC curve and AUC
                    fpr, tpr, _ = roc_curve(test_labs[:, 0], test_results[:, 0])
                    roc_auc = roc_auc_score(test_labs[:, 0], test_results[:, 0])
                    ax.plot(fpr,
                            tpr,
                            label="class: %s, auc=%.4f" %
                            ("cnn network", roc_auc))
                    del test_labs, test_results
                    break
        else:
            print("No checkpoint file found")
    ax.plot([0, 1], [0, 1], "k--")
    ax.set_xlabel("FPR")
    ax.set_ylabel("TPR")
    ax.set_title("ROC")
    ax.legend(loc="best")
    ax.set_xlim(0, 1.0)
    ax.set_ylim(0, 1.05)
    ax.grid()
    plt.savefig("/data/nanopore/dL_network/network/cnn_train_model/roc.png")
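
A minimal, self-contained sketch of the scikit-learn calls used in ROC_and_AUC, with toy labels and scores rather than the nanopore test set:

import numpy as np
from sklearn.metrics import roc_curve, roc_auc_score

labels = np.array([0, 0, 1, 1])
scores = np.array([0.1, 0.4, 0.35, 0.8])
fpr, tpr, thresholds = roc_curve(labels, scores)   # points on the ROC curve
auc = roc_auc_score(labels, scores)                # 0.75 for this toy data
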
Example no. 6
def train(mnist):
    # Define the input and output placeholders.
    # The input placeholder is a four-dimensional tensor.
    x = tf.placeholder(
        tf.float32,
        [
            BATCH_SIZE,  # first dimension: number of examples in a batch
            cnn_inference.IMAGE_SIZE,  # second and third dimensions: image size
            cnn_inference.IMAGE_SIZE,
            cnn_inference.NUM_CHANNELS
        ],  # fourth dimension: image depth; 3 for RGB images
        name='x-input')
    y_ = tf.placeholder(tf.float32, [None, cnn_inference.OUTPUT_NODE],
                        name='y-input')

    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)
    # Use the forward pass defined in cnn_inference.py directly
    y = cnn_inference.inference(x, True, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Define the loss, the learning rate, the moving-average op, and the training step
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE, global_step, mnist.train.num_examples / BATCH_SIZE,
        LEARNING_RATE_DECAY)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    # Create the TensorFlow Saver used for persistence
    saver = tf.train.Saver()
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        # Validation and testing are handled by a separate program
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            # As above, reshape the training batch into a four-dimensional tensor before passing it to sess.run
            reshaped_xs = np.reshape(
                xs, (BATCH_SIZE, cnn_inference.IMAGE_SIZE,
                     cnn_inference.IMAGE_SIZE, cnn_inference.NUM_CHANNELS))
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={
                                               x: reshaped_xs,
                                               y_: ys
                                           })
            # Save the model every 1000 steps.
            if i % 1000 == 0:
                # Report progress: only the loss on the current training batch is printed, which gives a rough picture of how training is going.
                # Accuracy on the validation set is produced by a separate program.
                print(
                    "After %d training step(s), loss on training batch is %f."
                    % (step, loss_value))
                # Save the current model. Passing global_step appends the training step count to each checkpoint filename, e.g. "model.ckpt-1000" is the model after 1000 steps.
                if not os.path.exists(MODEL_SAVE_PATH):
                    os.makedirs(MODEL_SAVE_PATH)
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
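
tf.train.exponential_decay, used above, computes learning_rate * decay_rate ** (global_step / decay_steps). A minimal sketch with illustrative values (not the constants from the example):

import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
lr = tf.train.exponential_decay(
    learning_rate=0.01,    # illustrative base rate
    global_step=global_step,
    decay_steps=1000,      # decay once per 1000 steps
    decay_rate=0.96)
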
Example no. 7
def execute_train(images, labels, model_blueprint, loadname="", savename=""):

    image_size = model_blueprint["image_size"][0]
    max_steps = model_blueprint["max_steps"]
    batch_size = model_blueprint["batch_size"]
    learning_rate = model_blueprint["learning_rate"]
    image_pixels = image_size * image_size * 3
    num_class = len(model_blueprint["labels"])

    NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 2

    # Convert to numpy arrays
    train_image = np.asarray(images)
    train_label = np.asarray(labels)

    with tf.Graph().as_default():

        train_image_tensor = tf.convert_to_tensor(train_image,
                                                  dtype=tf.float32)
        train_label_tensor = tf.convert_to_tensor(train_label,
                                                  dtype=tf.float32)

        train_label_tensor = tf.cast(train_label_tensor, tf.int64)
        train_one_hot_label = tf.one_hot(train_label_tensor,
                                         depth=num_class,
                                         on_value=1.0,
                                         off_value=0.0,
                                         axis=-1)

        img_queue = tf.train.input_producer(train_image_tensor)
        label_queue = tf.train.input_producer(train_one_hot_label)

        img = img_queue.dequeue()
        label = label_queue.dequeue()

        min_fraction_of_examples_in_queue = 0.4
        min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                                 min_fraction_of_examples_in_queue)

        images_tensor, labels_tensor = _generate_image_and_label_batch(
            img, label, min_queue_examples, batch_size=1, shuffle=False)

        # dropout keep probability
        keep_prob = tf.placeholder("float")

        # Reshape the input to image_size x image_size x 3
        x_image = tf.reshape(images_tensor, [-1, image_size, image_size, 3])

        # Build the model by calling inference()
        #logits = inference(x_image, keep_prob)
        logits = cnn_inference.inference(x_image, keep_prob, model_blueprint)
        # Compute the loss by calling loss()
        loss_value = loss(logits, labels_tensor)
        # Create the training op by calling training()
        train_op = training(loss_value, learning_rate)
        # Compute the accuracy
        acc = accuracy(logits, labels_tensor)

        saver = tf.train.Saver()

        # Create the session
        sess = tf.Session()

        if loadname == "":
            # Initialize the variables
            sess.run(tf.initialize_all_variables())  # deprecated alias of tf.global_variables_initializer()
        else:
            saver.restore(sess, "./model_tmp/downloads/model.ckpt")

        tf.train.start_queue_runners(sess)

        # Run the training loop
        for step in range(max_steps):
            for i in range(int(len(train_image) / batch_size)):
                # Run a training step on batch_size images
                batch = batch_size * i  # batch offset (unused: the input comes from the queue)

                #image_ = sess.run(x_image, feed_dict={keep_prob: 0.5})

                sess.run(train_op, feed_dict={keep_prob: 0.5})

            # Compute the accuracy at the end of each step
            train_accuracy = sess.run(acc, feed_dict={keep_prob: 1.0})
            print("step {}, training accuracy {}".format(step, train_accuracy))

        # Save the final model
        save_path = saver.save(sess, savename)
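
_generate_image_and_label_batch is project code that is not shown here. A minimal sketch of what such a helper typically does with the TF1 queue API (the batching parameters are assumptions):

import tensorflow as tf

def generate_batch(image, label, min_queue_examples, batch_size, shuffle):
    # Batch single examples coming off the input queues; shuffle_batch keeps a
    # buffer of at least min_queue_examples elements to mix the examples.
    if shuffle:
        return tf.train.shuffle_batch(
            [image, label], batch_size=batch_size, num_threads=4,
            capacity=min_queue_examples + 3 * batch_size,
            min_after_dequeue=min_queue_examples)
    return tf.train.batch(
        [image, label], batch_size=batch_size, num_threads=4,
        capacity=min_queue_examples + 3 * batch_size)
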
def train():
    x = tf.placeholder(tf.float32, [None, cnn_inference.IMAGE_SIZE2], name="x-input")
    y_ = tf.placeholder(tf.float32, shape=[None, cnn_inference.OUTPUT_NODE], name="y-input")
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    y = cnn_inference.inference(x, 0, None)
    # y = tf.nn.softmax(logits)
    # y_ = tf.nn.softmax(y_)
    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step
    )
    variable_averages_op = variable_averages.apply(
        tf.trainable_variables()
    )
    # cross_entropy_mean = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    loss = cross_entropy_mean

    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        DATA_SIZE / BATCH_SIZE,
        LEARNING_RATE_DECAY
    )

    train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)

    trainx = np.loadtxt(input_X)
    trainy = np.loadtxt(input_Y)
    for i in range(len(trainy)):
        trainy[i] = trainy[i] / len(np.where(trainy[i] > 0.9)[0])

    # x_a = tf.expand_dims(trainx, 1)
    # trainx = tf.expand_dims(x_a, -1)  # -1 refers to the last dimension

    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        tf.global_variables_initializer().run()
        # trainx = trainx.eval(session=sess)
        for i in range(TRAINING_STEPS):
            start = (i * BATCH_SIZE) % DATA_SIZE
            end = min(start + BATCH_SIZE, DATA_SIZE)

            # Train on batch_size samples at a time

            # _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: trainx[start: end], y_: trainy[start: end]})
            _, step = sess.run([train_op, global_step], feed_dict={x: trainx[start: end], y_: trainy[start: end]})

            # Train the network on the selected samples and update the parameters
            #sess.run(train_step, feed_dict={x: trainx[start:end], y_: trainy[start:end]})
            # Log every 1000 iterations
            if i % 1000 == 0:
                # Compute the cross entropy over all the data
                total_cross_entropy = sess.run(loss, feed_dict={x: trainx, y_: trainy})
                # total_mse = sess.run(loss, feed_dict={x: trainx, y_: trainy})
                # train_accuracy = sess.run(accuracy, feed_dict={x: X, y_: Y})
                # Print the total cross entropy
                #print("After %d training step(s),cross entropy on all data is %g" % (i, total_cross_entropy))
                print("After %d training step(s), loss on training "
                      "batch is %g" % (i, total_cross_entropy))
                saver.save(
                    sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
                print("save the model")
Example no. 9
    def train():
        x = tf.placeholder(
            dtype=tf.float32,
            shape=[None, NANOPORE_HEIGHT, NANOPORE_WIDTH, NANOPORE_CHANNEL],
            name="x")
        y = tf.placeholder(dtype=tf.float32, shape=[None, 2], name="y")

        # Get the training and validation datasets
        train_set = CnnNanoporeModel.get_train_set(BATCH_SIZE, NUM_EPOCH)
        train_features, train_labels = train_set.get_next()

        valid_set = CnnNanoporeModel.get_valid_set(BATCH_SIZE)
        valid_features, valid_labels = valid_set.get_next()

        # Track the training step and create the moving-average model
        global_step = tf.Variable(initial_value=0, trainable=False)
        variable_average = tf.train.ExponentialMovingAverage(
            decay=MOVING_AVERAGE_DECAY, num_updates=global_step)

        # Forward pass producing the predictions
        y_pred = cnn_inference.inference(input_tensor=x,
                                         train=True,
                                         regularizer=None)
        valid_pred = cnn_inference.inference(input_tensor=x,
                                             train=False,
                                             regularizer=None,
                                             reuse=True)
        y_logit = tf.nn.softmax(y_pred, name="prediction")

        # Cross-entropy loss
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels=tf.argmax(y, 1), logits=y_pred)
        cross_entropy_mean = tf.reduce_mean(cross_entropy)
        loss = cross_entropy_mean + tf.add_n(tf.get_collection("losses"))

        variable_average_op = variable_average.apply(
            var_list=tf.trainable_variables())

        lr = tf.train.exponential_decay(learning_rate=LEARNING_RATE_BASE,
                                        global_step=global_step,
                                        decay_steps=SAMPLE_NUM / BATCH_SIZE,
                                        decay_rate=LEARNING_RATE_DECAY)
        train_step = tf.train.MomentumOptimizer(
            learning_rate=lr, momentum=0.9).minimize(loss=loss,
                                                     global_step=global_step)

        with tf.control_dependencies([train_step, variable_average_op]):
            train_op = tf.no_op(name="train")

        saver = tf.train.Saver()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            tf.local_variables_initializer().run()
            sess.run(train_set.initializer)

            max_auc = 0.5
            i = 1
            while i < 25000:
                try:
                    features, labels = sess.run([train_features, train_labels])

                    train_feed = {x: features, y: labels}
                    _, loss_val, step = sess.run([train_op, loss, global_step],
                                                 feed_dict=train_feed)
                    # Run validation once every 100 training steps
                    if i % 100 == 0:
                        validset_labels = []
                        validset_results = []
                        sess.run(valid_set.initializer)
                        while True:
                            try:
                                feats, labs = sess.run(
                                    [valid_features, valid_labels])
                                valid_feed = {x: feats, y: labs}

                                valid_prediction = tf.nn.softmax(
                                    sess.run(valid_pred, feed_dict=valid_feed))
                                validset_labels.extend([i[0] for i in labs])
                                validset_results.extend(
                                    [i[0] for i in valid_prediction.eval()])
                            except tf.errors.OutOfRangeError:
                                score = roc_auc_score(validset_labels,
                                                      validset_results)
                                if score > max_auc:
                                    max_auc = score
                                    print(i)
                                    print(max_auc)
                                    saver.save(sess,
                                               os.path.join(
                                                   MODEL_SAVE_PATH,
                                                   MODEL_NAME),
                                               global_step=global_step)
                                del validset_labels, validset_results
                                break
                    i += 1
                except tf.errors.OutOfRangeError:
                    break
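
The reuse=True argument passed to cnn_inference.inference above relies on TF1 variable sharing. A minimal generic sketch (not the project's inference signature) of building a training tower and a validation tower over the same variables:

import tensorflow as tf

def tower(inputs, reuse=False):
    with tf.variable_scope("model", reuse=reuse):
        w = tf.get_variable("w", shape=[4, 2])
        return tf.matmul(inputs, w)

x_train = tf.placeholder(tf.float32, [None, 4])
x_valid = tf.placeholder(tf.float32, [None, 4])
train_logits = tower(x_train)               # creates model/w
valid_logits = tower(x_valid, reuse=True)   # reuses the same model/w
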
Example no. 10
def train(trainx, trainy):
    x = tf.compat.v1.placeholder(tf.float32, [None, cnn_inference.IMAGE_SIZE2],
                                 name="x-input")  # input data
    y_ = tf.compat.v1.placeholder(tf.float32,
                                  shape=[None, cnn_inference.OUTPUT_NODE],
                                  name="y-input")  #label value
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)

    #y = cnn_inference.inference(x, 0, None)
    '''With regularization'''
    y = cnn_inference.inference(x, 1, regularizer)
    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)  # moving-average update of the trainable parameters
    variable_averages_op = variable_averages.apply(
        tf.compat.v1.trainable_variables())
    # cross_entropy_mean = -tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)))

    cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(
        labels=y_, logits=y)  # compute the cross entropy
    cross_entropy_mean = tf.reduce_mean(cross_entropy)  # mean over the batch
    '''With regularization'''
    loss = cross_entropy_mean + tf.compat.v1.add_n(
        tf.compat.v1.get_collection('losses')
    )  # tf.add_n() sums the elements of the list; tf.get_collection() fetches every element of the 'losses' collection
    #loss = cross_entropy_mean

    learning_rate = tf.compat.v1.train.exponential_decay(
        LEARNING_RATE_BASE, global_step, DATA_SIZE / BATCH_SIZE,
        LEARNING_RATE_DECAY)  # exponentially decayed learning rate

    train_step = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(
        loss, global_step=global_step)  # minimize with the Adam optimizer

    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.compat.v1.train.Saver()

    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = False

    with tf.compat.v1.Session(config=config) as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            start = (i * BATCH_SIZE) % DATA_SIZE
            end = min(start + BATCH_SIZE, DATA_SIZE)

            # Train on batch_size samples at a time
            # _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: trainx[start: end], y_: trainy[start: end]})
            _, step = sess.run([train_op, global_step],
                               feed_dict={
                                   x: trainx[start:end],
                                   y_: trainy[start:end]
                               })

            # Train the network on the selected samples and update the parameters
            #sess.run(train_step, feed_dict={x: trainx[start:end], y_: trainy[start:end]})
            # Log every 1000 iterations

            # Compute the cross entropy over all the data

            # train_accuracy = sess.run(accuracy, feed_dict={x: X, y_: Y})
            # Print the total cross entropy
            #print("After %d training step(s),cross entropy on all data is %g" % (i, total_cross_entropy))

            if i % 1000 == 0:
                total_cross_entropy = sess.run(loss,
                                               feed_dict={
                                                   x: trainx,
                                                   y_: trainy
                                               })
                # training-set accuracy
                correct_prediction_train = tf.equal(tf.argmax(y, 1),
                                                    tf.argmax(y_, 1))
                acc_show = tf.reduce_mean(
                    tf.cast(correct_prediction_train, tf.float32))
                acc_train = sess.run(acc_show,
                                     feed_dict={
                                         x: trainx,
                                         y_: trainy
                                     })
                acc_print.append(acc_train)
                acc_printy.append(0.5)
                acc_printx.append(i)
                # total_mse = sess.run(loss, feed_dict={x: trainx, y_: trainy})
                print("After %d training step(s), loss on training "
                      "batch is %g,train accuracy = %g" %
                      (i, total_cross_entropy, acc_train))
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
                print("save the model")
        print(acc_print)
        plt.title("trend of accuracy")
        plt.plot(acc_printx, acc_print, color='skyblue')
        plt.plot(acc_printx, acc_printy, color='cyan')
        plt.show()
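
In the loop above, correct_prediction_train and acc_show are rebuilt every 1000 steps, which keeps adding nodes to the graph. A minimal sketch, with hypothetical placeholder shapes, of defining the accuracy op once before the session:

import tensorflow as tf

y = tf.placeholder(tf.float32, [None, 10], name="logits")   # hypothetical shape
y_ = tf.placeholder(tf.float32, [None, 10], name="labels")
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# inside the training loop only sess.run(accuracy, feed_dict=...) is needed
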
Example no. 11
def main(_):
    X = tf.placeholder(dtype=tf.float32,
                       shape=[None, 2 * 1024],
                       name='X_input')
    Y = tf.placeholder(dtype=tf.float32, shape=[None], name='Y_input')

    global_step = tf.Variable(0, trainable=False)

    ## The structure of cnn model
    features, logits_cnn = cnninf.inference(X, cnninf.BATCH_SIZE)
    loss_cnn = cnninf.loss(logits_cnn, Y)
    train_cnn = cnninf.train(loss_cnn, global_step,
                             NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN,
                             cnninf.BATCH_SIZE)
    correct_cnn = cnninf.evaluation(logits_cnn, Y)

    loss_val = tf.Variable(0.0)
    acc_val = tf.Variable(0.0)
    tf.summary.scalar('loss', loss_val)
    tf.summary.scalar('accuracy', acc_val)

    train_samples, train_labels = cnn_data.read_and_decode(
        tfrecords_train, cnninf.BATCH_SIZE, True)
    test_samples, test_labels = cnn_data.read_and_decode(
        tfrecords_test, cnninf.BATCH_SIZE, True)

    epoch_train = cnn_data.TRAIN_SIZE * cnn_data.NUM_CLASSES // cnninf.BATCH_SIZE
    epoch_test = cnn_data.TEST_SIZE * cnn_data.NUM_CLASSES // cnninf.BATCH_SIZE

    saver = tf.train.Saver()
    init = tf.global_variables_initializer()

    with tf.Session() as sess:

        sess.run(init)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            saver.restore(sess, 'logfile\\chcom.ckpt-29999')
            '''The training of the classification model, e.g. an SVM.'''
            start_time = time.time()

            for i in range(epoch_train):
                train_data, train_target = sess.run(
                    [train_samples, train_labels])
                fea = sess.run(features,
                               feed_dict={
                                   X: train_data,
                                   Y: train_target
                               })
                if i == 0:
                    feature_train = fea
                    label_train = train_target
                else:
                    feature_train = np.vstack((feature_train, fea))
                    label_train = np.hstack((label_train, train_target))
            classifier = SVC(decision_function_shape='ovo')
            rf = classifier.fit(feature_train, label_train)
            train_predicted = classifier.predict(feature_train)
            train_accuracy_value = tf.reduce_mean(
                tf.cast(tf.equal(train_predicted, label_train), tf.float32))
            print("Training Classification report for classifier %g:\n" %
                  train_accuracy_value.eval())
            #cm1 = confusion_matrix(label_train,train_predicted)
            joblib.dump(rf, 'rf.model')
            '''The testing of svm model.'''
            n_acc, n_batch = 0, 0
            for i in range(epoch_test):
                test_data, test_target = sess.run([test_samples, test_labels])
                feature_test = sess.run(features,
                                        feed_dict={
                                            X: test_data,
                                            Y: test_target
                                        })
                test_predicted = classifier.predict(feature_test)
                test_accuracy_value = tf.reduce_mean(
                    tf.cast(tf.equal(test_predicted, test_target), tf.float32))
                n_acc += test_accuracy_value.eval()
                n_batch += 1
                #print("Testing Classification report of batch:%d, accuracy %g:\n" % (i+1,test_accuracy_value.eval()))
            print("The Evaluation of CNN_SVM based model %g:\n" %
                  (n_acc / n_batch))
            #cm2 = confusion_matrix(label_test, test_predicted)
            #print(cm2)

            duration = time.time() - start_time
            print("The time of training the svm model is:  %f" % duration)


#            sio.savemat('logfile\\accuracy.mat', {'train_accuracy': train_accuracy_value, 'test_accuracy': test_accuracy_value,'duration': duration,'confusion_matrix1':cm1,'confusion_matrix2':cm2})
#                    #print("Loss of training CNN, step: %d, loss: %f, and accuracy:%f" % (i+1,err,ac))
        except tf.errors.OutOfRangeError:
            print('Done training --epoch limit reached')
        finally:
            coord.request_stop()
            coord.join(threads)
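
A minimal, self-contained sketch of the scikit-learn and joblib calls used above, with random toy features standing in for the CNN features (assumes the standalone joblib package):

import numpy as np
import joblib
from sklearn.svm import SVC

X = np.random.rand(20, 5)            # toy features
y = np.random.randint(0, 3, 20)      # toy labels for 3 classes
clf = SVC(decision_function_shape='ovo').fit(X, y)
train_acc = (clf.predict(X) == y).mean()
joblib.dump(clf, 'rf.model')         # persist the fitted model
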
Example no. 12
def evaluate(threshold, output_file):
    """
    :param threshold: classification threshold.
    :param output_file: path of the output file; results are appended to it.
    :return:
    """
    x = tf.placeholder(
        dtype=tf.float32,
        shape=[None, NANOPORE_HEIGHT, NANOPORE_WIDTH, NANOPORE_CHANNEL],
        name="x")

    nd = CnnNanoporeDataset(DATASET, batch_size=BATCH_SIZE)
    test_set = nd.get_test_samples
    test_features, test_labels = test_set.get_next()

    # Forward pass for evaluation
    test_pred = inference(input_tensor=x, train=False, regularizer=None)
    y_logit = tf.nn.softmax(test_pred, name="prediction")

    # Load the model via variable renaming; the forward pass then does not need to compute moving averages
    variable_average = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_average.variables_to_restore()
    saver = tf.train.Saver(variables_to_restore)

    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        tf.local_variables_initializer().run()
        sess.run(test_set.initializer)
        # get_checkpoint_state finds the latest model checkpoint in the directory via the "checkpoint" file
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            # global_steps = ckpt.model_checkpoint_path.split("/")[-1].split("-")[-1]
            test_labs = []
            test_results = []
            while True:
                try:
                    feats, labs = sess.run([test_features, test_labels])
                    test_feed = {x: feats}
                    test_prediction = sess.run(y_logit, feed_dict=test_feed)

                    test_labs.extend([i[0] for i in labs])
                    test_results.extend([i[0] for i in test_prediction])
                except tf.errors.OutOfRangeError:
                    for i in range(len(test_results)):
                        test_results[i] = 1.0 if test_results[i] > threshold else 0.0
                    spec = calc_specificity(test_labs, test_results)
                    # if spec < 0.9:
                    #     break

                    # acc = accuracy_score(test_labs, test_results)
                    score = roc_auc_score(test_labs, test_results)
                    del test_labs, test_results
                    result = "\t".join([
                        format(threshold, ".2f"),
                        format(spec, ".4f"),
                        format(score, ".4f")
                    ])
                    with open(output_file, "a") as f:
                        f.write(result + "\n")
                    # print(result)
                    break
        else:
            print("No checkpoint file found")
    # Reset the graph and free its variables; this must be outside the with tf.Session() block, otherwise an error is raised
    tf.reset_default_graph()
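
calc_specificity is project code that is not shown here. A minimal sketch of the usual definition, specificity = TN / (TN + FP), assuming binary 0/1 labels and predictions:

import numpy as np

def calc_specificity(labels, preds):
    labels, preds = np.asarray(labels), np.asarray(preds)
    tn = np.sum((labels == 0) & (preds == 0))   # true negatives
    fp = np.sum((labels == 0) & (preds == 1))   # false positives
    return tn / (tn + fp) if (tn + fp) > 0 else 0.0
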
def main(_):
    X = tf.placeholder(dtype=tf.float32,
                       shape=[None, 2 * 1024],
                       name='X_input')
    Y = tf.placeholder(dtype=tf.float32, shape=[None], name='Y_input')

    global_step = tf.Variable(0, trainable=False)

    ## The structure of cnn model
    features, logits_cnn = cnninf.inference(X, cnninf.BATCH_SIZE)
    loss_cnn = cnninf.loss(logits_cnn, Y)
    train_cnn = cnninf.train(loss_cnn, global_step,
                             NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN,
                             cnninf.BATCH_SIZE)
    correct_cnn = cnninf.evaluation(logits_cnn, Y)

    loss_val = tf.Variable(0.0)
    acc_val = tf.Variable(0.0)
    tf.summary.scalar('loss', loss_val)
    tf.summary.scalar('accuracy', acc_val)

    writer_train = tf.summary.FileWriter("logfile/plot_1")  #train
    writer_test = tf.summary.FileWriter("logfile/plot_2")  #test
    #
    train_samples, train_labels = cnn_data.read_and_decode(
        tfrecords_train, cnninf.BATCH_SIZE, True)
    train_samples_f, train_labels_f = cnn_data.read_and_decode(
        tfrecords_train, cnninf.BATCH_SIZE, False)
    valid_samples, valid_labels = cnn_data.read_and_decode(
        tfrecords_valid, cnninf.BATCH_SIZE, False)
    test_samples, test_labels = cnn_data.read_and_decode(
        tfrecords_test, cnninf.BATCH_SIZE, False)

    epoch_train = cnn_data.TRAIN_SIZE * cnn_data.NUM_CLASSES // cnninf.BATCH_SIZE
    epoch_valid = cnn_data.VALID_SIZE * cnn_data.NUM_CLASSES // cnninf.BATCH_SIZE
    epoch_test = cnn_data.TEST_SIZE * cnn_data.NUM_CLASSES // cnninf.BATCH_SIZE

    saver = tf.train.Saver(max_to_keep=50)
    summary = tf.summary.merge_all()
    init = tf.global_variables_initializer()

    train_accuracy = []
    test_accuracy = []
    train_loss = []
    test_loss = []

    with tf.Session() as sess:

        sess.run(init)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        try:
            start_time = time.time()
            for i in xrange(FLAGS.max_steps):
                if coord.should_stop():
                    break

                sam_batch, label_batch = sess.run(
                    [train_samples, train_labels])
                sess.run(train_cnn, feed_dict={X: sam_batch, Y: label_batch})

                #                if (i+1) % 500 == 0:
                #                    #########   Validation evaluation   ##########
                #                    n_cost, n_acc, n_batch = 0, 0, 0
                #                    for _ in range(epoch_valid):
                #                        sam_batch, label_batch = sess.run([valid_samples, valid_labels])
                #                        err,acc = sess.run([loss_cnn, correct_cnn],feed_dict={X:sam_batch,Y:label_batch})
                #                        n_cost += err
                #                        n_acc += acc
                #                        n_batch += 1
                #                    print("Loss of validation dataset, step: %d, loss: %f, and accuracy:%f" % (i+1,(n_cost/n_batch),(n_acc/n_batch/cnninf.BATCH_SIZE)))
                #
                if (i + 1) % 2000 == 0:
                    #########   Training evaluation   ##########
                    n_cost, n_acc, n_batch = 0, 0, 0
                    for _ in range(epoch_train):
                        sam_batch, label_batch = sess.run(
                            [train_samples_f, train_labels_f])
                        err, acc = sess.run([loss_cnn, correct_cnn],
                                            feed_dict={
                                                X: sam_batch,
                                                Y: label_batch
                                            })
                        n_cost += err
                        n_acc += acc
                        n_batch += 1
                    print("Loss of training CNN, loss: %f, and accuracy:%f" %
                          ((n_cost / n_batch),
                           (n_acc / n_batch / cnninf.BATCH_SIZE)))
                    train_accuracy.append(n_acc / n_batch / cnninf.BATCH_SIZE)
                    train_loss.append(n_cost / n_batch)
                    write_op = sess.run(
                        summary, {
                            loss_val: n_cost / n_batch,
                            acc_val: n_acc / n_batch / cnninf.BATCH_SIZE
                        })
                    writer_train.add_summary(write_op, i)
                    writer_train.flush()
                    #########   Testing evaluation   ##########
                    n_cost, n_acc, n_batch = 0, 0, 0
                    for j in range(epoch_test):
                        sam_batch, label_batch = sess.run(
                            [test_samples, test_labels])
                        logits, err, acc = sess.run(
                            [logits_cnn, loss_cnn, correct_cnn],
                            feed_dict={
                                X: sam_batch,
                                Y: label_batch
                            })
                        n_cost += err
                        n_acc += acc
                        n_batch += 1
                        #print("Loss of testing CNN of batch: %d, loss: %f, and accuracy:%f" % (j, err, acc/cnninf.BATCH_SIZE))
                    print("Loss of testing CNN, loss: %f, and accuracy:%f" %
                          ((n_cost / n_batch),
                           (n_acc / n_batch / cnninf.BATCH_SIZE)))
                    test_accuracy.append(n_acc / n_batch / cnninf.BATCH_SIZE)
                    test_loss.append(n_cost / n_batch)
                    write_op = sess.run(
                        summary, {
                            loss_val: n_cost / n_batch,
                            acc_val: n_acc / n_batch / cnninf.BATCH_SIZE
                        })
                    writer_test.add_summary(write_op, i)
                    writer_test.flush()

                    checkpoint_path = os.path.join(FLAGS.train_dir,
                                                   'chcom.ckpt')
                    saver.save(sess, checkpoint_path, global_step=i)
            duration = time.time() - start_time
            sio.savemat(
                'logfile/accuracy.mat', {
                    'train_accuracy': train_accuracy,
                    'test_accuracy': test_accuracy,
                    'train_loss': train_loss,
                    'test_loss': test_loss,
                    'duration': duration
                })

            #print("Loss of training CNN, step: %d, loss: %f, and accuracy:%f" % (i+1,err,ac))
        except tf.errors.OutOfRangeError:
            print('Done training --epoch limit reached')
        finally:
            coord.request_stop()
            coord.join(threads)
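
The loss_val / acc_val pattern above feeds Python scalars into small Variables so they can be logged through the usual summary ops. A minimal sketch with a hypothetical log directory:

import tensorflow as tf

loss_val = tf.Variable(0.0)
tf.summary.scalar('loss', loss_val)
summary_op = tf.summary.merge_all()
writer = tf.summary.FileWriter('logfile/plot_demo')   # hypothetical directory

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    summ = sess.run(summary_op, feed_dict={loss_val: 1.23})  # feed the Python value
    writer.add_summary(summ, global_step=0)
    writer.flush()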