Example 1
def evaluate():
    data_generator = dataGen(test_data_path + "/0", test_data_path + "/1", batch_size)
    
    x = tf.placeholder(tf.float32, [None, height, width, channel], name="inputs")
    y_true = tf.placeholder(tf.float32, [None], name="labels")  
    
    is_training = tf.placeholder(tf.bool, name="is_train")

    vgg = VGG()
    out = vgg.model(x, is_training)
    prob = tf.nn.softmax(out)

    y_pred = tf.argmax(prob, axis=1)
        
    # note: feeding hard 0/1 predictions gives a single-threshold AUC estimate;
    # feeding the positive-class probability prob[:, 1] would use the full scores
    auc_value, auc_op = tf.metrics.auc(y_true, y_pred)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, model_saved_path)
        inputs, labels = next(data_generator)
        # is_train is a module-level flag; it should be False at evaluation time
        predict = sess.run(y_pred, {x: inputs, is_training: is_train})
        #predict, _, val = sess.run([y_pred, auc_op, auc_value], {x: inputs, y_true: labels, is_training: is_train})

        # tf.metrics.auc accumulates its counts in local variables, which must
        # be initialized before the update op runs
        sess.run(tf.local_variables_initializer())
        sess.run(auc_op, {x: inputs, y_true: labels, is_training: is_train})
        val = sess.run(auc_value)
        
        print(predict)
        print(labels)
        print("AUC : ", val)
Example 2
def train():
    pf = os.listdir(positive_path)
    nf = os.listdir(negative_path)

    num_examples = int((len(pf) + len(nf)) * 0.7)

    # data_generator = dataGen(positive_path, negative_path, batch_size)
    val_data_generator = valGen(positive_path, negative_path, batch_size)

    x = tf.placeholder(tf.float32, [None, height, width, channels],
                       name="inputs")
    y_true = tf.placeholder(tf.float32, [None], name="labels")
    is_training = tf.placeholder(tf.bool, name="is_train")

    # forward
    vgg = VGG()
    logit = vgg.model(x, is_training)
    prob = tf.nn.sigmoid(logit, name="prob")

    # compute acc
    # logit has shape [None, 1], so argmax over axis 1 is always 0; threshold
    # the sigmoid output at 0.5 instead and squeeze it to match y_true's shape
    y_pred = tf.cast(tf.greater(tf.squeeze(prob, axis=1), 0.5), tf.float32)
    correct_prediction = tf.equal(y_pred, y_true)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    tf.summary.scalar('acc', accuracy)

    # loss function
    # cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_true, logits=logit)
    logit = tf.squeeze(logit, axis=1)
    # cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=logit)
    cross_entropy = focal_loss(y_true, logit)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    # print([v for v in tf.trainable_variables()])

    l2_loss = regular_rate * tf.add_n([
        tf.nn.l2_loss(tf.cast(v, tf.float32))
        for v in tf.trainable_variables()
    ])

    loss = cross_entropy_mean + l2_loss

    tf.summary.scalar('loss', loss)

    # global step
    global_step = tf.Variable(0, trainable=False)

    # exponential moving average
    variable_averages = tf.train.ExponentialMovingAverage(
        moving_average_decay, global_step)

    # update weight using moving average
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # learning rate exponential decay
    learning_rate = tf.train.exponential_decay(lr,
                                               global_step,
                                               num_examples // batch_size,
                                               0.96,
                                               staircase=True)

    tf.summary.scalar('learning_rate', learning_rate)

    # Passing global_step to minimize() will increment it at each step.
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    # merge train step and variables averages op
    merged = tf.summary.merge_all()
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # model save
    sav_iter = list(range(epochs * num_examples // batch_size))
    sav_acc = []
    saver = tf.train.Saver()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        summary_writer_t = tf.summary.FileWriter(log_saved_path + '/train',
                                                 sess.graph)
        summary_writer_v = tf.summary.FileWriter(log_saved_path + '/valid',
                                                 sess.graph)

        for epoch in range(epochs):
            iteration = 0
            data_generator = dataGen(positive_path, negative_path, batch_size)
            for inputs, labels in data_generator:

                # print(inputs.shape)
                # print(labels.shape)

                probability, loss_value, acc_value, summary, step, clr, _ = sess.run(
                    [
                        prob, loss, accuracy, merged, global_step,
                        learning_rate, train_op
                    ], {
                        x: inputs,
                        y_true: labels,
                        is_training: is_train
                    })

                print(
                    "[epoch : %2d / iter : %5d] loss: %.5f acc: %.5f lr: %.5f"
                    % (epoch, iteration, loss_value, acc_value, clr))
                sav_acc.append(acc_value)

                summary_writer_t.add_summary(summary, step)

                iteration += 1

                # break

            # validation
            val_inputs, val_labels = next(val_data_generator)
            summary_v = sess.run(merged, {
                x: val_inputs,
                y_true: val_labels,
                is_training: is_train
            })
            summary_writer_v.add_summary(summary_v, epoch)

            print("Saving model.....")
            saver.save(sess, model_saved_path + "/epoch_%d.ckpt" % epoch)

    summary_writer_t.close()
    summary_writer_v.close()
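focal_loss is also user-defined and not shown; the call above passes raw logits. A common sketch following Lin et al. (2017), with the same (labels, logits) argument order; the alpha and gamma defaults are assumptions:

def focal_loss(labels, logits, alpha=0.25, gamma=2.0):
    # Sketch of a binary focal loss on logits; the repo's version may differ.
    probs = tf.nn.sigmoid(logits)
    ce = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
    p_t = labels * probs + (1.0 - labels) * (1.0 - probs)      # probability of the true class
    alpha_t = labels * alpha + (1.0 - labels) * (1.0 - alpha)  # class-balance weight
    return alpha_t * tf.pow(1.0 - p_t, gamma) * ce             # down-weights easy examples

Note that the next variant calls focal_loss(y_true, prob) with probabilities rather than logits, so its implementation would have to compute the cross entropy from prob directly.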
Example 3
def train():
    pf = os.listdir(positive_path)
    nf = os.listdir(negative_path)

    num_examples = int((len(pf) + len(nf)) * 0.7)

    # data_generator = dataGen(positive_path, negative_path, batch_size)

    x = tf.placeholder(tf.float32, [None, height, width, channels],
                       name="inputs")
    y_true = tf.placeholder(tf.float32, [None], name="labels")
    is_training = tf.placeholder(tf.bool, name="is_train")

    # forward
    vgg = VGG()
    logit = vgg.model(x, is_training)
    prob = tf.nn.sigmoid(logit, name="prob")

    print("prob shape : ", prob.get_shape().as_list())
    print("y_true shape : ", y_true.get_shape().as_list())
    # compute acc
    # y_pred = tf.where(prob > 0.5, True, False)
    # prob has shape [None, 1]; squeeze before comparing, otherwise tf.equal
    # broadcasts [None, 1] against [None] into a [None, None] matrix
    y_pred = tf.cast(tf.greater(tf.squeeze(prob, axis=1), 0.5), tf.float32)
    correct_prediction = tf.equal(y_pred, y_true)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    tf.summary.scalar('acc', accuracy)

    # loss function
    # labels = tf.expand_dims(y_true, axis=1)
    # cross_entropy = tf.nn.weighted_cross_entropy_with_logits(labels, logit, 9)

    # logit = tf.squeeze(logit, axis=1)
    # cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=logit)
    prob = tf.squeeze(prob, axis=1)
    print("prob shape : ", prob.get_shape().as_list())
    cross_entropy = focal_loss(y_true, prob)  # this variant passes probabilities, not logits
    cross_entropy_mean = tf.reduce_mean(cross_entropy)

    # print([v for v in tf.trainable_variables()])

    l2_loss = regular_rate * tf.add_n([
        tf.nn.l2_loss(tf.cast(v, tf.float32))
        for v in tf.trainable_variables()
    ])

    loss = cross_entropy_mean + l2_loss

    tf.summary.scalar('loss', loss)

    # global step
    global_step = tf.Variable(0, trainable=False)

    # exponential moving average
    variable_averages = tf.train.ExponentialMovingAverage(
        moving_average_decay, global_step)

    # update weight using moving average
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # learning rate exponential decay
    learning_rate = tf.train.exponential_decay(lr,
                                               global_step,
                                               num_examples // batch_size,
                                               0.96,
                                               staircase=True)

    tf.summary.scalar('learning_rate', learning_rate)

    # Passing global_step to minimize() will increment it at each step.
    # train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    train_step = tf.train.MomentumOptimizer(
        learning_rate, momentum=0.9).minimize(loss, global_step=global_step)
    # train_step = tf.train.AdamOptimizer(lr).minimize(loss, global_step=global_step)

    merged = tf.summary.merge_all()
    # merge train step and variables averages op
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # model save

    saver = tf.train.Saver(max_to_keep=20)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        summary_writer_t = tf.summary.FileWriter(log_saved_path, sess.graph)

        for epoch in range(epochs):
            iteration = 0

            inpu, outp, idx = dataShuffle(positive_path, negative_path)
            data_generator = dataGen(inpu, outp, idx, batch_size)
            val_generator = valGen(inpu, outp, idx, batch_size)

            for inputs, labels in data_generator:

                probability, loss_value, acc_value, summary, step, clr, _ = sess.run(
                    [
                        prob, loss, accuracy, merged, global_step,
                        learning_rate, train_op
                    ], {
                        x: inputs,
                        y_true: labels,
                        is_training: is_train
                    })

                print(
                    "[epoch : %2d / iter : %5d] loss: %.5f acc: %.5f lr: %.5f"
                    % (epoch, iteration, loss_value, acc_value, clr))

                summary_writer_t.add_summary(summary, step)

                iteration += 1

            # validation
            average_acc = 0.0
            vcnt = 0
            for vi, vl in val_generator:
                # val_inputs, val_labels = val_data_generator.__next__()
                va = sess.run(accuracy, {
                    x: vi,
                    y_true: vl,
                    is_training: is_train
                })
                average_acc = average_acc + va
                vcnt += 1

            print("validation acc : ", average_acc / vcnt)

            print("Saving model.....")
            if (epoch + 1) % 10 == 0:
                saver.save(sess, model_saved_path + "/epoch_%d.ckpt" % epoch)

    summary_writer_t.close()
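dataShuffle, dataGen, and valGen are not shown either. In this variant they share one shuffled file list, split by the index idx into a training part and a validation part. A consistent sketch of the trio, reusing the 0.7 ratio implied by num_examples above; the image loading mirrors the earlier hypothetical dataGen:

def dataShuffle(positive_path, negative_path, train_ratio=0.7):
    # Hypothetical sketch: one shuffled (path, label) list plus the split index.
    paths = [os.path.join(positive_path, f) for f in os.listdir(positive_path)]
    labels = [1.0] * len(paths)
    paths += [os.path.join(negative_path, f) for f in os.listdir(negative_path)]
    labels += [0.0] * (len(paths) - len(labels))
    order = np.random.permutation(len(paths))
    inpu = [paths[i] for i in order]
    outp = [labels[i] for i in order]
    idx = int(len(paths) * train_ratio)  # entries before idx train, the rest validate
    return inpu, outp, idx

def _batches(paths, labels, batch_size):
    for start in range(0, len(paths), batch_size):
        inputs = np.stack([
            cv2.resize(cv2.imread(p), (width, height)).astype(np.float32) / 255.0
            for p in paths[start:start + batch_size]
        ])
        yield inputs, np.array(labels[start:start + batch_size], dtype=np.float32)

def dataGen(inpu, outp, idx, batch_size):
    return _batches(inpu[:idx], outp[:idx], batch_size)   # training split

def valGen(inpu, outp, idx, batch_size):
    return _batches(inpu[idx:], outp[idx:], batch_size)   # validation split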
Example 4
def train():
    pf = os.listdir(positive_path)
    nf = os.listdir(negative_path)

    num_examples = int((len(pf) + len(nf)) * 0.7)

    # data_generator = dataGen(positive_path, negative_path, batch_size)

    x = tf.placeholder(tf.float32, [None, height, width, channels], name="inputs")
    y_true = tf.placeholder(tf.float32, [None], name="labels")
    is_training = tf.placeholder(tf.bool, name="is_train")

    # forward
    vgg = VGG()
    logit = vgg.model(x, is_training)
    prob = tf.nn.sigmoid(logit, name="prob")

    # auc
    y_hat = tf.cast(tf.greater(prob, 0.5), tf.float32)
    y_hat = tf.squeeze(y_hat, axis=1)
    auc_value, auc_op = tf.metrics.auc(y_true, y_hat)
    # tf.summary.scalar('auc', auc_value) 

    # loss function
    logit = tf.squeeze(logit, axis=1)
    cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=logit)
    # cross_entropy = focal_loss(labels=y_true, logits=logit)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
   
    # print([v for v in tf.trainable_variables()])
 
    l2_loss = regular_rate * tf.add_n([tf.nn.l2_loss(tf.cast(v, tf.float32)) for v in tf.trainable_variables()])

    loss = cross_entropy_mean + l2_loss

    tf.summary.scalar('loss', loss)

    # global step
    global_step = tf.Variable(0, trainable=False)

    # exponential moving average
    variable_averages = tf.train.ExponentialMovingAverage(moving_average_decay, global_step)

    # update weight using moving average
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    # learning rate exponential decay
    learning_rate = tf.train.exponential_decay(lr,
                                               global_step,
                                               num_examples // batch_size,
                                               1,  # decay rate 1 keeps the learning rate constant
                                               staircase=True)
   
    tf.summary.scalar('learning_rate', learning_rate)
    
    # Passing global_step to minimize() will increment it at each step.
    # train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    train_step = tf.train.MomentumOptimizer(learning_rate, momentum=0.9).minimize(loss, global_step=global_step)

    merged = tf.summary.merge_all()
    # merge train step and variables averages op
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')

    # model save
    
    saver = tf.train.Saver(max_to_keep=20)

    with tf.Session() as sess:
        # resume training from a checkpoint
        # saver.restore(sess, '/home/sdc/xujiping_sde/saved_model_cbr' + '/epoch_s1_29.ckpt')
        # print("loading saved model done")
 
        sess.run(tf.global_variables_initializer())
        # separate subdirectories so train and validation summaries don't mix
        summary_writer_t = tf.summary.FileWriter(log_saved_path + '/train', sess.graph)
        summary_writer_v = tf.summary.FileWriter(log_saved_path + '/valid', sess.graph)

        for epoch in range(epochs):
            iteration = 0

            inpu, outp, idx = dataShuffle(positive_path, negative_path)
            data_generator = dataGen(inpu, outp, idx, batch_size)
            val_generator = valGen(inpu, outp, idx, batch_size)

            for inputs, labels in data_generator:

                # print(inputs.shape)
                # print(labels.shape)                

                probability, loss_value, summary, step, clr, _ = sess.run(
                    [prob, loss, merged, global_step, learning_rate, train_op],
                    {x: inputs, y_true: labels, is_training: is_train}
                )

                print("[epoch : %2d / iter : %5d] loss: %.5f, lr: %.5f" % (epoch, iteration, loss_value, clr)) 

                summary_writer_t.add_summary(summary, step)
                
                iteration += 1                

                # break

            # validation
            val_true = []
            val_pred = []
            for vi, vl in val_generator:
                # val_inputs, val_labels = val_data_generator.__next__()
                pred, summary = sess.run([y_hat, merged],
                                         {x: vi, y_true: vl, is_training: is_train})
                val_true += list(vl)
                val_pred += list(pred)

                # `step` still holds the last training step, so every validation
                # batch of this epoch is logged at the same global step
                summary_writer_v.add_summary(summary, step)
          
            print("Auc value : ", roc_auc_score(np.array(val_true), np.array(val_pred)))
            print("Saving model.....")
            if (epoch + 1) % 10 == 0 :
                saver.save(sess, model_saved_path + "/epoch_s1_%d.ckpt" % epoch)

    summary_writer_t.close()
    summary_writer_v.close()
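All three training variants maintain an exponential moving average of the weights via variables_averages_op, but evaluate() above restores the raw variables. To evaluate with the averaged weights instead, the Saver can be built from the EMA's restore map; a minimal sketch, assuming the same moving_average_decay and model_saved_path globals:

variable_averages = tf.train.ExponentialMovingAverage(moving_average_decay)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
with tf.Session() as sess:
    # loads each weight's shadow (averaged) value from the checkpoint
    saver.restore(sess, model_saved_path)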