Example #1
    def train(self, x, y, x_train_i, y_train_i, x_test_i, y_test_i, train_weight, class_weight, ir_overall, train_parameter):
        training_epochs = train_parameter['training_epochs']
        batch_size = train_parameter['batch_size']
        display_step = train_parameter['display_step']
        learning_rate = train_parameter['learning_rate']
        
        # cost_cnn builds the network and returns the logits plus an
        # auxiliary weight-loss term.
        pred, loss_w = self.cost_cnn(x)
        
        with tf.name_scope("loss"):
            # Softmax cross-entropy plus the auxiliary weight loss.
            cost1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y)) + loss_w
            optm1 = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=1e-8).minimize(cost1)
        
        corr = tf.equal(tf.argmax(y, 1), tf.argmax(pred, 1))
        accr = tf.reduce_mean(tf.cast(corr, tf.float64))
            
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        
        for i in range(training_epochs):
            avg_cost_train = 0.
            total_batch = int(x_train_i.shape[0]/batch_size)
            for batch_features, batch_labels, weight_train in batches(batch_size, x_train_i, y_train_i, train_weight):
                sess.run(optm1, feed_dict={x: batch_features, y: batch_labels, self.keep_prob: 0.5, self.is_training: True})
                avg_cost_train += sess.run(cost1, feed_dict={x: batch_features, y: batch_labels, self.keep_prob: 1.0, self.is_training: False})
            avg_cost = avg_cost_train / total_batch
        
            if i % display_step == 0:
                # Evaluate with dropout disabled (keep_prob = 1.0).
                train_accr = sess.run(accr, feed_dict={x: x_train_i, y: y_train_i, self.keep_prob: 1.0, self.is_training: False})
                test_accr = sess.run(accr, feed_dict={x: x_test_i, y: y_test_i, self.keep_prob: 1.0, self.is_training: False})
                print('\n step: %d cost: %.9f train accr: %.3f test accr: %.3f' % (i, avg_cost, train_accr, test_accr))
                
        return test_accr
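
All three examples iterate over mini-batches with a `batches` helper that is not shown on this page. A minimal sketch of what it could look like, assuming it simply yields aligned slices of the features, labels, and per-sample weights (the signature is inferred from the calls above; the body is an assumption):

import numpy as np

def batches(batch_size, features, labels, sample_weights):
    # Yield successive aligned (features, labels, weights) mini-batches;
    # a trailing partial batch is yielded as well.
    n_samples = features.shape[0]
    for start in range(0, n_samples, batch_size):
        end = start + batch_size
        yield features[start:end], labels[start:end], sample_weights[start:end]
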
Example #2
    def train(self, x, y, x_train_i, y_train_i, x_test_i, y_test_i, train_weight, class_weight, ir_overall, train_parameter):
        training_epochs = train_parameter['training_epochs']
        batch_size = train_parameter['batch_size']
        display_step = train_parameter['display_step']
        learning_rate = train_parameter['learning_rate']
        
        # cost_cnn returns the logits, a cost-weight tensor, an auxiliary
        # weight loss, and an intermediate layer output (layer_fn).
        pred, cost_weight, loss_w, layer_fn = self.cost_cnn(x)
        pred_index = tf.argmax(pred, 1)
        y_index = tf.argmax(y, 1)
        confusion = tf.contrib.metrics.confusion_matrix(y_index, pred_index)
        y_index_accr = tf.to_int32(tf.reshape(y_index, [-1, 1]))
        # Per-class accuracy from the confusion matrix; the G-mean is its
        # geometric mean, i.e. the 8th root of the product over the 8 classes.
        accr_confusion = self.accuracy(confusion, y_index_accr, num_classes=8)
        g_mean = tf.pow(self.accr_confusion_multiply(accr_confusion, num_classes=8), 1 / 8)
        
        with tf.name_scope("loss1"):
            # Class-weighted cross-entropy with numerical clipping, plus the
            # auxiliary weight loss. (A focal-loss variant was prototyped here
            # but is disabled.)
            cost1 = tf.reduce_mean(-tf.reduce_sum(class_weight * y * tf.log(tf.clip_by_value(pred, 1e-10, 1.0)), axis=1)) + loss_w
            # Only variables registered in the 'loss1' collection are updated.
            output_vars1 = tf.get_collection('loss1')
            optm1 = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=1e-8).minimize(cost1, var_list=output_vars1)
        
        corr = tf.equal(tf.argmax(y, 1), tf.argmax(pred, 1))
        accr = tf.reduce_mean(tf.cast(corr, tf.float64))
        
        # A second, cost-sensitive loss ('loss2') was prototyped here but is
        # disabled: h = ir_overall * exp(-g_mean) * exp(-f_score), with
        # cost2 = mean(-sum(0.5 * (h - cost_weight)^2)) minimized by plain
        # gradient descent over the 'loss2' variable collection.
            
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        
        # fn_output stays None unless the early-stop band below is reached.
        fn_output = None
        # Per-epoch metric traces.
        f_score_out = np.zeros(training_epochs)
        g_mean_out = np.zeros(training_epochs)
        f_score_out_test = np.zeros(training_epochs)
        g_mean_out_test = np.zeros(training_epochs)
        for i in range(training_epochs):
            avg_cost_train = 0.
            total_batch = int(x_train_i.shape[0]/batch_size)
            for batch_features, batch_labels, weight_train in batches(batch_size, x_train_i, y_train_i, train_weight):
                sess.run(optm1, feed_dict={x: batch_features, y: batch_labels, class_weight: weight_train, self.keep_prob: 0.5, self.is_training: True})
                avg_cost_train += sess.run(cost1, feed_dict={x: batch_features, y: batch_labels, class_weight: weight_train, self.keep_prob: 1.0, self.is_training: False})
            avg_cost = avg_cost_train / total_batch
            
            # Macro-averaged F1 on the training set, computed outside the
            # graph with scikit-learn.
            pred_index_f1 = sess.run(pred_index, feed_dict={x: x_train_i, self.keep_prob: 1.0, self.is_training: False})
            y_index_f1 = sess.run(y_index, feed_dict={y: y_train_i, self.keep_prob: 1.0, self.is_training: False})
            f_score = metrics.f1_score(y_index_f1, pred_index_f1, average='macro')
            f_score_out[i] = f_score
            
            # Macro-averaged F1 on the test set.
            pred_index_f1_test = sess.run(pred_index, feed_dict={x: x_test_i, self.keep_prob: 1.0, self.is_training: False})
            y_index_f1_test = sess.run(y_index, feed_dict={y: y_test_i, self.keep_prob: 1.0, self.is_training: False})
            f_score_test = metrics.f1_score(y_index_f1_test, pred_index_f1_test, average='macro')
            f_score_out_test[i] = f_score_test
            
            if i % display_step == 0:
                # Evaluate with dropout disabled (keep_prob = 1.0).
                train_accr = sess.run(accr, feed_dict={x: x_train_i, y: y_train_i, self.keep_prob: 1.0, self.is_training: False})
                confusion_matrix = sess.run(confusion, feed_dict={x: x_test_i, y: y_test_i, self.keep_prob: 1.0, self.is_training: False})
                accr_confusion1 = sess.run(accr_confusion, feed_dict={x: x_train_i, y: y_train_i, self.keep_prob: 1.0, self.is_training: False})
                g_mean1 = sess.run(g_mean, feed_dict={x: x_train_i, y: y_train_i, self.keep_prob: 1.0, self.is_training: False})
                g_mean1_test = sess.run(g_mean, feed_dict={x: x_test_i, y: y_test_i, self.keep_prob: 1.0, self.is_training: False})
                test_accr = sess.run(accr, feed_dict={x: x_test_i, y: y_test_i, self.keep_prob: 1.0, self.is_training: False})
                print('\n step: %d cost: %.9f train accr: %.3f test accr: %.3f' % (i, avg_cost, train_accr, test_accr))
                g_mean_out[i] = g_mean1
                g_mean_out_test[i] = g_mean1_test
                
            # Early stop once test accuracy lands in the target band; capture
            # the intermediate layer output for later inspection.
            if 0.83 < test_accr < 0.85:
                fn_output = sess.run(layer_fn, feed_dict={x: x_test_i, y: y_test_i, self.keep_prob: 1.0, self.is_training: False})
                break
            
        return confusion_matrix, accr_confusion1, f_score_out, g_mean_out, test_accr, train_accr, f_score_out_test, g_mean_out_test, fn_output
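
Examples #2 and #3 also call `self.accuracy` and `self.accr_confusion_multiply`, which are defined elsewhere in the class. Judging from how they are used (per-class accuracy derived from the confusion matrix, then the 8th root of their product as the G-mean), a plausible sketch is the following; the method names, signatures, and `num_classes` default come from the calls above, while the bodies are assumptions:

import tensorflow as tf

def accuracy(self, confusion, y_index, num_classes=8):
    # Per-class recall: the confusion-matrix diagonal divided by the
    # row sums (the number of true samples per class). y_index is
    # accepted to match the call site but is not needed here.
    diag = tf.cast(tf.diag_part(confusion), tf.float64)
    row_sums = tf.cast(tf.reduce_sum(confusion, axis=1), tf.float64)
    return diag / tf.maximum(row_sums, 1.0)  # guard against empty classes

def accr_confusion_multiply(self, accr_confusion, num_classes=8):
    # Product of the per-class recalls; the caller takes its
    # num_classes-th root via tf.pow(..., 1/8) to obtain the G-mean.
    return tf.reduce_prod(accr_confusion)
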
Example #3
    def train(self, x, y, x_train_i, y_train_i, x_test_i, y_test_i, train_weight, class_weight, ir_overall, train_parameter):
        training_epochs = train_parameter['training_epochs']
        batch_size = train_parameter['batch_size']
        display_step = train_parameter['display_step']
        learning_rate = train_parameter['learning_rate']
        
        # cost_cnn returns the logits and an intermediate layer output.
        pred, layer_fn = self.cost_cnn(x)
        pred_index = tf.argmax(pred, 1)
        y_index = tf.argmax(y, 1)
        confusion = tf.contrib.metrics.confusion_matrix(y_index, pred_index)
        y_index_accr = tf.to_int32(tf.reshape(y_index, [-1, 1]))
        # Per-class accuracy from the confusion matrix; the G-mean is its
        # geometric mean, i.e. the 8th root of the product over the 8 classes.
        accr_confusion = self.accuracy(confusion, y_index_accr, num_classes=8)
        g_mean = tf.pow(self.accr_confusion_multiply(accr_confusion, num_classes=8), 1 / 8)
        
        with tf.name_scope("loss"):
            cost1 = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
#            tf.add_to_collection('loss1', cost1)
#            cost2 = tf.add_n(tf.get_collection('loss1'))
            #cost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=pred, labels=y))
            optm1 = tf.train.AdamOptimizer(learning_rate = learning_rate, epsilon = 1e-8).minimize(cost1)
        
        corr = tf.equal(tf.argmax(y, 1), tf.argmax(pred, 1))
        accr = tf.reduce_mean(tf.cast(corr, tf.float64))
            
        sess = tf.Session()
        sess.run(tf.global_variables_initializer())
        
        # fn_output stays None unless the early-stop band below is reached.
        fn_output = None
        # Per-epoch metric traces.
        f_score_out = np.zeros(training_epochs)
        g_mean_out = np.zeros(training_epochs)
        f_score_out_test = np.zeros(training_epochs)
        g_mean_out_test = np.zeros(training_epochs)
        for i in range(training_epochs):
            avg_cost_train = 0.
            total_batch = int(x_train_i.shape[0]/batch_size)
            for batch_features, batch_labels, weight_train in batches(batch_size, x_train_i, y_train_i, train_weight):
                sess.run(optm1, feed_dict={x: batch_features, y: batch_labels, self.keep_prob: 0.5, self.is_training: True})
                avg_cost_train += sess.run(cost1, feed_dict={x: batch_features, y: batch_labels, self.keep_prob: 1.0, self.is_training: False})
            avg_cost = avg_cost_train / total_batch
            
            # Macro-averaged F1 on the test set, computed outside the graph
            # with scikit-learn.
            pred_index_f1 = sess.run(pred_index, feed_dict={x: x_test_i, self.keep_prob: 1.0, self.is_training: False})
            y_index_f1 = sess.run(y_index, feed_dict={y: y_test_i, self.keep_prob: 1.0, self.is_training: False})
            f_score_test = metrics.f1_score(y_index_f1, pred_index_f1, average='macro')
            f_score_out_test[i] = f_score_test
            
            # Macro-averaged F1 on the training set.
            pred_index_f1 = sess.run(pred_index, feed_dict={x: x_train_i, self.keep_prob: 1.0, self.is_training: False})
            y_index_f1 = sess.run(y_index, feed_dict={y: y_train_i, self.keep_prob: 1.0, self.is_training: False})
            f_score = metrics.f1_score(y_index_f1, pred_index_f1, average='macro')
            f_score_out[i] = f_score
            
            if i % display_step == 0:
                # Evaluate with dropout disabled (keep_prob = 1.0).
                train_accr = sess.run(accr, feed_dict={x: x_train_i, y: y_train_i, self.keep_prob: 1.0, self.is_training: False})
                confusion_matrix = sess.run(confusion, feed_dict={x: x_test_i, y: y_test_i, self.keep_prob: 1.0, self.is_training: False})
                accr_confusion1 = sess.run(accr_confusion, feed_dict={x: x_train_i, y: y_train_i, self.keep_prob: 1.0, self.is_training: False})
                test_accr = sess.run(accr, feed_dict={x: x_test_i, y: y_test_i, self.keep_prob: 1.0, self.is_training: False})
                g_mean1 = sess.run(g_mean, feed_dict={x: x_train_i, y: y_train_i, self.keep_prob: 1.0, self.is_training: False})
                g_mean1_test = sess.run(g_mean, feed_dict={x: x_test_i, y: y_test_i, self.keep_prob: 1.0, self.is_training: False})
                print('\n step: %d cost: %.9f train accr: %.3f test accr: %.3f' % (i, avg_cost, train_accr, test_accr))
                g_mean_out[i] = g_mean1
                g_mean_out_test[i] = g_mean1_test
                
            # Early stop once test accuracy lands in the target band; capture
            # the intermediate layer output for later inspection.
            if 0.71 < test_accr < 0.73:
                fn_output = sess.run(layer_fn, feed_dict={x: x_test_i, y: y_test_i, self.keep_prob: 1.0, self.is_training: False})
                break

        return confusion_matrix, accr_confusion1, f_score_out, g_mean_out, test_accr, train_accr, f_score_out_test, g_mean_out_test, fn_output
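
For completeness, a call into any of these train methods could look like the sketch below. The model class name (CostSensitiveCNN), the placeholder shapes, and the data variables are all hypothetical; only the train_parameter keys and the argument order come from the code above:

import numpy as np
import tensorflow as tf

train_parameter = {
    'training_epochs': 100,
    'batch_size': 128,
    'display_step': 10,
    'learning_rate': 1e-4,
}

# Placeholders for the features and the 8-class one-hot labels
# (shapes are assumptions).
x = tf.placeholder(tf.float32, [None, 1024])
y = tf.placeholder(tf.float64, [None, 8])

# Hypothetical model class exposing cost_cnn, keep_prob, and is_training;
# x_train, y_train, x_test, y_test, train_weight, class_weight, and
# ir_overall are the prepared NumPy arrays and imbalance statistics.
model = CostSensitiveCNN()
results = model.train(x, y, x_train, y_train, x_test, y_test,
                      train_weight, class_weight, ir_overall,
                      train_parameter)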