Example #1
def testOReval_one_epoch(sess,
                         ops,
                         ops_data,
                         testOReval_writer,
                         file_lenth,
                         batch_size=BATCH_SIZE):

    is_training = False
    loss_sum = 0.
    total_seen = 0
    total_ave_error = 0.
    cnt_loop = 0
    for i in range(file_lenth):  #tfrecords
        with tf.variable_scope('data_packer'):
            list_rgb, list_srgb = sess.run([
                ops_data['list_rgb_%s' % (i)], ops_data['list_srgb_%s' % (i)]
            ])
            current_rgb, current_srgb = tl.data_loader(list_rgb, list_srgb)

        idx = 0
        num = int(current_rgb.shape[0])

        with tf.variable_scope('testORvali_layer') as sc:
            while idx < num:
                log_string('----' + 'testORvali-batch-count=' + str(cnt_loop) +
                           '-----')
                rgb = current_rgb[idx:idx + batch_size]
                srgb = current_srgb[idx:idx + batch_size]

                idx += batch_size

                #feeding
                feed_dict = {
                    ops['rgb_pl']: rgb,
                    ops['srgb_pl']: srgb,
                    ops['is_training_pl']: is_training,
                }

                #run the session and get all the results we need
                summary, step, accuracy, loss_val = sess.run(
                    [ops['merged'], ops['step'], ops['accuracy'], ops['loss']],
                    feed_dict=feed_dict)

                # write this batch's summary, then accumulate statistics
                testOReval_writer.add_summary(summary, step)

                print('accuracy(test):', accuracy)
                print('loss(test):', loss_val)

                total_ave_error += accuracy
                total_seen += BATCH_SIZE
                loss_sum += (loss_val)
                cnt_loop += 1

    log_string('mean Lab-accuracy/pair:%f' %
               (total_ave_error / float(cnt_loop)))
    log_string('mean loss/pair: %f' % (loss_sum / float(cnt_loop)))
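
All four helpers in this section follow the same TF1 pattern: an outer Python loop pulls one TFRecord file's worth of data into memory, then an inner loop feeds fixed-size batches through the graph via `feed_dict`. The sketch below shows how such an epoch helper might be driven; `build_graph`, `LOG_DIR`, and `MAX_EPOCH` are hypothetical stand-ins, not part of the original examples.

# Hypothetical driver (a minimal sketch, not the original project's code):
# builds the graph, opens a TF1 session, and runs the evaluation helper
# above once per epoch, writing summaries to LOG_DIR.
import tensorflow as tf

LOG_DIR = 'log'      # assumed output directory
MAX_EPOCH = 10       # assumed number of evaluation passes

def evaluate(build_graph, file_lenth):
    # build_graph() is assumed to return the `ops` and `ops_data` dicts
    # consumed by testOReval_one_epoch.
    with tf.Graph().as_default():
        ops, ops_data = build_graph()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            writer = tf.summary.FileWriter(LOG_DIR, sess.graph)
            for epoch in range(MAX_EPOCH):
                testOReval_one_epoch(sess, ops, ops_data, writer, file_lenth)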
Example #2
def train_one_epoch(sess,ops,ops_data,train_writer,file_lenth,batch_size=BATCH_SIZE):

    is_training = True    
    loss_sum = 0.      
    total_seen = 0  
    total_correct=0.  
    cnt_loop=0 

    for i in range(file_lenth):  #tfrecords   
        with tf.variable_scope('data_packer'):
            list_cloud,list_label=sess.run([ops_data['list_cloud_%s'%(i)],ops_data['list_label_%s'%(i)]])
            current_cloud,current_label=tl.data_loader(list_cloud,list_label)
       
        idx=0
        num=int(current_cloud.shape[0])
        
        with tf.variable_scope('train_layer') as sc:    
            while idx<num:              
                log_string('----' + 'train-batch-count='+str(cnt_loop) + '-----')                   
                cloud=current_cloud[idx:idx+batch_size]
                label=current_label[idx:idx+batch_size]
            
                idx+=batch_size

                #feeding 
                feed_dict = {ops['pointclouds_pl']: cloud,     #(B,N,c)           
                                ops['labels_pl']: label,      #(B) or (B,N)
                                ops['is_training_pl']: is_training,}

                #run the session and get all the results we need
                summary, step, _, loss_val, pred_val = sess.run([ops['merged'], ops['step'], 
                    ops['train_op'], ops['loss'], ops['pred']], feed_dict=feed_dict)

                #then cook
                # summary writing is disabled in this example:
                # train_writer.add_summary(summary, step)

                pred_val = np.argmax(pred_val, 1)     # (B, class_num) -> (B,) hard (supervised) class predictions
                correct = np.sum(pred_val == label)   # count correctly predicted samples in this batch
                total_correct += correct
                total_seen += BATCH_SIZE
                loss_sum += loss_val
                cnt_loop += 1
        
       
    log_string('mean loss: %f' % (loss_sum / float(cnt_loop)))
    log_string('accuracy: %f' % (total_correct / float(total_seen)))
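
The `ops` dictionary consumed by these helpers bundles the graph endpoints: the input placeholders, the loss, the training op, the global step, and the merged summaries. Below is a minimal sketch of how it might be assembled, assuming a TF1 graph and a hypothetical `my_model` network function; the dictionary keys are inferred from the feed_dict above, not taken from the original project.

# Hypothetical sketch of building the `ops` dict used by train_one_epoch.
import tensorflow as tf

def build_ops(num_point=1024, num_channel=3, learning_rate=1e-3):
    pointclouds_pl = tf.placeholder(tf.float32, (None, num_point, num_channel))
    labels_pl = tf.placeholder(tf.int32, (None,))
    is_training_pl = tf.placeholder(tf.bool, ())

    pred = my_model(pointclouds_pl, is_training_pl)   # assumed model function returning (B, class_num) logits
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_pl, logits=pred))
    tf.summary.scalar('loss', loss)

    step = tf.Variable(0, trainable=False, name='global_step')
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=step)

    return {
        'pointclouds_pl': pointclouds_pl,
        'labels_pl': labels_pl,
        'is_training_pl': is_training_pl,
        'pred': pred,
        'loss': loss,
        'train_op': train_op,
        'step': step,
        'merged': tf.summary.merge_all(),
    }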
Example #3
def train_one_epoch(sess,ops,ops_data,train_writer,file_lenth,batch_size=BATCH_SIZE):

    is_training = True    
    loss_sum = 0.      
    total_seen = 0  
    total_correct=0.  
    cnt_loop=0 

    for i in range(file_lenth):  #tfrecords   
        with tf.variable_scope('data_packer'):
            list_img,list_label=sess.run([ops_data['list_img_%s'%(i)],ops_data['list_label_%s'%(i)]])
            current_img,current_label=tl.data_loader(list_img,list_label)
       
        idx=0
        num=int(current_img.shape[0])
        
        with tf.variable_scope('train_layer') as sc:    
            while idx+batch_size<num:              
                log_string('----' + 'train-batch-count='+str(cnt_loop) + '-----')                   
                imgs=current_img[idx:idx+batch_size]
                labels=current_label[idx:idx+batch_size]
            
                idx+=batch_size
                #feeding 
                feed_dict = {ops['img_pl']: imgs,        
                                ops['label_pl']: labels,     
                                ops['is_training_pl']: is_training}
                
                #run the session and get all the results we need
                summary, step,_,pred_val,loss_val= sess.run([ops['merged'], ops['step'], 
                    ops['train_op'],ops['pred'],ops['loss']], feed_dict=feed_dict)
               
                # write this batch's summary, then accumulate statistics
                train_writer.add_summary(summary, step)

                print('loss(train):', loss_val)
                pred_val = np.argmax(pred_val[:, 4:], 1)   # (B, class_num) -> (B,) hard (supervised) class predictions
                correct = np.sum(pred_val == labels[:, 4])
                total_correct+=correct
                total_seen += BATCH_SIZE
                loss_sum += loss_val
                cnt_loop+=1

    log_string('accuracy: %f' % (total_correct / float(total_seen)))                        
    log_string('mean loss: %f' % (loss_sum / float(cnt_loop)))
Example #4
def testOReval_one_epoch(sess,
                         ops,
                         ops_data,
                         testOReval_writer,
                         file_lenth,
                         batch_size=BATCH_SIZE):

    is_training = False
    loss_sum = 0.
    total_seen = 0
    total_correct = 0.
    total_seen_class = [0 for _ in range(CLASS_NUM)]
    total_correct_class = [0 for _ in range(CLASS_NUM)]

    cnt_loop = 0
    for i in range(file_lenth):  #tfrecords

        with tf.variable_scope('data_packer'):
            list_img, list_label = sess.run([
                ops_data['list_img_%s' % (i)], ops_data['list_label_%s' % (i)]
            ])
            current_img, current_label = tl.data_loader(list_img, list_label)
            #print(current_label)
        idx = 0
        num = int(current_img.shape[0])

        with tf.variable_scope('test/vali_layer') as sc:
            while idx + batch_size < num:
                log_string('----' + 'test/vali-batch-count=' + str(cnt_loop) +
                           '-----')
                imgs = current_img[idx:idx + batch_size]
                labels = current_label[idx:idx + batch_size]
                labels = np.squeeze(labels)
                idx += batch_size

                #feeding
                feed_dict = {
                    ops['img_pl']: imgs,
                    ops['label_pl']: labels,
                    ops['is_training_pl']: is_training,
                }

                #run the session and get all the results we need
                summary, step, pred_lb, loss_val = sess.run(
                    [
                        ops['merged'], ops['step'], ops['pred_label'],
                        ops['loss']
                    ],
                    feed_dict=feed_dict)

                testOReval_writer.add_summary(summary, step)

                print('loss(test):', loss_val)

                # then accumulate statistics
                #pred_val = np.argmax(pred_val, 1)
                correct = np.sum(
                    pred_lb == labels)  # labels have already been converted to float32 at this point
                total_correct += correct
                total_seen += BATCH_SIZE
                loss_sum += (loss_val * BATCH_SIZE)
                # per-class bookkeeping: count how often each class occurs
                # and how many of its samples were predicted correctly
                templ = labels  # (B,)
                for j in range(batch_size):  # 'j' avoids shadowing the outer file index 'i'
                    l = int(templ[j])
                    total_correct_class[l] += (pred_lb[j] == l)
                    total_seen_class[l] += 1

                cnt_loop += 1
    acc = np.mean(
        np.array(total_correct_class) /
        np.array(total_seen_class, dtype=np.float64))

    log_string('test mean loss: %f' % (loss_sum / float(total_seen)))
    log_string('test accuracy: %f' % (total_correct / float(total_seen)))
    log_string('test mean class acc: %f' % (acc))

    return acc
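
The final metric in Example #4 is a class-balanced accuracy: each class's accuracy is computed separately and then averaged, so rare classes weigh as much as frequent ones. A standalone NumPy illustration of the same computation (not project code):

# Per-class mean accuracy, as computed at the end of Example #4.
import numpy as np

def mean_class_accuracy(pred_labels, true_labels, class_num):
    correct = np.zeros(class_num)
    seen = np.zeros(class_num)
    for p, t in zip(pred_labels, true_labels):
        t = int(t)
        seen[t] += 1
        correct[t] += (int(p) == t)
    return np.mean(correct / np.maximum(seen, 1))  # guard against unseen classes

print(mean_class_accuracy([0, 1, 1, 2], [0, 1, 2, 2], class_num=3))  # -> 0.8333...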