Example #1
print('np.prod(new_gt_IN.shape[:2]):', np.prod(new_gt_IN.shape[:2]))
# 21025

data = data_IN.reshape(np.prod(data_IN.shape[:2]), np.prod(data_IN.shape[2:]))
gt = new_gt_IN.reshape(np.prod(new_gt_IN.shape[:2]), )

data = preprocessing.scale(data)
print('data.shape:', data.shape)
# (21025, 200)

data_ = data.reshape(data_IN.shape[0], data_IN.shape[1], data_IN.shape[2])
# (145, 145, 200)
whole_data = data_

padded_data = zeroPadding.zeroPadding_3D(whole_data, PATCH_LENGTH)
print('padded_data.shape:', padded_data.shape)

ITER = 1
CATEGORY = 9  # 16

train_data = np.zeros((TRAIN_SIZE, 2 * PATCH_LENGTH + 1, 2 * PATCH_LENGTH + 1,
                       INPUT_DIMENSION_CONV))
print('train_data.shape:', train_data.shape)
# (2055, 11, 11, 200)
test_data = np.zeros((TEST_SIZE, 2 * PATCH_LENGTH + 1, 2 * PATCH_LENGTH + 1,
                      INPUT_DIMENSION_CONV))
print('test_data.shape:', test_data.shape)
# (8194, 11, 11, 200)

KAPPA_3D_ResNeXt = []
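
The zeroPadding.zeroPadding_3D helper used above is external to the snippet. A minimal sketch, assuming it zero-pads only the two spatial axes of the (height, width, bands) cube by PATCH_LENGTH pixels on each side:

import numpy as np

def zeroPadding_3D(old_matrix, pad_length, pad_depth=0):
    # Pad the spatial axes by pad_length (and optionally the spectral
    # axis by pad_depth), filling with zeros.
    return np.pad(old_matrix,
                  ((pad_length, pad_length),
                   (pad_length, pad_length),
                   (pad_depth, pad_depth)),
                  'constant', constant_values=0)

With PATCH_LENGTH = 5 (consistent with the 11 x 11 patches above), a 145 x 145 x 200 cube would pad to 155 x 155 x 200.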
Example #2
print('data.shape:', data.shape)
data1 = data_IN1.reshape(np.prod(data_IN1.shape[:2]), np.prod(data_IN1.shape[2:]))
print('data1.shape:', data1.shape)
gt = new_gt_IN.reshape(np.prod(new_gt_IN.shape[:2]),)
print('gt.shape:', gt.shape)

data = preprocessing.scale(data)
data_ = data.reshape(data_IN.shape[0], data_IN.shape[1], data_IN.shape[2])

data1 = preprocessing.scale(data1)
data1_ = data1.reshape(data_IN1.shape[0], data_IN1.shape[1], data_IN1.shape[2])

whole_data = data_
whole_data1 = data1_

padded_data = zeroPadding.zeroPadding_3D(whole_data, PATCH_LENGTH)  # 1
print("whole_data.shape:", whole_data.shape)

padded_data1 = zeroPadding.zeroPadding_3D(whole_data1, PATCH_LENGTH1)  # 13
print("whole_data.shape:", whole_data1.shape)


ITER = 1
CATEGORY = 13

train_data = np.zeros((TRAIN_SIZE, 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1, INPUT_DIMENSION_CONV))
train_data1 = np.zeros((TRAIN_SIZE, 2*PATCH_LENGTH1 + 1, 2*PATCH_LENGTH1 + 1, INPUT_DIMENSION_CONV1))


test_data = np.zeros((TEST_SIZE, 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1, INPUT_DIMENSION_CONV))
test_data1 = np.zeros((TEST_SIZE, 2*PATCH_LENGTH1 + 1, 2*PATCH_LENGTH1 + 1, INPUT_DIMENSION_CONV1))
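
Example #2 prepares two parallel patch cubes at different spatial scales. Going by the # 1 and # 13 comments on the padding calls, the window sizes work out as follows (the concrete values are an assumption read from those comments):

PATCH_LENGTH, PATCH_LENGTH1 = 1, 13   # values suggested by the comments above
print(2 * PATCH_LENGTH + 1)           # 3  -> 3x3 fine-scale patches
print(2 * PATCH_LENGTH1 + 1)          # 27 -> 27x27 coarse-scale patches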
Example #3
def run_training():

    # load the data
    print(150 * '*')
    uPavia = sio.loadmat('/home/amax/xibobo/data/UP/PaviaU.mat')
    gt_uPavia = sio.loadmat('/home/amax/xibobo/data/UP/PaviaU_combinedGt.mat')
    data_IN = uPavia['paviaU']
    gt_IN = gt_uPavia['combinedGt']

    print(data_IN.shape)
    data = data_IN.reshape(np.prod(data_IN.shape[:2]), np.prod(data_IN.shape[2:]))
    gt = gt_IN.reshape(np.prod(gt_IN.shape[:2]),)
    
    trainingIndexf = '/home/amax/xibobo/data/DisjointUPtrainingIndexMix.mat'
    train_indices = sio.loadmat(trainingIndexf)['trainingIndexMix']
    train_indices_rows = sio.loadmat(trainingIndexf)['trainingIndexMix_rows']
    train_indices_cols = sio.loadmat(trainingIndexf)['trainingIndexMix_cols']
    testingIndexf = '/home/amax/xibobo/data/DisjointUPtestingIndexMix.mat'
    test_indices = sio.loadmat(testingIndexf)['testingIndexMix']  
    test_indices_rows = sio.loadmat(testingIndexf)['testingIndexMix_rows']  
    test_indices_cols = sio.loadmat(testingIndexf)['testingIndexMix_cols'] 
    
    train_indices = np.squeeze(train_indices-1)
    test_indices = np.squeeze(test_indices-1)
    height = gt_IN.shape[0]
    width = gt_IN.shape[1]   
    Y = gt_IN.T
    Y = Y.reshape(height * width,)
    
    train_y = Y[train_indices]-1
    y_train = to_categorical(np.asarray(train_y))
    
    
    test_y = Y[test_indices] - 1
    y_test = to_categorical(np.asarray(test_y))
    TRAIN_SIZE = train_indices.shape[0]
    TEST_SIZE = test_indices.shape[0]
    classes_num = np.max(gt)
    
    data = preprocessing.scale(data)
    whole_data = data.reshape(data_IN.shape[0], data_IN.shape[1], data_IN.shape[2])
    whole_data,pca = applyPCA(whole_data, numComponents = numComponents)
    img_channels= whole_data.shape[2]    
    
    padded_data = zeroPadding.zeroPadding_3D(whole_data, PATCH_LENGTH)
    
    train_data = np.zeros((TRAIN_SIZE, 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1, img_channels))
    test_data = np.zeros((TEST_SIZE, 2*PATCH_LENGTH + 1, 2*PATCH_LENGTH + 1, img_channels))
    
    train_assign = indexToAssignment(np.squeeze(train_indices_rows-1), np.squeeze(train_indices_cols-1), PATCH_LENGTH)
    for i in range(len(train_assign)):
        train_data[i] = selectNeighboringPatch(padded_data,train_assign[i][0],train_assign[i][1],PATCH_LENGTH)
    #
    test_assign = indexToAssignment(np.squeeze(test_indices_rows-1), np.squeeze(test_indices_cols-1), PATCH_LENGTH)
    for i in range(len(test_assign)):
        test_data[i] = selectNeighboringPatch(padded_data,test_assign[i][0],test_assign[i][1],PATCH_LENGTH)
    
    Xtrain = train_data.reshape(train_data.shape[0], train_data.shape[1], train_data.shape[2],img_channels)
    Xtest = test_data.reshape(test_data.shape[0], test_data.shape[1], test_data.shape[2], img_channels)
    train_x = Xtrain.reshape(-1,window_size,window_size,img_channels,1)   
    test_x = Xtest.reshape(-1, window_size,window_size,img_channels,1)      
    train_num = train_x.shape[0]
    test_num = test_x.shape[0]

    # construct the computation graph
    images = tf.placeholder(tf.float32, shape=[None,window_size,window_size,img_channels,1])
    labels = tf.placeholder(tf.int32, shape=[None])
    lr= tf.placeholder(tf.float32)

    features,_ = res4_model_ss(images,classes_num,[1],[1])
    centers = func.construct_center(features, classes_num, 1)
    
    loss1 = func.dce_loss(features, labels, centers, FLAGS.temp)
#    loss2 = func.mcl_loss(features, labels, centers, 0.9)
#    loss2 = func.gmcl_loss(features, labels, centers, 0.9)
    loss2 = func.dis_loss(features, labels, centers)

#    loss=loss1
    loss = loss1 + FLAGS.weight_pl * loss2
#    
    eval_correct = func.evaluation(features, labels, centers)
    train_op = func.training(loss, lr)
    
    #counts = tf.get_variable('counts', [FLAGS.classes_num], dtype=tf.int32,
    #    initializer=tf.constant_initializer(0), trainable=False)
    #add_op, count_op, average_op = net.init_centers(features, labels, centers, counts)
    init = tf.global_variables_initializer()
    # initialize the variables
    sess = tf.Session()
    sess.run(init)
    #compute_centers(sess, add_op, count_op, average_op, images, labels, train_x, train_y)

    # run the computation graph (train and test process)
    epoch = 1
    index = list(range(train_num))
    np.random.shuffle(index)
    batch_size = FLAGS.batch_size
    batch_num = train_num // batch_size if train_num % batch_size == 0 else train_num // batch_size + 1
    #saver = tf.train.Saver(max_to_keep=1)
    train_start = time.time()
    # train the framework with the training data
#    while stopping<FLAGS.stop:
    while epoch < epoch_num:
        time1 = time.time()
        loss_now = 0.0
        score_now = 0.0
           
        for i in range(batch_num):
            batch_x = train_x[index[i*batch_size:(i+1)*batch_size]]
            batch_y = train_y[index[i*batch_size:(i+1)*batch_size]]
            result = sess.run([train_op, loss, eval_correct], feed_dict={images:batch_x,
                labels:batch_y, lr:FLAGS.learning_rate})
#            init_logits_value = sess.run(logits)
            loss_now += result[1]
            score_now += result[2][1]
        score_now /= train_num

        print('epoch {}: training: loss --> {:.3f}, acc --> {:.3f}%'.format(epoch, loss_now, score_now*100))
        #print sess.run(centers)

        #checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')
        #saver.save(sess, checkpoint_file, global_step=epoch)
#        FLAGS.learning_rate*=FLAGS.decay
        FLAGS.learning_rate -= FLAGS.decay
#        FLAGS.learning_rate*= (1. / (1. + FLAGS.decay * epoch))
        epoch += 1
        np.random.shuffle(index)

        time2 = time.time()
        print('time for this epoch: {:.3f} minutes'.format((time2-time1)/60.0))
    print()
    print('time for the whole training phase: '+str(time.time()-train_start)+' s')   
    # test the framework with the test data
    init_centers_value = sess.run(centers)
    test_start= time.time()
    pred_labels, test_score = do_eval(sess, eval_correct, images, labels, test_x, test_y)
    print('time for the whole testing phase: '+str(time.time()-test_start)+' s')
    sess.close()    
    pred_labels = np.int8(pred_labels)  
    test_y = np.int8(test_y) 
    # confusion matrix (rows: predicted label, columns: ground truth)
    matrix = np.zeros((classes_num, classes_num))
    with open('prediction_DPN_HRA.txt', 'w') as f:
        for i in range(test_num):
            pre_label = pred_labels[i]
            f.write(str(pre_label)+'\n')
            matrix[pre_label, test_y[i]] += 1
    print()
    print('The confusion matrix is:')
    print(np.int_(matrix))
    # overall accuracy
    OA = np.trace(matrix) / float(test_num)

    # per-class accuracy: diagonal over ground-truth (column) sums,
    # since rows index predictions and columns index the ground truth
    ua = np.diag(matrix) / np.sum(matrix, axis=0)

    # precision: diagonal over predicted (row) sums
    precision = np.diag(matrix) / np.sum(matrix, axis=1)

    # Kappa = (Po - Pe) / (1 - Pe), with Pe from the row/column marginals
    matrix = np.mat(matrix)
    Po = OA
    xsum = np.sum(matrix, axis=1)
    ysum = np.sum(matrix, axis=0)
    Pe = float(ysum * xsum) / (np.sum(matrix) ** 2)
    Kappa = float((Po - Pe) / (1 - Pe))

    for i in ua:
        print(i)
    print(np.sum(ua) / matrix.shape[0])
    print(OA)
    print(Kappa)
    print()
    for i in precision:
        print(i)
    print(np.sum(precision) / matrix.shape[0])
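
indexToAssignment and selectNeighboringPatch, used in the patch-extraction loops above, are also external. A minimal sketch, assuming the first shifts (row, col) positions into the padded coordinate frame and the second cuts a square window around each pixel:

def indexToAssignment(rows, cols, pad_length):
    # Shift original (row, col) pixel positions into the zero-padded cube.
    return [[r + pad_length, c + pad_length] for r, c in zip(rows, cols)]

def selectNeighboringPatch(matrix, pos_row, pos_col, ex_len):
    # Cut a (2*ex_len + 1) x (2*ex_len + 1) spatial window centred on the pixel.
    selected_rows = matrix[pos_row - ex_len:pos_row + ex_len + 1]
    return selected_rows[:, pos_col - ex_len:pos_col + ex_len + 1]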
Example #4
data = data_IN.reshape(np.prod(data_IN.shape[:2]), np.prod(data_IN.shape[2:]))
gt = gt_IN.reshape(np.prod(gt_IN.shape[:2]), )

#data = normalization.Normalization(data)

#data_ = data.reshape(data_UP.shape[0], data_UP.shape[1],data_UP.shape[2])
# data_trans = data.transpose()
# whole_pca = doPCA.dimension_PCA(data_trans, data_UP, INPUT_DIMENSION)
#
# print (whole_pca.shape)

data_trans = data.transpose()
data_ = doPCA.dimension_PCA(data_trans, data_IN, INPUT_DIMENSION)

padded_data = zeroPadding.zeroPadding_3D(data_, PATCH_LENGTH)
print(padded_data.shape)

ITER = 1
CATEGORY = 16

KAPPA_CONV = []
OA_CONV = []
AA_CONV = []
TRAINING_TIME_CONV = []
TESTING_TIME_CONV = []
ELEMENT_ACC_CONV = np.zeros((ITER, CATEGORY))

for index_iter in range(ITER):
    print("# %d Iteration" % (index_iter + 1))
Example #5
VERIFICATION_SIZE = 1500
TOTAL_SIZE = TRAIN_SIZE + TEST_SIZE
img_channels_HSI = 144
img_channels_HSIEPLBP = 815
img_channels_LiDAREPLBP = 134
CATEGORY = 15

######### Data normalization ########
data = data_Houston_HSI.reshape(np.prod(data_Houston_HSI.shape[:2]),
                                np.prod(
                                    data_Houston_HSI.shape[2:]))  # 3D to 2D
data = preprocessing.scale(data)  #normalization
whole_data_HSI = data.reshape(data_Houston_HSI.shape[0],
                              data_Houston_HSI.shape[1],
                              data_Houston_HSI.shape[2])
padded_data_HSI = zeroPadding.zeroPadding_3D(whole_data_HSI, PATCH_LENGTH)
del data, data_Houston_HSI

data = data_Houston_HSIEPLBP.reshape(
    np.prod(data_Houston_HSIEPLBP.shape[:2]),
    np.prod(data_Houston_HSIEPLBP.shape[2:]))  # 3D to 2D
data = preprocessing.scale(data)  #normalization
whole_data_HSIEPLBP = data.reshape(data_Houston_HSIEPLBP.shape[0],
                                   data_Houston_HSIEPLBP.shape[1],
                                   data_Houston_HSIEPLBP.shape[2])
padded_data_HSIEPLBP = zeroPadding.zeroPadding_3D(whole_data_HSIEPLBP,
                                                  PATCH_LENGTH)
del data, data_Houston_HSIEPLBP

data = data_Houston_LiDAREPLBP.reshape(
    np.prod(data_Houston_LiDAREPLBP.shape[:2]),
    np.prod(data_Houston_LiDAREPLBP.shape[2:]))  # 3D to 2D
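
The snippet breaks off here. Following the pattern of the HSI and HSIEPLBP branches above, the LiDAREPLBP cube would presumably be scaled, reshaped and padded the same way (a sketch, not part of the original code):

data = preprocessing.scale(data)  # normalization
whole_data_LiDAREPLBP = data.reshape(data_Houston_LiDAREPLBP.shape[0],
                                     data_Houston_LiDAREPLBP.shape[1],
                                     data_Houston_LiDAREPLBP.shape[2])
padded_data_LiDAREPLBP = zeroPadding.zeroPadding_3D(whole_data_LiDAREPLBP,
                                                    PATCH_LENGTH)
del data, data_Houston_LiDAREPLBP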
Example #6
def run_training():

    # load the data
    print(150 * '*')
    HU2012 = sio.loadmat('./data/HU2012/2012_Houston.mat')
    data_IN = HU2012['spectraldata']
    gt_IN = HU2012['gt_2012']
    print(data_IN.shape)
    data = data_IN.reshape(np.prod(data_IN.shape[:2]), np.prod(data_IN.shape[2:]))
    gt = gt_IN.reshape(np.prod(gt_IN.shape[:2]),)

    trainingIndexf = './data/Houston2012trainingIndex.mat'
    train_indices = sio.loadmat(trainingIndexf)['trainingIndex']
    train_indices_rows = sio.loadmat(trainingIndexf)['trainingIndex_rows']
    train_indices_cols = sio.loadmat(trainingIndexf)['trainingIndex_cols']
    testingIndexf = './data/Houston2012testingIndex.mat'
    test_indices = sio.loadmat(testingIndexf)['testingIndex']  
    test_indices_rows = sio.loadmat(testingIndexf)['testingIndex_rows']  
    test_indices_cols = sio.loadmat(testingIndexf)['testingIndex_cols'] 

    train_indices = np.squeeze(train_indices-1)
    test_indices = np.squeeze(test_indices-1)
    height, width = gt_IN.shape

    Y = gt_IN.T
    Y = Y.reshape(height * width,)
    train_y = Y[train_indices]-1
    test_y = Y[test_indices] - 1

    classes_num = np.max(gt)
    
    data = preprocessing.scale(data)
    whole_data = data.reshape(data_IN.shape[0], data_IN.shape[1], data_IN.shape[2])

    whole_data, pca = applyPCA(whole_data, numComponents = FLAGS.numComponents)
    img_channels = whole_data.shape[2]
    PATCH_LENGTH = int((FLAGS.window_size-1)/2)
    padded_data = zeroPadding.zeroPadding_3D(whole_data, PATCH_LENGTH)
    train_data = np.zeros((train_indices.shape[0], FLAGS.window_size, FLAGS.window_size, img_channels))
    test_data = np.zeros((test_indices.shape[0], FLAGS.window_size, FLAGS.window_size, img_channels))
    
    train_assign = indexToAssignment(np.squeeze(train_indices_rows-1), np.squeeze(train_indices_cols-1), PATCH_LENGTH)
    for i in range(len(train_assign)):
        train_data[i] = selectNeighboringPatch(padded_data,train_assign[i][0],train_assign[i][1],PATCH_LENGTH)

    test_assign = indexToAssignment(np.squeeze(test_indices_rows-1), np.squeeze(test_indices_cols-1), PATCH_LENGTH)
    for i in range(len(test_assign)):
        test_data[i] = selectNeighboringPatch(padded_data,test_assign[i][0],test_assign[i][1],PATCH_LENGTH)
    
    Xtrain = train_data.reshape(train_data.shape[0], train_data.shape[1], train_data.shape[2],img_channels)
    Xtest = test_data.reshape(test_data.shape[0], test_data.shape[1], test_data.shape[2], img_channels)
    train_x = Xtrain.reshape(-1,train_data.shape[1], train_data.shape[2],img_channels,1)
    test_x = Xtest.reshape(-1, test_data.shape[1], test_data.shape[2],img_channels,1)
    train_num = train_x.shape[0]
    test_num = test_x.shape[0]

    # construct the computation graph
    images = tf.placeholder(tf.float32, shape=[None,FLAGS.window_size,FLAGS.window_size,img_channels,1])
    labels = tf.placeholder(tf.int32, shape=[None])
    lr= tf.placeholder(tf.float32)

    features = res4_model_ss(images,[1],[1])
    prototypes = func.construct_center(features, classes_num, 1)
    
    loss1 = func.dce_loss(features, labels, prototypes, FLAGS.temp)
    loss2 = func.dis_loss(features, labels, prototypes)
    loss = loss1 + FLAGS.weight_dis * loss2
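    # Presumably dce_loss is a distance-based cross-entropy against the class
    # prototypes and dis_loss pulls features towards their own prototype,
    # weighted by FLAGS.weight_dis (an assumption; func is not shown here).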

    eval_correct = func.evaluation(features, labels, prototypes)
    train_op = func.training(loss, lr)

    # initialize the variables
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # run the computation graph (train and test process)
    epoch = 1
    index = list(range(train_num))
    np.random.shuffle(index)
    batch_size = FLAGS.batch_size
    batch_num = train_num // batch_size if train_num % batch_size == 0 else train_num // batch_size + 1
    train_start = time.time()

    # train the framework with the training data
    while epoch < FLAGS.epoch_num:
        time1 = time.time()
        loss_now = 0.0
        score_now = 0.0
        for i in range(batch_num):
            batch_x = train_x[index[i*batch_size:(i+1)*batch_size]]
            batch_y = train_y[index[i*batch_size:(i+1)*batch_size]]
            result = sess.run([train_op, loss, eval_correct], feed_dict={images:batch_x,
                labels:batch_y, lr:FLAGS.learning_rate})
            loss_now += result[1]
            score_now += result[2][1]
        score_now /= train_num
        print('epoch {}: training: loss --> {:.3f}, acc --> {:.3f}%'.format(epoch, loss_now, score_now*100))
        FLAGS.learning_rate -= FLAGS.decay
        epoch += 1
        np.random.shuffle(index)
        time2 = time.time()
        print('time for this epoch: {:.3f} minutes'.format((time2-time1)/60.0))
    print()
    print('time for the whole training phase: '+str(time.time()-train_start)+' s')   
    # test the framework with the test data
    # init_prototypes_value = sess.run(prototypes) # get the variable of prototypes
    test_start= time.time()
    pred_labels, test_score = do_eval(sess, eval_correct, images, labels, test_x, test_y)
    print('time for the whole testing phase: '+str(time.time()-test_start)+' s')
    sess.close()    
    pred_labels = np.int8(pred_labels)  
    test_y = np.int8(test_y) 
    # confusion matrix (rows: predicted label, columns: ground truth)
    matrix = np.zeros((classes_num, classes_num))
    with open('prediction.txt', 'w') as f:
        for i in range(test_num):
            pre_label = pred_labels[i]
            f.write(str(pre_label)+'\n')
            matrix[pre_label, test_y[i]] += 1
    print()
    print('The confusion matrix is:')
    print(np.int_(matrix))

    # calculate the overall accuracy
    OA = np.trace(matrix) / float(test_num)
    # calculate the per-class accuracy: diagonal over ground-truth (column) sums
    ua = np.diag(matrix) / np.sum(matrix, axis=0)
    # calculate the precision: diagonal over predicted (row) sums
    precision = np.diag(matrix) / np.sum(matrix, axis=1)
    # calculate the Kappa coefficient: Kappa = (Po - Pe) / (1 - Pe)
    matrix = np.mat(matrix)
    Po = OA
    xsum = np.sum(matrix, axis=1)
    ysum = np.sum(matrix, axis=0)
    Pe = float(ysum * xsum) / (np.sum(matrix) ** 2)
    Kappa = float((Po - Pe) / (1 - Pe))
    # print the classification result
    for i in ua:
        print(i)
    print(np.sum(ua) / matrix.shape[0])
    print(OA)
    print(Kappa)
    print()
    for i in precision:
        print(i)
    print(np.sum(precision) / matrix.shape[0])
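
applyPCA, used in Examples #3 and #6, is the last undefined helper. A minimal sketch, assuming it mirrors the widely used recipe of flattening the cube, fitting PCA over the spectral vectors and returning both the reduced cube and the fitted model:

from sklearn.decomposition import PCA
import numpy as np

def applyPCA(X, numComponents=30):
    # Flatten spatial dims, fit PCA on the spectral vectors, restore the cube.
    newX = np.reshape(X, (-1, X.shape[2]))
    pca = PCA(n_components=numComponents, whiten=True)
    newX = pca.fit_transform(newX)
    return np.reshape(newX, (X.shape[0], X.shape[1], numComponents)), pca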