Example 1
def plot_tSNE(model, filename="data/RML2016.10a_dict.pkl"):
    from keras.models import Model
    from sklearn.manifold import TSNE
    from sklearn.decomposition import PCA

    (mods, snrs, lbl), (X_train, Y_train), (X_test, Y_test), (train_idx, test_idx) = \
        rmldataset2016.load_data(filename, train_rate=0.80)

    # Build a sub-model that outputs the activations of the intermediate 'dense1' layer
    dense2_model = Model(inputs=model.input,
                         outputs=model.get_layer('dense1').output)

    # For each SNR above 14 dB, extract the corresponding test samples
    for snr in [s for s in snrs if s > 14]:
        test_SNRs = [lbl[x][1] for x in test_idx]  #lbl: list(mod,snr)
        test_X_i = X_test[np.where(np.array(test_SNRs) == snr)]
        test_Y_i = Y_test[np.where(np.array(test_SNRs) == snr)]

        # Compute the intermediate-layer output
        dense2_output = dense2_model.predict(test_X_i, batch_size=32)
        Y_true = np.argmax(test_Y_i, axis=1)

        # Reduce to 50 dimensions with PCA before t-SNE
        pca = PCA(n_components=50)
        dense2_output_pca = pca.fit_transform(dense2_output)

        # Project down to 2 dimensions with t-SNE
        tsne = TSNE(n_components=2, perplexity=5)
        Y_sne = tsne.fit_transform(dense2_output_pca)

        fig = plt.figure(figsize=(14, 12))

        # Scatter plot (alternative)
        # plt.scatter(Y_sne[:,0],Y_sne[:,1],s=5.,color=plt.cm.Set1(Y_true / 11.))

        # Label plot: draw each point as its class index
        data = Y_sne
        x_min, x_max = np.min(data, 0), np.max(data, 0)
        data = (data - x_min) / (x_max - x_min)
        for i in range(Y_sne.shape[0]):
            plt.text(data[i, 0],
                     data[i, 1],
                     str(Y_true[i]),
                     color=plt.cm.Set1(Y_true[i] / 11.),
                     fontdict={
                         'weight': 'bold',
                         'size': 9
                     })
        plt.title('t-SNE at SNR: {}'.format(snr))

        # plt.legend()  # show the legend
        plt.show()
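plot_tSNE expects a trained model whose intermediate layer is named 'dense1'. A minimal usage sketch follows; the JSON and weights paths are hypothetical placeholders, not files taken from the repository.

# Minimal usage sketch for plot_tSNE; the model and weight paths are hypothetical.
from keras.models import model_from_json

with open('model/cldnn_model.json') as f:              # hypothetical path
    trained_model = model_from_json(f.read())
trained_model.load_weights('model/cldnn_weights.h5')   # hypothetical path

plot_tSNE(trained_model, filename="data/RML2016.10a_dict.pkl")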
Example 2
def predict(model, filename="data/RML2016.10a_dict.pkl"):
    (mods, snrs, lbl), (X_train, Y_train), (X_test, Y_test), (train_idx, test_idx) = \
        rmldataset2016.load_data(filename, train_rate=0.5)
    # Plot confusion matrix
    test_Y_hat = model.predict(X_test, batch_size=batch_size)
    confnorm, _, _ = mltools.calculate_confusion_matrix(
        Y_test, test_Y_hat, classes)
    mltools.plot_confusion_matrix(confnorm, labels=classes)

    # Plot confusion matrix per SNR
    acc = {}
    for snr in snrs:

        # extract classes @ SNR
        # test_SNRs = map(lambda x: lbl[x][1], test_idx)
        test_SNRs = [lbl[x][1] for x in test_idx]

        test_X_i = X_test[np.where(np.array(test_SNRs) == snr)]
        test_Y_i = Y_test[np.where(np.array(test_SNRs) == snr)]

        # estimate classes
        test_Y_i_hat = model.predict(test_X_i)
        confnorm_i, cor, ncor = mltools.calculate_confusion_matrix(
            test_Y_i, test_Y_i_hat, classes)
        acc[snr] = 1.0 * cor / (cor + ncor)

        mltools.plot_confusion_matrix(
            confnorm_i,
            labels=classes,
            title="ConvNet Confusion Matrix (SNR=%d)(ACC=%2f)" %
            (snr, 100.0 * acc[snr]))

    # Save results to a pickle file for plotting later
    print(acc)
    fd = open('predictresult/cnn2_d0.5.dat', 'wb')
    pickle.dump(("CNN2", 0.5, acc), fd)
    fd.close()

    # Plot accuracy curve
    plt.plot(snrs, [acc[i] for i in snrs])
    plt.xlabel("Signal to Noise Ratio")
    plt.ylabel("Classification Accuracy")
    plt.title("CNN2 Classification Accuracy on RadioML 2016.10 Alpha")
    plt.show()
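Both predict() examples call mltools.calculate_confusion_matrix, whose source is not shown here. The sketch below is an assumption about what such a helper could look like, matching only the call signature and return values used above (row-normalized confusion matrix, number of correct predictions, number of incorrect predictions); the real mltools implementation may differ.

import numpy as np

def calculate_confusion_matrix_sketch(Y_true, Y_hat, classes):
    # Y_true: one-hot labels, Y_hat: predicted class probabilities.
    n = len(classes)
    conf = np.zeros((n, n))
    for true_onehot, pred_probs in zip(Y_true, Y_hat):
        i = int(np.argmax(true_onehot))    # true class index
        j = int(np.argmax(pred_probs))     # predicted class index
        conf[i, j] += 1
    cor = int(np.trace(conf))              # correct predictions (diagonal)
    ncor = int(conf.sum()) - cor           # incorrect predictions
    row_sums = np.maximum(conf.sum(axis=1, keepdims=True), 1)  # avoid /0 for absent classes
    confnorm = conf / row_sums             # row-normalized confusion matrix
    return confnorm, cor, ncor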
Example 3
def main():
    import rmldataset2016
    import numpy as np
    (mods, snrs, lbl), (X_train, Y_train), (X_test, Y_test), (train_idx, test_idx) = \
        rmldataset2016.load_data(filename="RML2016.10a_dict.pkl", train_rate=0.2)

    one_sample = X_test[0]
    print(np.shape(one_sample))
    print(one_sample[0:2])
    print(np.max(one_sample,axis=1))
    # Instantaneous amplitude: sqrt(I^2 + Q^2) over the 128 time samples
    one_sample = np.power(one_sample, 2)
    one_sample = np.sqrt(one_sample[0, :] + one_sample[1, :])

    plt.figure()
    plt.title('Amplitude Envelope of One Test Sample')
    one_sample_t = np.arange(128)
    plt.plot(one_sample_t,one_sample)
    # plt.scatter()
    plt.grid()
    plt.show()

    sum_sample = np.sum(one_sample)
    print(sum_sample)
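The single-sample envelope above generalizes to the whole split. A small sketch computing the per-sample energy over all test samples, assuming X_test keeps the (N, 2, 128) I/Q layout used in Example 3:

import numpy as np

# Per-sample energy sum(I^2 + Q^2) across the 128 time steps, for every test sample.
energy = np.sum(np.square(X_test), axis=(1, 2))
print("mean energy: {:.4f}, std: {:.4f}".format(energy.mean(), energy.std()))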
Example 4
def predict(model, filename="data/RML2016.10a_dict.pkl"):
    (mods, snrs, lbl), (X_train, Y_train), (X_test, Y_test), (train_idx, test_idx) = \
        rmldataset2016.load_data(filename, train_rate=0.5)
    # Plot confusion matrix
    test_Y_hat = model.predict(X_test, batch_size=batch_size)
    confnorm,_,_ = mltools.calculate_confusion_matrix(Y_test,test_Y_hat,classes)
    mltools.plot_confusion_matrix(confnorm, labels=classes,save_filename='figure/total_confusion')

    # Plot confusion matrix per SNR and collect per-modulation accuracy
    acc = {}
    acc_mod_snr = np.zeros((len(classes), len(snrs)))
    i = 0
    for snr in snrs:

        # extract classes @ SNR
        # test_SNRs = map(lambda x: lbl[x][1], test_idx)
        test_SNRs = [lbl[x][1] for x in test_idx]

        test_X_i = X_test[np.where(np.array(test_SNRs) == snr)]
        test_Y_i = Y_test[np.where(np.array(test_SNRs) == snr)]

        # estimate classes
        test_Y_i_hat = model.predict(test_X_i)
        confnorm_i,cor,ncor = mltools.calculate_confusion_matrix(test_Y_i,test_Y_i_hat,classes)
        acc[snr] = 1.0 * cor / (cor + ncor)

        mltools.plot_confusion_matrix(confnorm_i, labels=classes, title="ConvNet Confusion Matrix (SNR=%d)(ACC=%.2f)" % (snr, 100.0 * acc[snr]), save_filename="figure/Confusion(SNR=%d)(ACC=%.2f).png" % (snr, 100.0 * acc[snr]))
        
        acc_mod_snr[:, i] = np.round(np.diag(confnorm_i) / np.sum(confnorm_i, axis=1), 3)
        i += 1
    
    # Plot the accuracy of each modulation, at most dis_num curves per figure
    dis_num = 11
    for g in range(int(np.ceil(acc_mod_snr.shape[0]/dis_num))):
        assert (0 <= dis_num <= acc_mod_snr.shape[0])
        beg_index = g*dis_num
        end_index = np.min([(g+1)*dis_num,acc_mod_snr.shape[0]])

        plt.figure(figsize=(12, 10))
        plt.xlabel("Signal to Noise Ratio")
        plt.ylabel("Classification Accuracy")
        plt.title("Classification Accuracy for Each Mod")

        for i in range(beg_index,end_index):
            plt.plot(snrs, acc_mod_snr[i], label=classes[i])
            # Annotate each point with its accuracy value
            for x, y in zip(snrs, acc_mod_snr[i]):
                plt.text(x, y, str(y), ha='center', va='bottom', fontsize=8)

        plt.legend()
        plt.grid()
        plt.savefig('figure/acc_with_mod_{}.png'.format(g+1))
        plt.close()
    # Save per-modulation accuracy across SNRs
    fd = open('predictresult/acc_for_mod_on_cldnn.dat', 'wb')
    pickle.dump(('128', 'cldnn', acc_mod_snr), fd)
    fd.close()

    # Save results to a pickle file for plotting later
    print(acc)
    fd = open('predictresult/CLDNN_dr0.5.dat', 'wb')
    pickle.dump(("1D", 0.5, acc), fd)
    fd.close()

    # Plot accuracy curve
    plt.plot(snrs, list(map(lambda x: acc[x], snrs)))
    plt.xlabel("Signal to Noise Ratio")
    plt.ylabel("Classification Accuracy")
    plt.title("CLDNN Classification Accuracy on RadioML 2016.10 Alpha")
    plt.tight_layout()
    plt.savefig('figure/each_acc.png')
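The per-SNR accuracy dictionaries pickled by the two predict() functions are meant for later plotting. A minimal sketch that reloads them and compares the curves on one figure, assuming both result files from the examples above exist:

import pickle
import matplotlib.pyplot as plt

for path, label in [('predictresult/cnn2_d0.5.dat', 'CNN2'),
                    ('predictresult/CLDNN_dr0.5.dat', 'CLDNN')]:
    with open(path, 'rb') as fd:
        _name, _train_rate, acc = pickle.load(fd)
    snrs_sorted = sorted(acc.keys())
    plt.plot(snrs_sorted, [acc[s] for s in snrs_sorted], label=label)

plt.xlabel("Signal to Noise Ratio")
plt.ylabel("Classification Accuracy")
plt.legend()
plt.grid()
plt.show()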
Example 5
from keras.models import model_from_json
from keras import backend as K
#from keras.utils.vis_utils import plot_model

import mltools,rmldataset2016
#import rmlmodels.CNN2Model as cnn2
#import rmlmodels.ResNetLikeModel as resnet
#import rmlmodels.VGGLikeModel as vggnet
import rmlmodels.CLDNNLikeModel as cldnn


# Set the Keras image data format to channels_last
K.set_image_data_format('channels_last')
print(K.image_data_format())

(mods, snrs, lbl), (X_train, Y_train), (X_test, Y_test), (train_idx, test_idx) = \
    rmldataset2016.load_data(filename="data/RML2016.10a_dict.pkl", train_rate=0.5)

in_shp = list(X_train.shape[1:])
print(X_train.shape, in_shp)
classes = mods
print(classes)

# Build VT-CNN2 Neural Net model using Keras primitives --
#  - Reshape [N,2,128] to [N,2,128,1] on input
#  - Pass through 2 2DConv/ReLu layers
#  - Pass through 2 Dense layers (ReLu and Softmax)
#  - Perform categorical cross entropy optimization

# Set up some params
nb_epoch = 100     # number of epochs to train on
batch_size = 1024  # training batch size
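The comment block above describes the VT-CNN2 topology in words. Below is a minimal Keras sketch of that description; the filter counts, kernel sizes and dropout rate are illustrative assumptions rather than the repository's exact configuration, and the penultimate layer is named 'dense1' so it matches what plot_tSNE in Example 1 expects.

from keras.models import Sequential
from keras.layers import Reshape, Conv2D, Dropout, Flatten, Dense

def build_vtcnn2_sketch(in_shp, num_classes, dr=0.5):
    # Reshape [2,128] -> [2,128,1], two Conv2D/ReLU blocks, Dense/ReLU, Dense/softmax.
    model = Sequential()
    model.add(Reshape(in_shp + [1], input_shape=tuple(in_shp)))
    model.add(Conv2D(256, (1, 3), padding='same', activation='relu'))
    model.add(Dropout(dr))
    model.add(Conv2D(80, (2, 3), padding='valid', activation='relu'))
    model.add(Dropout(dr))
    model.add(Flatten())
    model.add(Dense(256, activation='relu', name='dense1'))
    model.add(Dropout(dr))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model

# e.g. model = build_vtcnn2_sketch(in_shp, len(classes))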