Example #1
def predict(model, batch, data, classes):
    # Note: relies on module-level globals (X_test, X1_test, X2_test, Y_test,
    # snrs, lbl, test_idx) prepared by the data-loading step.
    # Plot the overall confusion matrix
    test_Y_hat = model.predict([X_test, X1_test, X2_test], batch_size=batch)
    confnorm, _, _ = mltools.calculate_confusion_matrix(
        Y_test, test_Y_hat, classes)
    mltools.plot_confusion_matrix(
        confnorm,
        labels=classes,
        save_filename='figure/mclstm_total_confusion.png')

    # Per-SNR accuracy and confusion matrices
    acc = {}
    acc_mod_snr = np.zeros((len(classes), len(snrs)))
    i = 0
    for snr in snrs:

        # Extract classes @ SNR
        test_SNRs = [lbl[x][1] for x in test_idx]
        test_X1_i = X1_test[np.where(np.array(test_SNRs) == snr)]
        test_X2_i = X2_test[np.where(np.array(test_SNRs) == snr)]
        test_X_i = X_test[np.where(np.array(test_SNRs) == snr)]
        test_Y_i = Y_test[np.where(np.array(test_SNRs) == snr)]

        # Estimate classes
        test_Y_i_hat = model.predict([test_X_i, test_X1_i, test_X2_i])
        confnorm_i, cor, ncor = mltools.calculate_confusion_matrix(
            test_Y_i, test_Y_i_hat, classes)
        acc[snr] = 1.0 * cor / (cor + ncor)
        mltools.plot_confusion_matrix(
            confnorm_i,
            labels=classes,
            title="Confusion Matrix",
            save_filename="figure/Confusion(SNR=%d)(ACC=%2f).png" %
            (snr, 100.0 * acc[snr]))
        acc_mod_snr[:, i] = np.round(
            np.diag(confnorm_i) / np.sum(confnorm_i, axis=1), 3)
        i = i + 1

    # Save acc for mod per SNR
    fd = open('predictresult/acc_for_mod.dat', 'wb')
    pickle.dump((acc_mod_snr), fd)
    fd.close()

    # Save results to a pickle file for plotting later
    print(acc)
    fd = open('predictresult/acc.dat', 'wb')
    pickle.dump(acc, fd)
    fd.close()

    # Plot accuracy curve
    plt.plot(snrs, [acc[snr] for snr in snrs])
    plt.xlabel("Signal to Noise Ratio")
    plt.ylabel("Classification Accuracy")
    plt.title("Classification Accuracy on RadioML")
    plt.tight_layout()
    plt.savefig('figure/each_acc.png')
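Each of these examples relies on mltools.calculate_confusion_matrix, which is not shown here. A minimal sketch of what such a helper plausibly does, assuming one-hot labels, softmax outputs, and a row-normalized matrix (the actual mltools implementation may differ):

import numpy as np

def calculate_confusion_matrix(Y, Y_hat, classes):
    # Sketch only: rows are true classes, columns are predicted classes.
    n = len(classes)
    conf = np.zeros((n, n))
    for true_row, pred_row in zip(Y, Y_hat):
        i = int(np.argmax(true_row))    # true class from the one-hot label
        j = int(np.argmax(pred_row))    # predicted class from the softmax output
        conf[i, j] += 1
    right = int(np.trace(conf))         # correctly classified samples
    wrong = int(conf.sum() - right)     # misclassified samples
    row_sums = np.maximum(conf.sum(axis=1, keepdims=True), 1)
    confnorm = conf / row_sums          # per-class (row-wise) rates
    return confnorm, right, wrong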
Example #2
def predict(model,
            weight_file='res-like-1024-1m.wts.h5',
            test_filename='/media/XYZ_1024_128k.hdf5',
            dis_acc=True,
            dis_conf=True,
            min_snr=0):
    test_file = h5py.File(test_filename, 'r')
    X = test_file['X']       # left as an HDF5 dataset; read batch-wise during predict
    Y = test_file['Y'][:]    # one-hot labels
    Z = test_file['Z'][:]    # SNR of each sample

    global classes  # modulation class labels defined at module level
    model.load_weights(weight_file)
    Y_hat = model.predict(X, batch_size=1024, verbose=1)

    test_file.close()

    # Plot the overall confusion matrix
    cm, right, wrong = mltools.calculate_confusion_matrix(Y, Y_hat, classes)
    acc = round(1.0 * right / (right + wrong), 4)
    print('Overall Accuracy:%.2f%s / (%d + %d)' %
          (100 * acc, '%', right, wrong))

    if dis_conf:
        mltools.plot_confusion_matrix(
            cm, 'Confusion matrix of {}'.format(test_filename), labels=classes)

    # Plot accuracy at every SNR
    if dis_acc:
        print(min_snr)
        mltools.calculate_acc_cm_each_snr(Y,
                                          Y_hat,
                                          Z,
                                          classes,
                                          min_snr=min_snr)
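mltools.calculate_acc_cm_each_snr is likewise external. A rough sketch of its per-SNR accuracy part, assuming Z holds one SNR value per sample (the real helper presumably also plots a confusion matrix per SNR):

import numpy as np

def calculate_acc_cm_each_snr(Y, Y_hat, Z, classes, min_snr=0):
    # Per-SNR accuracy; sketch only, the real mltools helper may differ.
    Z = np.asarray(Z).reshape(-1)
    acc = {}
    for snr in sorted(np.unique(Z)):
        if snr < min_snr:
            continue
        idx = np.where(Z == snr)[0]                  # samples at this SNR
        true_cls = np.argmax(Y[idx], axis=1)         # one-hot labels -> class index
        pred_cls = np.argmax(Y_hat[idx], axis=1)     # softmax output -> class index
        acc[snr] = float(np.mean(true_cls == pred_cls))
        print('SNR %s dB: accuracy %.4f' % (snr, acc[snr]))
    return acc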
Example #3
def predict(model, filename="data/RML2016.10a_dict.pkl"):
    (mods, snrs, lbl), (X_train, Y_train), (X_test, Y_test), (train_idx, test_idx) = \
        rmldataset2016.load_data(filename, train_rate=0.5)
    # Plot the overall confusion matrix (batch_size and classes are module-level globals)
    test_Y_hat = model.predict(X_test, batch_size=batch_size)
    confnorm, _, _ = mltools.calculate_confusion_matrix(
        Y_test, test_Y_hat, classes)
    mltools.plot_confusion_matrix(confnorm, labels=classes)

    # Per-SNR accuracy and confusion matrices
    acc = {}
    for snr in snrs:

        # extract classes @ SNR
        # test_SNRs = map(lambda x: lbl[x][1], test_idx)
        test_SNRs = [lbl[x][1] for x in test_idx]

        test_X_i = X_test[np.where(np.array(test_SNRs) == snr)]
        test_Y_i = Y_test[np.where(np.array(test_SNRs) == snr)]

        # estimate classes
        test_Y_i_hat = model.predict(test_X_i)
        confnorm_i, cor, ncor = mltools.calculate_confusion_matrix(
            test_Y_i, test_Y_i_hat, classes)
        acc[snr] = 1.0 * cor / (cor + ncor)

        mltools.plot_confusion_matrix(
            confnorm_i,
            labels=classes,
            title="ConvNet Confusion Matrix (SNR=%d)(ACC=%2f)" %
            (snr, 100.0 * acc[snr]))

    # Save results to a pickle file for plotting later
    print(acc)
    fd = open('predictresult/cnn2_d0.5.dat', 'wb')
    pickle.dump(("CNN2", 0.5, acc), fd)
    fd.close()

    # Plot accuracy curve
    plt.plot(snrs, [acc[i] for i in snrs])
    plt.xlabel("Signal to Noise Ratio")
    plt.ylabel("Classification Accuracy")
    plt.title("CNN2 Classification Accuracy on RadioML 2016.10 Alpha")
    plt.show()
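The tuple pickled above can be reloaded later to redraw the accuracy curve without rerunning the model; a minimal sketch, assuming the same ("CNN2", 0.5, acc) layout written above:

import pickle
import matplotlib.pyplot as plt

with open('predictresult/cnn2_d0.5.dat', 'rb') as fd:
    name, _, acc = pickle.load(fd)   # ("CNN2", 0.5, acc) as dumped above

snrs = sorted(acc.keys())
plt.plot(snrs, [acc[snr] for snr in snrs], label=name)
plt.xlabel("Signal to Noise Ratio")
plt.ylabel("Classification Accuracy")
plt.legend()
plt.show()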
Example #4
    def Draw1(self):
        weight_file = 'weights-ResNet-norm.h5'

        test_filename = "F:/PyQT5学习/01.hdf5"
        test_file = h5py.File(test_filename, 'r')
        X = test_file['X'][:, :, :]
        Y = test_file['Y'][:]
        Z = test_file['Z'][:]

        classes = [
            '2FSK', '4FSK', '4ASK', '16QAM', 'MSK', 'OQPSK', 'BPSK', 'QPSK',
            '8PSK', 'LFM'
        ]

        if weight_file is not None:
            model = load_model(weight_file)  # note: model is undefined below if weight_file is None
        Y_hat = model.predict(X, batch_size=1024, verbose=1)
        test_file.close()
        # plot confusion matrix
        cm, right, wrong = mltools.calculate_confusion_matrix(
            Y, Y_hat, classes)
        acc = round(1.0 * right / (right + wrong), 4)
        print('Overall accuracy of the CNN model on the test set: %.2f%s / (%d + %d)' %
              (100 * acc, '%', right, wrong))

        plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei font so CJK labels render correctly
        plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with this font
        plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
        plt.colorbar()
        self.tick_marks = np.arange(len(classes))
        plt.xticks(self.tick_marks, classes, rotation=45)
        plt.yticks(self.tick_marks, classes)
        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
        self.draw()
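Draw1 calls self.draw() and stores self.tick_marks on the instance, so it presumably lives on a Matplotlib canvas embedded in a PyQt5 widget. A hypothetical minimal host class (the class name and constructor arguments are assumptions, not taken from the original project):

from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure

class ResultCanvas(FigureCanvas):
    # Hypothetical PyQt5 canvas that a method like Draw1 could be defined on;
    # self.draw() then repaints the figure inside the Qt widget.
    def __init__(self, parent=None, width=6, height=5, dpi=100):
        fig = Figure(figsize=(width, height), dpi=dpi)
        super().__init__(fig)
        self.setParent(parent)

As written, Draw1 plots through pyplot's current figure, so in practice the canvas figure and the pyplot state would need to refer to the same Figure object.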
Example #5
def predict(model, filename="data/RML2016.10a_dict.pkl"):
    (mods, snrs, lbl), (X_train, Y_train), (X_test, Y_test), (train_idx, test_idx) = \
        rmldataset2016.load_data(filename, train_rate=0.5)
    # Plot confusion matrix
    test_Y_hat = model.predict(X_test, batch_size=batch_size)
    confnorm, _, _ = mltools.calculate_confusion_matrix(Y_test, test_Y_hat, classes)
    mltools.plot_confusion_matrix(confnorm, labels=classes, save_filename='figure/total_confusion')

    # Per-SNR accuracy and confusion matrices
    acc = {}
    acc_mod_snr = np.zeros((len(classes), len(snrs)))
    i = 0
    for snr in snrs:

        # extract classes @ SNR
        # test_SNRs = map(lambda x: lbl[x][1], test_idx)
        test_SNRs = [lbl[x][1] for x in test_idx]

        test_X_i = X_test[np.where(np.array(test_SNRs) == snr)]
        test_Y_i = Y_test[np.where(np.array(test_SNRs) == snr)]

        # estimate classes
        test_Y_i_hat = model.predict(test_X_i)
        confnorm_i, cor, ncor = mltools.calculate_confusion_matrix(test_Y_i, test_Y_i_hat, classes)
        acc[snr] = 1.0 * cor / (cor + ncor)

        mltools.plot_confusion_matrix(
            confnorm_i,
            labels=classes,
            title="ConvNet Confusion Matrix (SNR=%d)(ACC=%.2f)" % (snr, 100.0 * acc[snr]),
            save_filename="figure/Confusion(SNR=%d)(ACC=%.2f).png" % (snr, 100.0 * acc[snr]))

        acc_mod_snr[:, i] = np.round(np.diag(confnorm_i) / np.sum(confnorm_i, axis=1), 3)
        i = i + 1

    # Plot per-modulation accuracy, at most dis_num curves per figure
    dis_num = 11
    for g in range(int(np.ceil(acc_mod_snr.shape[0] / dis_num))):
        assert 0 <= dis_num <= acc_mod_snr.shape[0]
        beg_index = g * dis_num
        end_index = np.min([(g + 1) * dis_num, acc_mod_snr.shape[0]])

        plt.figure(figsize=(12, 10))
        plt.xlabel("Signal to Noise Ratio")
        plt.ylabel("Classification Accuracy")
        plt.title("Classification Accuracy for Each Mod")

        for i in range(beg_index,end_index):
            plt.plot(snrs, acc_mod_snr[i], label=classes[i])
            # Annotate each point with its accuracy value
            for x, y in zip(snrs, acc_mod_snr[i]):
                plt.text(x, y, y, ha='center', va='bottom', fontsize=8)

        plt.legend()
        plt.grid()
        plt.savefig('figure/acc_with_mod_{}.png'.format(g+1))
        plt.close()
    # Save per-modulation accuracy for each SNR
    fd = open('predictresult/acc_for_mod_on_cldnn.dat', 'wb')
    pickle.dump(('128','cldnn', acc_mod_snr), fd)
    fd.close()

    # Save results to a pickle file for plotting later
    print(acc)
    fd = open('predictresult/CLDNN_dr0.5.dat', 'wb')
    pickle.dump(("1D", 0.5, acc), fd)
    fd.close()

    # Plot accuracy curve
    plt.plot(snrs, [acc[snr] for snr in snrs])
    plt.xlabel("Signal to Noise Ratio")
    plt.ylabel("Classification Accuracy")
    plt.title("CLDNN Classification Accuracy on RadioML 2016.10 Alpha")
    plt.tight_layout()
    plt.savefig('figure/each_acc.png')
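For completeness, a hedged sketch of how this predict could be driven end to end. batch_size and classes are the module-level globals the function expects, the weights path is a placeholder, and the driver is assumed to sit in the same module as predict() so those globals are visible inside it:

import os
from keras.models import load_model

# Hypothetical driver; values below are assumptions, not from the original project.
batch_size = 400                                   # assumed prediction batch size
classes = ['8PSK', 'AM-DSB', 'AM-SSB', 'BPSK', 'CPFSK', 'GFSK',
           'PAM4', 'QAM16', 'QAM64', 'QPSK', 'WBFM']   # RML2016.10a modulations

os.makedirs('figure', exist_ok=True)               # output directories used above
os.makedirs('predictresult', exist_ok=True)

model = load_model('weights/cldnn.h5')             # placeholder path to a saved Keras model
predict(model, filename="data/RML2016.10a_dict.pkl")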