Example #1
def createVGGNetNetwork():
    layers = [ConvLayer((1,1,32,32),(5,5),32, zero_padding = 2),
            ConvLayer((32,1,32,32),(5,5),1,zero_padding=2),
            MaxPollingLayer((32,1,32,32),filter_shape=(2,2)),
            ConvLayer((32,1,16,16),(5,5),2,zero_padding = 2),
            ConvLayer((64,1,16,16),(5,5),1,zero_padding=2), # I hope this line is correct
            MaxPollingLayer((64,1,16,16),filter_shape=(2,2)),
            ConvLayer((64,1,8,8),(5,5),2,zero_padding=2),
            ConvLayer((128,1,8,8),(5,5),1,zero_padding=2),
            MaxPollingLayer((128,1,8,8),filter_shape = (2,2)),
            FCLayer(2048,128,'sigmoid'),
            FCLayer(128,128,'sigmoid'),
            FCLayer(128,43,'sigmoid')]
    cnn = ConvNet(layers)
    return cnn
Example #2
 def __init__(self, imageList, mode, groundTruthFile=None):
     self.mode = mode
     self.cnn = cn.ConvNet()
     self.testRecord = None
     self.idx = -1
     self.imageList = [x.strip() for x in open(imageList)]
     self.groundTruth = cu.loadBoxIndexFile(groundTruthFile)
     #self.imageList = self.rankImages()
     #self.imageList = self.imageList[0:10]
     allImgs = set([x.strip() for x in open(config.get('allImagesList'))])
     self.negativeSamples = list(
         allImgs.difference(set(self.groundTruth.keys())))
     self.negativeEpisode = False
     if self.mode == 'train':
         self.negativeProbability = config.getf('negativeEpisodeProb')
         random.shuffle(self.imageList)
     self.loadNextEpisode()
Example #3
def simulation(linear_code, top_config, net_config, simutimes_range, target_err_bits_num, batch_size):
    SNRset = generate_snr_set(top_config)
    bp_iter_num = top_config.BP_iter_nums_simu
    noise_io = DataIO.NoiseIO(top_config, False, None, rng_seed=0)
    denoising_net_num = top_config.cnn_net_num
    model_id = top_config.model_id

    G_matrix = linear_code.G_matrix
    H_matrix = linear_code.H_matrix
    K, N = np.shape(G_matrix)

    ## build BP decoding network
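    # bp_iter_num is expected to hold one BP iteration count per decoding stage
    # (the initial BP pass plus one pass after each CNN denoiser), hence the
    # length check of denoising_net_num + 1 below.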
    if np.size(bp_iter_num) != denoising_net_num + 1:
        print('>>> Error: the length of bp_iter_num is not correct! (Iterative_BP_CNN.py)')
        print(bp_iter_num, np.size(bp_iter_num))
        print(denoising_net_num)
        exit(0)
    bp_decoder = BP_Decoder.BP_NetDecoder(H_matrix, batch_size)

    conv_net = {}
    denoise_net_in = {}
    denoise_net_out = {}
    # build network for each CNN denoiser,
    for net_id in range(denoising_net_num):
        if net_id > 0:
            conv_net[net_id] = conv_net[0]
            denoise_net_in[net_id] = denoise_net_in[0]
            denoise_net_out[net_id] = denoise_net_out[0]
        else:
            conv_net[net_id] = ConvNet.ConvNet(net_config, None, net_id)
            denoise_net_in[net_id], denoise_net_out[net_id], _ = conv_net[net_id].build_network()
    # init graph
    init = tf.global_variables_initializer()
    sess = tf.Session()
    print('Open a tf session!')
    sess.run(init)
Example #4

        param_group['lr'] = lr


device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
N_EPOCH = 80
BATCH_SIZE = 64
NUM_CLASSES = 2
LEARNING_RATE = 0.01
TRAIN_SPECIES = 'whole'
TEST_SPECIES = 'new'

WriteFile = open("./results/test_new/%s_test.rst" % TEST_SPECIES, "w")
rst = []
loss_list = []
accuracy_list = []
model = ConvNet().to(device)
model = model.double()
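# Per-class weights for CrossEntropyLoss: class 0 is weighted 4x class 1, presumably
# to compensate for class imbalance between the positive and negative sequence sets.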
weights = [4.0, 1.0]
class_weights = torch.DoubleTensor(weights).to(device)
criterion = nn.CrossEntropyLoss(weight=class_weights)
optimizer = torch.optim.Adagrad(model.parameters(), lr=LEARNING_RATE)
train_dataset = DriveData("./dataset/sequences/%s_pos.fa" % TRAIN_SPECIES,
                          "./dataset/sequences/%s_neg.fa" % TRAIN_SPECIES)
test_dataset = DriveData("./dataset/sequences/%s_pos.fa" % TEST_SPECIES,
                         "./dataset/sequences/%s_neg.fa" % TEST_SPECIES)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=BATCH_SIZE,
                                           num_workers=8,
                                           shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=BATCH_SIZE,
Example #5
def main(argv):
    # folder for saving
    subfold = argv[1]
    if not os.path.exists(subfold):
        os.mkdir(subfold)
        os.mkdir(os.path.join(subfold, "features"))

    # load data
    X_cnn_raw = []
    labels_cnn_raw = []
    num_grp = 3
    for i in range(num_grp):
        fname = '../LRG-fits/data/lrg_171020/sample-lrg-train-120-120-c3-gr{0}.pkl'.format(
            i)
        with open(fname, 'rb') as fp:
            datadict = pickle.load(fp)
            X_cnn_raw.append(datadict['data'])
            labels_cnn_raw.append(datadict['label'])
        time.sleep(3)

    X_test = []
    labels_test = []
    fname = '../LRG-fits/data/lrg_171020/sample-lrg-test-120-120-c3.pkl'
    with open(fname, 'rb') as fp:
        datadict = pickle.load(fp)
        X_test.append(datadict['data'])
        labels_test.append(datadict['label'])

    # Combine and normalize
    sample_mat = np.vstack(X_cnn_raw)
    del (X_cnn_raw)
    labels_cnn = np.hstack(labels_cnn_raw)
    del (labels_cnn_raw)

    # sample_mat = np.nan_to_num(sample_mat)

    rs = 120
    '''
    with open("../nets/norm_params.pkl", 'rb') as fp:
        normparam = pickle.load(fp)
    X_max = normparam["X_max"]
    X_min = normparam["X_min"]
    X_mean = normparam["X_mean"]
    X_train_cnn = (sample_mat - X_min) / (X_max - X_min)
    # X_norm = sample_mat
    X_w_cnn = X_train_cnn - X_mean
    X_tr_cnn = X_w_cnn.reshape(-1, rs, rs, 1).astype('float32')
    '''
    X_tr_cnn = sample_mat.reshape(-1, rs, rs, 1).astype('float32')

    idx = np.random.permutation(len(labels_cnn))
    numsamples = 100000
    X_in = X_tr_cnn[idx[0:numsamples], :, :, :]
    # get labels
    X_out = labels_cnn[idx[0:numsamples]].astype('int32')

    mask_layer1 = [100, 100, 100, 100, 0, 1, 100, 100]
    data_layer1, label_layer1 = sub2triple(data=X_in,
                                           label=X_out,
                                           mask=mask_layer1)
    label_layer1_hotpot = vec2onehot(label=label_layer1, numclass=2)

    numclass = 2
    encode_nodes = 64
    cnn = ConvNet.ConvNet(input_shape=data_layer1.shape,
                          kernel_size=[3, 3, 3, 3, 3],
                          kernel_num=[8, 8, 16, 32, 32],
                          fc_nodes=[],
                          encode_nodes=encode_nodes,
                          padding=('SAME', 'SAME'),
                          stride=(2, 2),
                          numclass=numclass,
                          sess=None,
                          name=None)
    cnn.cae_build()
    cnn.cnn_build(learning_rate=0.001)  # In order to init the weights

    foldname = "./nets/pretrain-171020-2cls/"
    name = "pretrain-120-171020-2cls.pkl"
    cnn.sess, cnn.name = utils.load_net(os.path.join(foldname, name))

    # train
    num_epochs = 100
    learning_rate = 0.001
    batch_size = 100
    droprate = 0.5
    cnn.cnn_train(data=data_layer1,
                  label=label_layer1_hotpot,
                  num_epochs=num_epochs,
                  learning_rate=learning_rate,
                  batch_size=batch_size,
                  droprate=droprate)

    # save features
    fname = "code_l5.pkl"
    folder = "{0}/features/".format(subfold)
    if not os.path.exists(folder):
        os.mkdir(folder)
    numsample = data_layer1.shape[0]
    numone = numsample // 10
    code = np.zeros((numsample, encode_nodes))
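    # encode in 10 chunks, presumably to keep the memory footprint of cae_encode bounded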
    for i in range(10):
        code[i * numone:(i + 1) * numone] = cnn.cae_encode(
            data_layer1[i * numone:(i + 1) * numone, :, :, :])
    # code = cnn.cae_encode(data_layer1)
    label = label_layer1
    with open(os.path.join(folder, fname), 'wb') as fp:
        code_dict = {"code": code, "label": label}
        pickle.dump(code_dict, fp)

    # save net
    foldname = "{0}/net_l5_140".format(subfold)
    name = "net_l5.pkl"
    netname = "model-l5.ckpt"
    if os.path.exists(foldname):
        os.system("rm -r %s" % (foldname))
    os.mkdir(foldname)
    cnn.cnn_save(namepath=os.path.join(foldname, name),
                 netpath=os.path.join(foldname, netname))
Example #6
def generate_noise_samples(code, top_config, train_config, net_config, gen_data_for, bp_iter_num, num_of_cnn, model_id,
                           noise_io, intf_io):
    global batch_size_each_SNR, total_batches
    G_matrix = code.G_matrix
    H_matrix = code.H_matrix

    top_config.SNR_set_gen_training = generate_snr_set(top_config)
    print('SNR set for generating training data: %s' % np.array2string(top_config.SNR_set_gen_training))

    if gen_data_for == 'Training':
        batch_size_each_SNR = int(train_config.training_minibatch_size // top_config.SNR_set_size)
        total_batches = int(train_config.training_sample_num // train_config.training_minibatch_size)
    elif gen_data_for == 'Test':
        batch_size_each_SNR = int(train_config.test_minibatch_size // top_config.SNR_set_size)
        total_batches = int(train_config.test_sample_num // train_config.test_minibatch_size)
    else:
        print('>>> Invalid objective of data generation! (ibc.py)')
        exit(0)

    # BP iteration
    if np.size(bp_iter_num) != num_of_cnn + 1:
        print('>>> Error: the length of bp_iter_num is not correct! (ibc.py)')
        exit(0)
    bp_decoder = BP_Decoder.BP_NetDecoder(H_matrix, batch_size_each_SNR)

    conv_net = {}
    denoise_net_in = {}
    denoise_net_out = {}
    intf_net_out = {}

    for net_id in range(num_of_cnn):  # TODO: Doesn't work if num_of_cnn=0
        conv_net[net_id] = ConvNet.ConvNet(net_config, None, net_id)
        denoise_net_in[net_id], denoise_net_out[net_id], intf_net_out[net_id] = conv_net[net_id].build_network()

    # Init graph
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # Restore cnn networks before the target CNN        # TODO: Doesn't work if num_of_cnn=0
    for net_id in range(num_of_cnn):  # TODO: Why restore here?
        conv_net[net_id].restore_network_with_model_id(sess, net_config.total_layers, model_id[0:(net_id + 1)])

    start = datetime.datetime.now()

    if gen_data_for == 'Training':
        if not os.path.isdir(train_config.training_folder):
            os.mkdir(train_config.training_folder)
        fout_est_noise = open(train_config.training_feature_file, 'wb')
        fout_real_noise = open(train_config.training_noise_label_file, 'wb')
        fout_real_intf = open(train_config.training_intf_label_file, 'wb')
    elif gen_data_for == 'Test':
        if not os.path.isdir(train_config.test_folder):
            os.mkdir(train_config.test_folder)
        fout_est_noise = open(train_config.test_feature_file, 'wb')
        fout_real_noise = open(train_config.test_noise_label_file, 'wb')
        fout_real_intf = open(train_config.test_intf_label_file, 'wb')
    else:
        print('>>> Invalid objective of data generation! (ibc.py)')
        exit(0)

    # Generating data
    for ik in range(total_batches):
        for SNR in top_config.SNR_set_gen_training:
            x_bits, _, _, ch_noise, intf_labels, y_receive, LLR = lbc.encode_and_transmit(G_matrix, SNR,
                                                                                          batch_size_each_SNR, noise_io,
                                                                                          intf_io, top_config)

            for iter in range(0, num_of_cnn + 1):
                # BP decoder
                u_BP_decoded = bp_decoder.decode(LLR.astype(np.float32), bp_iter_num[iter])

                # CNN
                if iter != num_of_cnn:
                    res_noise_power = conv_net[iter].get_res_noise_power(model_id).get(np.float32(SNR))
                    LLR, predicted_intf_ind = denoising_and_calc_LLR_awgn(res_noise_power, y_receive, u_BP_decoded,
                                                                          denoise_net_in[iter], denoise_net_out[iter],
                                                                          intf_net_out[iter], sess)

            # reconstruct noise
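            # u_BP_decoded is in {0, 1}; u * (-2) + 1 re-maps it to the (assumed BPSK) symbols
            # {+1, -1}, so subtracting the re-modulated word from y_receive gives the noise estimate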
            noise_before_cnn = y_receive - (u_BP_decoded * (-2) + 1)
            noise_before_cnn = noise_before_cnn.astype(np.float32)
            ch_noise = ch_noise.astype(np.float32)
            intf_labels = intf_labels.astype(np.float32)
            noise_before_cnn.tofile(fout_est_noise)  # write features to file
            ch_noise.tofile(fout_real_noise)  # write noise labels to file
            intf_labels.tofile(fout_real_intf)  # write interference labels to file

        if ik % 100 == 0:
            print("%d batches finished!" % ik)
            section = datetime.datetime.now()
            print("Time: %ds" % (section - start).seconds)

    fout_real_noise.close()
    fout_est_noise.close()
    fout_real_intf.close()

    sess.close()
    end = datetime.datetime.now()

    print("Time: %ds" % (end - start).seconds)
    print("Finish generating %s data" % gen_data_for)
Example #7
def simulation_colored_noise(linear_code,
                             top_config,
                             net_config,
                             simutimes_range,
                             target_err_bits_num,
                             batch_size,
                             BP_layers,
                             train_epoch=25,
                             use_weight_loss=False):
    # target_err_bits_num: the simulation stops if the number of bit errors reaches the target.
    # simutimes_range: [min_simutimes, max_simutimes]

    ## load configurations from top_config
    SNRset = top_config.eval_SNRs
    bp_iter_num = top_config.BP_iter_nums_simu
    noise_io = DataIO.NoiseIO(
        top_config.N_code,
        False,
        None,
        top_config.cov_1_2_file_simu,
        rng_seed=0)  # cov_1_2_file_simu is the corresponding noise file under the Noise folder
    denoising_net_num = top_config.cnn_net_number
    model_id = top_config.model_id

    G_matrix = linear_code.G_matrix
    H_matrix = linear_code.H_matrix
    K, N = np.shape(G_matrix)

    ## build BP decoding network
    if np.size(bp_iter_num) != denoising_net_num + 1:
        print('Error: the length of bp_iter_num is not correct!')
        exit(0)
    bp_decoder = BP_Decoder.BP_NetDecoder(H_matrix, batch_size, top_config,
                                          BP_layers, 0, use_weight_loss)
    # bp_decoder_after_cnn = BP_Decoder.BP_NetDecoder(H_matrix, batch_size, top_config, BP_layers, 1)
    # bp_decoder = bp_decoder_before_cnn  # default

    res_N = top_config.N_code
    res_K = top_config.K_code
    res_BP_layers = bp_decoder.BP_layers

    ## build denoising network
    conv_net = {}
    denoise_net_in = {}
    denoise_net_out = {}
    # build network for each CNN denoiser,
    if net_config.use_conv_net:  # only load the denoisers if the conv net is used
        for net_id in range(denoising_net_num):
            if top_config.same_model_all_nets and net_id > 0:
                conv_net[net_id] = conv_net[0]
                denoise_net_in[net_id] = denoise_net_in[0]
                denoise_net_out[net_id] = denoise_net_out[0]
            else:  # default branch
                # conv_net[net_id] = ConvNet.ConvNet(net_config, None, net_id)  # create a residual-noise network object
                conv_net[net_id] = ConvNet.ConvNet(net_config, top_config,
                                                   net_id)  # create a residual-noise network object
                denoise_net_in[net_id], denoise_net_out[net_id] = conv_net[
                    net_id].build_network()  # build the network; returns its input and output tensors
        # init graph
        init = tf.global_variables_initializer()
        sess = tf.Session()
        print('Open a tf session!')
        sess.run(init)
        # restore denoising network
        for net_id in range(denoising_net_num):
            if top_config.same_model_all_nets and net_id > 0:
                break
            conv_net[net_id].restore_network_with_model_id(
                sess, net_config.total_layers,
                model_id[0:(net_id + 1)])  # restore the previously trained network

    ## initialize simulation times
    max_simutimes = simutimes_range[1]
    min_simutimes = simutimes_range[0]
    max_batches, residual_times = np.array(divmod(max_simutimes, batch_size),
                                           np.int32)
    if residual_times != 0:
        max_batches += 1

    ## generate out ber file
    bp_str = np.array2string(bp_iter_num,
                             separator='_',
                             formatter={'int': lambda d: "%d" % d})
    bp_str = bp_str[1:(len(bp_str) - 1)]

    if net_config.use_conv_net and bp_decoder.use_cnn_res_noise:
        ber_file = format(
            '%s/bp_model/%s_%s/BP%s/BER(%d_%d)_BP(%s)_BPDNN%s-CNN-BPDNN%s' %
            (net_config.model_folder, N, K, bp_decoder.BP_layers, N, K, bp_str,
             bp_decoder.BP_layers, bp_decoder.BP_layers))
        f_simulation_time = format(
            '%s/bp_model/%s_%s/BP%s/simulation_time(%d_%d)_BP(%s)_BPDNN%s-CNN-BPDNN%s'
            % (net_config.model_folder, N, K, bp_decoder.BP_layers, N, K,
               bp_str, bp_decoder.BP_layers, bp_decoder.BP_layers))
    elif bp_decoder.use_train_bp_net or bp_decoder.train_bp_network:
        if use_weight_loss:
            ber_file = format(
                '%s/bp_model/%s_%s/BP%s/BER(%d_%d)_BP(%s)_BP%s_epoch%s_weight_loss'
                % (net_config.model_folder, N, K, bp_decoder.BP_layers, N, K,
                   bp_str, bp_decoder.BP_layers, train_epoch))
            f_simulation_time = format(
                '%s/bp_model/%s_%s/BP%s/simulation_time(%d_%d)_BP(%s)_BP%s_epch%s_weight_loss'
                % (net_config.model_folder, N, K, bp_decoder.BP_layers, N, K,
                   bp_str, bp_decoder.BP_layers, train_epoch))
        else:
            ber_file = format(
                '%s/bp_model/%s_%s/BP%s/BER(%d_%d)_BP(%s)_BP%s_epoch%s' %
                (net_config.model_folder, N, K, bp_decoder.BP_layers, N, K,
                 bp_str, bp_decoder.BP_layers, train_epoch))
            f_simulation_time = format(
                '%s/bp_model/%s_%s/BP%s/simulation_time(%d_%d)_BP(%s)_BP%s_epch%s'
                % (net_config.model_folder, N, K, bp_decoder.BP_layers, N, K,
                   bp_str, bp_decoder.BP_layers, train_epoch))

    else:
        ber_file = format('%s/bp_model/%s_%s/BP%s/BER(%d_%d)_BP(%s)_LLRBP%s' %
                          (net_config.model_folder, N, K, bp_decoder.BP_layers,
                           N, K, bp_str, bp_decoder.BP_layers))
        f_simulation_time = format(
            '%s/bp_model/%s_%s/BP%s/simulation_time(%d_%d)_BP(%s)_LLRBP%s' %
            (net_config.model_folder, N, K, bp_decoder.BP_layers, N, K, bp_str,
             bp_decoder.BP_layers))

    if top_config.corr_para != top_config.corr_para_simu:  # this means we are testing the model robustness to correlation level.
        ber_file = format('%s_SimuCorrPara%.2f' %
                          (ber_file, top_config.corr_para_simu))
    if top_config.same_model_all_nets:
        ber_file = format('%s_SameModelAllNets' % ber_file)
    if top_config.update_llr_with_epdf:
        ber_file = format('%s_llrepdf' % ber_file)
    if denoising_net_num > 0:
        model_id_str = np.array2string(model_id,
                                       separator='_',
                                       formatter={'int': lambda d: "%d" % d})
        model_id_str = model_id_str[1:(len(model_id_str) - 1)]
        ber_file = format('%s_model%s' % (ber_file, model_id_str))
    if np.size(SNRset) == 1:
        ber_file = format('%s_%.1fdB' % (ber_file, SNRset[0]))

    ber_file = format('%s.txt' % ber_file)
    fout_ber = open(ber_file, 'wt')
    simlation_time_file = format('%s.txt' % f_simulation_time)
    fout_simulation_time = open(simlation_time_file, 'wt')

    ## simulation starts
    start = datetime.datetime.now()
    total_simulation_times = 0
    residual_simulation_times = 0
    for SNR in SNRset:
        real_batch_size = batch_size
        # simulation part
        bit_errs_iter = np.zeros(denoising_net_num + 1, dtype=np.int32)
        actual_simutimes = 0
        rng = np.random.RandomState(1)  # pseudo-random seed
        noise_io.reset_noise_generator()  # reset the noise generator's random seed
        for ik in range(0, max_batches):  # loop over max_batches (6667)

            if ik == max_batches - 1 and residual_times != 0:  # last batch is a partial batch (residual_times is 0 by default here)
                real_batch_size = residual_times
                residual_simulation_times = residual_simulation_times + 1
                fout_simulation_time.write('Less than a full batch_size; actual batch_size is: ' +
                                           str(real_batch_size) + '\n')
                print('Less than a full batch_size; actual batch_size is: ' +
                      str(real_batch_size) + '\n')
            x_bits, u_coded_bits, s_mod, ch_noise, y_receive, LLR, ch_noise_sigma = lbc.encode_and_transmission(
                G_matrix, SNR, real_batch_size, noise_io, rng)  #
            # ------------------------------------------------------------
            noise_power = np.mean(np.square(ch_noise))
            practical_snr = 10 * np.log10(1 / (noise_power * 2.0))
            if ik % 1000 == 0:
                print('Batch %d in total %d batches.' % (ik, int(max_batches)),
                      end=' ')
                print('Practical EbN0: %.2f' % practical_snr)

            for iter in range(0, denoising_net_num):  # denoising_net_num == 1
                # if 0 == iter:
                #     bp_decoder = bp_decoder_before_cnn
                # else:
                #     bp_decoder = bp_decoder_after_cnn
                # BP decoding; the second argument bp_iter_num has no effect here because
                # the number of iterations is determined by the BP_layers variable above
                u_BP_decoded = bp_decoder.decode(
                    LLR.astype(np.float32),
                    bp_iter_num[iter])  # the BP decoder takes the LLRs and returns the decoded codeword
                # !!! when iter == 0 the recorded BER is that of plain BP; when iter == 1 it is that of BP-CNN-BP
                # first check whether the conv net is used
                if net_config.use_conv_net and iter < denoising_net_num:  # denoising_net_num == 1: when iter == 0 the CNN is used for noise estimation; otherwise plain BP decoding is used
                    if top_config.update_llr_with_epdf:
                        prob = conv_net[iter].get_res_noise_pdf(
                            model_id, res_N, res_K,
                            res_BP_layers).get(np.float32(SNR))
                        LLR = denoising_and_calc_LLR_epdf(
                            prob, y_receive, u_BP_decoded,
                            denoise_net_in[iter], denoise_net_out[iter], sess)
                    elif bp_decoder.use_cnn_res_noise:  # by default the else branch below is taken
                        res_noise_power = conv_net[iter].get_res_noise_power(
                            model_id, SNRset, res_N, res_K, res_BP_layers).get(
                                np.float32(SNR))  # get the residual noise power, apparently read from a stored file
                        LLR = denoising_and_calc_LLR_awgn(
                            res_noise_power, y_receive, u_BP_decoded,
                            denoise_net_in[iter], denoise_net_out[iter],
                            sess)  # use the CNN noise estimate to compute the LLR input for the next BP round
                    else:
                        res_noise_power = conv_net[iter].get_res_noise_power(
                            model_id, SNRset, res_N, res_K, res_BP_layers).get(
                                np.float32(SNR))  # get the residual noise power, apparently read from a stored file
                        LLR = denoising_and_calc_LLR_awgn(
                            res_noise_power, y_receive, u_BP_decoded,
                            denoise_net_in[iter], denoise_net_out[iter],
                            sess)  # use the CNN noise estimate to compute the LLR input for the next BP round
                        noise_after_cnn = y_receive - (u_BP_decoded * (-2) + 1)
                        # noise_after_cnn = sess.run(net_out, feed_dict={net_in: noise_before_cnn})
                        # calculate the LLR for next BP decoding
                        s_mod_plus_res_noise = y_receive - noise_after_cnn
                        LLR = s_mod_plus_res_noise * 2.0 / res_noise_power
                output_x = linear_code.dec_src_bits(
                    u_BP_decoded)  # the first K bits are the information bits before encoding
                shape_x, shape_y = output_x.shape
                # for i in range(shape_x):
                #     if (np.any(output_x[i] - x_bits[i])):
                #         bit_errs_iter[iter] += 1
                bit_errs_iter[iter] += np.sum(
                    output_x !=
                    x_bits)  # count the differing bits (each differing bit position counts as 1)
                pass
                # The same codeword contributes two BER records: one for BP only and one for
                # BP+CNN+BP. The BER after BP+CNN+BP is generally better than after BP alone.

            actual_simutimes += real_batch_size
            if bit_errs_iter[
                    denoising_net_num] >= target_err_bits_num and actual_simutimes >= min_simutimes:  # stop once enough bit errors and simulation times have accumulated
                break
        print('%d bits are simulated!, batch_size=%d' %
              (actual_simutimes * K, real_batch_size))

        total_simulation_times += actual_simutimes

        ber_iter = np.zeros(denoising_net_num + 1, dtype=np.float64)
        fout_ber.write(str(SNR) + '\t')
        for iter in range(0, denoising_net_num + 1):  # 1+1 = 2
            ber_iter[iter] = bit_errs_iter[iter] / float(K * actual_simutimes)
            fout_ber.write(
                str(ber_iter[iter]) + '\t' + str(bit_errs_iter[iter]) + '\t')
            print(ber_iter[iter])
        fout_ber.write('\n')
        # break

    fout_ber.close()
    end = datetime.datetime.now()
    print('Time: %ds' % (end - start).seconds)
    print("end\n")

    fout_simulation_time.write(
        str(total_simulation_times) + '\t' + str((end - start).seconds))
    fout_simulation_time.close()

    if net_config.use_conv_net:
        sess.close()
    print('Close the tf session!')
Example #8
optimizer = tf.keras.optimizers.Adam()  # the optimizer for the training
# optimizer = tf.keras.optimizers.SGD(0.1)  # alternative optimizer for the training

train_loss = tf.keras.metrics.Mean(name="train_loss")
test_loss = tf.keras.metrics.Mean(name="test_loss")
valid_loss = tf.keras.metrics.Mean(name="valid_loss")
#accuracy
train_acc = tf.keras.metrics.CategoricalAccuracy(name="train_accuracy")
test_acc = tf.keras.metrics.CategoricalAccuracy(name="test_accuracy")
valid_acc = tf.keras.metrics.CategoricalAccuracy(name="valid_accuracy")

IMAGE_SIZE = (139, 139)

# neural network model
IMG_SHAPE = IMAGE_SIZE + (1, )
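# ConvNet(7, IMG_SHAPE): presumably 7 output classes on single-channel 139x139 inputs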
model = ConvNet(7, IMG_SHAPE)
model.compile(optimizer=optimizer,
              loss="categorical_crossentropy",
              metrics=["categorical_accuracy"])

checkpoint = tf.train.Checkpoint(step=tf.Variable(1),
                                 optimizer=optimizer,
                                 model=model)
manager = tf.train.CheckpointManager(checkpoint,
                                     directory="saved_model/",
                                     max_to_keep=2)


def resize_rescale(inputs, label):
    """
    preprocessing for test images 
Example #9
def createYannLeCunNetwork():
    return ConvNet(sign_recognition=True)
    
Example #10
def simulation_colored_noise(linear_code, top_config, net_config, simutimes_range, target_err_bits_num, batch_size):
# target_err_bits_num: the simulation stops if the number of bit errors reaches the target.
# simutimes_range: [min_simutimes, max_simutimes]

    ## load configurations from top_config
    SNRset = top_config.eval_SNRs
    bp_iter_num = top_config.BP_iter_nums_simu
    noise_io = DataIO.NoiseIO(top_config.N_code, False, None, top_config.cov_1_2_file_simu, rng_seed=0)
    denoising_net_num = top_config.cnn_net_number
    model_id = top_config.model_id

    G_matrix = linear_code.G_matrix
    H_matrix = linear_code.H_matrix
    K, N = np.shape(G_matrix)

    ## build BP decoding network
    if np.size(bp_iter_num) != denoising_net_num + 1:
        print('Error: the length of bp_iter_num is not correct! 1!\nnp.size(bp_iter_num)='+str(np.size(bp_iter_num))+'\nnet_id_tested + 1='+str(denoising_net_num + 1))
        exit(0)
    bp_decoder = BP_Decoder.BP_NetDecoder(H_matrix, batch_size)

    ## build denoising network
    conv_net = {}
    denoise_net_in = {}
    denoise_net_out = {}
    # build network for each CNN denoiser,
    for net_id in range(denoising_net_num):
        if top_config.same_model_all_nets and net_id > 0:
            conv_net[net_id] = conv_net[0]
            denoise_net_in[net_id] = denoise_net_in[0]
            denoise_net_out[net_id] = denoise_net_out[0]
        else:
            conv_net[net_id] = ConvNet.ConvNet(net_config, None, net_id)
            denoise_net_in[net_id], denoise_net_out[net_id] = conv_net[net_id].build_network()
    # init graph
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session()
    print('Open a tf session!')
    sess.run(init)
    # restore denoising network
    for net_id in range(denoising_net_num):
        if top_config.same_model_all_nets and net_id > 0:
            break
        conv_net[net_id].restore_network_with_model_id(sess, net_config.total_layers, model_id[0:(net_id+1)])

    ## initialize simulation times
    max_simutimes = simutimes_range[1]
    min_simutimes = simutimes_range[0]
    max_batches, residual_times = np.array(divmod(max_simutimes, batch_size), np.int32)
    if residual_times!=0:
        max_batches += 1

    ## generate out ber file
    bp_str = np.array2string(bp_iter_num, separator='_', formatter={'int': lambda d: "%d" % d})
    bp_str = bp_str[1:(len(bp_str) - 1)]
    ber_file = format('%sBER(%d_%d)_BP(%s)' % (net_config.model_folder, N, K, bp_str))

    if top_config.corr_para != top_config.corr_para_simu:  # this means we are testing the model robustness to correlation level.
        ber_file = format('%s_SimuCorrPara%.2f' % (ber_file, top_config.corr_para_simu))
    if top_config.same_model_all_nets:
        ber_file = format('%s_SameModelAllNets' % ber_file)
    if top_config.update_llr_with_epdf:
        ber_file = format('%s_llrepdf' % ber_file)
    if denoising_net_num > 0:
        model_id_str = np.array2string(model_id, separator='_', formatter={'int': lambda d: "%d" % d})
        model_id_str = model_id_str[1:(len(model_id_str)-1)]
        ber_file = format('%s_model%s' % (ber_file, model_id_str))
    if np.size(SNRset) == 1:
        ber_file = format('%s_%.1fdB' % (ber_file, SNRset[0]))

    ber_file = format('%s.txt' % ber_file)
    fout_ber = open(ber_file, 'wt')

    ## simulation starts
    start = datetime.datetime.now()
    for SNR in SNRset:
        real_batch_size = batch_size
        # simulation part
        bit_errs_iter = np.zeros(denoising_net_num + 1, dtype=np.int32)
        actual_simutimes = 0
        rng = np.random.RandomState(0)
        noise_io.reset_noise_generator()
        for ik in range(0, max_batches):
            print('Batch %d in total %d batches.' % (ik, int(max_batches)), end=' ')
            if ik == max_batches - 1 and residual_times != 0:
                real_batch_size = residual_times
            x_bits, _, s_mod, ch_noise, y_receive, LLR = lbc.encode_and_transmission(G_matrix, SNR, real_batch_size, noise_io, rng)
            noise_power = np.mean(np.square(ch_noise))
            practical_snr = 10*np.log10(1 / (noise_power * 2.0))
            print('Practical EbN0: %.2f' % practical_snr)

            for iter in range(0, denoising_net_num+1):
                # BP decoding
                u_BP_decoded = bp_decoder.decode(LLR.astype(np.float32), bp_iter_num[iter])

                if iter < denoising_net_num:
                    if top_config.update_llr_with_epdf:
                        prob = conv_net[iter].get_res_noise_pdf(model_id).get(np.float32(SNR))
                        LLR = denoising_and_calc_LLR_epdf(prob, y_receive, u_BP_decoded, denoise_net_in[iter], denoise_net_out[iter], sess)
                    else:
                        res_noise_power = conv_net[iter].get_res_noise_power(model_id, SNRset).get(np.float32(SNR))
                        LLR = denoising_and_calc_LLR_awgn(res_noise_power, y_receive, u_BP_decoded, denoise_net_in[iter], denoise_net_out[iter], sess)
                output_x = linear_code.dec_src_bits(u_BP_decoded)
                bit_errs_iter[iter] += np.sum(output_x != x_bits)

            actual_simutimes += real_batch_size
            if bit_errs_iter[denoising_net_num] >= target_err_bits_num and actual_simutimes >= min_simutimes:
                break
        print('%d bits are simulated!' % (actual_simutimes * K))

        ber_iter = np.zeros(denoising_net_num+1, dtype=np.float64)
        fout_ber.write(str(SNR) + '\t')
        for iter in range(0, denoising_net_num+1):
            ber_iter[iter] = bit_errs_iter[iter] / float(K * actual_simutimes)
            fout_ber.write(str(ber_iter[iter]) + '\t')
        fout_ber.write('\n')

    fout_ber.close()
    end = datetime.datetime.now()
    print('Time: %ds' % (end-start).seconds)
    print("end\n")
    sess.close()
    print('Close the tf session!')
Example #11
def analyze_residual_noise(linear_code, top_config, net_config, simutimes, batch_size):

    ## load some configurations from top_config
    net_id_tested = top_config.currently_trained_net_id
    model_id = top_config.model_id
    bp_iter_num = top_config.BP_iter_nums_gen_data[0:(net_id_tested + 1)]
    noise_io = DataIO.NoiseIO(top_config.N_code, False, None, top_config.cov_1_2_file)
    SNRset = top_config.eval_SNRs

    G_matrix = linear_code.G_matrix
    H_matrix = linear_code.H_matrix
    _, N = np.shape(G_matrix)

    max_batches, residual_times = np.array(divmod(simutimes, batch_size), np.int32)
    print('Real simutimes: %d' % simutimes)
    if residual_times != 0:
        max_batches += 1

    # build BP decoding network
    if np.size(bp_iter_num) != net_id_tested + 1:
        print('Error: the length of bp_iter_num is not correct! 3!\nnp.size(bp_iter_num)='+str(np.size(bp_iter_num))+'\nnet_id_tested + 1='+str(net_id_tested + 1))
        exit(0)
    bp_decoder = BP_Decoder.BP_NetDecoder(H_matrix, batch_size)

    # build denoising network
    conv_net = {}
    denoise_net_in = {}
    denoise_net_out = {}

    # build network for each CNN denoiser,
    for net_id in range(net_id_tested+1):
        conv_net[net_id] = ConvNet.ConvNet(net_config, None, net_id)
        denoise_net_in[net_id], denoise_net_out[net_id] = conv_net[net_id].build_network()

    # init graph
    init = tf.compat.v1.global_variables_initializer()
    sess = tf.compat.v1.Session()
    sess.run(init)

    # restore denoising network
    for net_id in range(net_id_tested + 1):
        conv_net[net_id].restore_network_with_model_id(sess, net_config.total_layers, model_id[0:(net_id+1)])

    model_id_str = np.array2string(model_id, separator='_', formatter={'int': lambda d: "%d" % d})
    model_id_str = model_id_str[1:(len(model_id_str) - 1)]
    loss_file_name = format("%sresidual_noise_property_netid%d_model%s.txt" % (net_config.residual_noise_property_folder, net_id_tested, model_id_str))
    fout_loss = open(loss_file_name, 'wt')

    start = datetime.datetime.now()
    for SNR in SNRset:
        noise_io.reset_noise_generator()
        real_batch_size = batch_size
        # simulation part
        loss = 0.0
        prob = np.ones(0)
        for ik in range(0, max_batches):
            print("Batch id: %d" % ik)
            if ik == max_batches - 1 and residual_times != 0:
                real_batch_size = residual_times
            x_bits, _, s_mod, channel_noise, y_receive, LLR = lbc.encode_and_transmission(G_matrix, SNR, real_batch_size, noise_io)

            for iter in range(0, net_id_tested+1):
                # BP decoding
                u_BP_decoded = bp_decoder.decode(LLR.astype(np.float32), bp_iter_num[iter])
                noise_before_cnn = y_receive - (u_BP_decoded * (-2) + 1)
                noise_after_cnn = sess.run(denoise_net_out[iter], feed_dict={denoise_net_in[iter]: noise_before_cnn})
                s_mod_plus_res_noise = y_receive - noise_after_cnn
                if iter < net_id_tested:  # calculate the LLR for next BP decoding
                    if top_config.update_llr_with_epdf:
                        prob_tmp = conv_net[iter].get_res_noise_pdf(model_id).get(np.float32(SNR))
                        LLR = calc_LLR_epdf(prob_tmp, s_mod_plus_res_noise)
                    else:
                        res_noise_power = conv_net[iter].get_res_noise_power(model_id).get(np.float32(SNR))
                        LLR = s_mod_plus_res_noise * 2.0 / res_noise_power
            if top_config.update_llr_with_epdf:
                prob = stat_prob(s_mod_plus_res_noise - s_mod, prob)
            else:
                loss += np.sum(np.mean(np.square(s_mod_plus_res_noise-s_mod), 1))

        # each SNR
        if top_config.update_llr_with_epdf:
            fout_loss.write(str(SNR) + '\t')
            for i in range(np.size(prob)):
                fout_loss.write(str(prob[i]) + '\t')
            fout_loss.write('\n')
        else:
            loss /= np.double(simutimes)
            fout_loss.write(str(SNR) + '\t' + str(loss) + '\n')

    fout_loss.close()
    end = datetime.datetime.now()
    print('Time: %ds' % (end-start).seconds)
    print("end\n")
    sess.close()
Example #12
    noise_io = DataIO.NoiseIO(top_config.N_code, False, None,
                              top_config.cov_1_2_file)
    # generate training data
    ibd.generate_noise_samples(code, top_config, net_config, train_config,
                               top_config.BP_iter_nums_gen_data,
                               top_config.currently_trained_net_id, 'Training',
                               noise_io, top_config.model_id)
    # generate test data
    ibd.generate_noise_samples(code, top_config, net_config, train_config,
                               top_config.BP_iter_nums_gen_data,
                               top_config.currently_trained_net_id, 'Test',
                               noise_io, top_config.model_id)
elif top_config.function == 'Train':
    # get the network id?
    net_id = top_config.currently_trained_net_id
    conv_net = ConvNet.ConvNet(net_config, train_config, net_id)
    conv_net.train_network(top_config.model_id)
elif top_config.function == 'Simulation':
    batch_size = 5000
    if top_config.analyze_res_noise:
        simutimes_for_anal_res_power = int(
            np.ceil(5e6 / float(top_config.K_code * batch_size)) * batch_size)
        ibd.analyze_residual_noise(code, top_config, net_config,
                                   simutimes_for_anal_res_power, batch_size)

    simutimes_range = np.array([
        np.ceil(1e7 / float(top_config.K_code * batch_size)) * batch_size,
        np.ceil(1e8 / float(top_config.K_code * batch_size)) * batch_size
    ], np.int32)
    ibd.simulation_colored_noise(code, top_config, net_config, simutimes_range,
                                 1000, batch_size)
Example #13

    optimizer="nadam",
    lr=.01,
    lr_decay=0.0)
fully_connected = fcn.FullyConnected(sizes=[32, 4, 10],
                                     activations=["linear", "relu", "linear"],
                                     scale_method="normalize",
                                     optimizer="nadam",
                                     lr=.01,
                                     lr_decay=0.0)
convnet = cnn.ConvNet(conv_method="convolution",
                      layer_names=["conv", "pool", "conv", "pool"],
                      num_filters=[3, None, 2, None],
                      kernel_sizes=[[2, 2], None, [2, 2], None],
                      stride_sizes=[[2, 2], [1, 1], [2, 2], [1, 1]],
                      pool_sizes=[None, [2, 2], None, [2, 2]],
                      pool_fns=[None, "max", None, "max"],
                      pad_fns=["same", "valid", "same", "valid"],
                      activations=["relu", None, "relu", None],
                      input_channels=2,
                      scale_method="normalize",
                      optimizer="nadam",
                      lr=0.01,
                      lr_decay=0)

nn = nen.NeuralNetwork([convnet, fully_connected], "cross_entropy")

# inputs = np.random.rand(2, 6, 6, 2)
# #print(nn.feedforward(inputs, scale=True))

# epsilon=1e-5
# inputs = np.random.rand(2, 6, 6, 2)
# outputs = nn.feedforward(inputs)
Example #14
def run_train_models(datasets, parameters):
    device = parameters["device"]

    # Per-Class-Error
    per_class_error = {
        "MLP": 0.0,
        "FCN": 0.0,
        "ResNet": 0.0
    }
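    # per_class_error accumulates (test error rate / number of classes) per dataset; dividing by
    # the running dataset count below yields the mean per-class error (MPCE) logged to Neptune.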

    for dataset_number, (dataset, dataloader) in enumerate(datasets.items()):
        dataset_number += 1

        # setting up
        if parameters["verbose"]:
            print_dataset_info(dataset, dataloader)
        sleep(1)

        time_steps = dataloader['test'].dataset.inputs.shape[-1]
        n_classes = len(np.unique(dataloader['test'].dataset.targets))

        # MLP
        if parameters["run_mlp"]:
            model_name = "MLP"
            if parameters["verbose"]:
                print(model_name)
            model = MultiLayerPerceptron(time_steps, n_classes)
            optimizer = optim.Adadelta(
                model.parameters(),
                lr=parameters["mlp_lr"],
                rho=parameters["mlp_rho"],
                eps=parameters["mlp_eps"]
            )
            if torch.cuda.device_count() > 0:
                model = nn.DataParallel(model)
            model.to(device)
            test_error_rate, model, _ = train(
                model_name=model_name,
                dataset_name=dataset,
                dataloader_train=dataloader['train'],
                dataloader_test=dataloader['test'],
                device=device,
                model=model,
                optimizer=optimizer,
                epochs=parameters["mlp_epochs"],
                save=False
            )
            per_class_error[model_name] += test_error_rate / n_classes
            mean_per_class_error = per_class_error[model_name] / dataset_number
            neptune.log_metric("{}_mpce".format(model_name), mean_per_class_error)

        # ConvNet
        if parameters["run_fcn"]:
            model_name = "FCN"
            if parameters["verbose"]:
                print(model_name)
            model = ConvNet(time_steps, n_classes)
            if torch.cuda.device_count() > 0:
                model = nn.DataParallel(model)
            model.to(device)

            optimizer = optim.Adam(
                model.parameters(),
                lr=parameters["fcn_lr"],
                betas=parameters["fcn_betas"],
                eps=parameters["fcn_eps"]
            )
            test_error_rate, model, _ = train(
                model_name=model_name,
                dataset_name=dataset,
                dataloader_train=dataloader['train'],
                dataloader_test=dataloader['test'],
                device=device,
                model=model,
                optimizer=optimizer,
                epochs=parameters["fcn_epochs"],
                save=False
            )
            per_class_error[model_name] += test_error_rate / n_classes
            mean_per_class_error = per_class_error[model_name] / dataset_number
            neptune.log_metric("{}_mpce".format(model_name), mean_per_class_error)

        # ResNet
        if parameters["run_resnet"]:
            model_name = "ResNet"
            if parameters["verbose"]:
                print(model_name)
            model = ResNet(time_steps, n_classes)
            if torch.cuda.device_count() > 0:
                model = nn.DataParallel(model)
            model.to(device)
            optimizer = optim.Adam(
                model.parameters(),
                lr=parameters["fcn_lr"],
                betas=parameters["fcn_betas"],
                eps=parameters["fcn_eps"]
            )
            test_error_rate, model, _ = train(
                model_name=model_name,
                dataset_name=dataset,
                dataloader_train=dataloader['train'],
                dataloader_test=dataloader['test'],
                device=device,
                model=model,
                optimizer=optimizer,
                epochs=parameters["fcn_epochs"],
                save=False
            )
            per_class_error[model_name] += test_error_rate / n_classes
            mean_per_class_error = per_class_error[model_name] / dataset_number
            neptune.log_metric("{}_mpce".format(model_name), mean_per_class_error)
Example #15
def evaluate_convnet(arch_def,
                     data_path,
                     n_cand_chunk,
                     base_lr=0.04,
                     stepsize=50000,
                     gamma=0.5,
                     momentum=0.0,
                     n_epochs=10000,
                     batch_size=50,
                     N_train=1250000,
                     N_valid=100000,
                     N_test=100000,
                     validate_every_batches=2000,
                     n_rot=0,
                     activation=T.tanh,
                     tiny_train=False,
                     buf_size=1000,
                     savestep=50000,
                     resume=None,
                     improve_thresh=0.99,
                     ini_patience=50000,
                     data_interface_str="directory",
                     im_chan=4,
                     im_size=21):

    if data_interface_str == "directory":
        print "cnn.py: creating dataInterface"
        train_folder = data_path + '/chunks_train/'
        valid_folder = data_path + '/chunks_validate/'
        test_folder = data_path + '/chunks_test/'
        dataInterface = DirectoryDataInterface(train_folder,
                                               valid_folder,
                                               test_folder,
                                               n_cand_chunk=n_cand_chunk,
                                               batch_size=batch_size,
                                               N_valid=N_valid,
                                               N_test=N_test,
                                               N_train=N_train,
                                               im_chan=im_chan,
                                               im_size=im_size)
        print "cnn.py: dataInterface created"
    elif data_interface_str == "random":
        dataInterface = RandomDataInterface(data_path,
                                            n_cand_chunk=n_cand_chunk,
                                            batch_size=batch_size,
                                            N_valid=N_valid,
                                            N_test=N_test,
                                            N_train=N_train,
                                            im_chan=im_chan,
                                            im_size=im_size)
    else:
        print "cnn.py: " + data_interface_str + " not implemented."
        exit()

    print "Creating ConvNet"
    convnet = ConvNet(dataInterface,
                      arch_def,
                      batch_size=batch_size,
                      base_lr=base_lr,
                      momentum=momentum,
                      activation=activation,
                      buf_size=buf_size,
                      n_cand_chunk=n_cand_chunk,
                      n_rot=n_rot,
                      N_valid=N_valid,
                      N_test=N_test)
    print "ConvNet created"

    patience = ini_patience
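    # validate often enough that at least two validation checks fall within the patience window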
    validation_frequency = min(validate_every_batches, patience / 2)
    done_looping = False

    start_time = time.time()
    epoch = 0

    if resume != None:
        state = numpy.load(str(resume) + "_training_state.pkl")
        epoch = state['epoch']
        patience = state['patience']
        convnet.it = state['iteration']
        convnet.learning_rate = state['learning_rate']

        print "Loading training history"
        training = numpy.load(str(resume) + "_training_history.pkl")
        convnet.iter_train_history = training['iter_train_history']
        convnet.train_err_history = training['train_err_history']
        convnet.train_loss_history = training['train_loss_history']

        print "Loading validation history"
        validation = numpy.load(str(resume) + "_validation_history.pkl")
        convnet.iter_val_history = validation['iter_val_history']
        convnet.val_err_history = validation['val_err_history']
        convnet.val_loss_history = validation['val_loss_history']

        print "Loading buffer history"
        training_buffer = numpy.load(
            str(resume) + "_training_buffer_history.pkl")
        convnet.train_buf_err_history = training_buffer[
            'train_buf_err_history']

        print "Loading convnet"
        convnet_state = np.load(str(resume) + "_convnet_state.pkl")
        convnet.load_params(convnet_state['params'])
        convnet.best_params = convnet_state['best_params']
        print "convnet.best_params loaded"
        convnet.best_iter = convnet_state['best_iter']
        print "convnet.best_iter set to ", convnet.best_iter
        convnet.best_validation_error = convnet_state['best_validation_error']
        print "convnet.best_validation_error set to ", convnet.best_validation_error
        convnet.patience_loss = convnet_state['patience_loss']
        print "convnet.patience_loss set to ", convnet.patience_loss
    DropoutLayer.activate()
    while epoch < n_epochs and (not done_looping):
        epoch_done = False
        print "Epoch ", epoch, ", iteration ", convnet.it, \
            ", patience ", patience, ", learning rate ", convnet.learning_rate
        while not epoch_done:
            sys.stdout.flush()
            if convnet.it % savestep == 0:  # and convnet.it>1:
                print "Saving @ iter ", convnet.it
                convnet.save(str(convnet.it))
                with open(str(convnet.it) + "_training_state.pkl", "w") as f:
                    pickle.dump(
                        {
                            'epoch': epoch,
                            'patience': patience,
                            'iteration': convnet.it,
                            'learning_rate': convnet.learning_rate
                        }, f, pickle.HIGHEST_PROTOCOL)

            if convnet.it % stepsize == 0 and convnet.it > 1:
                convnet.reduceLearningRate(gamma)
            if convnet.it % validation_frequency == 0:  # and convnet.it>1:
                patience = convnet.validate(patience)
            t = time.time()
            convnet.train()
            #print " training time = ", time.time() - t
            epoch_done = convnet.dataInterface.doneTrain()
            if patience <= convnet.it:
                done_looping = True
                print "patience <= iter", patience, convnet.it
                break
        #convnet.chunkLoader.done = False
        convnet.dataInterface.setDoneTrain(False)

        epoch += 1
    elapsed_time = time.time() - start_time
    print "Optimization complete"
    print >> sys.stderr, "Elapsed time: ", elapsed_time / 60.0, " minutes"
    convnet.test()
    convnet.save("final")
Example #16
    correct_num = 0
    if test_num == -1:
        test_num = len(test_label)

    for i in range(test_num):
        im = enlarge_pic(test_pic[i])
        train_convnet.forward_p(im, test_label[i])
        if np.argmax(train_convnet.output7.maps[0][0]) == test_label[i]:
            correct_num += 1
    correct_rate = correct_num / test_num
    print('testdata correct rate:', correct_rate)


start_time = time.time()
print(start_time)
train_convnet = ConvNet()
epoch = 5
learning_rate = 0.0001
pic_num = 5000
test_pic_num = 1000

all_train_pic = get_images("train-images.idx3-ubyte")
all_train_label = get_labels("train-labels.idx1-ubyte")
all_test_pic = get_images("t10k-images.idx3-ubyte")
all_test_label = get_labels("t10k-labels.idx1-ubyte")

print('start training')
train_net(train_convnet, epoch, all_train_pic, all_train_label, learning_rate,
          pic_num)
print('end training and start test')
test_net(train_convnet, all_test_pic, all_test_label, test_pic_num)
Example #17
def generate_noise_samples(linear_code,
                           top_config,
                           net_config,
                           train_config,
                           bp_iter_num,
                           net_id_data_for,
                           generate_data_for,
                           noise_io,
                           model_id,
                           BP_layers=20):
    """
    :param linear_code: the LDPC code object
    :param top_config: 
    :param net_config: 
    :param train_config: 
    :param bp_iter_num: 
    :param net_id_data_for: 
    :param generate_data_for: 
    :param noise_io: 
    :param model_id: 
    :return: 
    """
    # net_id_data_for: the id of the CNN network this function generates data for. Start from zero.
    # model_id is to designate the specific model folder

    # generator matrix and parity-check matrix
    G_matrix = linear_code.G_matrix
    H_matrix = linear_code.H_matrix

    SNRset_for_generate_training_data = train_config.SNR_set_gen_data
    if generate_data_for == 'Training':
        batch_size_each_SNR = int(train_config.training_minibatch_size //
                                  np.size(train_config.SNR_set_gen_data))
        total_batches = int(train_config.training_sample_num //
                            train_config.training_minibatch_size)
    elif generate_data_for == 'Test':
        batch_size_each_SNR = int(train_config.test_minibatch_size //
                                  np.size(train_config.SNR_set_gen_data))
        total_batches = int(train_config.test_sample_num //
                            train_config.test_minibatch_size)
    else:
        print('Invalid objective of data generation!')
        exit(0)

    # build BP decoding network
    if np.size(bp_iter_num) != net_id_data_for + 1:
        print('Error: the length of bp_iter_num is not correct!')
        exit(0)
    bp_decoder = BP_Decoder.BP_NetDecoder(H_matrix, batch_size_each_SNR,
                                          top_config, BP_layers)

    conv_net = {}
    denoise_net_in = {}
    denoise_net_out = {}
    for net_id in range(net_id_data_for):
        # conv_net[net_id] = ConvNet.ConvNet(net_config, None, net_id)
        conv_net[net_id] = ConvNet.ConvNet(net_config, train_config, net_id)
        denoise_net_in[net_id], denoise_net_out[net_id] = conv_net[
            net_id].build_network()

    # init graph
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # restore cnn networks before the target CNN
    for net_id in range(net_id_data_for):
        conv_net[net_id].restore_network_with_model_id(
            sess, net_config.total_layers, model_id[0:(net_id + 1)])

    start = datetime.datetime.now()

    if generate_data_for == 'Training':
        train_feature_path = train_config.training_feature_folder + format("BP%s/" % bp_decoder.BP_layers) \
                             + train_config.training_feature_file
        fout_est_noise = open(train_feature_path, 'wb')

        train_label_path = train_config.training_label_folder + format("BP%s/" % bp_decoder.BP_layers) \
                             + train_config.training_label_file
        fout_real_noise = open(train_label_path, 'wb')
        # fout_est_noise = open(train_config.training_feature_file, 'wb')
        # fout_real_noise = open(train_config.training_label_file, 'wb')
    elif generate_data_for == 'Test':
        test_feature_path = train_config.test_feature_folder + format("BP%s/" % bp_decoder.BP_layers) \
                             + train_config.test_feature_file
        fout_est_noise = open(test_feature_path, 'wb')

        test_label_path = train_config.test_label_folder + format("BP%s/" % bp_decoder.BP_layers) \
                          + train_config.test_label_file
        fout_real_noise = open(test_label_path, 'wb')
        # fout_est_noise = open(train_config.test_feature_file, 'wb')
        # fout_real_noise = open(train_config.test_label_file, 'wb')
    else:
        print('Invalid objective of data generation!')
        exit(0)

    # generate the CNN dataset: the feature is the noise estimate after BP decoding (noise_before_cnn), the label is the actual channel noise (channel_noise)
    for ik in range(0, total_batches):  # number of batches
        for SNR in SNRset_for_generate_training_data:
            x_bits, _, _, channel_noise, y_receive, LLR, _ = lbc.encode_and_transmission(
                G_matrix, SNR, batch_size_each_SNR, noise_io)
            # x_bits, 1 - u_coded_bits, s_mod, ch_noise, y_receive, LLR, ch_noise_sigma
            for iter in range(0, net_id_data_for + 1):
                u_BP_decoded = bp_decoder.decode(LLR.astype(np.float32),
                                                 bp_iter_num[iter])

                if iter != net_id_data_for:
                    if top_config.update_llr_with_epdf:
                        prob = conv_net[iter].get_res_noise_pdf(model_id).get(
                            np.float32(SNR))
                        LLR = denoising_and_calc_LLR_epdf(
                            prob, y_receive, u_BP_decoded,
                            denoise_net_in[iter], denoise_net_out[iter], sess)
                    else:
                        res_noise_power = conv_net[iter].get_res_noise_power(
                            model_id).get(np.float32(SNR))
                        LLR = denoising_and_calc_LLR_awgn(
                            res_noise_power, y_receive, u_BP_decoded,
                            denoise_net_in[iter], denoise_net_out[iter], sess)

            # reconstruct noise
            noise_before_cnn = y_receive - (u_BP_decoded * (-2) + 1)
            noise_before_cnn = noise_before_cnn.astype(np.float32)
            noise_before_cnn.tofile(fout_est_noise)  # write features to file
            channel_noise.tofile(fout_real_noise)  # write labels to file

    fout_real_noise.close()
    fout_est_noise.close()

    sess.close()
    end = datetime.datetime.now()

    print("Time: %ds" % (end - start).seconds)
    print("end")
示例#18
0
def analyze_residual_noise(linear_code, top_config, net_config, simutimes,
                           batch_size, BP_layers):

    ## load some configurations from top_config
    net_id_tested = top_config.currently_trained_net_id
    model_id = top_config.model_id
    bp_iter_num = top_config.BP_iter_nums_gen_data[0:(net_id_tested + 1)]
    noise_io = DataIO.NoiseIO(top_config.N_code, False, None,
                              top_config.cov_1_2_file)
    SNRset = top_config.eval_SNRs

    G_matrix = linear_code.G_matrix
    H_matrix = linear_code.H_matrix
    K, N = np.shape(G_matrix)

    max_batches, residual_times = np.array(divmod(simutimes, batch_size),
                                           np.int32)
    print('Real simutimes: %d' % simutimes)
    if residual_times != 0:
        max_batches += 1

    # build BP decoding network
    if np.size(bp_iter_num) != net_id_tested + 1:
        print('Error: the length of bp_iter_num is not correct!')
        exit(0)
    bp_decoder = BP_Decoder.BP_NetDecoder(H_matrix, batch_size, top_config,
                                          BP_layers)

    # build denoising network
    conv_net = {}
    denoise_net_in = {}
    denoise_net_out = {}

    # build network for each CNN denoiser,
    for net_id in range(net_id_tested + 1):
        # conv_net[net_id] = ConvNet.ConvNet(net_config, None, net_id)
        conv_net[net_id] = ConvNet.ConvNet(net_config, top_config, net_id)
        denoise_net_in[net_id], denoise_net_out[net_id] = conv_net[
            net_id].build_network()

    # init graph
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # restore denoising network
    for net_id in range(net_id_tested + 1):
        conv_net[net_id].restore_network_with_model_id(
            sess, net_config.total_layers, model_id[0:(net_id + 1)])

    model_id_str = np.array2string(model_id,
                                   separator='_',
                                   formatter={'int': lambda d: "%d" % d})
    model_id_str = model_id_str[1:(len(model_id_str) - 1)]
    loss_file_name = format(
        "%s/bp_model/%s_%s/BP%s/%s_%s_residual_noise_property_netid%d_model%s.txt"
        % (net_config.residual_noise_property_folder, N, K,
           bp_decoder.BP_layers, N, K, net_id_tested, model_id_str))
    fout_loss = open(loss_file_name, 'wt')

    start = datetime.datetime.now()
    for SNR in SNRset:  # 0 0.5 1 1.5 2 2.5 3
        noise_io.reset_noise_generator()
        real_batch_size = batch_size
        # simulation part
        loss = 0.0
        prob = np.ones(0)
        for ik in range(0, max_batches):  # max_batches 3
            print("Batch id: %d" % ik)
            if ik == max_batches - 1 and residual_times != 0:
                real_batch_size = residual_times
            x_bits, _, s_mod, channel_noise, y_receive, LLR, _ = lbc.encode_and_transmission(
                G_matrix, SNR, real_batch_size, noise_io)
            # x_bits: randomly generated information bits at the transmitter; u_coded_bits: x_bits after channel coding; s_mod: u_coded_bits after BPSK modulation; ch_noise: channel noise; y_receive: the signal at the receiver; LLR: log-likelihood ratios
            for iter in range(0, net_id_tested + 1):
                # BP decoding; astype converts the LLRs to float32
                u_BP_decoded = bp_decoder.decode(
                    LLR.astype(np.float32), bp_iter_num[iter]
                )  # u_BP_decoded is the decoded result; bp_iter_num[iter] is the number of BP iterations
                noise_before_cnn = y_receive - (u_BP_decoded *
                                                (-2) + 1)  # re-modulate bits to [-1, 1] (BPSK)
                noise_after_cnn = sess.run(
                    denoise_net_out[iter],
                    feed_dict={denoise_net_in[iter]:
                               noise_before_cnn})  # CNN estimate of the noise n~
                s_mod_plus_res_noise = y_receive - noise_after_cnn  # received signal minus the CNN noise estimate, y~
                if iter < net_id_tested:  # calculate the LLR for next BP decoding
                    if top_config.update_llr_with_epdf:  # decides how the residual noise after the CNN enters the next iteration: empirical pdf or recomputed Gaussian LLR (empirical pdf by default)
                        prob_tmp = conv_net[iter].get_res_noise_pdf(
                            model_id).get(np.float32(SNR))
                        LLR = calc_LLR_epdf(prob_tmp, s_mod_plus_res_noise)
                    else:
                        res_noise_power = conv_net[iter].get_res_noise_power(
                            model_id).get(np.float32(SNR))
                        LLR = s_mod_plus_res_noise * 2.0 / res_noise_power  # LLR input for the next BP round
            if top_config.update_llr_with_epdf:
                prob = stat_prob(s_mod_plus_res_noise - s_mod,
                                 prob)  # s_mod is the transmitted (modulated) signal
            else:  # accumulate the mean squared error between the denoised output and the true signal
                loss += np.sum(
                    np.mean(np.square(s_mod_plus_res_noise - s_mod),
                            1))  # average power of the residual noise that would feed the next iteration
                # axis=1 averages within each codeword: [[a,b],[c,d]] -> [(a+b)/2, (c+d)/2]; the loss is the mean squared error

        # CNN loss for each SNR.
        if top_config.update_llr_with_epdf:
            fout_loss.write(str(SNR) + '\t')
            for i in range(np.size(prob)):
                fout_loss.write(str(prob[i]) + '\t')
            fout_loss.write('\n')
        else:
            loss /= np.double(
                simutimes * 16
            )  # presumably simutimes = 5000*3: 5000 codewords per batch, tested over max_batches = 3 batches
            fout_loss.write(
                str(SNR) + '\t' + str(loss) + '\n'
            )  # residual_noise_property_netid0_model0.txt stores the loss values; they clearly decrease, which shows the training is effective

    fout_loss.close()
    end = datetime.datetime.now()
    print('Time: %ds' % (end - start).seconds)
    print("end\n")
    sess.close()
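
# Note: stat_prob and calc_LLR_epdf are not defined in this example. Below is a minimal
# sketch of the empirical-pdf path, assuming a fixed symmetric histogram over the residual
# noise; only the call signatures come from the code above, the bin layout and both bodies
# are assumptions.
import numpy as np

EPDF_BINS = np.linspace(-5.0, 5.0, 201)  # assumed bin layout

def stat_prob(residual_noise, prob):
    # accumulate a histogram of the residual noise across batches
    counts, _ = np.histogram(residual_noise, bins=EPDF_BINS)
    return counts.astype(np.float64) if np.size(prob) == 0 else prob + counts

def calc_LLR_epdf(prob, s_mod_plus_res_noise):
    # LLR from the empirical noise pdf: compare the likelihood of the residual
    # under the +1 and -1 BPSK hypotheses
    pdf = prob / np.sum(prob)
    last = len(pdf) - 1
    idx_pos = np.clip(np.digitize(s_mod_plus_res_noise - 1.0, EPDF_BINS) - 1, 0, last)
    idx_neg = np.clip(np.digitize(s_mod_plus_res_noise + 1.0, EPDF_BINS) - 1, 0, last)
    eps = 1e-12
    return np.log((pdf[idx_pos] + eps) / (pdf[idx_neg] + eps))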
示例#19
0
def main(argv):

    # folder for saving
    subfold = argv[1]
    if not os.path.exists(subfold):
        os.mkdir(subfold)
    # load data
    fname = '../LRG-filter/data/lrg_171016/sample-LRG-140-120-10-c3.pkl'
    with open(fname, 'rb') as fp:
        datadict = pickle.load(fp)
        X_raw = datadict['data']

    # Reshape and generate train and test dataset
    rs = 120
    # normalization and whitening
    with open("../nets/norm_params.pkl", 'rb') as fp:
        normparam = pickle.load(fp)
    X_max = normparam["X_max"]
    X_min = normparam["X_min"]
    X_mean = normparam["X_mean"]

    X_train_pre = X_raw
    X_train_pre = (X_train_pre - X_min) / (X_max - X_min)
    X_in = X_train_pre.reshape(-1, rs, rs, 1)
    X_mean = np.mean(X_train_pre)  # note: this overrides the X_mean loaded from norm_params.pkl
    X_w = X_in - X_mean  # Whitening?

    numclass = 3
    encode_nodes = 32
    cnn = ConvNet.ConvNet(input_shape=X_w.shape,
                          kernel_size=[3, 3, 3, 3, 3],
                          kernel_num=[8, 8, 16, 32, 32],
                          fc_nodes=[],
                          encode_nodes=encode_nodes,
                          padding=('SAME', 'SAME'),
                          stride=(2, 2),
                          numclass=numclass,
                          sess=None,
                          name=None)
    cnn.cae_build()
    cnn.cnn_build(learning_rate=0.001)  # In order to init the weights

    foldname = "{0}/net_l3_140".format(subfold)
    name = "net_l3.pkl"
    cnn.sess, cnn.name = utils.load_net(os.path.join(foldname, name))

    # estimate
    label, label_pos = cnn.cnn_predict(img=X_w)
    label_new_pos = pos_to_line(label_pos)

    # save result
    savefold = "{0}/est_labeled".format(subfold)
    if not os.path.exists(savefold):
        os.mkdir(savefold)
    savepath = "est_l3.pkl"
    savedict = {
        "label_raw": np.array(datadict['type']),
        "z": np.array(datadict['redshift']),
        "snvss": np.array(datadict['snvss']),
        "name": np.array(datadict['name']),
        "label_est": label,
        "label_pos": label_new_pos
    }
    with open(os.path.join(savefold, savepath), 'wb') as fp:
        pickle.dump(savedict, fp)
示例#20
0
def generate_noise_samples(linear_code, top_config, net_config, train_config,
                           bp_iter_num, net_id_data_for, generate_data_for,
                           noise_io, model_id):
    # net_id_data_for: the id of the CNN network this function generates data for. Start from zero.
    # model_id is to designate the specific model folder
    # this task is carried out under colored noise: see the last paragraph of the related work
    G_matrix = linear_code.G_matrix
    H_matrix = linear_code.H_matrix

    SNRset_for_generate_training_data = train_config.SNR_set_gen_data
    if generate_data_for == 'Training':
        batch_size_each_SNR = int(train_config.training_minibatch_size //
                                  np.size(train_config.SNR_set_gen_data))
        total_batches = int(train_config.training_sample_num //
                            train_config.training_minibatch_size)
    elif generate_data_for == 'Test':
        batch_size_each_SNR = int(train_config.test_minibatch_size //
                                  np.size(train_config.SNR_set_gen_data))
        total_batches = int(train_config.test_sample_num //
                            train_config.test_minibatch_size)
    else:
        print('Invalid objective of data generation!')
        exit(0)

    # build BP decoding network
    if np.size(bp_iter_num) != net_id_data_for + 1:
        print('Error: the length of bp_iter_num is not correct!')
        exit(0)
    bp_decoder = BP_Decoder.BP_NetDecoder(H_matrix, batch_size_each_SNR)

    conv_net = {}
    denoise_net_in = {}
    denoise_net_out = {}
    # net_id_data_for is 0 under GenData
    for net_id in range(net_id_data_for):
        conv_net[net_id] = ConvNet.ConvNet(net_config, None, net_id)
        denoise_net_in[net_id], denoise_net_out[net_id] = conv_net[
            net_id].build_network()

    # init graph
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    # restore cnn networks before the target CNN
    # net_id_data_for is 0 under GenData
    for net_id in range(net_id_data_for):
        conv_net[net_id].restore_network_with_model_id(
            sess, net_config.total_layers, model_id[0:(net_id + 1)])

    start = datetime.datetime.now()

    if generate_data_for == 'Training':
        fout_est_noise = open(train_config.training_feature_file, 'wb')
        fout_real_noise = open(train_config.training_label_file, 'wb')
    elif generate_data_for == 'Test':
        fout_est_noise = open(train_config.test_feature_file, 'wb')
        fout_real_noise = open(train_config.test_label_file, 'wb')
    else:
        print('Invalid objective of data generation!')
        exit(0)

    # generating data
    # total_batches is 1428 under GenData
    for ik in range(0, total_batches):  # number of batches
        # SNRset_for_generate_training_data is the SNR set [0, 3] dB in steps of 0.5
        for SNR in SNRset_for_generate_training_data:
            x_bits, _, _, channel_noise, y_receive, LLR = lbc.encode_and_transmission(
                G_matrix, SNR, batch_size_each_SNR, noise_io)
            # net_id_data_for is 0 under GenData
            for iter in range(0, net_id_data_for + 1):
                u_BP_decoded = bp_decoder.decode(LLR.astype(np.float32),
                                                 bp_iter_num[iter])

                if iter != net_id_data_for:
                    if top_config.update_llr_with_epdf:
                        prob = conv_net[iter].get_res_noise_pdf(model_id).get(
                            np.float32(SNR))
                        LLR = denoising_and_calc_LLR_epdf(
                            prob, y_receive, u_BP_decoded,
                            denoise_net_in[iter], denoise_net_out[iter], sess)
                    else:
                        res_noise_power = conv_net[iter].get_res_noise_power(
                            model_id).get(np.float32(SNR))
                        LLR = denoising_and_calc_LLR_awgn(
                            res_noise_power, y_receive, u_BP_decoded,
                            denoise_net_in[iter], denoise_net_out[iter], sess)

            # reconstruct noise
            noise_before_cnn = y_receive - (u_BP_decoded * (-2) + 1)
            noise_before_cnn = noise_before_cnn.astype(np.float32)
            noise_before_cnn.tofile(fout_est_noise)  # write features to file
            channel_noise.tofile(fout_real_noise)  # write labels to file

    fout_real_noise.close()
    fout_est_noise.close()

    sess.close()
    end = datetime.datetime.now()

    print("Time: %ds" % (end - start).seconds)
    print("end")