Example No. 1
def test(snr_model, ratio, velocity):
    """评估模型,snr_model:选择某种模型"""
    result_dict = dict()
    for snr in config.SNRs:
        # Instantiate the model
        seq2seq = Seq2Seq(snr, ratio).to(config.device).eval()
        # Load the trained weights
        seq2seq_model_path = "./model/{}km/ratio_{}/{}dB.model".format(velocity, ratio, snr_model)
        seq2seq.load_state_dict(torch.load(seq2seq_model_path), strict=False)
        loss_list = list()
        similarity_list = list()
        time_list = list()
        data_loader = data_load(False, velocity)
        for idx, input_ in enumerate(tqdm(data_loader)):
            with torch.no_grad():
                start_time = time.time()
                input_ = input_.to(config.device)
                output = seq2seq(input_)
                stop_time = time.time()
                cur_similarity = cosine_similarity(output, input_, dim=-1).mean().cpu().item()
                cur_loss = mse_loss(output, input_).cpu().item()
                loss_list.append(cur_loss / config.test_batch_size)
                similarity_list.append(cur_similarity)
                time_list.append((stop_time - start_time) / config.test_batch_size)

        # Compute average similarity and loss
        avg_loss = np.mean(loss_list)
        avg_similarity = np.mean(similarity_list)
        avg_time = np.mean(time_list)
        print("v:{}\tratio:{}\tSNR:{}dB\tloss:{:.3f}\tsimilarity:{:.3f}\ttime:{:.4f}".format(velocity, ratio, snr, avg_loss, avg_similarity, avg_time))
        result = {"相似度": avg_similarity, "NMSE": avg_loss, "time": avg_time}
        result_dict["{}dB".format(snr)] = result
    file_path = "./test_result/{}km/csi_net/csi_net_{}_{}dB.pkl".format(velocity, ratio, snr_model)
    rec_mkdir(file_path)
    pickle.dump(result_dict, open(file_path, "wb"))
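The snippets on this page call helpers such as rec_mkdir, data_load, cosine_similarity, and mse_loss without showing the surrounding imports. A minimal sketch of the preamble they appear to assume, including a possible rec_mkdir implementation; the project's real config, data_load, and model modules are not shown here and may differ:

# Assumed common preamble for these snippets; `config`, `data_load`, and the
# model classes (Seq2Seq, Encoder, Decoder, Noise, CsiNet) are project-specific.
import os
import time
import pickle

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import cosine_similarity, mse_loss
from torch.optim import Adam
from tqdm import tqdm


def rec_mkdir(file_path):
    """Hypothetical helper: create the parent directories of `file_path`
    so that a subsequent torch.save / pickle.dump can write to it."""
    dir_name = os.path.dirname(file_path)
    if dir_name:
        os.makedirs(dir_name, exist_ok=True)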
Example No. 2
def train_model_merged(epoch, snr, ratio, velocity):
    """在一种信噪比下进行模型训练,模型为一个整体"""
    my_model = Seq2Seq(snr=snr, ratio=ratio).to(config.device)
    my_model.train()
    optimizer = Adam(my_model.parameters())
    data_loader = data_load(True, velocity)
    init_loss = 1
    for i in range(epoch):
        bar = tqdm(data_loader)
        for idx, data in enumerate(bar):
            optimizer.zero_grad()  # Zero the gradients
            data = data.to(config.device)  # Move the batch to the GPU for training
            output = my_model(data)
            similarity = torch.cosine_similarity(output, data, dim=-1).mean()  # Cosine similarity of the current batch
            loss = F.mse_loss(output, data)  # Compute the loss
            loss.backward()  # Backpropagate
            nn.utils.clip_grad_norm_(my_model.parameters(), config.clip)  # Clip gradients
            optimizer.step()  # Update the parameters
            bar.set_description("v:{}\tratio:{}\tSNR:{}dB\tepoch:{}\tindex:{}\tloss:{:}\tsimilarity:{:.3f}"
                                .format(velocity, ratio, snr, i + 1, idx, loss.item(), similarity.item()))
            if loss.item() < init_loss:
                init_loss = loss.item()
                model_save_path = "./model/{}km/ratio_{}/{}dB.model".format(velocity, ratio, snr)
                rec_mkdir(model_save_path)  # Ensure the directories along this path exist
                torch.save(my_model.state_dict(), model_save_path)
            if loss.item() < 1e-6:
                return
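For context, a minimal sketch of how train_model_merged and the test function from Example No. 1 could be driven together; the epoch count, ratio, and velocity are illustrative values, not the project's actual script:

# Hypothetical driver (illustrative values): train one merged model per SNR,
# then evaluate each trained model with the `test` function from Example No. 1.
if __name__ == "__main__":
    epochs, ratio, velocity = 100, 16, 50  # assumed example values
    for snr in config.SNRs:
        train_model_merged(epochs, snr, ratio, velocity)
    for snr_model in config.SNRs:
        test(snr_model, ratio, velocity)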
Example No. 3
def pre_test_seq2seq(ratio):
    """评估seq2seq模型,不加入去噪声网络"""

    # Instantiate the models
    encoder = Encoder(ratio).to(config.device).eval()
    decoder = Decoder().to(config.device).eval()
    # Load the trained weights
    encoder_model_path = "./model/ratio_{}/pre_train/seq2seq/encoder.pkl".format(
        ratio)
    decoder_model_path = "./model/ratio_{}/pre_train/seq2seq/decoder.pkl".format(
        ratio)
    encoder.load_state_dict(torch.load(encoder_model_path))
    decoder.load_state_dict(torch.load(decoder_model_path))
    loss_list = list()
    similarity_list = list()
    data_loader = data_load()
    for idx, input_ in enumerate(tqdm(data_loader)):
        with torch.no_grad():
            input_ = input_.to(config.device)
            encoder_output = encoder(input_)
            decoder_output = decoder(encoder_output)
            # Cosine similarity of the current batch
            cur_similarity = cosine_similarity(decoder_output, input_,
                                               dim=-1).mean().cpu().item()
            # Loss of the current batch
            cur_loss = mse_loss(decoder_output, input_).cpu().item()
            loss_list.append(cur_loss / config.train_batch_size)
            similarity_list.append(cur_similarity)

    # Compute average similarity and loss
    avg_loss = np.mean(loss_list)
    avg_similarity = np.mean(similarity_list)
    print("seq2seq\tloss:{:.3f}\tsimilarity:{:.3f}".format(
        avg_loss, avg_similarity))
Example No. 4
def overfit_one_batch_test_seq2seq(ratio):
    """评估过拟合模型的效果"""
    # 实例化模型
    encoder = Encoder(ratio).to(config.device).eval()
    decoder = Decoder().to(config.device).eval()
    # Load the trained weights
    encoder_model_path = "./model/ratio_{}/pre_train/seq2seq/encoder.pkl".format(
        ratio)
    decoder_model_path = "./model/ratio_{}/pre_train/seq2seq/decoder.pkl".format(
        ratio)
    encoder.load_state_dict(torch.load(encoder_model_path))
    decoder.load_state_dict(torch.load(decoder_model_path))
    data_loader = data_load()
    input_ = next(iter(data_loader)).to(config.device)
    print("\n", input_)
    with torch.no_grad():
        encoder_output = encoder(input_)
        decoder_output = decoder(encoder_output)
        # Cosine similarity of the current batch
        cur_similarity = cosine_similarity(decoder_output, input_,
                                           dim=-1).mean().cpu().item()
        # Loss of the current batch
        cur_loss = mse_loss(decoder_output, input_).cpu().item()

    print("one_batch\tloss:{:.3f}\tsimilarity:{:.3f}".format(
        cur_loss, cur_similarity))
    print("\n", decoder_output)
Example No. 5
def __init__(self, epoch, **kwargs):
    """Initialize the model, dataset, optimizer, and required parameters."""
    self.ratio = kwargs.get("ratio", config.old_csi_net_compress_ratio)
    self.velocity = kwargs.get('velocity', config.velocity)
    self.model_path = kwargs.get(
        "model_path", "./model/{}km/ratio_{}/old_csi/old_csi_{}.pt".format(
            self.velocity, self.ratio, self.ratio))
    self.epoch = epoch
    self.data_loader = data_load(True, self.velocity)
    self.model = CsiNet(ratio=self.ratio).to(self.device).train()
    self.optimizer = Adam(self.model.parameters())
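Example No. 5 shows only the constructor of a CsiNet trainer class. A hypothetical companion training method, modeled on train_model_merged above; the method name, progress format, and checkpointing rule are assumptions rather than the project's confirmed implementation:

def train(self):
    """Hypothetical training loop using the attributes set up in __init__."""
    best_loss = float("inf")
    for i in range(self.epoch):
        bar = tqdm(self.data_loader)
        for idx, data in enumerate(bar):
            self.optimizer.zero_grad()
            data = data.to(config.device)
            output = self.model(data)
            loss = F.mse_loss(output, data)
            loss.backward()
            self.optimizer.step()
            bar.set_description("epoch:{}\tindex:{}\tloss:{:.6f}".format(i + 1, idx, loss.item()))
            if loss.item() < best_loss:  # keep the best checkpoint, as in train_model_merged
                best_loss = loss.item()
                rec_mkdir(self.model_path)
                torch.save(self.model.state_dict(), self.model_path)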
Example No. 6
def __init__(self, **kwargs):
    """Load the model, dataset, and required parameters."""
    self.ratio = kwargs.get("ratio",
                            config.old_csi_net_compress_ratio)  # compression ratio
    add_noise = kwargs.get("add_noise", False)  # whether to add noise
    self.snr = kwargs.get("snr", None)  # SNR
    self.velocity = kwargs.get("velocity", config.velocity)  # velocity
    self.model_path = kwargs.get(
        "model_path", "./model/{}km/ratio_{}/old_csi/old_csi_{}.pt".format(
            self.velocity, self.ratio, self.ratio))
    self.model = CsiNet(ratio=self.ratio,
                        add_noise=add_noise,
                        snr=self.snr).to(self.device).eval()
    self.model.load_state_dict(torch.load(self.model_path))
    self.data_loader = tqdm(data_load(False, self.velocity))
Example No. 7
def pre_train_seq2seq(epoch, ratio):
    """对一种信噪比进行训练seq2seq网络,不加入噪声网络"""
    encoder = Encoder(ratio).to(config.device).train()
    decoder = Decoder(ratio).to(config.device).train()
    optimizer1 = Adam(encoder.parameters())
    optimizer2 = Adam(decoder.parameters())
    data_loader = data_load()
    for i in range(epoch):
        bar = tqdm(data_loader, desc="seq2seq")
        for idx, data in enumerate(bar):
            optimizer1.zero_grad()  # Zero the encoder gradients
            optimizer2.zero_grad()  # Zero the decoder gradients
            data = data.to(config.device)  # Move the batch to the GPU for training
            encoder_output = encoder(data)
            decoder_output = decoder(encoder_output)
            similarity = torch.cosine_similarity(
                decoder_output, data, dim=-1).mean()  # Cosine similarity of the current batch
            mse_loss = F.mse_loss(decoder_output, data)  # Compute the loss
            mse_loss.backward()  # Backpropagate
            nn.utils.clip_grad_norm_(encoder.parameters(),
                                     config.clip)  # Clip gradients
            nn.utils.clip_grad_norm_(decoder.parameters(),
                                     config.clip)  # Clip gradients
            optimizer1.step()  # Update the encoder parameters
            optimizer2.step()  # Update the decoder parameters
            bar.set_description(
                "seq2seq_\tratio:{}\tepoch:{}\tindex:{}\tloss:{:}\tsimilarity:{:.3f}"
                .format(ratio, i + 1, idx, mse_loss.item(), similarity.item()))
            # Save every 10 batches
            if (idx + 1) % 10 == 0:
                encoder_model_path = "./model/ratio_{}/pre_train/seq2seq/encoder.pkl".format(
                    ratio)
                decoder_model_path = "./model/ratio_{}/pre_train/seq2seq/decoder.pkl".format(
                    ratio)
                torch.save(encoder.state_dict(), encoder_model_path)
                torch.save(decoder.state_dict(), decoder_model_path)
            if mse_loss.item() < 1e-6:
                return
        # Save a checkpoint every 50 epochs
        if (i + 1) % 50 == 0:
            encoder_model_epoch_path = "./model/ratio_{}/pre_train/seq2seq/epoch/epoch_{}_encoder.pkl".format(
                ratio, i + 1)
            decoder_model_epoch_path = "./model/ratio_{}/pre_train/seq2seq/epoch/epoch_{}_decoder.pkl".format(
                ratio, i + 1)
            torch.save(encoder.state_dict(), encoder_model_epoch_path)
            torch.save(decoder.state_dict(), decoder_model_epoch_path)
Example No. 8
def train_model_separated(epoch, snr, ratio, velocity):
    """在一种信噪比下进行模型训练,每个模块分开保存"""
    noise = Noise(snr=snr, ratio=ratio).to(config.device).train()
    encoder = Encoder(ratio).to(config.device).train()
    decoder = Decoder(ratio).to(config.device).train()
    optim_encoder = Adam(encoder.parameters())
    optim_noise = Adam(noise.parameters())
    optim_decoder = Adam(decoder.parameters())
    data_loader = data_load(True, velocity)
    init_loss = 1
    for i in range(epoch):
        bar = tqdm(data_loader)
        for idx, data in enumerate(bar):
            optim_encoder.zero_grad()  # Zero the gradients
            optim_noise.zero_grad()
            optim_decoder.zero_grad()
            data = data.to(config.device)  # Move the batch to the GPU for training
            out = encoder(data)
            out = noise(out)
            output = decoder(out)
            similarity = torch.cosine_similarity(output, data, dim=-1).mean()  # Cosine similarity of the current batch
            loss = F.mse_loss(output, data)  # Compute the loss
            loss.backward()  # Backpropagate
            nn.utils.clip_grad_norm_(encoder.parameters(), config.clip)  # Clip gradients
            nn.utils.clip_grad_norm_(noise.parameters(), config.clip)
            nn.utils.clip_grad_norm_(decoder.parameters(), config.clip)
            optim_encoder.step()  # Update the parameters
            optim_noise.step()
            optim_decoder.step()
            bar.set_description("v:{}\tratio:{}\tSNR:{}dB\tepoch:{}\tindex:{}\tloss:{:}\tsimilarity:{:.3f}"
                                .format(velocity, ratio, snr, i + 1, idx, loss.item(), similarity.item()))
            if loss.item() < init_loss:
                init_loss = loss.item()
                encoder_save_path = "./model/{}km/ratio_{}/separate/{}dB.encoder".format(velocity, ratio, snr)
                noise_save_path = "./model/{}km/ratio_{}/separate/{}dB.noise".format(velocity, ratio, snr)
                decoder_save_path = "./model/{}km/ratio_{}/separate/{}dB.decoder".format(velocity, ratio, snr)
                rec_mkdir(encoder_save_path)
                rec_mkdir(decoder_save_path)
                rec_mkdir(noise_save_path)
                torch.save(encoder.state_dict(), encoder_save_path)
                torch.save(noise.state_dict(), noise_save_path)
                torch.save(decoder.state_dict(), decoder_save_path)
            if loss.item() < 1e-5:
                return
Example No. 9
def test_model_separated(snr_model, ratio, velocity):
    """snr_model:选择某种信噪比模型"""
    result_dict = dict()
    for snr in config.SNRs:
        noise = Noise(snr=snr, ratio=ratio).to(config.device).eval()
        encoder = Encoder(ratio).to(config.device).eval()
        decoder = Decoder().to(config.device).eval()
        encoder_model_path = "./model/{}km/ratio_{}/separate/{}dB.encoder".format(velocity, ratio, snr_model)
        noise_model_path = "./model/{}km/ratio_{}/separate/{}dB.noise".format(velocity, ratio, snr_model)
        decoder_model_path = "./model/{}km/ratio_{}/separate/{}dB.decoder".format(velocity, ratio, snr_model)
        encoder.load_state_dict(torch.load(encoder_model_path))
        noise.load_state_dict(torch.load(noise_model_path))
        decoder.load_state_dict(torch.load(decoder_model_path))
        loss_list = list()
        similarity_list = list()
        time_list = list()
        data_loader = data_load(False, velocity)
        for idx, input_ in enumerate(tqdm(data_loader)):
            with torch.no_grad():
                start_time = time.time()
                input_ = input_.to(config.device)
                out = encoder(input_)
                out = noise(out)
                output = decoder(out)
                stop_time = time.time()
                cur_similarity = cosine_similarity(output, input_, dim=-1).mean().cpu().item()  # Average cosine similarity of the current batch
                cur_loss = mse_loss(output, input_).cpu().item()  # Loss of the current batch
                loss_list.append(cur_loss / config.test_batch_size)
                similarity_list.append(cur_similarity)
                time_list.append((stop_time - start_time) / config.test_batch_size)

        # Compute average similarity and loss
        avg_loss = np.mean(loss_list)
        avg_similarity = np.mean(similarity_list)
        avg_time = np.mean(time_list)
        print("v:{}\tratio:{}\tSNR:{}dB\tloss:{:.3f}\tsimilarity:{:.3f}\ttime:{:.4f}".format(velocity, ratio, snr, avg_loss, avg_similarity, avg_time))
        result = {"相似度": avg_similarity, "NMSE": avg_loss, "time": avg_time}
        result_dict["{}dB".format(snr)] = result
    file_path = "./test_result/{}km/csi_net/separate/csi_net_{}.pkl".format(velocity, ratio)
    rec_mkdir(file_path)
    pickle.dump(result_dict, open(file_path, "wb"))
Example No. 10
def overfit_one_batch_train_seq2seq(epoch, ratio):
    """
    将每个batch的训练数据设置为相同数据,进行过拟合训练
    通过过拟合训练,测试网络的学习能力,判断网络是否能够学习所需特征,恢复出原始信号
    """
    encoder = Encoder(ratio).to(config.device).train()
    decoder = Decoder(ratio).to(config.device).train()
    optimizer1 = Adam(encoder.parameters())
    optimizer2 = Adam(decoder.parameters())
    for i in tqdm(range(epoch)):
        data_loader = data_load()
        data = next(iter(data_loader))
        optimizer1.zero_grad()  # Zero the encoder gradients
        optimizer2.zero_grad()  # Zero the decoder gradients
        data = data.to(config.device)  # Move the batch to the GPU for training
        encoder_output = encoder(data)
        decoder_output = decoder(encoder_output)
        print("Original data:\n", data)
        print("*" * 40)
        print("Output:\n", decoder_output)
        similarity = torch.cosine_similarity(decoder_output, data,
                                             dim=-1).mean()  # Cosine similarity of the current batch
        mse_loss = F.mse_loss(decoder_output, data)  # Compute the loss
        mse_loss.backward()  # Backpropagate
        nn.utils.clip_grad_norm_(encoder.parameters(), config.clip)  # Clip gradients
        nn.utils.clip_grad_norm_(decoder.parameters(), config.clip)  # Clip gradients
        optimizer1.step()  # Update the encoder parameters
        optimizer2.step()  # Update the decoder parameters
        print("similarity:", similarity.item(), "\t", "loss:", mse_loss.item())

        # Save the model checkpoints
        if (i + 1) % 50 == 0:
            encoder_model_epoch_path = "./model/ratio_{}/pre_train/seq2seq/epoch/epoch_{}_encoder.pkl".format(
                ratio, i + 1)
            decoder_model_epoch_path = "./model/ratio_{}/pre_train/seq2seq/epoch/epoch_{}_decoder.pkl".format(
                ratio, i + 1)
            torch.save(encoder.state_dict(), encoder_model_epoch_path)
            torch.save(decoder.state_dict(), decoder_model_epoch_path)
        if mse_loss.item() < 1e-6:
            break
Example No. 11
def pre_test_noise(snr, ratio, model_path, velocity=config.velocity) -> dict:
    """评估去噪网络模型"""

    # Instantiate the model
    noise = Noise(snr, ratio).to(config.device)
    noise.eval()
    # Load the trained weights
    noise.load_state_dict(torch.load(model_path))
    loss_list = list()
    similarity_list = list()
    time_list = list()
    data_loader = data_load()
    for idx, _ in enumerate(tqdm(data_loader)):
        with torch.no_grad():
            input_ = torch.tensor(np.random.randn(
                config.train_batch_size, int(config.data_length / ratio)),
                                  dtype=torch.float).to(config.device)
            start_time = time.time()
            output = noise(input_)
            stop_time = time.time()
            # Cosine similarity of the current batch
            cur_similarity = cosine_similarity(output, input_,
                                               dim=-1).mean().cpu().item()
            # Loss of the current batch
            cur_loss = mse_loss(output, input_).cpu().item()
            loss_list.append(cur_loss / config.train_batch_size)
            similarity_list.append(cur_similarity)
            time_list.append(
                (stop_time - start_time) / config.train_batch_size)

    # Compute average similarity and loss
    avg_loss = np.mean(loss_list)
    avg_similarity = np.mean(similarity_list)
    avg_time = np.mean(time_list)
    print("noise\tv:{}\tSNR:{}\tloss:{:.3f}\tsimilarity:{:.3f}\ttime:{:.4f}".
          format(velocity, snr, avg_loss, avg_similarity, avg_time))
    result = {"NMSE": avg_loss, "相似度": avg_similarity, "time": avg_time}
    return result
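pre_test_noise returns a per-SNR result dict rather than writing it to disk. A sketch of how those results might be collected and pickled, following the result_dict pattern of Example No. 1; the model path matches the one saved in Example No. 12, while the helper name and output path are assumptions:

# Hypothetical aggregation of pre_test_noise results over all SNRs,
# mirroring the result_dict / pickle pattern of Example No. 1.
def collect_noise_results(ratio, velocity=config.velocity):
    result_dict = dict()
    for snr in config.SNRs:
        model_path = "./model/ratio_{}/pre_train/noise/{}dB.noise".format(ratio, snr)
        result_dict["{}dB".format(snr)] = pre_test_noise(snr, ratio, model_path, velocity)
    file_path = "./test_result/{}km/noise/noise_{}.pkl".format(velocity, ratio)  # assumed output path
    rec_mkdir(file_path)
    pickle.dump(result_dict, open(file_path, "wb"))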
Example No. 12
def pre_train_noise(epoch, snr, ratio):
    """对一种信噪比进行训练noise网络"""
    noise = Noise(snr=snr, ratio=ratio).to(config.device)
    noise.train()
    optimizer = Adam(noise.parameters())
    data_loader = data_load()
    for i in range(epoch):
        bar = tqdm(data_loader)
        for idx, _ in enumerate(bar):
            optimizer.zero_grad()  # Zero the gradients
            np.random.seed(1)  # Fixed seed: every batch trains on the same random data
            data = torch.tensor(np.random.randn(
                config.train_batch_size, int(config.data_length / ratio)),
                                dtype=torch.float).to(config.device)
            output = noise(data)  # [batch_size, 32*32]
            similarity = torch.cosine_similarity(
                output, data, dim=-1).mean()  # Cosine similarity of the current batch
            mse_loss = F.mse_loss(output, data)
            mse_loss.backward()
            nn.utils.clip_grad_norm_(noise.parameters(), config.clip)  # Clip gradients
            optimizer.step()  # Update the parameters
            bar.set_description(
                "noise--ratio:{}\tnoise_SNR:{}dB\tepoch:{}\tindex:{}\tloss:{:}\tsimilarity:{:.3f}"
                .format(ratio, snr, i + 1, idx + 1, mse_loss.item(),
                        similarity.item()))
            # Save every 10 batches
            if (idx + 1) % 10 == 0:
                model_save_path = "./model/ratio_{}/pre_train/noise/{}dB.noise".format(
                    ratio, snr)
                torch.save(noise.state_dict(), model_save_path)
            if mse_loss.item() < 1e-7:
                return
        # Save a checkpoint every 50 epochs
        if (i + 1) % 50 == 0:
            save_path_ = "./model/ratio_{}/pre_train/noise/epoch/epoch_{}_{}dB.noise".format(
                ratio, i + 1, snr)
            torch.save(noise.state_dict(), save_path_)
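Finally, a sketch of how the pre-training functions on this page might be chained: the seq2seq encoder/decoder first (Examples No. 7 and 3), then one noise network per SNR (Examples No. 12 and 11). The ordering, epoch counts, and the collect_noise_results helper above are assumptions, not a confirmed workflow:

# Hypothetical pre-training pipeline tying together Examples No. 7, 3, 12 and 11.
def pre_train_all(ratio, seq2seq_epochs=200, noise_epochs=200):
    pre_train_seq2seq(seq2seq_epochs, ratio)       # Example No. 7
    pre_test_seq2seq(ratio)                        # Example No. 3
    for snr in config.SNRs:
        pre_train_noise(noise_epochs, snr, ratio)  # Example No. 12
    collect_noise_results(ratio)                   # evaluates with pre_test_noise (Example No. 11)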