def TestSamples(samples,
                gts,
                net,
                tester,
                device='cuda',
                I_only=True,
                batch_size=K.BatchSize,
                SNR_generate=None):
    """Evaluate `net` on `samples` against ground truths `gts` in mini-batches.

    Args:
        samples: array-like of input signals; sliced per batch and reshaped
            to (batch, 1 if I_only else 2, -1) before the forward pass.
        gts: expanded-target ground truths aligned with `samples`.
        net: model exposing `get_cross_entropy_loss(X, Y, need_PR,
            is_expanded_target)` returning `(loss, PR)`.
        tester: metrics accumulator with `update_confusion_matrix`,
            `measure()` and a `micro_avg_precision` attribute.
        device: torch device for the batch tensors.
        I_only: if True, inputs are single-channel (I component only).
        batch_size: evaluation batch size (default taken from `K` at def time).
        SNR_generate: optional zero-arg callable producing an SNR; when given,
            complex Gaussian noise at that SNR is added to each batch.

    Returns:
        (mean loss over all samples, tester.micro_avg_precision)
    """
    sum_loss = 0.0
    for i1 in range(0, len(samples), batch_size):
        # Clamp the window end so the final, possibly short batch is included.
        i2 = min(i1 + batch_size, len(samples))
        batch_X = samples[i1:i2]
        if SNR_generate:
            batch_X = DataManager.add_complex_gaussian_noise(
                batch_X, SNR=SNR_generate(), I_only=I_only)
        batch_X = batch_X.reshape(i2 - i1, 1 if I_only else 2, -1)
        batch_X = torch.tensor(batch_X, dtype=torch.float32, device=device)
        cpu_batch_Y = gts[i1:i2]
        batch_Y = torch.tensor(cpu_batch_Y, dtype=torch.float32, device=device)
        # * Forward
        loss, PR = net.get_cross_entropy_loss(batch_X,
                                              batch_Y,
                                              need_PR=True,
                                              is_expanded_target=True)
        # float() detaches the per-batch loss immediately, so each batch's
        # autograd graph is freed instead of being kept alive by a running
        # tensor sum (the original accumulated tensors and converted at the end).
        sum_loss += float(loss) * (i2 - i1)
        tester.update_confusion_matrix(PR.cpu().numpy(), cpu_batch_Y)
    tester.measure()
    return sum_loss / len(samples), tester.micro_avg_precision
# --- Example #2 ---
     # NOTE(review): interior fragment of a training loop — the enclosing
     # loop/condition header is outside this chunk. Saves a model snapshot,
     # then optionally steps the exponential LR scheduler.
     process_bar.SkipMsg(
         f"Taking Snapshot of current model as {os.path.split(K.SnapshotFileStr.format(iteration))[1]}",
         logger)
     saver.save(iteration,
                net,
                optimizer,
                model_init_dict=net.model_init_dict)
     # ! Also decrease the lr here
     # `is not None` instead of `!= None` (PEP 8 E711); behavior identical
     # for a plain config value.
     if K.ExponentialLR is not None:
         exp_scheduler.step()
         process_bar.SkipMsg(
             f"Current lr is {exp_scheduler.get_lr()}", logger)
 # * Process Batch Data
 # NOTE(review): body of one training iteration; `train_batch`, `K`, `net`,
 # `optimizer`, `tester`, `data_manager` are bound by enclosing code not
 # visible in this chunk.
 batch_X, cpu_batch_Y = train_batch
 if K.IsNoise:
     # Augment the batch with complex Gaussian noise at a freshly drawn SNR.
     batch_X = data_manager.add_complex_gaussian_noise(
         batch_X, SNR=K.random_SNR_generate(), I_only=K.IOnly)
 # Reshape to (batch, channels, length): 1 channel when K.IOnly else 2 —
 # presumably I-only vs. I/Q components; confirm against data_manager.
 batch_X = batch_X.reshape(batch_X.shape[0], 1 if K.IOnly else 2,
                           -1)
 batch_X = torch.tensor(batch_X, dtype=torch.float32, device='cuda')
 batch_Y = torch.tensor(cpu_batch_Y,
                        dtype=torch.float32,
                        device='cuda')
 # * Fwd, Bwd, Optimize, record
 optimizer.zero_grad()
 loss, PR = net.get_cross_entropy_loss(batch_X,
                                       batch_Y,
                                       is_expanded_target=True,
                                       need_PR=True)
 loss.backward()
 optimizer.step()
 # Record predictions vs. the CPU-side ground truth for metric bookkeeping.
 tester.update_confusion_matrix(PR.cpu().numpy(), cpu_batch_Y)
     # NOTE(review): interior fragment of a training loop — the enclosing
     # loop/condition header is outside this chunk. Saves a model snapshot,
     # then optionally steps the exponential LR scheduler.
     process_bar.SkipMsg(
         f"Taking Snapshot of current model as {os.path.split(K.SnapshotFileStr.format(iteration))[1]}",
         logger)
     saver.save(iteration,
                net,
                optimizer,
                model_init_dict=net.model_init_dict)
     # ! Also decrease the lr here
     # `is not None` instead of `!= None` (PEP 8 E711); behavior identical
     # for a plain config value.
     if K.ExponentialLR is not None:
         exp_scheduler.step()
         process_bar.SkipMsg(
             f"Current lr is {exp_scheduler.get_lr()}", logger)
 # * Process Batch Data
 # NOTE(review): body of one training iteration; `train_batch`, `K`, `net`,
 # `optimizer`, `tester`, `data_manager` are bound by enclosing code not
 # visible in this chunk. Unlike the fixed-SNR variant above in this file,
 # noise here uses one SNR per sample via `K.train_SNRs_generator`.
 batch_X, cpu_batch_Y = train_batch
 if K.IsNoise:
     # Per-sample SNRs: the generator is sized by the batch dimension.
     batch_X = data_manager.add_complex_gaussian_noise(
         batch_X, SNRs=K.train_SNRs_generator(batch_X.shape[0]))
 # Reshape to (batch, channels, length): 1 channel when K.IOnly else 2 —
 # presumably I-only vs. I/Q components; confirm against data_manager.
 batch_X = batch_X.reshape(batch_X.shape[0], 1 if K.IOnly else 2,
                           -1)
 batch_X = torch.tensor(batch_X, dtype=torch.float32, device='cuda')
 batch_Y = torch.tensor(cpu_batch_Y,
                        dtype=torch.float32,
                        device='cuda')
 # * Fwd, Bwd, Optimize, record
 optimizer.zero_grad()
 loss, PR = net.get_cross_entropy_loss(batch_X,
                                       batch_Y,
                                       is_expanded_target=True,
                                       need_PR=True)
 loss.backward()
 optimizer.step()
 # Record predictions vs. the CPU-side ground truth for metric bookkeeping.
 tester.update_confusion_matrix(PR.cpu().numpy(), cpu_batch_Y)