def main():
    # Training entry point: build the sample generator, the detector network and the
    # Adam optimizer, optionally resume from a saved checkpoint, then run training.
    generator = sample_generator(train_batch_size, mod_n, NR)
    device = 'cuda'
    model = iterative_classifier(d_model, n_head, nhid, nlayers, mod_n, NR,
                                 d_transmitter_encoding, generator.real_QAM_const,
                                 generator.imag_QAM_const, generator.constellation,
                                 device, dropout)
    model = model.to(device=device)
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

    if load_pretrained_model:
        checkpoint = torch.load(model_filename)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        print('*******Successfully loaded pre-trained model***********')

    # Shrink the learning rate by a factor of 0.91 whenever the monitored metric
    # stops improving (patience of 0 epochs).
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, mode='min', factor=0.91, patience=0, verbose=True,
        threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08)

    train(model, optimizer, lr_scheduler, generator, device)
    print('******************************** Now Testing **********************************************')
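# The training main() above reads its configuration from module-level globals. The
# sketch below lists the names it expects with purely illustrative values; these are
# assumptions for readability, not the settings used by the original scripts.
train_batch_size = 256           # samples per training batch
mod_n = 16                       # QAM modulation order (e.g. 16 for 16-QAM)
NR = 32                          # number of receive antennas
d_model = 512                    # encoder embedding dimension
n_head = 8                       # attention heads per layer
nhid = 2048                      # feed-forward hidden width
nlayers = 12                     # number of iterative encoder layers
d_transmitter_encoding = 128     # size of the transmitter-index encoding
dropout = 0.0                    # dropout probability inside the network
learning_rate = 1e-4             # Adam step size
load_pretrained_model = False    # resume from a checkpoint if True
model_filename = 'path/to/checkpoint.pth'   # placeholder checkpoint path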
def main():
    # Evaluation entry point: rebuild the detector network, restore the trained
    # weights from disk, and run the test routine.
    generator = sample_generator(validtn_batch_size, mod_n, NR)
    device = 'cuda'
    model = iterative_classifier(d_model, n_head, nhid, nlayers, mod_n, NR,
                                 d_transmitter_encoding, generator.real_QAM_const,
                                 generator.imag_QAM_const, generator.constellation,
                                 device, dropout)
    model = model.to(device=device)

    checkpoint = torch.load(model_filename)
    model.load_state_dict(checkpoint['model_state_dict'])
    print('*******Successfully loaded pre-trained model*********** from directory : ', model_filename)

    print('******************************** Now Testing **********************************************')
    test(model, generator, device)
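# A defensive variant of the checkpoint restore performed above, left here as a
# sketch: map_location lets the same file be loaded on a CPU-only machine and
# eval() disables dropout before inference. The helper name is ours; whether the
# original test() already switches the model to eval mode is an assumption.
def load_for_inference(model, filename, device):
    checkpoint = torch.load(filename, map_location=device)  # restore onto the chosen device
    model.load_state_dict(checkpoint['model_state_dict'])
    model.to(device)
    model.eval()                                             # disable dropout for testing
    return model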
def main():
    # Runtime-comparison entry point: load both the trained detector network and a
    # pre-trained OAMPNet baseline, then run the comparison on timing batches.
    generator = sample_generator(time_batch_size, mod_n, NR)
    device = 'cuda'
    model_network = iterative_classifier(d_model, n_head, nhid, nlayers, mod_n, NR,
                                         d_transmitter_encoding, generator.real_QAM_const,
                                         generator.imag_QAM_const, generator.constellation,
                                         device, dropout)
    model_oampnet = oampnet(num_layers, generator.constellation, generator.real_QAM_const,
                            generator.imag_QAM_const, device=device)
    model_network = model_network.to(device=device)
    model_oampnet = model_oampnet.to(device=device)

    network_checkpoint = torch.load(model_network_filename)
    model_network.load_state_dict(network_checkpoint['model_state_dict'])
    model_oampnet.load_state_dict(torch.load(model_oampnet_filename))
    print('*******Successfully loaded pre-trained models***********')

    print('******************************** Now Testing **********************************************')
    test(model_network, model_oampnet, generator, device)
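# Loading two detectors with a dedicated time_batch_size suggests a per-batch
# runtime comparison. The helper below is a generic sketch: the forward pass is
# wrapped in a caller-supplied closure because the exact input signatures of
# iterative_classifier and oampnet are not shown in this listing.
import time  # torch is assumed to be imported by the surrounding script

@torch.no_grad()
def time_forward(forward_fn, device, repeats=100):
    if device == 'cuda':
        torch.cuda.synchronize()              # flush any pending kernels before timing
    start = time.perf_counter()
    for _ in range(repeats):
        forward_fn()                          # one detection pass on a fixed batch
    if device == 'cuda':
        torch.cuda.synchronize()              # wait for all launched kernels to finish
    return (time.perf_counter() - start) / repeats

# Hypothetical usage (the forward arguments y, H, noise_sigma are assumed):
#   avg_seconds = time_forward(lambda: model_network(y, H, noise_sigma), device)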