# ---- Evaluation script: reconstruct CSI from saved encoder output and test NMSE ----
# parameters for data
img_channels = 2
feedback_bits = 128

# Data loading
# NOTE(review): this *test* script loads H_train.mat as ground truth, while the
# commented-out lines below load H_test.mat — confirm which split the NMSE
# should actually be computed on.
# data_load_address = './data'
# mat = h5py.File(data_load_address+'/H_test.mat', 'r')
# data = np.transpose(mat['H_test'])
# Open read-only explicitly: older h5py defaulted to mode 'a' (read/write),
# which can lock or even modify the dataset file. 'r' also matches the
# training script elsewhere in this file.
mat = h5py.File('./data/H_train.mat', 'r')
data = np.transpose(mat['H_train'])
data = data.astype('float32')
# Reshape flat samples to (N, C, H, W).
# NOTE(review): img_height / img_width are not defined in this fragment —
# they must come from earlier in the file (16 and 32 in the companion
# training script); confirm before running standalone.
x_test = np.reshape(data, [len(data), img_channels, img_height, img_width])

# load encoder_output: quantized codewords previously produced by the encoder
decode_input = np.load('./Modelsave/encoder_output.npy')

# load model and test NMSE — only the decoder half is needed here
model = AutoEncoder(feedback_bits).cuda()
model_decoder = model.decoder
model_path = './Modelsave/decoder.pth.tar'
model_decoder.load_state_dict(torch.load(model_path)['state_dict'])
print("weight loaded")

# dataLoader for test
# NOTE(review): batch_size / num_workers are also not defined in this
# fragment — supplied elsewhere in the file.
test_dataset = DatasetFolder(decode_input)
test_loader = torch.utils.data.DataLoader(test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False,
                                          num_workers=num_workers,
                                          pin_memory=True)

# test — switch off dropout/batch-norm updates before inference
model_decoder.eval()
# ===== Exemplo n.º 2 =====
# ---- Training script setup: hyperparameters, model, optimizer, data loading ----
# Pin training to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = '0'
use_single_gpu = True  # select whether using single gpu or multiple gpus
# Fixed seed for reproducible weight init / shuffling.
torch.manual_seed(1)
batch_size = 10240
epochs = 1000
learning_rate = 1e-3
num_workers = 4
print_freq = 100  # print frequency (default: 60)
# parameters for data
feedback_bits = 128  # size of the compressed CSI codeword in bits
img_height = 16
img_width = 32
img_channels = 2  # real + imaginary parts of the channel matrix

# Model construction
model = AutoEncoder(feedback_bits)
if use_single_gpu:
    model = model.cuda()

else:
    # DataParallel will divide and allocate batch_size to all available GPUs
    # NOTE(review): the wrapper is bound to `autoencoder`, not `model` —
    # confirm the (unseen) training loop uses the right name in this branch.
    autoencoder = torch.nn.DataParallel(model).cuda()

# Reconstruction loss; .cuda() keeps the criterion on the same device as the model.
criterion = nn.MSELoss().cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Data loading
print('Loading training data ...')
data_load_address = './data'
mat = h5py.File(data_load_address + '/H_train.mat', 'r')
data = np.transpose(mat['H_train'])  # shape=(320000, 1024)
# ===== Exemplo n.º 3 =====
# ---- Training script setup (variant with warmup schedule); continues past this fragment ----
device = torch.device("cuda:0")
# Fixed seed for reproducible weight init / shuffling.
torch.manual_seed(1)
batch_size = 64
epochs = 1000
learning_rate = 1e-3
# Warmup steps for an LR scheduler presumably created later — TODO confirm.
num_warmup_steps = 0
num_workers = 4
print_freq = 100  # print frequency (default: 60)
# parameters for data
feedback_bits = 128  # size of the compressed CSI codeword in bits
img_height = 16
img_width = 32
img_channels = 2  # real + imaginary parts of the channel matrix

# Model construction
model = AutoEncoder(feedback_bits)
# NOTE(review): use_single_gpu is not defined anywhere in this fragment —
# it must be set earlier in the original file; NameError if run as-is.
if use_single_gpu:
    model = model.cuda()

else:
    # DataParallel will divide and allocate batch_size to all available GPUs
    autoencoder = torch.nn.DataParallel(model).cuda()

# Optional warm-start from previously saved encoder/decoder checkpoints.
# modelSave1 = './Modelsave/encoder.pth.tar'
# model.encoder.load_state_dict(torch.load(modelSave1)['state_dict'])

# modelSave2 = './Modelsave/decoder.pth.tar'
# model.decoder.load_state_dict(torch.load(modelSave2)['state_dict'])

# Data loading
data_load_address = './data'