Example #1
if opt.dataset_mode == 'CelebA':  # assumed condition: the snippet begins inside a dataset-selection branch
    opt.load_size = 80
    opt.crop_size = 64
    opt.size = 64
    dataset = create_dataset(opt)  # create a dataset given opt.dataset_mode and other options
    dataset_size = len(dataset)
    print('#training images = %d' % dataset_size)
else:
    raise Exception('Not implemented yet')


########################################  OFDM setting  ###########################################
size_after_compress = (opt.size // (2**opt.n_downsample))**2 * (opt.C_channel // 2)
opt.N = opt.batchSize                       # Batch size
opt.P = 1                                   # Number of symbols
opt.M = 64                                  # Number of subcarriers per symbol
opt.K = 16                                  # Length of the cyclic prefix (CP)
opt.L = 8                                   # Number of multipath channel taps (paths)
opt.decay = 4                               # Decay factor of the channel power-delay profile (assumed)
opt.S = size_after_compress // opt.M        # Number of packets
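# Worked example of the packet count above (a sketch only: opt.C_channel is set elsewhere, and the
# value used here is an assumption for illustration).  With opt.size = 64, opt.n_downsample = 2 and,
# say, opt.C_channel = 12 (channel pairs presumably mapped to complex symbols):
#   size_after_compress = (64 // 2**2)**2 * (12 // 2) = 16**2 * 6 = 1536
#   opt.S = 1536 // opt.M = 1536 // 64 = 24 OFDM packets per image.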

opt.is_cfo = False         # Whether to simulate a carrier frequency offset (CFO)
opt.is_trick = True
opt.is_cfo_random = False  # Whether the CFO is randomized per realization
opt.max_ang = 1.7          # Maximum CFO angle when the CFO is random (assumed units)
opt.ang = 1.7              # Fixed CFO angle otherwise (assumed units)
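# Minimal sketch of the impairment these flags control: a CFO rotates the time-domain samples by a
# progressively increasing phase.  How opt.ang maps to a normalized offset is an assumption here
# (treated as a fraction of the subcarrier spacing via degrees); the repository may define it differently.
import numpy as np

def add_cfo(x_time, ang_deg, n_fft):
    """Apply a carrier frequency offset as a progressive phase rotation of the time-domain samples."""
    eps = ang_deg / 360.0                     # assumed normalized CFO (fraction of the subcarrier spacing)
    n = np.arange(x_time.shape[-1])
    return x_time * np.exp(2j * np.pi * eps * n / n_fft)

# With opt.is_cfo_random, ang might be drawn uniformly from [-opt.max_ang, opt.max_ang] (an assumption).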

if opt.CE not in ['LS', 'MMSE', 'TRUE', 'IMPLICIT']:
    raise Exception("Channel estimation method not implemented")

if opt.EQ not in ['ZF', 'MMSE', 'IMPLICIT']:
    raise Exception("Equalization method not implemented")
Example #2
opt.n_downsample = 2  # Number of downsampling stages
opt.n_blocks = 2  # Number of residual blocks
opt.first_kernel = 5  # Kernel size of the first convolutional layer in the encoder
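# Sketch of the kind of encoder these three options typically describe: a first convolution with
# kernel size `first_kernel`, then `n_downsample` stride-2 convolutions, then `n_blocks` residual
# blocks, projected to the latent channels.  The real network lives in the repository's model code;
# names and channel widths here are assumptions for illustration only.
import torch.nn as nn

class ResBlockSketch(nn.Module):
    def __init__(self, ch):
        super().__init__()
        self.body = nn.Sequential(nn.Conv2d(ch, ch, 3, padding=1), nn.ReLU(True),
                                  nn.Conv2d(ch, ch, 3, padding=1))
    def forward(self, x):
        return x + self.body(x)              # residual (skip) connection

def build_encoder_sketch(n_downsample=2, n_blocks=2, first_kernel=5, ngf=64, c_channel=12):
    layers = [nn.Conv2d(3, ngf, first_kernel, padding=first_kernel // 2), nn.ReLU(True)]
    ch = ngf
    for _ in range(n_downsample):            # each stage halves the spatial resolution
        layers += [nn.Conv2d(ch, ch * 2, 3, stride=2, padding=1), nn.ReLU(True)]
        ch *= 2
    layers += [ResBlockSketch(ch) for _ in range(n_blocks)]
    layers += [nn.Conv2d(ch, c_channel, 3, padding=1)]   # project to the latent channels
    return nn.Sequential(*layers)

# e.g. a 64x64 input yields a (c_channel, 16, 16) latent with n_downsample = 2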

# Set the input dataset
opt.dataset_mode = 'CIFAR10'  # Dataset to use.  Currently supported: CIFAR10, CelebA

# Set up the training procedure
opt.batchSize = 64  # batch size
opt.n_epochs = 80  # Number of epochs without learning-rate decay
opt.n_epochs_decay = 80  # Number of epochs with linear learning-rate decay
opt.lr = 5e-4  # Initial learning rate
opt.lr_policy = 'linear'  # Learning-rate decay policy.  Available policies: see options/train_options.py (a linear-decay sketch follows this block)
opt.beta1 = 0.5  # Beta1 parameter for the Adam optimizer
opt.beta = 1
opt.K = 16
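# The 'linear' policy above typically keeps the learning rate constant for `n_epochs` epochs and then
# decays it linearly to zero over the next `n_epochs_decay` epochs (the common pix2pix/CycleGAN recipe).
# A sketch under that assumption, not necessarily this repository's exact scheduler:
from torch.optim import lr_scheduler

def linear_decay_scheduler(optimizer, n_epochs=80, n_epochs_decay=80):
    def lr_lambda(epoch):
        # 1.0 for the first n_epochs, then a linear ramp down to 0 over n_epochs_decay epochs
        return 1.0 - max(0, epoch - n_epochs) / float(n_epochs_decay + 1)
    return lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)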

opt.is_Feat = False  # Whether to use the feature-matching loss
opt.lambda_feat = 1  # Weight of the feature-matching loss

##############################################################################################################

if opt.gan_mode == 'wgangp':
    opt.norm_D = 'instance'  # Use instance normalization with WGAN-GP: the gradient penalty is computed per sample, so batch normalization (which couples samples) is avoided.  Available: 'instance', 'batch', 'none'
else:
    opt.norm_D = 'batch'  # Use batch normalization otherwise

opt.activation = 'sigmoid'  # The output activation function at the last layer in the decoder
opt.norm_EG = 'batch'  # Normalization layer for the encoder and generator

if opt.dataset_mode == 'CIFAR10':