Example #1
def load_model():
    # Load the pretrained VAE encoder checkpoint from the working directory.
    # Assumes `os`, `torch`, `Encoder`, and `device` are defined in the enclosing module.
    cwd = os.getcwd()
    model_dir = cwd + '/Channel/encoder_16_VAE_1_epoch30.pth'
    model = Encoder()
    model.load_state_dict(torch.load(model_dir, map_location=device))
    encoder_model = model.to(device)
    return encoder_model
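For orientation, a minimal usage sketch of the loader above. It assumes `Encoder` and `device` exist at module level as in the snippet; the 1x16x16 input shape is only a guess based on the checkpoint name, not something the source confirms.

import torch

encoder = load_model()          # uses the module-level `device` defined elsewhere
encoder.eval()

# Hypothetical input: a batch with one single-channel 16x16 field on the same device.
x = torch.randn(1, 1, 16, 16, device=device)
with torch.no_grad():
    out = encoder(x)            # output structure depends on how Encoder is defined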
Example #2
    def __init__(self, device):
        # Excerpt from a class constructor; loads a pretrained VAE encoder checkpoint.
        self.device = device
        self.prediction = Prediction(64, self.device)
        cwd = os.getcwd()
        model_dir16 = cwd + '/Gaussian/generative_model/encoder_16_VAE_0.5_epoch30.pth'
        model16 = Encoder()
        model16.load_state_dict(
            torch.load(model_dir16, map_location=self.device), strict=False)
        self.encoder_model16 = model16.to(self.device)
        self.encoder_model16.eval()
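Because the state dict is loaded with strict=False, silently skipped weights are easy to miss. A small check like the sketch below surfaces any missing or unexpected keys; the path mirrors the snippet, but the standalone form is illustrative.

import os
import torch

model = Encoder()
state = torch.load(os.path.join(os.getcwd(), 'Gaussian', 'generative_model',
                                'encoder_16_VAE_0.5_epoch30.pth'),
                   map_location='cpu')
result = model.load_state_dict(state, strict=False)
print('missing keys:', result.missing_keys)
print('unexpected keys:', result.unexpected_keys)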
Example #3
    def __init__(self, device):
        # Excerpt from a class constructor; loads two pretrained VAE encoder checkpoints.
        self.device = device
        self.prediction = Prediction(64, self.device)
        cwd = os.getcwd()

        # First encoder checkpoint (the 16 variant).
        model_dir16 = cwd + '/Channel/generative_model/encoder_16_VAE_1_epoch30.pth'
        model16 = Encoder()
        model16.load_state_dict(
            torch.load(model_dir16, map_location=self.device), strict=False)
        self.encoder_model16 = model16.to(self.device)
        self.encoder_model16.eval()

        # Second encoder checkpoint (the 16_32 variant).
        model_dir_16_32 = cwd + '/Channel/generative_model/encoder_16_32_VAE_1_0.7_epoch50.pth'
        model_16_32 = Encoder()
        model_16_32.load_state_dict(
            torch.load(model_dir_16_32, map_location=self.device), strict=False)
        self.encoder_model_16_32 = model_16_32.to(self.device)
        self.encoder_model_16_32.eval()
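The two loads above differ only in the checkpoint path, so a small helper could remove the duplication. The sketch below is hypothetical; `_load_encoder` is not a name from the repository.

import os
import torch

def _load_encoder(rel_path, device):
    # Load an Encoder checkpoint (relative to the working directory) onto `device`.
    model = Encoder()
    state = torch.load(os.path.join(os.getcwd(), rel_path), map_location=device)
    model.load_state_dict(state, strict=False)
    model.to(device)
    model.eval()
    return model

# self.encoder_model16 = _load_encoder('Channel/generative_model/encoder_16_VAE_1_epoch30.pth', self.device)
# self.encoder_model_16_32 = _load_encoder('Channel/generative_model/encoder_16_32_VAE_1_0.7_epoch50.pth', self.device)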
Example #4
def load_model(size):
    # Select the checkpoint matching the requested field size and load it onto `device`.
    cwd = os.getcwd()
    if size == 16:
        model_dir = cwd + '/Channel/encoder_16_VAE_1_epoch30.pth'
    elif size == 32:
        model_dir = cwd + '/Channel/encoder_16_32_VAE_1_0.7_epoch50.pth'
    else:
        raise ValueError(f"unsupported size: {size}")
    model = Encoder()
    model.load_state_dict(torch.load(model_dir, map_location=device))
    encoder_model = model.to(device)
    return encoder_model
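A brief usage sketch of the size-dispatching loader, assuming `Encoder` and `device` are defined in the surrounding module as in the snippet:

encoder16 = load_model(16)   # checkpoint for the 16 variant
encoder32 = load_model(32)   # checkpoint for the 16_32 variant
# Any other size raises ValueError (see the else branch above).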
Example #5
# The argument name below is reconstructed from the later use of args.beta_vae;
# the original excerpt is cut off before this call.
parser.add_argument("--beta_vae",
                    type=float,
                    default=0.5,
                    help="beta hyperparameter")
args = parser.parse_args()

cwd = os.getcwd()  # avoid shadowing the built-in dir()
directory = f'/Gaussian/experiments/experiments_64/latent256/beta_{args.beta_vae}'
exp_dir = cwd + directory + "/N{}_Bts{}_Eps{}_lr{}".\
    format(args.n_train, args.batch_size, args.n_epochs, args.lr)
output_dir = exp_dir + "/save_model"

if not os.path.exists(output_dir):
    os.makedirs(output_dir)

device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")
encoder = Encoder()
decoder = Decoder()
encoder.to(device)
decoder.to(device)
print("number of parameters: {}".format(encoder._n_parameters() +
                                        decoder._n_parameters()))


train_hdf5_file = os.getcwd() + \
    '/Gaussian/data/training_set_64_gaussian1_25000.hdf5'
train_loader = load_data_1scale(train_hdf5_file,
                                args.n_train,
                                args.batch_size,
                                singlescale=True)

# The excerpt is cut off mid-call; the continuation below assumes the usual pattern
# of optimizing both encoder and decoder parameters with the configured learning rate.
optimizer = torch.optim.Adam(itertools.chain(encoder.parameters(),
                                             decoder.parameters()), lr=args.lr)
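The snippet ends at the optimizer construction. For context, a minimal sketch of one beta-VAE training step follows; it assumes encoder(x) returns (mu, logvar) and decoder(z) returns a reconstruction, which may not match the actual interfaces in this repository.

import torch
import torch.nn.functional as F

def beta_vae_step(encoder, decoder, optimizer, x, beta):
    # One optimization step: reconstruction loss plus beta-weighted KL divergence.
    mu, logvar = encoder(x)
    std = torch.exp(0.5 * logvar)
    z = mu + std * torch.randn_like(std)   # reparameterization trick
    recon = decoder(z)
    recon_loss = F.mse_loss(recon, x, reduction='sum')
    kld = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
    loss = recon_loss + beta * kld
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()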
Example #6
parser.add_argument("--sample-interval", type=int, default=5, help="interval between image sampling")
parser.add_argument("--beta_vae", type=float, default=1, help="beta hyperparameter")
args = parser.parse_args()

cwd = os.getcwd()  # avoid shadowing the built-in dir()
directory = f'/Gaussian/experiments/experiments_16/latent16/beta_{args.beta_vae}'
exp_dir = cwd + directory + "/N{}_Bts{}_Eps{}_lr{}".\
    format(args.n_train, args.batch_size, args.n_epochs, args.lr)
output_dir = exp_dir + "/predictions"
model_dir = exp_dir
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
if not os.path.exists(model_dir):
    os.makedirs(model_dir)
device = torch.device("cuda:1" if torch.cuda.is_available() else "cpu")
encoder = Encoder() 
decoder = Decoder()
encoder.to(device)
decoder.to(device)
print("number of parameters: {}".format(encoder._n_parameters()+decoder._n_parameters()))


train_hdf5_file = os.getcwd() + \
    '/Gaussian/data/training_set_16_gaussian.hdf5'
test_hdf5_file = os.getcwd() + \
    '/Gaussian/data/test_set_16_gaussian.hdf5'
train_loader = load_data_1scale(train_hdf5_file, args.n_train, args.batch_size, singlescale=True)
with h5py.File(test_hdf5_file, 'r') as f:
    x_test = f['test'][()]
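A hypothetical follow-up to the test-set read above, wrapping the array in a DataLoader for batched evaluation; reusing args.batch_size here is an assumption, not the repository's choice.

import torch
from torch.utils.data import TensorDataset, DataLoader

x_test_t = torch.as_tensor(x_test, dtype=torch.float32)
test_loader = DataLoader(TensorDataset(x_test_t),
                         batch_size=args.batch_size, shuffle=False)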