Example #1
def __init__(self, load_model=True):
     self.env_name = "carracing"
     self.vae = ConvVAE(batch_size=1,
                        gpu_mode=False,
                        is_training=False,
                        reuse=True)
     self.rnn = MDNRNN(hps_sample, gpu_mode=False, reuse=True)
     if load_model:
         self.vae.load_json('Weights/vae_weights.json')
         self.rnn.load_json('Weights/rnn_weights.json')
     self.state = rnn_init_state(self.rnn)
     self.rnn_mode = True
     self.input_size = rnn_output_size(EXP_MODE)
     self.z_size = 32
     if EXP_MODE == MODE_Z_HIDDEN:
         self.hidden_size = 40
         self.weight_hidden = np.random.randn(self.input_size,
                                              self.hidden_size)
         self.bias_hidden = np.random.randn(self.hidden_size)
         self.weight_output = np.random.randn(self.hidden_size, 3)
         self.bias_output = np.random.randn(3)
         self.param_count = ((self.input_size + 1) *
                             self.hidden_size) + (self.hidden_size * 3 + 3)
     else:
         self.weight = np.random.randn(self.input_size, 3)
         self.bias = np.random.randn(3)
         self.param_count = (self.input_size) * 3 + 3
     self.render_mode = False
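
For intuition about param_count: in the World Models setup the controller input is the latent vector concatenated with the RNN hidden state, so input_size might be, say, 288 (32 + 256; the exact value depends on EXP_MODE and the RNN configuration). A minimal sketch of the same arithmetic, under that assumed input_size:

# Sanity check of param_count; input_size = 288 is an illustrative assumption
input_size, hidden_size = 288, 40
with_hidden = (input_size + 1) * hidden_size + (hidden_size * 3 + 3)  # both layers, weights + biases
without_hidden = input_size * 3 + 3                                   # single linear layer
print(with_hidden, without_hidden)  # 11683 867
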
Example #2
    def __init__(self, disc_hiddens=[1000, 1000, 1000],
                 gamma=30, input_size=(3, 64, 64),
                 kernel_sizes=[32, 32, 64, 64],
                 hidden_size=256, dim_z=32,
                 binary=True, **kwargs):
        """initialize neural networks
        :param disc_hiddens: list of int, numbers of hidden units of each layer in discriminator
        :param gamma: weight for total correlation term in loss function
        """
        super(ConvFactorVAE, self).__init__()
        self.gamma = gamma
        self.dim_z = dim_z
        self.binary = binary
        self.input_size = input_size
        self.hidden_size = hidden_size

        # VAE networks
        self.vae = ConvVAE(input_size, kernel_sizes, hidden_size, dim_z, binary, **kwargs)
        # inherit some attributes
        self.channel_sizes = self.vae.channel_sizes

        # discriminator networks
        D_act = nn.LeakyReLU
        D_act_args = {"negative_slope": 0.2, "inplace": False}
        D_output_dim = 2
        self.discriminator = nns.create_mlp(
            self.dim_z, disc_hiddens, act_layer=D_act, act_args=D_act_args)
        self.discriminator = nn.Sequential(
            self.discriminator,
            nn.Linear(disc_hiddens[-1], D_output_dim))
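
The discriminator built above maps a latent code to two logits (joint sample vs. dimension-wise permuted sample). Since nns.create_mlp is specific to this repo, here is an equivalent plain-PyTorch sketch to check the shapes:

import torch
import torch.nn as nn

dim_z, disc_hiddens = 32, [1000, 1000, 1000]
layers, d_in = [], dim_z
for d_out in disc_hiddens:
    layers += [nn.Linear(d_in, d_out), nn.LeakyReLU(0.2, inplace=False)]
    d_in = d_out
discriminator = nn.Sequential(*layers, nn.Linear(disc_hiddens[-1], 2))

z = torch.randn(8, dim_z)      # a batch of 8 latent codes
print(discriminator(z).shape)  # torch.Size([8, 2])
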
Example #3
kl_tolerance = 0.5
filelist = os.listdir(DATA_DIR)
filelist.sort()
filelist = filelist[0:10000]
dataset, action_dataset = load_raw_data_list(filelist)

# Resetting the graph of the VAE model

reset_graph()

# Creating the VAE model as an object of the ConvVAE class

vae = ConvVAE(z_size=z_size,
              batch_size=batch_size,
              learning_rate=learning_rate,
              kl_tolerance=kl_tolerance,
              is_training=False,
              reuse=False,
              gpu_mode=True)

# Loading the weights of the VAE model

vae.load_json(os.path.join(model_path_name, 'vae.json'))

# Running the main code that generates the data from the VAE model for the MDN-RNN model

mu_dataset = []
logvar_dataset = []
for i in range(len(dataset)):
    data_batch = dataset[i]
    mu, logvar, z = encode_batch(data_batch)
    # Collecting the encoded statistics that the MDN-RNN will be trained on
    mu_dataset.append(mu)
    logvar_dataset.append(logvar)
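
The collected mu/logvar lists are then typically saved for the MDN-RNN training stage; a minimal sketch, assuming numpy is imported and with a hypothetical file name:

# Persist the encoded statistics (the file name is an assumption)
np.savez_compressed("series.npz",
                    action=np.array(action_dataset, dtype=object),
                    mu=np.array(mu_dataset, dtype=object),
                    logvar=np.array(logvar_dataset, dtype=object))
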
Example #4
    os.makedirs(out_path, exist_ok=True)

    # check for GPU
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # load data
    print("Loading data")
    train_dataloader = get_dataloader(opt)
    test_dataloader = None
    n = len(train_dataloader.dataset)
    iter_per_epoch = math.ceil(n / opt.batch_size)
    # run
    start = time.time()
    print("Training")

    vae = ConvVAE(opt).to(device)
    optimizer = optim.Adam(vae.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))

    discriminator = Discriminator(opt.latent_dim).to(device)
    optimizer_d = optim.Adam(discriminator.parameters(),
                             lr=opt.lrd,
                             betas=(opt.b1d, opt.b2d))

    losses_train = []
    reconstruction_error = []
    kl_divergence = []
    total_correlation = []
    discriminator_loss = []
    losses_test = []

    dimension_kld = run()
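
run() is defined elsewhere in that script; the alternating FactorVAE update it performs typically looks like the sketch below. The vae forward signature and the variable names are assumptions illustrating the algorithm, not this repo's exact code:

import torch
import torch.nn.functional as F

def train_step(x1, x2, vae, discriminator, optimizer, optimizer_d, gamma, device):
    # VAE update: reconstruction + KL + gamma * total-correlation estimate from D(z)
    x1 = x1.to(device)
    recon, mu, logvar, z = vae(x1)  # assumed forward signature
    recon_loss = F.binary_cross_entropy(recon, x1, reduction='sum') / x1.size(0)
    kld = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).sum(1).mean()
    d_z = discriminator(z)
    tc = (d_z[:, 0] - d_z[:, 1]).mean()
    loss = recon_loss + kld + gamma * tc
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Discriminator update on a second batch: real z vs. dimension-wise permuted z
    x2 = x2.to(device)
    with torch.no_grad():
        _, _, _, z2 = vae(x2)
        z_perm = torch.stack([z2[torch.randperm(z2.size(0)), i]
                              for i in range(z2.size(1))], dim=1)
    zeros = torch.zeros(z.size(0), dtype=torch.long, device=device)
    ones = torch.ones(z_perm.size(0), dtype=torch.long, device=device)
    d_loss = 0.5 * (F.cross_entropy(discriminator(z.detach()), zeros) +
                    F.cross_entropy(discriminator(z_perm), ones))
    optimizer_d.zero_grad()
    d_loss.backward()
    optimizer_d.step()
    return loss.item(), d_loss.item()
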
Example #5
class ConvFactorVAE(BaseVAE):
    """Class that implements the Factor Variational Auto-Encoder (based on CNN)"""

    def __init__(self, disc_hiddens=[1000, 1000, 1000],
                 gamma=30, input_size=(3, 64, 64),
                 kernel_sizes=[32, 32, 64, 64],
                 hidden_size=256, dim_z=32,
                 binary=True, **kwargs):
        """initialize neural networks
        :param disc_hiddens: list of int, numbers of hidden units of each layer in discriminator
        :param gamma: weight for total correlation term in loss function
        """
        super(ConvFactorVAE, self).__init__()
        self.gamma = gamma
        self.dim_z = dim_z
        self.binary = binary
        self.input_size = input_size
        self.hidden_size = hidden_size

        # VAE networks
        self.vae = ConvVAE(input_size, kernel_sizes, hidden_size, dim_z, binary, **kwargs)
        # inherit some attributes
        self.channel_sizes = self.vae.channel_sizes

        # discriminator networks
        D_act = nn.LeakyReLU
        D_act_args = {"negative_slope": 0.2, "inplace": False}
        D_output_dim = 2
        self.discriminator = nns.create_mlp(
            self.dim_z, disc_hiddens, act_layer=D_act, act_args=D_act_args)
        self.discriminator = nn.Sequential(
            self.discriminator,
            nn.Linear(disc_hiddens[-1], D_output_dim))

    def encode(self, x):
        """vae encode"""
        return self.vae.encode(x)

    def decode(self, code):
        """vae decode"""
        return self.vae.decode(code)

    def reparameterize(self, mu, logvar):
        """reparameterization trick"""
        return self.vae.reparameterize(mu, logvar)

    def forward(self, input, no_dec=False):
        """autoencoder forward computation"""
        encoded = self.encode(input)
        mu, logvar = encoded
        z = self.reparameterize(mu, logvar) # latent variable z

        if no_dec:
            # no decoding
            return z.clone() # avoid inplace operation

        return self.decode(z), encoded, z

    def sample_latent(self, num, device, **kwargs):
        """vae sample latent"""
        return self.vae.sample_latent(num, device, **kwargs)

    def sample(self, num, device, **kwargs):
        """vae sample"""
        return self.vae.sample(num, device, **kwargs)

    def decoded_to_output(self, decoded, **kwargs):
        """vae transform decoded result to output"""
        return self.vae.decoded_to_output(decoded, **kwargs)

    def reconstruct(self, input, **kwargs):
        """vae reconstruct"""
        return self.vae.reconstruct(input, **kwargs)

    def permute_dims(self, z):
        """permute separately each dimension of the z randomly in a batch
        :param z: [B x D] tensor
        :return: [B x D] tensor with each dim of D dims permuted randomly
        """
        B, D = z.size()
        # generate randomly permuted batch on each dimension
        permuted = []
        for i in range(D):
            ind = torch.randperm(B)
            permuted.append(z[:, i][ind].view(-1, 1))

        return torch.cat(permuted, dim=1)

    def loss_function(self, *inputs, **kwargs):
        """loss function described in the paper (eq. (2))"""
        optim_part = kwargs['optim_part'] # the part to optimize

        if optim_part == 'vae':
            # update VAE
            decoded = inputs[0]
            encoded = inputs[1]
            Dz = inputs[2]
            x = inputs[3]

            flat_input_size = np.prod(self.input_size)
            mu, logvar = encoded
            # KL divergence term
            KLD = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).sum(1).mean()
            if self.binary:
                # likelihood term under Bernoulli MLP decoder
                MLD = F.binary_cross_entropy(decoded.view(-1, flat_input_size),
                                             x.view(-1, flat_input_size),
                                             reduction='sum').div(x.size(0))
            else:
                # likelihood term under Gaussian MLP decoder
                mean_dec, logvar_dec = decoded
                recon_x_distribution = Normal(loc=mean_dec.view(-1, flat_input_size),
                                              scale=torch.exp(0.5*logvar_dec.view(-1, flat_input_size)))
                MLD = -recon_x_distribution.log_prob(x.view(-1, flat_input_size)).sum(1).mean()

            tc_loss = (Dz[:, :1] - Dz[:, 1:]).mean()

            return {
                "loss": KLD + MLD + self.gamma * tc_loss,
                "KLD": KLD,
                "MLD": MLD,
                "tc_loss": tc_loss}
        elif optim_part == 'discriminator':
            # update discriminator
            Dz = inputs[0]
            Dz_pperm = inputs[1]
            device = Dz.device

            ones = torch.ones(Dz.size(0), dtype=torch.long).to(device)
            zeros = torch.zeros(Dz.size(0), dtype=torch.long).to(device)

            D_tc_loss = 0.5 * (F.cross_entropy(Dz, zeros) +
                               F.cross_entropy(Dz_pperm, ones))

            return {"loss": D_tc_loss, "D_tc_loss": D_tc_loss}

        else:
            raise Exception("no such network to optimize: {}".format(optim_part))
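
A minimal sketch of how this class is driven, following the two optim_part branches above (placeholder data; assumes the same imports the class uses):

model = ConvFactorVAE()            # defaults defined above
x = torch.rand(16, 3, 64, 64)      # placeholder batch in [0, 1]

# VAE branch: forward pass, score z with the discriminator, evaluate eq. (2)
decoded, encoded, z = model(x)
Dz = model.discriminator(z)
vae_terms = model.loss_function(decoded, encoded, Dz, x, optim_part='vae')

# Discriminator branch: D(z) vs. D(permute_dims(z)) on detached codes
z_d = model(x, no_dec=True).detach()
D_real = model.discriminator(z_d)
D_perm = model.discriminator(model.permute_dims(z_d))
disc_terms = model.loss_function(D_real, D_perm, optim_part='discriminator')
print(vae_terms['loss'].item(), disc_terms['loss'].item())
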
Example #6
def main():
    parser = argparse.ArgumentParser(description='VAE')
    parser.add_argument(
        '--weights',
        type=str,
        default=None,
        help='Load weights for the VAE instead of training the model')

    parser.add_argument('--batch-size',
                        type=int,
                        default=128,
                        help='Input batch size for training, 128 by default')

    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        help='Number of epochs to train, 10 by default')

    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        help='Random seed, 1 by default')

    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        help='Interval between two logs of the training status')

    args = parser.parse_args()

    torch.manual_seed(args.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    if torch.cuda.is_available():
        kwargs = {'num_workers': 1, 'pin_memory': True}
    else:
        kwargs = {}

    train_dataset = datasets.MNIST('data',
                                   train=True,
                                   download=True,
                                   transform=transforms.ToTensor())

    test_dataset = datasets.MNIST('data',
                                  train=False,
                                  transform=transforms.ToTensor())

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)

    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.batch_size,
                                              shuffle=True,
                                              **kwargs)

    # model = FCVAE(channels=1, z_dim=2).to(device)
    model = ConvVAE(channels=1, z_dim=2).to(device)

    net_manager = NetManager(model,
                             device,
                             train_loader=train_loader,
                             test_loader=test_loader)

    if args.weights is None:
        net_manager.set_writer("vae")
        net_manager.train(args.epochs, log_interval=args.log_interval)

        if not os.path.exists("weights"):
            os.mkdir("weights")

        net_manager.save_net("weights/vae.pth")
    else:
        net_manager.load_net(args.weights)

    net_manager.plot_latent_space(dark_background=True)
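
Assuming the script above is saved as vae_mnist.py (a hypothetical name), the two modes look like:

    python vae_mnist.py --epochs 20                 # train, then save weights/vae.pth
    python vae_mnist.py --weights weights/vae.pth   # skip training, just plot the latent space
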
Example #7
class Model:

    # Initializing all the parameters and variables of the Model class
    def __init__(self, load_model=True):
        self.env_name = "carracing"
        self.vae = ConvVAE(batch_size=1,
                           gpu_mode=False,
                           is_training=False,
                           reuse=True)
        self.rnn = MDNRNN(hps_sample, gpu_mode=False, reuse=True)
        if load_model:
            self.vae.load_json('Weights/vae_weights.json')
            self.rnn.load_json('Weights/rnn_weights.json')
        self.state = rnn_init_state(self.rnn)
        self.rnn_mode = True
        self.input_size = rnn_output_size(EXP_MODE)
        self.z_size = 32
        if EXP_MODE == MODE_Z_HIDDEN:
            self.hidden_size = 40
            self.weight_hidden = np.random.randn(self.input_size,
                                                 self.hidden_size)
            self.bias_hidden = np.random.randn(self.hidden_size)
            self.weight_output = np.random.randn(self.hidden_size, 3)
            self.bias_output = np.random.randn(3)
            self.param_count = ((self.input_size + 1) *
                                self.hidden_size) + (self.hidden_size * 3 + 3)
        else:
            self.weight = np.random.randn(self.input_size, 3)
            self.bias = np.random.randn(3)
            self.param_count = (self.input_size) * 3 + 3
        self.render_mode = False

    # Making a method that creates an environment (in our case the CarRacing game) inside which both the AI and the human will play
    def make_env(self, seed=-1, render_mode=False, full_episode=False):
        self.render_mode = render_mode
        self.env = make_env(self.env_name,
                            seed=seed,
                            render_mode=render_mode,
                            full_episode=full_episode)

    # Making a method that reinitializes the state of the RNN model
    def reset(self):
        self.state = rnn_init_state(self.rnn)

    # Making a method that encodes the observations (input frames)
    def encode_obs(self, obs):
        result = np.copy(obs).astype(np.float32) / 255.0
        result = result.reshape(1, 64, 64, 3)
        mu, logvar = self.vae.encode_mu_logvar(result)
        mu = mu[0]
        logvar = logvar[0]
        s = logvar.shape
        z = mu + np.exp(logvar / 2.0) * np.random.randn(*s)
        return z, mu, logvar

    # Making a method that samples an action based on the latent vector z
    def get_action(self, z):
        h = rnn_output(self.state, z, EXP_MODE)
        if EXP_MODE == MODE_Z_HIDDEN:
            h = np.tanh(np.dot(h, self.weight_hidden) + self.bias_hidden)
            action = np.tanh(np.dot(h, self.weight_output) + self.bias_output)
        else:
            action = np.tanh(np.dot(h, self.weight) + self.bias)
        action[1] = (action[1] + 1.0) / 2.0
        action[2] = clip(action[2])
        self.state = rnn_next_state(self.rnn, z, action, self.state)
        return action

    # Making a method that sets the initialized/loaded weights into the model
    def set_model_params(self, model_params):
        if EXP_MODE == MODE_Z_HIDDEN:
            params = np.array(model_params)
            cut_off = (self.input_size + 1) * self.hidden_size
            params_1 = params[:cut_off]
            params_2 = params[cut_off:]
            self.bias_hidden = params_1[:self.hidden_size]
            self.weight_hidden = params_1[self.hidden_size:].reshape(
                self.input_size, self.hidden_size)
            self.bias_output = params_2[:3]
            self.weight_output = params_2[3:].reshape(self.hidden_size, 3)
        else:
            self.bias = np.array(model_params[:3])
            self.weight = np.array(model_params[3:]).reshape(
                self.input_size, 3)

    # Making a method that loads the model weights
    def load_model(self, filename):
        with open(filename) as f:
            data = json.load(f)
        print('loading file %s' % (filename))
        self.data = data
        model_params = np.array(data[0])
        self.set_model_params(model_params)

    # Making a method that randomly initializes the weights
    def get_random_model_params(self, stdev=0.1):
        return np.random.standard_cauchy(self.param_count) * stdev

    # Making a method that randomly initializes the weights for all 3 parts of the Full World Model (VAE, RNN, Controller)
    def init_random_model_params(self, stdev=0.1):
        params = self.get_random_model_params(stdev=stdev)
        self.set_model_params(params)
        vae_params = self.vae.get_random_model_params(stdev=stdev)
        self.vae.set_model_params(vae_params)
        rnn_params = self.rnn.get_random_model_params(stdev=stdev)
        self.rnn.set_model_params(rnn_params)
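
A minimal rollout sketch for this Model, assuming the environment helpers from the same codebase; the controller weights file name and the loop itself are illustrative, not part of this example:

model = Model(load_model=True)
model.load_model('Weights/controller_weights.json')  # hypothetical controller file
model.make_env(seed=0)
model.reset()

obs = model.env.reset()
for step in range(1000):
    z, mu, logvar = model.encode_obs(obs)
    action = model.get_action(z)
    obs, reward, done, info = model.env.step(action)
    if done:
        break
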
Example #8
class Model:
    ''' simple one layer model for car racing '''
    def __init__(self, load_model=True):
        self.env_name = "carracing"
        self.vae = ConvVAE(batch_size=1,
                           gpu_mode=False,
                           is_training=False,
                           reuse=True)

        self.rnn = MDNRNN(hps_sample, gpu_mode=False, reuse=True)

        if load_model:
            self.vae.load_json('vae/vae.json')
            self.rnn.load_json('rnn/rnn.json')

        self.state = rnn_init_state(self.rnn)
        self.rnn_mode = True

        self.input_size = rnn_output_size(EXP_MODE)
        self.z_size = 32

        if EXP_MODE == MODE_Z_HIDDEN:  # one hidden layer
            self.hidden_size = 40
            self.weight_hidden = np.random.randn(self.input_size,
                                                 self.hidden_size)
            self.bias_hidden = np.random.randn(self.hidden_size)
            self.weight_output = np.random.randn(self.hidden_size, 3)
            self.bias_output = np.random.randn(3)
            self.param_count = ((self.input_size + 1) *
                                self.hidden_size) + (self.hidden_size * 3 + 3)
        else:
            self.weight = np.random.randn(self.input_size, 3)
            self.bias = np.random.randn(3)
            self.param_count = (self.input_size) * 3 + 3

        self.render_mode = False

    def make_env(self, seed=-1, render_mode=False, full_episode=False):
        self.render_mode = render_mode
        self.env = make_env(self.env_name,
                            seed=seed,
                            render_mode=render_mode,
                            full_episode=full_episode)

    def reset(self):
        self.state = rnn_init_state(self.rnn)

    def encode_obs(self, obs):
        # convert raw obs to z, mu, logvar
        result = np.copy(obs).astype(np.float32) / 255.0
        result = result.reshape(1, 96, 96, 3)
        mu, logvar = self.vae.encode_mu_logvar(result)
        mu = mu[0]
        logvar = logvar[0]
        s = logvar.shape
        z = mu + np.exp(logvar / 2.0) * np.random.randn(*s)
        return z, mu, logvar

    def get_action(self, z):
        h = rnn_output(self.state, z, EXP_MODE)
        # Alternative per-component squashing kept from the original code:
        #   action = np.dot(h, self.weight) + self.bias
        #   action[0] = np.tanh(action[0])
        #   action[1] = sigmoid(action[1])
        #   action[2] = clip(np.tanh(action[2]))
        if EXP_MODE == MODE_Z_HIDDEN:  # one hidden layer
            h = np.tanh(np.dot(h, self.weight_hidden) + self.bias_hidden)
            action = np.tanh(np.dot(h, self.weight_output) + self.bias_output)
        else:
            action = np.tanh(np.dot(h, self.weight) + self.bias)

        action[1] = (action[1] + 1.0) / 2.0
        action[2] = clip(action[2])

        self.state = rnn_next_state(self.rnn, z, action, self.state)

        return action

    def set_model_params(self, model_params):
        if EXP_MODE == MODE_Z_HIDDEN:  # one hidden layer
            params = np.array(model_params)
            cut_off = (self.input_size + 1) * self.hidden_size
            params_1 = params[:cut_off]
            params_2 = params[cut_off:]
            self.bias_hidden = params_1[:self.hidden_size]
            self.weight_hidden = params_1[self.hidden_size:].reshape(
                self.input_size, self.hidden_size)
            self.bias_output = params_2[:3]
            self.weight_output = params_2[3:].reshape(self.hidden_size, 3)
        else:
            self.bias = np.array(model_params[:3])
            self.weight = np.array(model_params[3:]).reshape(
                self.input_size, 3)

    def load_model(self, filename):
        with open(filename) as f:
            data = json.load(f)
        print('loading file %s' % (filename))
        self.data = data
        model_params = np.array(data[0])  # assuming other stuff is in data
        self.set_model_params(model_params)

    def get_random_model_params(self, stdev=0.1):
        #return np.random.randn(self.param_count)*stdev
        return np.random.standard_cauchy(
            self.param_count) * stdev  # spice things up

    def init_random_model_params(self, stdev=0.1):
        params = self.get_random_model_params(stdev=stdev)
        self.set_model_params(params)
        vae_params = self.vae.get_random_model_params(stdev=stdev)
        self.vae.set_model_params(vae_params)
        rnn_params = self.rnn.get_random_model_params(stdev=stdev)
        self.rnn.set_model_params(rnn_params)
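
On the standard_cauchy choice in get_random_model_params: the Cauchy distribution's heavy tails occasionally produce very large initial weights, which widens the initial search for the evolution strategy compared with the commented-out Gaussian line. A quick comparison sketch:

import numpy as np

np.random.seed(0)
gauss = np.random.randn(100000) * 0.1
cauchy = np.random.standard_cauchy(100000) * 0.1
print(np.abs(gauss).max())   # around 0.4-0.5 (thin tails)
print(np.abs(cauchy).max())  # typically orders of magnitude larger (heavy tails)
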
Example #9
    crops = np.load(args["npy"])

    # parameters
    z_size = 16
    batch_size, crop_size, _, _ = crops.shape

    print("batch_size {}".format(batch_size))
    print("crop_size {}".format(crop_size))

    cv2.imshow("crop",crops[0])
    cv2.waitKey(0)

    reset_graph()
    test_vae = ConvVAE(z_size=z_size,
                       batch_size=batch_size,
                       is_training=False,
                       reuse=False,
                       gpu_mode=True)

    # show reconstruction example
    test_vae.load_json("../../models/0/vae_{}.json".format(180000))
    z = test_vae.encode(crops)
    print(z.shape)

    rec = test_vae.decode(z)
    print(rec.shape)

    np.save("../../output/z_{}.npy".format(batch_size), z)
    np.save("../../output/rec_{}.npy".format(batch_size), rec)

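
The saved reconstructions can be eyeballed the same way the input crop is shown above; a short sketch (assuming rec holds images scaled to [0, 1], as a sigmoid decoder output would be):

    # Compare an input crop with its reconstruction (index 0 is arbitrary)
    cv2.imshow("crop", crops[0])
    cv2.imshow("reconstruction", rec[0])
    cv2.waitKey(0)
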
Example #10
    # run this file from the script folder
    data_path = "../data/dsprites/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz"

    n_train = 10000
    n_test = 5000

    path = "../results/dsprites/"

    metrics = {"train": None, "test": None}

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_path = path + args.model + f"/parameter_{args.parameter}/seed_{args.seed}"
    model_data = torch.load(model_path + "/model.pt",
                            map_location=torch.device("cpu"))
    model = ConvVAE(model_data["opt"]).to(device)
    model.load_state_dict(state_dict=model_data["model"])
    opt = model_data["opt"]

    evaluate = Evaluator(data_path=data_path,
                         model=model,
                         opt=opt,
                         batch_size=64)

    if args.metric == "beta":
        metric_train, metric_test = evaluate.get_beta_metric(n_train, n_test)
        metrics["train"] = metric_train
        metrics["test"] = metric_test

    elif args.metric == "factor":
        metric_train, metric_test = evaluate.get_factor_metric(n_train, n_test)
        metrics["train"] = metric_train
        metrics["test"] = metric_test
Example #11
# Splitting the dataset into batches

total_length = len(dataset)
num_batches = int(np.floor(total_length / batch_size))
print("num_batches", num_batches)

# Resetting the graph of the VAE model

reset_graph()

# Creating the VAE model as an object of the ConvVAE class

vae = ConvVAE(z_size=z_size,
              batch_size=batch_size,
              learning_rate=learning_rate,
              kl_tolerance=kl_tolerance,
              is_training=True,
              reuse=False,
              gpu_mode=True)

# Implementing the Training Loop

print("train", "step", "loss", "recon_loss", "kl_loss")
for epoch in range(NUM_EPOCH):
    np.random.shuffle(dataset)
    for idx in range(num_batches):
        batch = dataset[idx * batch_size:(idx + 1) * batch_size]
        obs = batch.astype(np.float32) / 255.0
        feed = {vae.x: obs}
        # Run one gradient step (this line completes the snippet, which was cut off here)
        (train_loss, r_loss, kl_loss, train_step, _) = vae.sess.run(
            [vae.loss, vae.r_loss, vae.kl_loss, vae.global_step, vae.train_op], feed)
        print("train", train_step, train_loss, r_loss, kl_loss)
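
Once the loop finishes, the trained weights are typically serialized for the later World Models stages; a one-line sketch using the same json convention seen elsewhere on this page (the target path is an assumption):

# Save the trained VAE weights (path is an assumption)
vae.save_json(os.path.join(model_path_name, 'vae.json'))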