Exemplo n.º 1
0
def experiment(exp_specs):
    """Train a conv-encoder + GRU + conv-decoder model that predicts the next
    observation from the recurrent state and the current action, using
    truncated back-propagation-through-time (BPTT).

    Args:
        exp_specs (dict): experiment configuration. Keys read here:
            'use_gpu', 'exp_id', 'exp_name', 'seed', 'replay_dict_path',
            'episode_length', 'batch_size', 'model_lr', 'model_wd',
            'freq_bptt', 'freq_val', 'max_iters'.
    """
    ptu.set_gpu_mode(exp_specs['use_gpu'])
    # Set up logging ----------------------------------------------------------
    exp_id = exp_specs['exp_id']
    exp_prefix = exp_specs['exp_name']
    seed = exp_specs['seed']
    set_seed(seed)
    setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)

    # Prep the data -----------------------------------------------------------
    replay_dict = joblib.load(exp_specs['replay_dict_path'])
    next_obs_array = replay_dict['next_observations']
    acts_array = replay_dict['actions']
    # First 40k transitions train the model; the remainder are held out.
    data_loader = BasicDataLoader(
        next_obs_array[:40000], acts_array[:40000], exp_specs['episode_length'], exp_specs['batch_size'], use_gpu=ptu.gpu_enabled())
    val_data_loader = BasicDataLoader(
        next_obs_array[40000:], acts_array[40000:], exp_specs['episode_length'], exp_specs['batch_size'], use_gpu=ptu.gpu_enabled())

    # Model Definition --------------------------------------------------------
    conv_channels = 64
    # 1x1 convs: per-pixel feature extraction, spatial size is preserved.
    conv_encoder = nn.Sequential(
        nn.Conv2d(3, conv_channels, 1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(conv_channels),
        nn.ReLU(),
        nn.Conv2d(conv_channels, conv_channels, 1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(conv_channels),
        nn.ReLU()
    )
    ae_dim = 128
    gru_dim = 512
    img_h = 5
    flat_inter_img_dim = img_h * img_h * conv_channels
    fc_encoder = nn.Sequential(
        nn.Linear(flat_inter_img_dim, ae_dim, bias=False),
        nn.BatchNorm1d(ae_dim),
        nn.ReLU(),
        nn.Linear(ae_dim, ae_dim, bias=False),
        nn.BatchNorm1d(ae_dim),
        nn.ReLU(),
        nn.Linear(ae_dim, ae_dim, bias=False),
        nn.BatchNorm1d(ae_dim),
        nn.ReLU(),
        nn.Linear(ae_dim, ae_dim, bias=False),
        nn.BatchNorm1d(ae_dim),
        nn.ReLU()
    )
    gru = nn.GRUCell(
        ae_dim, gru_dim, bias=True
    )
    # Decoder input is [hidden state, 4-dim action].
    fc_decoder = nn.Sequential(
        nn.Linear(gru_dim + 4, ae_dim, bias=False),
        nn.BatchNorm1d(ae_dim),
        nn.ReLU(),
        nn.Linear(ae_dim, ae_dim, bias=False),
        nn.BatchNorm1d(ae_dim),
        nn.ReLU(),
        nn.Linear(ae_dim, ae_dim, bias=False),
        nn.BatchNorm1d(ae_dim),
        nn.ReLU(),
        nn.Linear(ae_dim, ae_dim, bias=False),
        nn.BatchNorm1d(ae_dim),
        nn.ReLU(),
        nn.Linear(ae_dim, flat_inter_img_dim, bias=False),
        nn.BatchNorm1d(flat_inter_img_dim),
        nn.ReLU(),
    )
    conv_decoder = nn.Sequential(
        nn.ConvTranspose2d(conv_channels, conv_channels, 1, stride=1, padding=0, output_padding=0, bias=False),
        nn.BatchNorm2d(conv_channels),
        nn.ReLU(),
        nn.ConvTranspose2d(conv_channels, conv_channels, 1, stride=1, padding=0, output_padding=0, bias=False),
        nn.BatchNorm2d(conv_channels),
        nn.ReLU(),
        nn.Conv2d(conv_channels, 3, 1, stride=1, padding=0, bias=True),
        nn.Sigmoid()
    )
    if ptu.gpu_enabled():
        conv_encoder.cuda()
        fc_encoder.cuda()
        gru.cuda()
        fc_decoder.cuda()
        conv_decoder.cuda()

    # Optimizer ---------------------------------------------------------------
    model_optim = Adam(
        [
            item for sublist in
            map(
                lambda x: list(x.parameters()),
                [fc_encoder, conv_encoder, gru, fc_decoder, conv_decoder]
            )
            for item in sublist
        ],
        lr=float(exp_specs['model_lr']), weight_decay=float(exp_specs['model_wd'])
    )

    # -------------------------------------------------------------------------
    freq_bptt = exp_specs['freq_bptt']
    episode_length = exp_specs['episode_length']
    losses = []
    train_loss_print = ''  # defensive init in case the first val print fires early
    for iter_num in range(int(float(exp_specs['max_iters']))):
        if iter_num % freq_bptt == 0:
            if iter_num > 0:
                # BUGFIX: clear gradients left over from the previous window;
                # without zero_grad() every step compounds all earlier gradients.
                model_optim.zero_grad()
                # loss = loss / freq_bptt
                loss.backward()
                model_optim.step()
                # Truncate the BPTT graph at the window boundary.
                prev_h_batch = prev_h_batch.detach()
            loss = 0
        if iter_num % episode_length == 0:
            # New episode: reset the recurrent state.
            prev_h_batch = Variable(torch.zeros(exp_specs['batch_size'], gru_dim))
            if ptu.gpu_enabled():
                prev_h_batch = prev_h_batch.cuda()

            if iter_num % exp_specs['freq_val'] == 0:
                train_loss_print = '\t'.join(losses)
            losses = []

        obs_batch, act_batch = data_loader.get_next_batch()
        # Predict the next observation from the recurrent state + action ...
        recon = fc_decoder(torch.cat([prev_h_batch, act_batch], 1)).view(obs_batch.size(0), conv_channels, img_h, img_h)
        recon = conv_decoder(recon)

        # ... then fold the actual observation into the recurrent state.
        enc = conv_encoder(obs_batch).view(obs_batch.size(0), -1)
        enc = fc_encoder(enc)
        prev_h_batch = gru(enc, prev_h_batch)

        losses.append('%.4f' % ((obs_batch - recon)**2).mean())
        # Skip the first step of an episode: the zero hidden state has not
        # seen any observation yet.
        if iter_num % episode_length != 0:
            loss = loss + ((obs_batch - recon)**2).sum()/float(exp_specs['batch_size'])

        if iter_num % (50*episode_length) in range(2*episode_length):
            save_pytorch_tensor_as_img(recon[0].data.cpu(), 'junk_vis/fixed_colors_simple_maze_5_h/rnn_recon_%d.png' % iter_num)
            save_pytorch_tensor_as_img(obs_batch[0].data.cpu(), 'junk_vis/fixed_colors_simple_maze_5_h/rnn_obs_%d.png' % iter_num)

        if iter_num % exp_specs['freq_val'] == 0:
            print('\nValidating Iter %d...' % iter_num)
            list(map(lambda x: x.eval(), [fc_encoder, conv_encoder, gru, fc_decoder, conv_decoder]))

            val_prev_h_batch = Variable(torch.zeros(exp_specs['batch_size'], gru_dim))
            if ptu.gpu_enabled():
                val_prev_h_batch = val_prev_h_batch.cuda()

            # BUGFIX: use a dedicated list (the original overwrote the running
            # training `losses`) and draw batches from the *validation* loader
            # (the original mistakenly reused the training loader, leaving
            # val_data_loader unused).
            val_losses = []
            for i in range(episode_length):
                obs_batch, act_batch = val_data_loader.get_next_batch()

                recon = fc_decoder(torch.cat([val_prev_h_batch, act_batch], 1)).view(obs_batch.size(0), conv_channels, img_h, img_h)
                recon = conv_decoder(recon)

                enc = conv_encoder(obs_batch).view(obs_batch.size(0), -1)
                enc = fc_encoder(enc)
                val_prev_h_batch = gru(enc, val_prev_h_batch)

                val_losses.append('%.4f' % ((obs_batch - recon)**2).mean())

            loss_print = '\t'.join(val_losses)
            print('Val MSE:\t' + loss_print)
            print('Train MSE:\t' + train_loss_print)

            list(map(lambda x: x.train(), [fc_encoder, conv_encoder, gru, fc_decoder, conv_decoder]))
Exemplo n.º 2
0
def experiment(exp_specs):
    """Train a small fully-connected autoencoder on flattened (48-dim)
    observations; a gradient step is taken every `freq_bptt` iterations on
    the accumulated reconstruction loss.

    Args:
        exp_specs (dict): experiment configuration. Keys read here:
            'use_gpu', 'exp_id', 'exp_name', 'seed', 'replay_dict_path',
            'episode_length', 'batch_size', 'model_lr', 'model_wd',
            'freq_bptt', 'freq_val', 'max_iters'.
    """
    ptu.set_gpu_mode(exp_specs['use_gpu'])
    # Set up logging ----------------------------------------------------------
    exp_id = exp_specs['exp_id']
    exp_prefix = exp_specs['exp_name']
    seed = exp_specs['seed']
    set_seed(seed)
    setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)

    # Prep the data -----------------------------------------------------------
    replay_dict = joblib.load(exp_specs['replay_dict_path'])
    next_obs_array = replay_dict['next_observations']
    acts_array = replay_dict['actions']
    # First 40k transitions train the model; the remainder are held out.
    data_loader = BasicDataLoader(next_obs_array[:40000],
                                  acts_array[:40000],
                                  exp_specs['episode_length'],
                                  exp_specs['batch_size'],
                                  use_gpu=ptu.gpu_enabled())
    val_data_loader = BasicDataLoader(next_obs_array[40000:],
                                      acts_array[40000:],
                                      exp_specs['episode_length'],
                                      exp_specs['batch_size'],
                                      use_gpu=ptu.gpu_enabled())

    # Model Definition --------------------------------------------------------
    ae_dim = 128
    model = nn.Sequential(nn.Linear(48, ae_dim, bias=False),
                          nn.BatchNorm1d(ae_dim, affine=False), nn.ReLU(),
                          nn.Linear(ae_dim, ae_dim, bias=False),
                          nn.BatchNorm1d(ae_dim, affine=False), nn.ReLU(),
                          nn.Linear(ae_dim, ae_dim, bias=False),
                          nn.BatchNorm1d(ae_dim, affine=False), nn.ReLU(),
                          nn.Linear(ae_dim, 48), nn.Sigmoid())
    if ptu.gpu_enabled(): model.cuda()

    # Optimizer ---------------------------------------------------------------
    model_optim = Adam(model.parameters(),
                       lr=float(exp_specs['model_lr']),
                       weight_decay=float(exp_specs['model_wd']))

    # -------------------------------------------------------------------------
    freq_bptt = exp_specs['freq_bptt']
    for iter_num in range(int(float(exp_specs['max_iters']))):
        if iter_num % freq_bptt == 0:
            if iter_num > 0:
                # BUGFIX: clear gradients from the previous optimization step;
                # without zero_grad() every update compounds all earlier
                # gradients.
                model_optim.zero_grad()
                loss.backward()
                model_optim.step()
            loss = 0

        obs_batch, act_batch = data_loader.get_next_batch()
        recon = model(obs_batch.view(obs_batch.size(0),
                                     -1)).view(obs_batch.size())
        # Accumulate per-batch summed squared error, normalized by batch size.
        loss = loss + (
            (obs_batch - recon)**2).sum() / float(exp_specs['batch_size'])

        if iter_num % 50 == 0:
            save_pytorch_tensor_as_img(recon[0].data.cpu(),
                                       'junk_vis/ae_recon_%d.png' % iter_num)
            save_pytorch_tensor_as_img(obs_batch[0].data.cpu(),
                                       'junk_vis/ae_obs_%d.png' % iter_num)

        if iter_num % exp_specs['freq_val'] == 0:
            print('\nValidating Iter %d...' % iter_num)
            model.eval()

            obs_batch, act_batch = val_data_loader.get_next_batch()
            recon = model(obs_batch.view(obs_batch.size(0),
                                         -1)).view(obs_batch.size())

            print('MSE:\t%.4f' % ((obs_batch - recon)**2).mean())

            model.train()
Exemplo n.º 3
0
def experiment(exp_specs):
    """Train a fully-connected encoder + GRUCell + decoder model predicting
    the next (flattened, 48-dim) observation from the recurrent state and the
    4-dim action, with truncated BPTT every `freq_bptt` iterations.

    Args:
        exp_specs (dict): experiment configuration. Keys read here:
            'use_gpu', 'exp_id', 'exp_name', 'seed', 'replay_dict_path',
            'episode_length', 'batch_size', 'model_lr', 'model_wd',
            'freq_bptt', 'freq_val', 'max_iters'.
    """
    ptu.set_gpu_mode(exp_specs['use_gpu'])
    # Set up logging ----------------------------------------------------------
    exp_id = exp_specs['exp_id']
    exp_prefix = exp_specs['exp_name']
    seed = exp_specs['seed']
    set_seed(seed)
    setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)

    # Prep the data -----------------------------------------------------------
    replay_dict = joblib.load(exp_specs['replay_dict_path'])
    next_obs_array = replay_dict['next_observations']
    acts_array = replay_dict['actions']
    # First 40k transitions train the model; the remainder are held out.
    data_loader = BasicDataLoader(next_obs_array[:40000],
                                  acts_array[:40000],
                                  exp_specs['episode_length'],
                                  exp_specs['batch_size'],
                                  use_gpu=ptu.gpu_enabled())
    val_data_loader = BasicDataLoader(next_obs_array[40000:],
                                      acts_array[40000:],
                                      exp_specs['episode_length'],
                                      exp_specs['batch_size'],
                                      use_gpu=ptu.gpu_enabled())

    # Model Definition --------------------------------------------------------
    ae_dim = 128
    encoder = nn.Sequential(nn.Linear(48, ae_dim, bias=False),
                            nn.BatchNorm1d(ae_dim), nn.ReLU(),
                            nn.Linear(ae_dim, ae_dim, bias=False),
                            nn.BatchNorm1d(ae_dim), nn.ReLU())
    gru = nn.GRUCell(ae_dim, ae_dim, bias=True)
    # Decoder input is [hidden state, 4-dim action].
    decoder = nn.Sequential(nn.Linear(ae_dim + 4, ae_dim, bias=False),
                            nn.BatchNorm1d(ae_dim), nn.ReLU(),
                            nn.Linear(ae_dim, ae_dim, bias=False),
                            nn.BatchNorm1d(ae_dim), nn.ReLU(),
                            nn.Linear(ae_dim, ae_dim, bias=False),
                            nn.BatchNorm1d(ae_dim), nn.ReLU(),
                            nn.Linear(ae_dim, 48), nn.Sigmoid())
    if ptu.gpu_enabled():
        encoder.cuda()
        gru.cuda()
        decoder.cuda()

    # Optimizer ---------------------------------------------------------------
    model_optim = Adam(list(encoder.parameters()) +
                       list(decoder.parameters()) + list(gru.parameters()),
                       lr=float(exp_specs['model_lr']),
                       weight_decay=float(exp_specs['model_wd']))

    # -------------------------------------------------------------------------
    freq_bptt = exp_specs['freq_bptt']
    losses = []
    train_loss_print = ''  # defensive init in case the first val print fires early
    for iter_num in range(int(float(exp_specs['max_iters']))):
        if iter_num % freq_bptt == 0:
            if iter_num > 0:
                # BUGFIX: clear gradients left over from the previous window;
                # without zero_grad() every step compounds all earlier gradients.
                model_optim.zero_grad()
                # loss = loss / freq_bptt
                loss.backward()
                model_optim.step()
            loss = 0
            # Reset the recurrent state at every BPTT window boundary.
            prev_h_batch = Variable(
                torch.zeros(exp_specs['batch_size'], ae_dim))
            if ptu.gpu_enabled():
                prev_h_batch = prev_h_batch.cuda()

            if iter_num % exp_specs['freq_val'] == 0:
                train_loss_print = '\t'.join(losses)
            losses = []

        obs_batch, act_batch = data_loader.get_next_batch()
        # Predict the next observation, then fold the actual observation into
        # the recurrent state.
        recon = decoder(torch.cat([prev_h_batch, act_batch],
                                  1)).view(obs_batch.size())
        enc = encoder(obs_batch.view(obs_batch.size(0), -1))
        prev_h_batch = gru(enc, prev_h_batch)

        losses.append('%.4f' % ((obs_batch - recon)**2).mean())
        # Skip the first step of each window: the fresh zero hidden state has
        # not seen any observation yet.
        if iter_num % freq_bptt != 0:
            loss = loss + (
                (obs_batch - recon)**2).sum() / float(exp_specs['batch_size'])

        if iter_num % 250 in range(10):
            save_pytorch_tensor_as_img(
                recon[0].data.cpu(),
                'junk_vis/with_wd_1e-3_ae_recon_%d.png' % iter_num)
            save_pytorch_tensor_as_img(
                obs_batch[0].data.cpu(),
                'junk_vis/with_wd_1e-3_ae_obs_%d.png' % iter_num)

        if iter_num % exp_specs['freq_val'] == 0:
            print('\nValidating Iter %d...' % iter_num)
            list(map(lambda x: x.eval(), [encoder, decoder, gru]))

            val_prev_h_batch = Variable(
                torch.zeros(exp_specs['batch_size'], ae_dim))
            if ptu.gpu_enabled():
                val_prev_h_batch = val_prev_h_batch.cuda()

            # BUGFIX: use a dedicated list; the original overwrote the running
            # training `losses`, contaminating the next train_loss_print.
            val_losses = []
            for i in range(freq_bptt):
                obs_batch, act_batch = val_data_loader.get_next_batch()
                recon = decoder(torch.cat([val_prev_h_batch, act_batch],
                                          1)).view(obs_batch.size())
                enc = encoder(obs_batch.view(obs_batch.size(0), -1))
                val_prev_h_batch = gru(enc, val_prev_h_batch)
                val_losses.append('%.4f' % ((obs_batch - recon)**2).mean())

            loss_print = '\t'.join(val_losses)
            print('Val MSE:\t' + loss_print)
            print('Train MSE:\t' + train_loss_print)

            list(map(lambda x: x.train(), [encoder, decoder, gru]))
Exemplo n.º 4
0
def experiment(exp_specs):
    """Train an action-conditioned next-observation model on on-the-fly
    partially-observed grid mazes.

    Pipeline per step: decode (hidden state + action) -> predicted frame,
    then encode the actual frame + action into the LSTM state. Trained with
    truncated BPTT on the MSE of the mean reconstruction head.

    Args:
        exp_specs (dict): experiment configuration; keys read here include
            'use_gpu', 'exp_id', 'exp_name', 'seed', 'episode_length',
            'batch_size', 'model_lr', 'model_wd', 'freq_bptt', 'freq_val'
            and 'max_iters'.
    """
    ptu.set_gpu_mode(exp_specs['use_gpu'])
    # Set up logging ----------------------------------------------------------
    exp_id = exp_specs['exp_id']
    exp_prefix = exp_specs['exp_name']
    seed = exp_specs['seed']
    set_seed(seed)
    setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)

    # Prep the data -----------------------------------------------------------
    # Maze layout for the on-the-fly environment generator (no replay buffer
    # here: batches are generated fresh by the data loaders below).
    env_specs = {
        'flat_repr': False,
        'one_hot_repr': False,
        'maze_h': 9,
        'maze_w': 9,
        'obs_h': 5,
        'obs_w': 5,
        'scale': 4,
        'num_objs': 10
    }
    maze_constructor = lambda: PartiallyObservedGrid(env_specs)
    data_loader = VerySpecificOnTheFLyDataLoader(maze_constructor,
                                                 exp_specs['episode_length'],
                                                 exp_specs['batch_size'],
                                                 use_gpu=ptu.gpu_enabled())
    val_data_loader = VerySpecificOnTheFLyDataLoader(
        maze_constructor,
        exp_specs['episode_length'],
        exp_specs['batch_size'],
        use_gpu=ptu.gpu_enabled())

    # Model Definition --------------------------------------------------------
    conv_channels = 32
    # Two stride-2 convs downsample the frame, two stride-1 convs refine it.
    conv_encoder = nn.Sequential(
        nn.Conv2d(3, conv_channels, 4, stride=2, padding=1, bias=False),
        nn.BatchNorm2d(conv_channels), nn.ReLU(),
        nn.Conv2d(conv_channels,
                  conv_channels,
                  4,
                  stride=2,
                  padding=1,
                  bias=False), nn.BatchNorm2d(conv_channels), nn.ReLU(),
        nn.Conv2d(conv_channels,
                  conv_channels,
                  3,
                  stride=1,
                  padding=1,
                  bias=False), nn.BatchNorm2d(conv_channels), nn.ReLU(),
        nn.Conv2d(conv_channels,
                  conv_channels,
                  3,
                  stride=1,
                  padding=1,
                  bias=False), nn.BatchNorm2d(conv_channels), nn.ReLU())
    ae_dim = 256
    gru_dim = 512
    img_h = 5
    flat_inter_img_dim = img_h * img_h * conv_channels
    act_dim = 64
    # Learned projection of the raw 4-dim action into a 64-dim embedding.
    act_proc = nn.Linear(4, act_dim, bias=True)
    fc_encoder = nn.Sequential(
        nn.Linear(flat_inter_img_dim + act_dim, ae_dim, bias=False),
        nn.BatchNorm1d(ae_dim),
        nn.ReLU(),
        # nn.Linear(ae_dim, ae_dim, bias=False),
        # nn.BatchNorm1d(ae_dim),
        # nn.ReLU(),
        # nn.Linear(ae_dim, ae_dim, bias=False),
        # nn.BatchNorm1d(ae_dim),
        # nn.ReLU(),
        # nn.Linear(ae_dim, ae_dim, bias=False),
        # nn.BatchNorm1d(ae_dim),
        # nn.ReLU()
    )
    # NOTE(review): named `gru` but this is an LSTMCell, so the recurrent
    # state is a (hidden, cell) pair threaded through the loop below.
    gru = nn.LSTMCell(ae_dim, gru_dim, bias=True)
    fc_decoder = nn.Sequential(
        nn.Linear(gru_dim + act_dim, 256, bias=False),
        nn.BatchNorm1d(256),
        nn.ReLU(),
        # Output is reshaped to (batch, 64, img_h, img_h) before conv_decoder:
        # 2 * flat_inter_img_dim == 64 * img_h * img_h for conv_channels == 32.
        nn.Linear(256, 2 * flat_inter_img_dim, bias=False),
        nn.BatchNorm1d(2 * flat_inter_img_dim),
        nn.ReLU(),
        # # nn.Linear(ae_dim, ae_dim, bias=False),
        # # nn.BatchNorm1d(ae_dim),
        # # nn.ReLU(),
        # # nn.Linear(ae_dim, ae_dim, bias=False),
        # # nn.BatchNorm1d(ae_dim),
        # # nn.ReLU(),
        # nn.Linear(ae_dim, flat_inter_img_dim, bias=False),
        # nn.BatchNorm1d(flat_inter_img_dim),
        # nn.ReLU(),
    )
    # Upsamples the 5x5 feature map back towards the observation resolution.
    conv_decoder = nn.Sequential(
        nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False),
        nn.BatchNorm2d(64),
        nn.ReLU(),
        nn.ConvTranspose2d(64,
                           64,
                           4,
                           stride=2,
                           padding=1,
                           output_padding=0,
                           bias=False),
        nn.BatchNorm2d(64),
        nn.ReLU(),
        nn.ConvTranspose2d(64,
                           64,
                           4,
                           stride=2,
                           padding=1,
                           output_padding=0,
                           bias=False),
        nn.BatchNorm2d(64),
        nn.ReLU(),
        # nn.Conv2d(conv_channels, conv_channels, 3, stride=1, padding=1, bias=False),
        # nn.BatchNorm2d(conv_channels),
        # nn.ReLU(),
    )
    # Separate heads: a sigmoid mean image and an (unclamped) log-covariance.
    mean_decoder = nn.Sequential(
        nn.Conv2d(64, 3, 1, stride=1, padding=0, bias=True), nn.Sigmoid())
    log_cov_decoder = nn.Sequential(
        nn.Conv2d(64, 3, 1, stride=1, padding=0, bias=True), )
    if ptu.gpu_enabled():
        conv_encoder.cuda()
        fc_encoder.cuda()
        gru.cuda()
        fc_decoder.cuda()
        conv_decoder.cuda()
        mean_decoder.cuda()
        log_cov_decoder.cuda()
        act_proc.cuda()

    # Optimizer ---------------------------------------------------------------
    # NOTE(review): act_proc's parameters are NOT in this list, so the action
    # projection is never trained — confirm whether that is intentional.
    model_optim = Adam([
        item for sublist in map(lambda x: list(x.parameters()), [
            fc_encoder, conv_encoder, gru, fc_decoder, conv_decoder,
            mean_decoder, log_cov_decoder
        ]) for item in sublist
    ],
                       lr=float(exp_specs['model_lr']),
                       weight_decay=float(exp_specs['model_wd']))

    # -------------------------------------------------------------------------
    freq_bptt = exp_specs['freq_bptt']
    episode_length = exp_specs['episode_length']
    losses = []
    for iter_num in range(int(float(exp_specs['max_iters']))):
        if iter_num % freq_bptt == 0:
            if iter_num > 0:
                # NOTE(review): model_optim.zero_grad() is never called, so
                # gradients accumulate across optimization steps — confirm
                # whether this is intentional.
                # loss = loss / freq_bptt
                loss.backward()
                model_optim.step()
                # Truncate the BPTT graph at the window boundary.
                prev_h_batch = prev_h_batch.detach()
                prev_c_batch = prev_c_batch.detach()
            loss = 0
        if iter_num % episode_length == 0:
            # New episode: reset both LSTM states to zero.
            prev_h_batch = Variable(
                torch.zeros(exp_specs['batch_size'], gru_dim))
            prev_c_batch = Variable(
                torch.zeros(exp_specs['batch_size'], gru_dim))
            if ptu.gpu_enabled():
                prev_h_batch = prev_h_batch.cuda()
                prev_c_batch = prev_c_batch.cuda()

            train_loss_print = '\t'.join(losses)
            losses = []

        obs_batch, act_batch = data_loader.get_next_batch()
        act_batch = act_proc(act_batch)

        # Predict the next frame from the current hidden state + action.
        hidden = fc_decoder(torch.cat([prev_h_batch, act_batch],
                                      1)).view(obs_batch.size(0), 64, img_h,
                                               img_h)
        hidden = conv_decoder(hidden)
        recon = mean_decoder(hidden)
        # log_cov is computed and clamped but unused by the MSE loss below
        # (the log-prob loss that would consume it is commented out).
        log_cov = log_cov_decoder(hidden)
        log_cov = torch.clamp(log_cov, LOG_COV_MIN, LOG_COV_MAX)

        # Fold the actual observation + action into the recurrent state.
        enc = conv_encoder(obs_batch)
        enc = enc.view(obs_batch.size(0), -1)
        enc = fc_encoder(torch.cat([enc, act_batch], 1))
        prev_h_batch, prev_c_batch = gru(enc, (prev_h_batch, prev_c_batch))

        losses.append('%.4f' % ((obs_batch - recon)**2).mean())
        # Skip the first step of an episode (fresh zero state).
        if iter_num % episode_length != 0:
            loss = loss + (
                (obs_batch - recon)**2).sum() / float(exp_specs['batch_size'])
            # loss = loss - compute_diag_log_prob(recon, log_cov, obs_batch)/float(exp_specs['batch_size'])

        # Periodically dump reconstruction/observation pairs for inspection.
        if iter_num % (500 * episode_length) in range(2 * episode_length):
            save_pytorch_tensor_as_img(
                recon[0].data.cpu(),
                'junk_vis/debug_2_good_acts_on_the_fly_pogrid_len_8_scale_4/rnn_recon_%d.png'
                % iter_num)
            save_pytorch_tensor_as_img(
                obs_batch[0].data.cpu(),
                'junk_vis/debug_2_good_acts_on_the_fly_pogrid_len_8_scale_4/rnn_obs_%d.png'
                % iter_num)

        if iter_num % exp_specs['freq_val'] == 0:
            print('\nValidating Iter %d...' % iter_num)
            list(
                map(lambda x: x.eval(), [
                    fc_encoder, conv_encoder, gru, fc_decoder, conv_decoder,
                    mean_decoder, log_cov_decoder, act_proc
                ]))

            val_prev_h_batch = Variable(
                torch.zeros(exp_specs['batch_size'], gru_dim))
            val_prev_c_batch = Variable(
                torch.zeros(exp_specs['batch_size'], gru_dim))
            if ptu.gpu_enabled():
                val_prev_h_batch = val_prev_h_batch.cuda()
                val_prev_c_batch = val_prev_c_batch.cuda()

            # NOTE(review): this reuses/overwrites the training `losses` list,
            # so the next train_loss_print can contain validation MSEs.
            losses = []
            for i in range(episode_length):
                obs_batch, act_batch = val_data_loader.get_next_batch()
                act_batch = act_proc(act_batch)

                hidden = fc_decoder(torch.cat([val_prev_h_batch, act_batch],
                                              1)).view(obs_batch.size(0), 64,
                                                       img_h, img_h)
                hidden = conv_decoder(hidden)
                recon = mean_decoder(hidden)
                log_cov = log_cov_decoder(hidden)
                log_cov = torch.clamp(log_cov, LOG_COV_MIN, LOG_COV_MAX)

                enc = conv_encoder(obs_batch).view(obs_batch.size(0), -1)
                enc = fc_encoder(torch.cat([enc, act_batch], 1))
                val_prev_h_batch, val_prev_c_batch = gru(
                    enc, (val_prev_h_batch, val_prev_c_batch))

                # val_loss = compute_diag_log_prob(recon, log_cov, obs_batch)/float(exp_specs['batch_size'])
                losses.append('%.4f' % ((obs_batch - recon)**2).mean())

            loss_print = '\t'.join(losses)
            print('Val MSE:\t' + loss_print)
            print('Train MSE:\t' + train_loss_print)

            list(
                map(lambda x: x.train(), [
                    fc_encoder, conv_encoder, gru, fc_decoder, conv_decoder,
                    mean_decoder, log_cov_decoder, act_proc
                ]))
Exemplo n.º 5
0
def experiment(exp_specs):
    """Train a fully-convolutional recurrent next-observation model: conv
    encoder -> ConvGRUCell over a 5x5 spatial hidden map -> conv decoder with
    separate mean / log-covariance heads, trained with truncated BPTT on the
    MSE of the mean reconstruction.

    Args:
        exp_specs (dict): experiment configuration; keys read here include
            'use_gpu', 'exp_id', 'exp_name', 'seed', 'replay_dict_path',
            'episode_length', 'batch_size', 'model_lr', 'model_wd',
            'freq_bptt', 'freq_val' and 'max_iters'.
    """
    ptu.set_gpu_mode(exp_specs['use_gpu'])
    # Set up logging ----------------------------------------------------------
    exp_id = exp_specs['exp_id']
    exp_prefix = exp_specs['exp_name']
    seed = exp_specs['seed']
    set_seed(seed)
    setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)

    # Prep the data -----------------------------------------------------------
    replay_dict = joblib.load(exp_specs['replay_dict_path'])
    next_obs_array = replay_dict['next_observations']
    acts_array = replay_dict['actions']
    # First 40k transitions train the model; the remainder are held out.
    data_loader = BasicDataLoader(next_obs_array[:40000],
                                  acts_array[:40000],
                                  exp_specs['episode_length'],
                                  exp_specs['batch_size'],
                                  use_gpu=ptu.gpu_enabled())
    val_data_loader = BasicDataLoader(next_obs_array[40000:],
                                      acts_array[40000:],
                                      exp_specs['episode_length'],
                                      exp_specs['batch_size'],
                                      use_gpu=ptu.gpu_enabled())

    # Model Definition --------------------------------------------------------
    conv_channels = 32
    # Two stride-2 convs downsample the frame to an inter_h x inter_h map.
    conv_encoder = nn.Sequential(
        nn.Conv2d(3, conv_channels, 4, stride=2, padding=1, bias=False),
        nn.BatchNorm2d(conv_channels), nn.ReLU(),
        nn.Conv2d(conv_channels,
                  conv_channels,
                  4,
                  stride=2,
                  padding=1,
                  bias=False), nn.BatchNorm2d(conv_channels), nn.ReLU())
    gru_channels = 128
    inter_h = 5
    act_channels = 4
    # Project the 4-dim action to a 4-channel inter_h x inter_h feature map
    # so it can be concatenated with the spatial features below.
    act_proc = nn.Linear(4, act_channels * inter_h * inter_h, bias=True)
    pre_gru_conv = nn.Sequential(
        nn.Conv2d(act_channels + conv_channels,
                  conv_channels,
                  3,
                  stride=1,
                  padding=1,
                  bias=False),
        nn.BatchNorm2d(conv_channels),
        nn.ReLU(),
    )
    # Recurrent core operating on spatial feature maps (project-local cell).
    gru = ConvGRUCell(conv_channels, gru_channels, 3)
    post_gru_conv = nn.Sequential(
        nn.Conv2d(act_channels + gru_channels,
                  conv_channels,
                  3,
                  stride=1,
                  padding=1,
                  bias=False),
        nn.BatchNorm2d(conv_channels),
        nn.ReLU(),
    )
    # Mirrors the encoder: two stride-2 transposed convs upsample back.
    conv_decoder = nn.Sequential(
        nn.ConvTranspose2d(conv_channels,
                           conv_channels,
                           4,
                           stride=2,
                           padding=1,
                           output_padding=0,
                           bias=False),
        nn.BatchNorm2d(conv_channels),
        nn.ReLU(),
        # nn.Conv2d(conv_channels, conv_channels, 3, stride=1, padding=1, bias=False),
        # nn.BatchNorm2d(conv_channels),
        # nn.ReLU(),
        nn.ConvTranspose2d(conv_channels,
                           conv_channels,
                           4,
                           stride=2,
                           padding=1,
                           output_padding=0,
                           bias=False),
        nn.BatchNorm2d(conv_channels),
        nn.ReLU(),
        # nn.Conv2d(conv_channels, conv_channels, 3, stride=1, padding=1, bias=False),
        # nn.BatchNorm2d(conv_channels),
        # nn.ReLU(),
    )
    # Separate heads: a sigmoid mean image and an (unclamped) log-covariance.
    mean_decoder = nn.Sequential(
        nn.Conv2d(conv_channels, 3, 1, stride=1, padding=0, bias=True),
        nn.Sigmoid())
    log_cov_decoder = nn.Sequential(
        nn.Conv2d(conv_channels, 3, 1, stride=1, padding=0, bias=True), )
    if ptu.gpu_enabled():
        conv_encoder.cuda()
        pre_gru_conv.cuda()
        gru.cuda()
        post_gru_conv.cuda()
        conv_decoder.cuda()
        mean_decoder.cuda()
        log_cov_decoder.cuda()
        act_proc.cuda()

    # Optimizer ---------------------------------------------------------------
    # NOTE(review): act_proc's parameters are NOT in this list, so the action
    # projection is never trained — confirm whether that is intentional.
    model_optim = Adam([
        item for sublist in map(lambda x: list(x.parameters()), [
            conv_encoder, pre_gru_conv, gru, post_gru_conv, conv_decoder,
            mean_decoder, log_cov_decoder
        ]) for item in sublist
    ],
                       lr=float(exp_specs['model_lr']),
                       weight_decay=float(exp_specs['model_wd']))

    # -------------------------------------------------------------------------
    freq_bptt = exp_specs['freq_bptt']
    episode_length = exp_specs['episode_length']
    losses = []
    for iter_num in range(int(float(exp_specs['max_iters']))):
        if iter_num % freq_bptt == 0:
            if iter_num > 0:
                # NOTE(review): model_optim.zero_grad() is never called, so
                # gradients accumulate across optimization steps — confirm
                # whether this is intentional.
                # loss = loss / freq_bptt
                loss.backward()
                model_optim.step()
                # Truncate the BPTT graph at the window boundary.
                prev_h_batch = prev_h_batch.detach()
            loss = 0
        if iter_num % episode_length == 0:
            # New episode: reset the spatial recurrent state to zeros.
            prev_h_batch = Variable(
                torch.zeros(exp_specs['batch_size'], gru_channels, inter_h,
                            inter_h))
            if ptu.gpu_enabled():
                prev_h_batch = prev_h_batch.cuda()

            train_loss_print = '\t'.join(losses)
            losses = []

        obs_batch, act_batch = data_loader.get_next_batch()
        act_batch = act_proc(act_batch).view(act_batch.size(0), act_channels,
                                             inter_h, inter_h)

        # Predict the next frame from the current hidden map + action map.
        hidden = post_gru_conv(torch.cat([prev_h_batch, act_batch], 1))
        hidden = conv_decoder(hidden)
        recon = mean_decoder(hidden)
        # log_cov is computed and clamped but unused by the MSE loss below
        # (the log-prob loss that would consume it is commented out).
        log_cov = log_cov_decoder(hidden)
        log_cov = torch.clamp(log_cov, LOG_COV_MIN, LOG_COV_MAX)

        # Fold the actual observation + action into the recurrent state.
        enc = conv_encoder(obs_batch)
        enc = pre_gru_conv(torch.cat([enc, act_batch], 1))
        prev_h_batch = gru(enc, prev_h_batch)

        losses.append('%.4f' % ((obs_batch - recon)**2).mean())
        # Skip the first step of an episode (fresh zero state).
        if iter_num % episode_length != 0:
            loss = loss + (
                (obs_batch - recon)**2).sum() / float(exp_specs['batch_size'])
            # loss = loss + compute_diag_log_prob(recon, log_cov, obs_batch)/float(exp_specs['batch_size'])

        # Periodically dump reconstruction/observation pairs for inspection.
        if iter_num % (500 * episode_length) in range(2 * episode_length):
            save_pytorch_tensor_as_img(
                recon[0].data.cpu(),
                'junk_vis/conv_gru_pogrid_len_8_scale_4/rnn_recon_%d.png' %
                iter_num)
            save_pytorch_tensor_as_img(
                obs_batch[0].data.cpu(),
                'junk_vis/conv_gru_pogrid_len_8_scale_4/rnn_obs_%d.png' %
                iter_num)

        if iter_num % exp_specs['freq_val'] == 0:
            print('\nValidating Iter %d...' % iter_num)
            list(
                map(lambda x: x.eval(), [
                    conv_encoder, pre_gru_conv, gru, post_gru_conv,
                    conv_decoder, mean_decoder, log_cov_decoder, act_proc
                ]))

            val_prev_h_batch = Variable(
                torch.zeros(exp_specs['batch_size'], gru_channels, inter_h,
                            inter_h))
            if ptu.gpu_enabled():
                val_prev_h_batch = val_prev_h_batch.cuda()

            # NOTE(review): this reuses/overwrites the training `losses` list,
            # so the next train_loss_print can contain validation MSEs.
            losses = []
            for i in range(episode_length):
                obs_batch, act_batch = val_data_loader.get_next_batch()
                act_batch = act_proc(act_batch).view(act_batch.size(0),
                                                     act_channels, inter_h,
                                                     inter_h)

                hidden = post_gru_conv(
                    torch.cat([val_prev_h_batch, act_batch], 1))
                hidden = conv_decoder(hidden)
                recon = mean_decoder(hidden)
                log_cov = log_cov_decoder(hidden)
                log_cov = torch.clamp(log_cov, LOG_COV_MIN, LOG_COV_MAX)

                enc = conv_encoder(obs_batch)
                enc = pre_gru_conv(torch.cat([enc, act_batch], 1))
                val_prev_h_batch = gru(enc, val_prev_h_batch)

                # val_loss = compute_diag_log_prob(recon, log_cov, obs_batch)/float(exp_specs['batch_size'])
                losses.append('%.4f' % ((obs_batch - recon)**2).mean())

            loss_print = '\t'.join(losses)
            print('Val MSE:\t' + loss_print)
            print('Train MSE:\t' + train_loss_print)

            list(
                map(lambda x: x.train(), [
                    conv_encoder, pre_gru_conv, gru, post_gru_conv,
                    conv_decoder, mean_decoder, log_cov_decoder, act_proc
                ]))
Exemplo n.º 6
0
def experiment(exp_specs):
    """Train a VAE (optionally the masked ``VAESeg`` variant) on stored
    replay-buffer observations.

    Maximizes the ELBO with Adam. Every ``freq_val`` iterations it evaluates
    one held-out batch, prints ELBO / KL / MSE plus small tensor excerpts,
    and saves reconstruction (and mask) images under junk_vis/.

    Args:
        exp_specs: dict of experiment settings (keys used here:
            use_gpu, exp_id, exp_name, seed, replay_dict_path,
            use_masked_vae, model_lr, model_wd, max_iters, batch_size,
            freq_val).
    """
    ptu.set_gpu_mode(exp_specs['use_gpu'])
    # Set up logging ----------------------------------------------------------
    exp_id = exp_specs['exp_id']
    exp_prefix = exp_specs['exp_name']
    seed = exp_specs['seed']
    set_seed(seed)
    setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)

    # Prep the data -----------------------------------------------------------
    replay_dict = joblib.load(exp_specs['replay_dict_path'])
    next_obs_array = replay_dict['next_observations']
    acts_array = replay_dict['actions']
    # First 4000 transitions train, the remainder validate.
    data_loader = RandomDataLoader(next_obs_array[:4000],
                                   acts_array[:4000],
                                   use_gpu=ptu.gpu_enabled())
    val_data_loader = RandomDataLoader(next_obs_array[4000:],
                                       acts_array[4000:],
                                       use_gpu=ptu.gpu_enabled())

    # Model Definition --------------------------------------------------------
    if exp_specs['use_masked_vae']:
        model = VAESeg()
    else:
        model = VAE()
    if ptu.gpu_enabled(): model.cuda()

    # Optimizer ---------------------------------------------------------------
    model_optim = Adam(model.parameters(),
                       lr=float(exp_specs['model_lr']),
                       weight_decay=float(exp_specs['model_wd']))

    # -------------------------------------------------------------------------
    for iter_num in range(int(float(exp_specs['max_iters']))):
        obs_batch, act_batch = data_loader.get_next_batch(
            exp_specs['batch_size'])
        if exp_specs['use_masked_vae']:
            recon_mean, recon_log_cov, z_mean, z_log_cov, mask = model(
                obs_batch)
        else:
            recon_mean, recon_log_cov, z_mean, z_log_cov = model(obs_batch)
        elbo = model.compute_ELBO(z_mean, z_log_cov, recon_mean, recon_log_cov,
                                  obs_batch)
        neg_elbo = -1. * elbo
        # BUG FIX: clear stale gradients before backprop; without this,
        # gradients accumulate across iterations and corrupt every update.
        model_optim.zero_grad()
        neg_elbo.backward()
        model_optim.step()

        if iter_num % exp_specs['freq_val'] == 0:
            print('\nValidating Iter %d...' % iter_num)
            model.eval()

            # No gradients needed for evaluation.
            with torch.no_grad():
                obs_batch, act_batch = val_data_loader.get_next_batch(
                    exp_specs['batch_size'])
                if exp_specs['use_masked_vae']:
                    recon_mean, recon_log_cov, z_mean, z_log_cov, mask = model(
                        obs_batch)
                    # Tile the single-channel mask to 3 channels so it can be
                    # saved as an RGB image.
                    mask = mask.repeat(1, 3, 1, 1)
                    save_pytorch_tensor_as_img(
                        mask[0].data.cpu(),
                        'junk_vis/mask_vae_%d.png' % iter_num)
                else:
                    recon_mean, recon_log_cov, z_mean, z_log_cov = model(
                        obs_batch)
                elbo = model.compute_ELBO(z_mean, z_log_cov, recon_mean,
                                          recon_log_cov, obs_batch)
                KL = model.compute_KL(z_mean, z_log_cov)

                print('\nELBO:\t%.4f' % elbo)
                print('KL:\t%.4f' % KL)
                print('MSE:\t%.4f' % ((recon_mean - obs_batch)**2).mean())
                # Small excerpts for eyeballing reconstruction quality.
                print(obs_batch[0][0, :4, :4])
                print(recon_mean[0][0, :4, :4])
                print(recon_log_cov[0][0, :4, :4])
                print(z_mean[0, 1])
                print(torch.exp(z_log_cov[0, 1]))

                save_pytorch_tensor_as_img(
                    recon_mean[0].data.cpu(),
                    'junk_vis/recon_vae_%d.png' % iter_num)
                save_pytorch_tensor_as_img(
                    obs_batch[0].data.cpu(),
                    'junk_vis/obs_vae_%d.png' % iter_num)

            model.train()
Exemplo n.º 7
0
def experiment(exp_specs):
    """Train a (optionally masked) VAE on single-digit multi-MNIST canvases.

    Builds 36x36 canvases via ``multi_mnist``, clamps pixels to [0.05, 0.95],
    and trains by maximizing the ELBO. Every 1000 iterations it prints
    training metrics and saves train images/reconstructions; every
    ``freq_val`` iterations it evaluates a random held-out batch under
    ``torch.no_grad()`` and saves validation images.

    Args:
        exp_specs: dict of experiment settings (keys used here:
            use_gpu, exp_id, exp_name, seed, masked, vae_specs,
            model_lr, model_wd, epochs, batch_size, freq_val).
    """
    ptu.set_gpu_mode(exp_specs['use_gpu'])
    # Set up logging ----------------------------------------------------------
    exp_id = exp_specs['exp_id']
    exp_prefix = exp_specs['exp_name']
    seed = exp_specs['seed']
    set_seed(seed)
    setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)
    img_save_path = 'junk_vis/debug_more_proper'

    # Prep the data -----------------------------------------------------------
    data_path = 'junk_vis/multi_mnist_data'
    canvas_size = 36
    (X_train, _), (X_test, _) = multi_mnist(data_path,
                                            max_digits=1,
                                            canvas_size=canvas_size,
                                            seed=42,
                                            use_max=True)
    # Add a channel dimension: (N, H, W) -> (N, 1, H, W).
    X_train = X_train[:, None, ...]
    X_test = X_test[:, None, ...]
    X_train, X_test = torch.FloatTensor(X_train) / 255.0, torch.FloatTensor(
        X_test) / 255.0

    # np_imgs = np.load('/u/kamyar/dsprites-dataset/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz')['imgs']

    # np_imgs = None

    # Keep pixel values away from exactly 0/1.
    X_train = torch.clamp(X_train, 0.05, 0.95)
    X_test = torch.clamp(X_test, 0.05, 0.95)
    train_ds = TensorDataset(X_train)
    val_ds = TensorDataset(X_test)

    # Model Definition --------------------------------------------------------
    if exp_specs['masked']:
        model = MaskedVAE(
            [1, canvas_size, canvas_size],
            exp_specs['vae_specs']['z_dim'],
            exp_specs['vae_specs']['encoder_specs'],
            exp_specs['vae_specs']['decoder_specs'],
        )
    else:
        model = VAE(
            [1, canvas_size, canvas_size],
            exp_specs['vae_specs']['z_dim'],
            exp_specs['vae_specs']['encoder_specs'],
            exp_specs['vae_specs']['decoder_specs'],
        )
    if ptu.gpu_enabled():
        model.cuda()

    # Optimizer ---------------------------------------------------------------
    model_optim = Adam(model.parameters(),
                       lr=float(exp_specs['model_lr']),
                       weight_decay=float(exp_specs['model_wd']))

    # -------------------------------------------------------------------------
    global_iter = 0
    for epoch in range(exp_specs['epochs']):
        train_loader = DataLoader(train_ds,
                                  batch_size=exp_specs['batch_size'],
                                  shuffle=True,
                                  num_workers=4,
                                  pin_memory=True,
                                  drop_last=True)
        for iter_num, img_batch in enumerate(train_loader):
            img_batch = img_batch[0]
            if ptu.gpu_enabled(): img_batch = img_batch.cuda()

            z_mean, z_log_cov, recon_mean, recon_log_cov, enc_mask, dec_mask = model(
                img_batch)
            elbo, KL = model.compute_ELBO(z_mean,
                                          z_log_cov,
                                          recon_mean,
                                          recon_log_cov,
                                          img_batch,
                                          average_over_batch=True)
            loss = -1. * elbo
            # BUG FIX: clear stale gradients before backprop; without this,
            # gradients accumulate across iterations and corrupt every update.
            model_optim.zero_grad()
            loss.backward()
            model_optim.step()

            if global_iter % 1000 == 0:
                mse = ((recon_mean - img_batch)**2).mean()
                print('\nTraining Iter %d...' % global_iter)
                print('ELBO:\t%.4f' % elbo)
                print('MSE:\t%.4f' % mse)
                print('KL:\t%.4f' % KL)
                save_pytorch_tensor_as_img(
                    img_batch[0].data.cpu(),
                    os.path.join(img_save_path,
                                 '%d_train_img.png' % (global_iter)))
                save_pytorch_tensor_as_img(
                    recon_mean[0].data.cpu(),
                    os.path.join(img_save_path,
                                 '%d_train_recon.png' % (global_iter)))
                if exp_specs['masked']:
                    save_pytorch_tensor_as_img(
                        enc_mask[0].data.cpu(),
                        os.path.join(img_save_path,
                                     '%d_train_enc_mask.png' % (global_iter)))
                    # save_pytorch_tensor_as_img(dec_mask[0].data.cpu(), os.path.join(img_save_path, '%d_train_dec_mask.png'%(global_iter)))

            if global_iter % exp_specs['freq_val'] == 0:
                with torch.no_grad():
                    print('Validating Iter %d...' % global_iter)
                    model.eval()

                    # Sample a random validation batch without replacement.
                    idxs = np.random.choice(int(X_test.size(0)),
                                            size=exp_specs['batch_size'],
                                            replace=False)
                    img_batch = X_test[idxs]
                    if ptu.gpu_enabled(): img_batch = img_batch.cuda()

                    z_mean, z_log_cov, recon_mean, recon_log_cov, enc_mask, dec_mask = model(
                        img_batch)
                    elbo, KL = model.compute_ELBO(z_mean,
                                                  z_log_cov,
                                                  recon_mean,
                                                  recon_log_cov,
                                                  img_batch,
                                                  average_over_batch=True)
                    mse = ((recon_mean - img_batch)**2).mean()

                    print('ELBO:\t%.4f' % elbo)
                    print('MSE:\t%.4f' % mse)
                    print('KL:\t%.4f' % KL)

                    for i in range(1):
                        save_pytorch_tensor_as_img(
                            img_batch[i].data.cpu(),
                            os.path.join(img_save_path,
                                         '%d_%d_img.png' % (global_iter, i)))
                        save_pytorch_tensor_as_img(
                            recon_mean[i].data.cpu(),
                            os.path.join(img_save_path,
                                         '%d_%d_recon.png' % (global_iter, i)))
                        if exp_specs['masked']:
                            save_pytorch_tensor_as_img(
                                enc_mask[i].data.cpu(),
                                os.path.join(
                                    img_save_path,
                                    '%d_%d_enc_mask.png' % (global_iter, i)))
                            # save_pytorch_tensor_as_img(dec_mask[i].data.cpu(), os.path.join(img_save_path, '%d_%d_dec_mask.png'%(global_iter, i)))

                    model.train()

            global_iter += 1
Exemplo n.º 8
0
def experiment(exp_specs):
    """Train a recurrent latent-variable model (GRU state, learned prior and
    posterior over z) on replay-buffer observation/action sequences.

    Loss is accumulated over ``freq_bptt`` steps (reconstruction MSE plus the
    prior/posterior KL) before a single backward/step, with the GRU hidden
    state re-zeroed at each BPTT boundary. Every ``freq_val`` iterations a
    fresh hidden state is rolled out on held-out data and MSE/KL per step are
    printed alongside the most recent training numbers.

    Args:
        exp_specs: dict of experiment settings (keys used here:
            use_gpu, exp_id, exp_name, seed, replay_dict_path,
            episode_length, batch_size, model_lr, model_wd, freq_bptt,
            max_iters, freq_val).
    """
    ptu.set_gpu_mode(exp_specs['use_gpu'])
    # Set up logging ----------------------------------------------------------
    exp_id = exp_specs['exp_id']
    exp_prefix = exp_specs['exp_name']
    seed = exp_specs['seed']
    set_seed(seed)
    setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)

    # Prep the data -----------------------------------------------------------
    replay_dict = joblib.load(exp_specs['replay_dict_path'])
    next_obs_array = replay_dict['next_observations']
    acts_array = replay_dict['actions']
    # First 40000 transitions train, the remainder validate.
    data_loader = BasicDataLoader(next_obs_array[:40000],
                                  acts_array[:40000],
                                  exp_specs['episode_length'],
                                  exp_specs['batch_size'],
                                  use_gpu=ptu.gpu_enabled())
    val_data_loader = BasicDataLoader(next_obs_array[40000:],
                                      acts_array[40000:],
                                      exp_specs['episode_length'],
                                      exp_specs['batch_size'],
                                      use_gpu=ptu.gpu_enabled())

    # Model Definition --------------------------------------------------------
    # 1x1 convolutions only: per-pixel feature extraction, spatial size kept.
    conv_encoder = nn.Sequential(
        nn.Conv2d(3, 32, 1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(32), nn.ReLU(),
        nn.Conv2d(32, 32, 1, stride=1, padding=0, bias=False),
        nn.BatchNorm2d(32), nn.ReLU())
    ae_dim = 128
    z_dim = 128
    # 288 = 32 channels * 3 * 3 spatial map; +4 is the action dimension.
    pre_gru = nn.Sequential(nn.Linear(288 + z_dim + 4, ae_dim, bias=False),
                            nn.BatchNorm1d(ae_dim), nn.ReLU(),
                            nn.Linear(ae_dim, ae_dim, bias=False),
                            nn.BatchNorm1d(ae_dim), nn.ReLU())
    # Posterior q(z | h, enc(obs), act).
    post_fc = nn.Sequential(nn.Linear(ae_dim + 288 + 4, ae_dim, bias=False),
                            nn.BatchNorm1d(ae_dim), nn.ReLU(),
                            nn.Linear(ae_dim, ae_dim, bias=False),
                            nn.BatchNorm1d(ae_dim), nn.ReLU())
    post_mean_fc = nn.Linear(ae_dim, z_dim, bias=True)
    post_log_cov_fc = nn.Linear(ae_dim, z_dim, bias=True)
    # Prior p(z | h, act) — no access to the current observation.
    prior_fc = nn.Sequential(nn.Linear(ae_dim + 4, ae_dim, bias=False),
                             nn.BatchNorm1d(ae_dim), nn.ReLU(),
                             nn.Linear(ae_dim, ae_dim, bias=False),
                             nn.BatchNorm1d(ae_dim), nn.ReLU())
    prior_mean_fc = nn.Linear(ae_dim, z_dim, bias=True)
    prior_log_cov_fc = nn.Linear(ae_dim, z_dim, bias=True)
    gru = nn.GRUCell(ae_dim, ae_dim, bias=True)
    fc_decoder = nn.Sequential(
        nn.Linear(ae_dim + z_dim + 4, ae_dim, bias=False),
        nn.BatchNorm1d(ae_dim),
        nn.ReLU(),
        nn.Linear(ae_dim, ae_dim, bias=False),
        nn.BatchNorm1d(ae_dim),
        nn.ReLU(),
        nn.Linear(ae_dim, 288, bias=False),
        nn.BatchNorm1d(288),
        nn.ReLU(),
    )
    conv_decoder = nn.Sequential(
        nn.ConvTranspose2d(32,
                           32,
                           1,
                           stride=1,
                           padding=0,
                           output_padding=0,
                           bias=False), nn.BatchNorm2d(32), nn.ReLU(),
        nn.ConvTranspose2d(32,
                           32,
                           1,
                           stride=1,
                           padding=0,
                           output_padding=0,
                           bias=False), nn.BatchNorm2d(32), nn.ReLU(),
        nn.Conv2d(32, 3, 1, stride=1, padding=0, bias=True), nn.Sigmoid())
    if ptu.gpu_enabled():
        conv_encoder.cuda()
        pre_gru.cuda()
        post_fc.cuda()
        post_mean_fc.cuda()
        post_log_cov_fc.cuda()
        prior_fc.cuda()
        prior_mean_fc.cuda()
        prior_log_cov_fc.cuda()
        gru.cuda()
        fc_decoder.cuda()
        conv_decoder.cuda()

    # Optimizer ---------------------------------------------------------------
    # Flatten the parameter lists of every sub-module into one Adam optimizer.
    model_optim = Adam([
        item for sublist in map(lambda x: list(x.parameters()), [
            pre_gru, conv_encoder, gru, fc_decoder, conv_decoder, post_fc,
            post_log_cov_fc, post_mean_fc, prior_fc, prior_log_cov_fc,
            prior_mean_fc
        ]) for item in sublist
    ],
                       lr=float(exp_specs['model_lr']),
                       weight_decay=float(exp_specs['model_wd']))

    # -------------------------------------------------------------------------
    freq_bptt = exp_specs['freq_bptt']
    episode_length = exp_specs['episode_length']
    losses = []
    KLs = []
    for iter_num in range(int(float(exp_specs['max_iters']))):
        if iter_num % freq_bptt == 0:
            if iter_num > 0:
                # loss = loss / freq_bptt
                loss = loss + total_KL
                # BUG FIX: clear stale gradients before backprop; without
                # this, gradients accumulate across BPTT windows.
                model_optim.zero_grad()
                loss.backward()
                model_optim.step()
            loss = 0
            total_KL = 0
            # Reset the recurrent state at every BPTT boundary.
            prev_h_batch = Variable(
                torch.zeros(exp_specs['batch_size'], ae_dim))
            if ptu.gpu_enabled():
                prev_h_batch = prev_h_batch.cuda()

            if iter_num % exp_specs['freq_val'] == 0:
                train_loss_print = '\t'.join(losses)
                train_KLs_print = '\t'.join(KLs)
            losses = []
            KLs = []

        obs_batch, act_batch = data_loader.get_next_batch()

        enc = conv_encoder(obs_batch).view(obs_batch.size(0), -1)

        hidden = post_fc(torch.cat([prev_h_batch, enc, act_batch], 1))
        post_mean = post_mean_fc(hidden)
        post_log_cov = post_log_cov_fc(hidden)

        hidden = prior_fc(torch.cat([prev_h_batch, act_batch], 1))
        prior_mean = prior_mean_fc(hidden)
        prior_log_cov = prior_log_cov_fc(hidden)

        recon = fc_decoder(torch.cat([prev_h_batch, act_batch, post_mean],
                                     1)).view(obs_batch.size(0), 32, 3, 3)
        recon = conv_decoder(recon)

        hidden = pre_gru(torch.cat([enc, post_mean, act_batch], 1))
        prev_h_batch = gru(hidden, prev_h_batch)

        KL = compute_KL(prior_mean, prior_log_cov, post_mean, post_log_cov)
        # Skip the first step of each episode: the zeroed hidden state carries
        # no information, so the model cannot be expected to reconstruct it.
        if iter_num % episode_length != 0:
            loss = loss + torch.sum(
                (obs_batch.view(obs_batch.size(0), -1) -
                 recon.view(obs_batch.size(0), -1))**2, 1).mean()
            total_KL = total_KL + KL
        losses.append('%.4f' % ((obs_batch - recon)**2).mean())
        KLs.append('%.4f' % KL)

        # Dump images for the first two episodes of every 50-episode span.
        if iter_num % (50 * exp_specs['episode_length']) in range(
                2 * exp_specs['episode_length']):
            save_pytorch_tensor_as_img(
                recon[0].data.cpu(),
                'junk_vis/full_KL_mem_grid_%d_recon.png' % iter_num)
            save_pytorch_tensor_as_img(
                obs_batch[0].data.cpu(),
                'junk_vis/full_KL_mem_grid_%d_obs.png' % iter_num)

        if iter_num % exp_specs['freq_val'] == 0:
            print('\nValidating Iter %d...' % iter_num)
            list(
                map(lambda x: x.eval(), [
                    pre_gru, conv_encoder, gru, fc_decoder, conv_decoder,
                    post_fc, post_log_cov_fc, post_mean_fc, prior_fc,
                    prior_log_cov_fc, prior_mean_fc
                ]))

            val_prev_h_batch = Variable(
                torch.zeros(exp_specs['batch_size'], ae_dim))
            if ptu.gpu_enabled():
                val_prev_h_batch = val_prev_h_batch.cuda()

            val_losses = []
            val_KLs = []
            for i in range(freq_bptt):
                # BUG FIX: validation must pull batches from the held-out
                # loader and roll out its own hidden state; the original code
                # read from data_loader and mutated the training prev_h_batch,
                # corrupting training and invalidating the metric.
                obs_batch, act_batch = val_data_loader.get_next_batch()

                enc = conv_encoder(obs_batch).view(obs_batch.size(0), -1)

                hidden = post_fc(
                    torch.cat([val_prev_h_batch, enc, act_batch], 1))
                post_mean = post_mean_fc(hidden)
                post_log_cov = post_log_cov_fc(hidden)

                hidden = prior_fc(torch.cat([val_prev_h_batch, act_batch], 1))
                prior_mean = prior_mean_fc(hidden)
                prior_log_cov = prior_log_cov_fc(hidden)

                recon = fc_decoder(
                    torch.cat([val_prev_h_batch, act_batch, post_mean],
                              1)).view(obs_batch.size(0), 32, 3, 3)
                recon = conv_decoder(recon)

                hidden = pre_gru(torch.cat([enc, post_mean, act_batch], 1))
                val_prev_h_batch = gru(hidden, val_prev_h_batch)

                val_losses.append('%.4f' % ((obs_batch - recon)**2).mean())
                val_KL = compute_KL(prior_mean, prior_log_cov, post_mean,
                                    post_log_cov)
                val_KLs.append('%.4f' % val_KL)

            val_loss_print = '\t'.join(val_losses)
            val_KLs_print = '\t'.join(val_KLs)
            print('Val MSE:\t' + val_loss_print)
            print('Train MSE:\t' + train_loss_print)
            print('Val KL:\t\t' + val_KLs_print)
            print('Train KL:\t' + train_KLs_print)

            list(
                map(lambda x: x.train(), [
                    pre_gru, conv_encoder, gru, fc_decoder, conv_decoder,
                    post_fc, post_log_cov_fc, post_mean_fc, prior_fc,
                    prior_log_cov_fc, prior_mean_fc
                ]))
def experiment(exp_specs):
    """Train a RecurrentModel to reconstruct observations from a partially
    observed grid maze, with truncated BPTT.

    Loss (sum of squared reconstruction error plus the model's regularizer)
    is accumulated over ``freq_bptt`` steps before each backward/step, and
    the LSTM state is detached at every BPTT boundary and re-zeroed at every
    episode boundary. Every ``freq_val`` iterations a held-out episode is
    rolled out and per-step MSE is printed next to the training numbers.

    Args:
        exp_specs: dict of experiment settings (keys used here:
            use_gpu, exp_id, exp_name, seed, episode_length, batch_size,
            model_lr, model_wd, freq_bptt, max_iters, freq_val).
    """
    ptu.set_gpu_mode(exp_specs['use_gpu'])
    # Set up logging ----------------------------------------------------------
    exp_id = exp_specs['exp_id']
    exp_prefix = exp_specs['exp_name']
    seed = exp_specs['seed']
    set_seed(seed)
    setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)

    # Prep the data -----------------------------------------------------------
    env_specs = {
        'flat_repr': False,
        'one_hot_repr': False,
        'maze_h': 9,
        'maze_w': 9,
        'obs_h': 5,
        'obs_w': 5,
        'scale': 4,
        'num_objs': 10
    }
    maze_constructor = lambda: PartiallyObservedGrid(env_specs)
    data_loader = VerySpecificOnTheFLyDataLoader(maze_constructor,
                                                 exp_specs['episode_length'],
                                                 exp_specs['batch_size'],
                                                 use_gpu=ptu.gpu_enabled())
    val_data_loader = VerySpecificOnTheFLyDataLoader(
        maze_constructor,
        exp_specs['episode_length'],
        exp_specs['batch_size'],
        use_gpu=ptu.gpu_enabled())

    # Model Definition --------------------------------------------------------
    model = RecurrentModel()
    if ptu.gpu_enabled():
        model.cuda()

    # Optimizer ---------------------------------------------------------------
    model_optim = Adam(model.parameters(),
                       lr=float(exp_specs['model_lr']),
                       weight_decay=float(exp_specs['model_wd']))

    # -------------------------------------------------------------------------
    freq_bptt = exp_specs['freq_bptt']
    episode_length = exp_specs['episode_length']
    losses = []
    for iter_num in range(int(float(exp_specs['max_iters']))):
        if iter_num % freq_bptt == 0:
            if iter_num > 0:
                # loss = loss / freq_bptt
                # BUG FIX: clear stale gradients before backprop; without
                # this, gradients accumulate across BPTT windows.
                model_optim.zero_grad()
                loss.backward()
                model_optim.step()
                # Truncate BPTT: keep the state values, drop their history.
                prev_h_batch = prev_h_batch.detach()
                prev_c_batch = prev_c_batch.detach()
            loss = 0
        if iter_num % episode_length == 0:
            # New episode: reset the LSTM state to zeros.
            prev_h_batch = Variable(
                torch.zeros(exp_specs['batch_size'], model.lstm_dim))
            prev_c_batch = Variable(
                torch.zeros(exp_specs['batch_size'], model.lstm_dim))
            if ptu.gpu_enabled():
                prev_h_batch = prev_h_batch.cuda()
                prev_c_batch = prev_c_batch.cuda()

            train_loss_print = '\t'.join(losses)
            losses = []

        obs_batch, act_batch = data_loader.get_next_batch()
        recon, log_cov, prev_h_batch, prev_c_batch = model.forward(
            obs_batch, act_batch, prev_h_batch, prev_c_batch)

        losses.append('%.4f' % ((obs_batch - recon)**2).mean())
        # Skip the first step of each episode: the zeroed state carries no
        # information about the freshly generated maze.
        if iter_num % episode_length != 0:
            # temp = (obs_batch - recon)**2 / 4.
            # temp[:,:,1:4,1:4] = temp[:,:,1:4,1:4] * 4.

            temp = (obs_batch - recon)**2
            loss = loss + temp.sum() / float(
                exp_specs['batch_size']) + model.reg_loss

            # loss = loss - compute_diag_log_prob(recon, log_cov, obs_batch)/float(exp_specs['batch_size'])

        # Dump images for the first two episodes of every 500-episode span.
        if iter_num % (500 * episode_length) in range(2 * episode_length):
            save_pytorch_tensor_as_img(
                recon[0].data.cpu(),
                'junk_vis/recurrent_deconv_stronger_2/rnn_recon_%d.png' %
                iter_num)
            save_pytorch_tensor_as_img(
                obs_batch[0].data.cpu(),
                'junk_vis/recurrent_deconv_stronger_2/rnn_obs_%d.png' %
                iter_num)

        if iter_num % exp_specs['freq_val'] == 0:
            model.eval()
            # print(mask[0], torch.mean(mask, 1), torch.std(mask, 1), torch.min(mask, 1), torch.max(mask, 1))
            print('\nValidating Iter %d...' % iter_num)

            val_prev_h_batch = Variable(
                torch.zeros(exp_specs['batch_size'], model.lstm_dim))
            val_prev_c_batch = Variable(
                torch.zeros(exp_specs['batch_size'], model.lstm_dim))
            if ptu.gpu_enabled():
                val_prev_h_batch = val_prev_h_batch.cuda()
                val_prev_c_batch = val_prev_c_batch.cuda()

            # BUG FIX: use a separate accumulator — the original reused the
            # training `losses` list here, so validation numbers leaked into
            # the next train_loss_print.
            val_losses = []
            for i in range(episode_length):
                obs_batch, act_batch = val_data_loader.get_next_batch()

                recon, log_cov, val_prev_h_batch, val_prev_c_batch = model.forward(
                    obs_batch, act_batch, val_prev_h_batch, val_prev_c_batch)

                # val_loss = compute_diag_log_prob(recon, log_cov, obs_batch)/float(exp_specs['batch_size'])
                val_losses.append('%.4f' % ((obs_batch - recon)**2).mean())

            loss_print = '\t'.join(val_losses)
            print('Val MSE:\t' + loss_print)
            print('Train MSE:\t' + train_loss_print)
            model.train()
Exemplo n.º 10
0
def experiment(exp_specs):
    """Train an AttentiveVAE on two-digit multi-MNIST canvases.

    Builds 48x48 canvases with up to two digits, converts digit counts into
    presence indicators via ``convert_dict``, and maximizes the ELBO plus a
    small mask-area penalty. Every ``freq_val`` iterations a random held-out
    batch is evaluated under ``torch.no_grad()`` and images are saved.

    Args:
        exp_specs: dict of experiment settings (keys used here:
            use_gpu, exp_id, exp_name, seed, vae_specs, model_lr, model_wd,
            epochs, batch_size, freq_val).
    """
    ptu.set_gpu_mode(exp_specs['use_gpu'])
    # Set up logging ----------------------------------------------------------
    exp_id = exp_specs['exp_id']
    exp_prefix = exp_specs['exp_name']
    seed = exp_specs['seed']
    set_seed(seed)
    setup_logger(exp_prefix=exp_prefix, exp_id=exp_id, variant=exp_specs)

    # Prep the data -----------------------------------------------------------
    path = 'junk_vis/debug_att_vae_shallower_48_64_dim_0p1_kl_stronger_seg_conv'
    (X_train, Y_train), (X_test, Y_test) = multi_mnist(path,
                                                       max_digits=2,
                                                       canvas_size=48,
                                                       seed=42,
                                                       use_max=False)
    # Map number of digits on the canvas to per-slot presence indicators.
    convert_dict = {0: [0., 0.], 1: [1., 0.], 2: [1., 1.]}
    Num_train = np.array([convert_dict[a.shape[0]] for a in Y_train])
    Num_test = np.array([convert_dict[a.shape[0]] for a in Y_test])
    # Add a channel dimension: (N, H, W) -> (N, 1, H, W).
    X_train = X_train[:, None, ...]
    X_test = X_test[:, None, ...]
    X_train, X_test = torch.FloatTensor(X_train) / 255.0, torch.FloatTensor(
        X_test) / 255.0
    mask_train, mask_test = torch.FloatTensor(Num_train), torch.FloatTensor(
        Num_test)
    # BUG FIX: TensorDataset requires tensors — use the FloatTensor versions
    # of the presence indicators (the original passed the raw numpy arrays
    # and left mask_train/mask_test unused).
    train_ds = TensorDataset(X_train, mask_train)
    val_ds = TensorDataset(X_test, mask_test)

    # Model Definition --------------------------------------------------------
    model = AttentiveVAE([1, 48, 48], exp_specs['vae_specs']['z_dim'],
                         exp_specs['vae_specs']['x_encoder_specs'],
                         exp_specs['vae_specs']['z_seg_conv_specs'],
                         exp_specs['vae_specs']['z_seg_fc_specs'],
                         exp_specs['vae_specs']['z_obj_conv_specs'],
                         exp_specs['vae_specs']['z_obj_fc_specs'],
                         exp_specs['vae_specs']['z_seg_recon_fc_specs'],
                         exp_specs['vae_specs']['z_seg_recon_upconv_specs'],
                         exp_specs['vae_specs']['z_obj_recon_fc_specs'],
                         exp_specs['vae_specs']['z_obj_recon_upconv_specs'],
                         exp_specs['vae_specs']['recon_upconv_part_specs'])
    if ptu.gpu_enabled():
        model.cuda()

    # Optimizer ---------------------------------------------------------------
    model_optim = Adam(model.parameters(),
                       lr=float(exp_specs['model_lr']),
                       weight_decay=float(exp_specs['model_wd']))

    # -------------------------------------------------------------------------
    global_iter = 0
    for epoch in range(exp_specs['epochs']):
        train_loader = DataLoader(train_ds,
                                  batch_size=exp_specs['batch_size'],
                                  shuffle=True,
                                  num_workers=4,
                                  pin_memory=False,
                                  drop_last=True)
        for iter_num, img_batch in enumerate(train_loader):
            img_batch, num_batch = img_batch[0], img_batch[1]
            if ptu.gpu_enabled(): img_batch = img_batch.cuda()

            what_means, what_log_covs, where_means, where_log_covs, masks, recon_mean, recon_log_cov = model(
                img_batch, num_batch)
            elbo, KL = model.compute_ELBO(what_means + where_means,
                                          what_log_covs + where_log_covs,
                                          recon_mean,
                                          recon_log_cov,
                                          img_batch,
                                          average_over_batch=True)
            loss = -1. * elbo
            # Penalize large attention masks to encourage tight segmentation.
            loss = loss + 1. * sum([m.mean() for m in masks])
            # BUG FIX: clear stale gradients before backprop; without this,
            # gradients accumulate across iterations and corrupt every update.
            model_optim.zero_grad()
            loss.backward()
            model_optim.step()

            if global_iter % exp_specs['freq_val'] == 0:
                with torch.no_grad():
                    print('\nValidating Iter %d...' % global_iter)
                    model.eval()

                    # Sample a random validation batch without replacement.
                    idxs = np.random.choice(int(X_test.size(0)),
                                            size=exp_specs['batch_size'],
                                            replace=False)
                    # Use the tensor indicators for consistency with training.
                    img_batch, num_batch = X_test[idxs], mask_test[idxs]
                    if ptu.gpu_enabled(): img_batch = img_batch.cuda()

                    what_means, what_log_covs, where_means, where_log_covs, masks, recon_mean, recon_log_cov = model(
                        img_batch, num_batch)
                    elbo, KL = model.compute_ELBO(what_means + where_means,
                                                  what_log_covs +
                                                  where_log_covs,
                                                  recon_mean,
                                                  recon_log_cov,
                                                  img_batch,
                                                  average_over_batch=True)

                    mse = ((recon_mean - img_batch)**2).mean()

                    print('ELBO:\t%.4f' % elbo)
                    print('MSE:\t%.4f' % mse)
                    print('KL:\t%.4f' % KL)

                    for i in range(1):
                        save_pytorch_tensor_as_img(
                            img_batch[i].data.cpu(),
                            os.path.join(path,
                                         '%d_%d_img.png' % (global_iter, i)))
                        save_pytorch_tensor_as_img(
                            recon_mean[i].data.cpu(),
                            os.path.join(path,
                                         '%d_%d_recon.png' % (global_iter, i)))
                        save_pytorch_tensor_as_img(
                            masks[0][i].data.cpu(),
                            os.path.join(path, '%d_%d_mask_0.png' %
                                         (global_iter, i)))
                        # save_pytorch_tensor_as_img(masks[1][i].data.cpu(), os.path.join(path, '%d_%d_mask_1.png'%(global_iter, i)))

                    model.train()

            global_iter += 1