Code example #1
File: torch_nets_tests.py  Project: iramusa/nn-plays
def save_crisp_states():
    DATA_PARTITION_SIZE = 10

    try:
        os.makedirs(EXP_FOLDER)
        os.makedirs(DATA_FOLDER)
    except OSError:
        pass

    # prepare data
    data_train = DataContainer(
        '/home/ira/code/projects/nn-play/experiments/0__well_done/17-11-30_09:05-wp_1b_1l_small_deter/data/train.pt',
        batch_size=BATCH_SIZE, ep_len_read=EP_LEN)
    data_train.populate_images()

    pae = PredictiveAutoencoder(v_size=V_SIZE).cuda()
    pae.load_state_dict(torch.load("%s/autencoder_epoch_4.pth" % (EXP_FOLDER)))

    ims = data_train.images
    ims = ims.transpose((1, 0, 4, 2, 3))

    x = Variable(torch.FloatTensor(DATA_PARTITION_SIZE, *ims.shape[1:]).cuda())

    real_states = []
    for i in range(ims.shape[0] // DATA_PARTITION_SIZE):
        begin = i * DATA_PARTITION_SIZE
        end = (i+1) * DATA_PARTITION_SIZE
        im_slice = ims[begin:end, ...]

        x.data.copy_(torch.FloatTensor(im_slice))
        state_slice = pae.bs_prop(x).view((-1, BS_SIZE)).data.cpu().numpy()
        real_states.append(state_slice)

    real_states = np.concatenate(real_states, axis=0)
    np.save("{}/crisp_states.npy".format(DATA_FOLDER), real_states)
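The saved array is two-dimensional, with BS_SIZE columns (one belief-state vector per encoded frame). A minimal sanity check of the output, reusing the DATA_FOLDER and BS_SIZE constants assumed by the snippet above, could look like this sketch:

import numpy as np

# Reload the belief states written by save_crisp_states() and check the layout;
# DATA_FOLDER and BS_SIZE are the module-level constants used above.
states = np.load("{}/crisp_states.npy".format(DATA_FOLDER))
print(states.shape)               # expected: (n_frames, BS_SIZE)
assert states.shape[1] == BS_SIZE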
Code example #2
File: torch_nets_tests.py  Project: iramusa/nn-plays
def test_autoencoder():
    try:
        os.makedirs(EXP_FOLDER)
    except OSError:
        pass

    data_test = DataContainer('data-balls/pass-train.pt', batch_size=BATCH_SIZE, ep_len_read=EP_LEN)
    data_test.populate_images()

    net = Autoencoder()
    criterion = nn.MSELoss()
    x = torch.FloatTensor(BATCH_SIZE, IM_CHANNELS, IM_WIDTH, IM_WIDTH)

    net = net.cuda()
    criterion = criterion.cuda()
    x = x.cuda()

    x = Variable(x)

    optimiser = optim.Adam(net.parameters(), lr=0.0005)

    for epoch in range(EPOCHS):
        for update in range(UPDATES_PER_EPOCH):
            batch = data_test.get_n_random_images(BATCH_SIZE)
            batch = batch.transpose((0, 3, 1, 2))
            batch = torch.FloatTensor(batch)

            net.zero_grad()

            x.data.resize_(batch.size()).copy_(batch)
            recon = net(x)
            err = criterion(recon, x)
            err.backward()

            optimiser.step()

            print('[%d/%d][%d/%d] Recon loss: %.4f'
                  % (epoch, EPOCHS, update, UPDATES_PER_EPOCH,
                     err.data[0]))

            if update % 100 == 0:
                vutils.save_image(recon.data,
                        '%s/reconstruction_epoch_%03d.png' % (EXP_FOLDER, epoch),
                                  normalize=True)

        # do checkpointing
        torch.save(net.state_dict(), '%s/autencoder_epoch_%d.pth' % (EXP_FOLDER, epoch))
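The training step above uses pre-0.4 PyTorch idioms (Variable, x.data.resize_().copy_(), err.data[0]). Purely for reference, a rough modern-PyTorch equivalent of the inner update is sketched below, reusing net, criterion and optimiser from the snippet and assuming batch is the transposed numpy array returned by get_n_random_images:

import torch

# Sketch of the same reconstruction update in current PyTorch; net, criterion,
# optimiser and the numpy `batch` are assumed to exist as in the snippet above.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

x = torch.as_tensor(batch, dtype=torch.float32, device=device)
optimiser.zero_grad()
recon = net(x)
err = criterion(recon, x)
err.backward()
optimiser.step()
print('Recon loss: %.4f' % err.item())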
Code example #3
File: experiment.py  Project: iramusa/nn-plays
    def generate_data(self):
        print('Generating data')

        rec = Record(**self.train_config)
        rec.run()
        fpath_train = '{}/train.pt'.format(self.folder_data)
        rec.write(fpath_train)
        self.train_box = DataContainer(fpath_train,
                                       batch_size=BATCH_SIZE,
                                       ep_len_read=EP_LEN)
        self.train_box.populate_images()

        rec = Record(**self.valid_config)
        rec.run()
        fpath_valid = '{}/test.pt'.format(self.folder_data)
        rec.write(fpath_valid)
        self.valid_box = DataContainer(fpath_valid,
                                       batch_size=BATCH_SIZE,
                                       ep_len_read=EP_LEN)
        self.valid_box.populate_images()
Code example #4
    if use_cuda:
        assert torch.cuda.is_available()

    if not os.path.exists(args.output_dir):
        torch_utils.make_dir_tree(args.output_dir)

    # prepare data
    sim_config = None
    obs_shape = None
    train_getter = None
    valid_getter = None
    if args.dataset_type == 'balls':
        sim_config = torch.load('{}/train.conf'.format(args.data_dir))
        obs_shape = BALLS_OBS_SHAPE

        train_container = DataContainer('{}/train.pt'.format(args.data_dir),
                                        batch_size=PAE_BATCH_SIZE)
        # train_container = DataContainer('{}/valid.pt'.format(args.data_dir), batch_size=PAE_BATCH_SIZE)
        valid_container = DataContainer('{}/valid.pt'.format(args.data_dir),
                                        batch_size=PAE_BATCH_SIZE)

        train_container.populate_images()
        valid_container.populate_images()

        train_getter = train_container.get_batch_episodes
        valid_getter = valid_container.get_batch_episodes

    else:
        raise ValueError('Failed to load data. Wrong dataset type {}'.format(
            args.dataset_type))
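This fragment relies on an args namespace and a use_cuda flag defined elsewhere. A hypothetical parser that would supply the fields used here is sketched below; the flag names and defaults are illustrative, not taken from the project:

import argparse

# Hypothetical CLI definition covering the fields referenced above
# (args.data_dir, args.output_dir, args.dataset_type, use_cuda).
parser = argparse.ArgumentParser()
parser.add_argument('--data-dir', dest='data_dir', required=True)
parser.add_argument('--output-dir', dest='output_dir', required=True)
parser.add_argument('--dataset-type', dest='dataset_type', default='balls')
parser.add_argument('--cuda', action='store_true')
args = parser.parse_args()
use_cuda = args.cuda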
Code example #5
File: torch_nets_tests.py  Project: iramusa/nn-plays
def train_predictive_autoencoder():
    try:
        os.makedirs(EXP_FOLDER)
    except OSError:
        pass

    data_train = DataContainer(
        '/home/ira/code/projects/nn-play/experiments/0__well_done/17-11-30_09:05-wp_1b_1l_small_deter/data/train.pt',
        batch_size=BATCH_SIZE, ep_len_read=EP_LEN)
    data_test = DataContainer(
        '/home/ira/code/projects/nn-play/experiments/0__well_done/17-11-30_09:05-wp_1b_1l_small_deter/data/test.pt',
        batch_size=BATCH_SIZE, ep_len_read=EP_LEN)
    data_train.populate_images()
    data_test.populate_images()

    net = PredictiveAutoencoder(v_size=V_SIZE)
    criterion = nn.MSELoss()
    x = torch.FloatTensor(EP_LEN, BATCH_SIZE, IM_CHANNELS, IM_WIDTH, IM_WIDTH)
    y = torch.FloatTensor(EP_LEN, BATCH_SIZE, IM_CHANNELS, IM_WIDTH, IM_WIDTH)

    net = net.cuda()
    criterion = criterion.cuda()
    x = x.cuda()
    y = y.cuda()

    x = Variable(x)
    y = Variable(y)

    optimiser = optim.Adam(net.parameters(), lr=0.0002)

    start_at_epoch = 5
    net.load_state_dict(torch.load("%s/autencoder_epoch_%d.pth" % (EXP_FOLDER, start_at_epoch - 1)))

    postfix = {}
    for epoch in range(start_at_epoch, EPOCHS):
        bar = trange(UPDATES_PER_EPOCH)
        postfix['epoch'] = '[%d/%d]' % (epoch, EPOCHS)
        for update in bar:

            batch = data_train.get_batch_episodes()
            masked = mask_percepts(batch, p=0.98)

            batch = batch.transpose((1, 0, 4, 2, 3))
            masked = masked.transpose((1, 0, 4, 2, 3))

            batch = torch.FloatTensor(batch)
            masked = torch.FloatTensor(masked)

            net.zero_grad()

            x.data.copy_(masked)
            y.data.copy_(batch)

            recon = net(x)
            err = criterion(recon, y)
            err.backward()
            optimiser.step()

            postfix['train loss'] = err.data[0]

            if update % 500 == 0:
                recon_ims = recon.data.cpu().numpy()
                target_ims = y.data.cpu().numpy()
                joint = np.concatenate((target_ims, recon_ims), axis=-2)
                my_utils.batch_to_sequence(joint, fpath='%s/training_recon_%03d.gif' % (EXP_FOLDER, epoch))

            if update % 10 == 0:
                batch = data_test.get_batch_episodes()
                masked = mask_percepts(batch, p=1.0)

                masked = masked.transpose((1, 0, 4, 2, 3))
                masked = torch.FloatTensor(masked)
                x.data.copy_(masked)

                batch = batch.transpose((1, 0, 4, 2, 3))
                batch = torch.FloatTensor(batch)
                y.data.copy_(batch)

                recon = net(x)
                err = criterion(recon, y)
                postfix['valid loss'] = err.data[0]

            if update % 500 == 0:
                recon_ims = recon.data.cpu().numpy()
                target_ims = y.data.cpu().numpy()
                joint = np.concatenate((target_ims, recon_ims), axis=-2)
                my_utils.batch_to_sequence(joint, fpath='%s/valid_recon_%03d.gif' % (EXP_FOLDER, epoch))

            bar.set_postfix(**postfix)

        # do checkpointing
        torch.save(net.state_dict(), '%s/autencoder_epoch_%d.pth' % (EXP_FOLDER, epoch))
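mask_percepts is not shown in these snippets; from its use here (hiding a fraction p of the observations so the network has to predict through the gaps), a minimal stand-in might look like the sketch below. The project's actual implementation may differ.

import numpy as np

def mask_percepts_sketch(batch, p):
    """Illustrative stand-in for mask_percepts: zero out a random fraction p of
    timesteps per episode. Assumes batch has shape
    (batch_size, ep_len, height, width, channels), as implied by the transposes above."""
    masked = batch.copy()
    hide = np.random.rand(batch.shape[0], batch.shape[1]) < p
    masked[hide] = 0.0
    return masked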
Code example #6
File: torch_nets_tests.py  Project: iramusa/nn-plays
def train_PAEGAN(start_at_epoch=0, train_gan=True, train_av=True, n_epochs=EPOCHS):
    try:
        os.makedirs(EXP_FOLDER)
    except OSError:
        pass

    data_train = DataContainer(
        '/home/ira/code/projects/nn-play/experiments/0__well_done/17-11-30_09:05-wp_1b_1l_small_deter/data/train.pt',
        batch_size=BATCH_SIZE, ep_len_read=EP_LEN)
    data_test = DataContainer(
        '/home/ira/code/projects/nn-play/experiments/0__well_done/17-11-30_09:05-wp_1b_1l_small_deter/data/test.pt',
        batch_size=BATCH_SIZE, ep_len_read=EP_LEN)
    data_train.populate_images()
    data_test.populate_images()

    if torch.cuda.is_available():
        net = VisualPAEGAN(v_size=V_SIZE, bs_size=BS_SIZE, n_size=N_SIZE, g_size=G_SIZE).cuda()
        criterion_pae = nn.MSELoss().cuda()
        criterion_gan = nn.BCELoss().cuda()
        # criterion_gan = nn.MSELoss().cuda()
        criterion_gen_averaged = nn.MSELoss().cuda()

        obs_in = Variable(torch.FloatTensor(EP_LEN, BATCH_SIZE, IM_CHANNELS, IM_WIDTH, IM_WIDTH).cuda())
        obs_out = Variable(torch.FloatTensor(EP_LEN, BATCH_SIZE, IM_CHANNELS, IM_WIDTH, IM_WIDTH).cuda())

        averaging_noise = Variable(torch.FloatTensor(AVERAGING_BATCH_SIZE, N_SIZE).cuda())
        noise = Variable(torch.FloatTensor(GAN_BATCH_SIZE, N_SIZE).cuda())

        fixed_noise = Variable(torch.FloatTensor(GAN_BATCH_SIZE, N_SIZE).normal_(0, 1).cuda())
        fixed_bs_noise = Variable(torch.FloatTensor(GAN_BATCH_SIZE, BS_SIZE).uniform_(-1, 1).cuda())
        label = Variable(torch.FloatTensor(GAN_BATCH_SIZE, 1).cuda())
    else:
        net = VisualPAEGAN(v_size=V_SIZE, bs_size=BS_SIZE, n_size=N_SIZE, g_size=G_SIZE)
        criterion_pae = nn.MSELoss()
        criterion_gan = nn.BCELoss()
        # criterion_gan = nn.MSELoss()
        criterion_gen_averaged = nn.MSELoss()

        obs_in = Variable(torch.FloatTensor(EP_LEN, BATCH_SIZE, IM_CHANNELS, IM_WIDTH, IM_WIDTH))
        obs_out = Variable(torch.FloatTensor(EP_LEN, BATCH_SIZE, IM_CHANNELS, IM_WIDTH, IM_WIDTH))

        averaging_noise = Variable(torch.FloatTensor(AVERAGING_BATCH_SIZE, N_SIZE))
        noise = Variable(torch.FloatTensor(GAN_BATCH_SIZE, N_SIZE))

        fixed_noise = Variable(torch.FloatTensor(GAN_BATCH_SIZE, N_SIZE).normal_(0, 1))
        fixed_bs_noise = Variable(torch.FloatTensor(GAN_BATCH_SIZE, BS_SIZE).uniform_(-1, 1))
        label = Variable(torch.FloatTensor(GAN_BATCH_SIZE, 1))

    real_label = 1
    fake_label = 0

    optimiser_pae = optim.Adam([{'params': net.bs_prop.parameters()},
                                {'params': net.decoder.parameters()}],
                               lr=0.0004)
    # optimiser_g = optim.Adam([{'params': net.bs_prop.parameters(), 'lr': 0.0001},
    #                           {'params': net.G.parameters(), 'lr': 0.0002}])
    optimiser_g = optim.Adam(net.G.parameters(), lr=0.0002)
    optimiser_sum = optim.Adam(net.G.parameters(), lr=0.0002)
    optimiser_d = optim.Adam(net.D.parameters(), lr=0.0002)

    if start_at_epoch > 0:
        net.load_state_dict(torch.load("%s/paegan_epoch_%d.pth" % (EXP_FOLDER, start_at_epoch - 1)))

    postfix = {}
    for epoch in range(start_at_epoch, n_epochs):
        bar = trange(UPDATES_PER_EPOCH)
        postfix['epoch'] = '[%d/%d]' % (epoch, n_epochs)
        for update in bar:
            batch = data_train.get_batch_episodes()
            masked = mask_percepts(batch, p=P_NO_OBS)

            batch = batch.transpose((1, 0, 4, 2, 3))
            masked = masked.transpose((1, 0, 4, 2, 3))

            batch = torch.FloatTensor(batch)
            masked = torch.FloatTensor(masked)

            net.zero_grad()

            obs_in.data.copy_(masked)
            obs_out.data.copy_(batch)

            # generate beliefs states
            states = net.bs_prop(obs_in)
            states = states.view(EP_LEN * BATCH_SIZE, -1)

            # generate observation expectations, compute pae gradients, update
            obs_expectation = net.decoder(states)
            obs_expectation = obs_expectation.view(obs_in.size())

            err_pae = criterion_pae(obs_expectation, obs_out)
            err_pae.backward(retain_graph=True)
            optimiser_pae.step()
            postfix['pae train loss'] = err_pae.data[0]

            if train_gan is True:
                net.zero_grad()

                # # flip labels
                # if update % 2000 == 0:
                #     if real_label == 1:
                #         real_label = 0
                #         fake_label = 1
                #     else:
                #         real_label = 1
                #         fake_label = 0
                # train discriminator with real data
                label.data.fill_(real_label)
                obs_out_non_ep = obs_out.view(EP_LEN * BATCH_SIZE,
                                              obs_out.size(2), obs_out.size(3), obs_out.size(4))

                obs_out_non_ep = obs_out_non_ep.unfold(0, 1, (EP_LEN*BATCH_SIZE)//GAN_BATCH_SIZE).squeeze(-1)
                states_non_ep = states.unfold(0, 1, (EP_LEN*BATCH_SIZE)//GAN_BATCH_SIZE).squeeze(-1)

                out_D_real = net.D(obs_out_non_ep)
                err_D_real = criterion_gan(out_D_real, label)
                err_D_real.backward()
                # D_x = out_D_real.data.mean()

                # print('out_D_real', out_D_real.data)
                # print('err_D_real', err_D_real.data[0])

                # train discriminator with fake data
                noise.data.normal_(0, 1)
                obs_sample = net.G(noise, states_non_ep)
                obs_sample = net.decoder(obs_sample)
                label.data.fill_(fake_label)
                out_D_fake = net.D(obs_sample.detach())
                err_D_fake = criterion_gan(out_D_fake, label)
                err_D_fake.backward()
                # D_G_z1 = out_D_fake.data.mean()

                # print('out_D_fake', out_D_fake.data)
                # print('err_D_fake', err_D_fake.data[0])

                err_D = (err_D_fake + err_D_real)/2
                optimiser_d.step()

                # train generator using discriminator
                net.zero_grad()

                batch = data_train.get_batch_episodes()
                masked = mask_percepts(batch, p=P_NO_OBS)

                batch = batch.transpose((1, 0, 4, 2, 3))
                masked = masked.transpose((1, 0, 4, 2, 3))

                batch = torch.FloatTensor(batch)
                masked = torch.FloatTensor(masked)

                obs_in.data.copy_(masked)
                obs_out.data.copy_(batch)

                # generate beliefs states
                states = net.bs_prop(obs_in)
                states = states.view(EP_LEN * BATCH_SIZE, -1)
                states_non_ep = states.unfold(0, 1, (EP_LEN*BATCH_SIZE)//GAN_BATCH_SIZE).squeeze(-1)

                noise.data.normal_(0, 1)
                obs_sample = net.G(noise, states_non_ep)
                obs_sample = net.decoder(obs_sample)

                label.data.fill_(real_label)
                out_D_fake = net.D(obs_sample)
                # print('out d fake', out_D_fake)
                err_G = criterion_gan(out_D_fake, label)
                err_G.backward()
                # D_G_z2 = out_D_fake.data.mean()
                optimiser_g.step()
                postfix['g train loss'] = err_G.data[0]
                postfix['d train loss'] = err_D.data[0]

                if update % 500 == 0:
                    vutils.save_image(obs_out_non_ep.data,
                            '%s/real_samples.png' % EXP_FOLDER,
                            normalize=True)
                    obs_sample = net.G(fixed_noise, fixed_bs_noise)
                    obs_sample = net.decoder(obs_sample)
                    vutils.save_image(obs_sample.data,
                            '%s/fake_samples_epoch_%03d.png' % (EXP_FOLDER, epoch),
                            normalize=False)
                    obs_recon = net.decoder(fixed_bs_noise)
                    vutils.save_image(obs_recon.data,
                            '%s/expectation_samples_epoch_%03d.png' % (EXP_FOLDER, epoch),
                            normalize=False)

            if train_av is True:
                net.zero_grad()
                # train generator using averaging
                # pull a 50th state from 0th episode in the last batch
                state = states[50:51, ...]
                # print('state size', state.size())
                state = state.expand(AVERAGING_BATCH_SIZE, -1)
                # print('state size', state.size())
                # get corresponding observation expectation
                obs_exp = obs_expectation[50:51, 0, ...]
                # print('obs size', obs_exp.size())

                # generate samples from state
                averaging_noise.data.normal_(0, 1)
                n_samples = net.G(averaging_noise, state.detach())
                # print('samples size', n_samples.size())

                sample_av = n_samples.mean(dim=0)
                sample_av = sample_av.unsqueeze(0)
                # print('samples av size', sample_av.size())
                # print('obs_exp size', obs_exp.size())

                err_sum = criterion_gen_averaged(sample_av, obs_exp.detach())
                err_sum.backward()
                optimiser_sum.step()
                postfix['sum train loss'] = err_sum.data[0]

                if update % 100 == 0:
                    sample_mixture = sample_av.data.cpu().numpy()
                    observation_belief = obs_exp.data.cpu().numpy()
                    joint = np.concatenate((observation_belief, sample_mixture), axis=-2)
                    joint = np.expand_dims(joint, axis=0)
                    my_utils.batch_to_sequence(joint, fpath='%s/training_sum_%03d.gif' % (EXP_FOLDER, epoch))

            if update % 500 == 0:
                recon_ims = obs_expectation.data.cpu().numpy()
                target_ims = obs_out.data.cpu().numpy()
                joint = np.concatenate((target_ims, recon_ims), axis=-2)
                my_utils.batch_to_sequence(joint, fpath='%s/training_recon_%03d.gif' % (EXP_FOLDER, epoch))

            if update % 100 == 0:
                batch = data_test.get_batch_episodes()
                masked = mask_percepts(batch, p=P_NO_OBS)
                # masked = mask_percepts(batch, p=1)

                masked = masked.transpose((1, 0, 4, 2, 3))
                masked = torch.FloatTensor(masked)
                obs_in.data.copy_(masked)

                batch = batch.transpose((1, 0, 4, 2, 3))
                batch = torch.FloatTensor(batch)
                obs_out.data.copy_(batch)

                states = net.bs_prop(obs_in)
                obs_expectation = net.decoder(states.view(EP_LEN * BATCH_SIZE, -1))
                obs_expectation = obs_expectation.view(obs_in.size())
                err_pae = criterion_pae(obs_expectation, obs_out)
                postfix['pae valid loss'] = err_pae.data[0]

            if update % 500 == 0:
                recon_ims = obs_expectation.data.cpu().numpy()
                target_ims = obs_out.data.cpu().numpy()
                joint = np.concatenate((target_ims, recon_ims), axis=-2)
                my_utils.batch_to_sequence(joint, fpath='%s/valid_recon_%03d.gif' % (EXP_FOLDER, epoch))

            bar.set_postfix(**postfix)

        # do checkpointing
        torch.save(net.state_dict(), '%s/paegan_epoch_%d.pth' % (EXP_FOLDER, epoch))
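The unfold(0, 1, stride).squeeze(-1) calls above are a strided subsampling along dimension 0: with a window size of 1 they pick every stride-th element, which thins the EP_LEN*BATCH_SIZE frames down to roughly GAN_BATCH_SIZE of them. A small self-contained check of that equivalence:

import torch

# unfold with window size 1 along dim 0, followed by squeezing the window
# dimension, is the same as plain strided slicing.
t = torch.arange(12.).view(12, 1)
stride = 3
a = t.unfold(0, 1, stride).squeeze(-1)
b = t[::stride]
assert torch.equal(a, b)  # both keep rows 0, 3, 6, 9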
Code example #7
File: experiment.py  Project: iramusa/nn-plays
class Experiment(object):
    def __init__(self, ctrl_var, var_vals, exp_name):
        self.date = datetime.datetime.now().strftime('%y-%m-%d_%H:%M')
        self.sim_conf = sim_config
        self.train_scheme = train_scheme

        self.train_config = train_config
        self.valid_config = valid_config

        self.ctrl_var = ctrl_var
        self.var_vals = var_vals
        self.exp_name = exp_name

        # folders
        self.folder_top = '{}/{}-{}/'.format(FOLDER_EXPS, self.date,
                                             self.exp_name)
        self.folder_data = '{}/{}-{}/data/'.format(FOLDER_EXPS, self.date,
                                                   self.exp_name)
        self.folder_gifs = '{}/{}-{}/gifs/'.format(FOLDER_EXPS, self.date,
                                                   self.exp_name)
        self.folder_modules = '{}/{}-{}/modules/'.format(
            FOLDER_EXPS, self.date, self.exp_name)
        self.folder_numerical = '{}/{}-{}/nums/'.format(
            FOLDER_EXPS, self.date, self.exp_name)
        self.folder_plots = '{}/{}-{}/plots/'.format(FOLDER_EXPS, self.date,
                                                     self.exp_name)

        self.folder_base_models = 'base_models/'

        self.folders = [
            self.folder_modules, self.folder_data, self.folder_base_models,
            self.folder_gifs, self.folder_numerical, self.folder_plots
        ]
        self.make_folders()

        self.train_box = None
        self.valid_box = None
        self.net = None

        self.x = []
        self.train_errors = []
        self.valid_errors = []

    def make_folders(self):
        for folder in self.folders:
            if not os.path.exists(folder):
                os.makedirs(folder)

    def run(self):
        for i, val in enumerate(self.var_vals):
            self.run_single(val, i)

    def run_single(self, val, i):
        print('Setting {} to {}'.format(self.ctrl_var, val))

        if self.ctrl_var in self.sim_conf.keys():
            self.sim_conf[self.ctrl_var] = val
            self.train_config['sim_config'] = self.sim_conf
            self.valid_config['sim_config'] = self.sim_conf
            self.generate_data()

        elif self.ctrl_var == 'v_size':
            self.train_scheme[self.ctrl_var] = val
            if self.train_box is None:
                self.generate_data()
        else:
            raise ValueError('Bad ctrl_var {}'.format(self.ctrl_var))

        v_size = self.train_scheme['v_size']
        self.net = HydraNet(**self.train_scheme)

        # print('Loading base models')
        # tag = 'base-{}'.format(v_size)
        # self.net.load_modules(self.folder_base_models, tag=tag)

        print('Starting training')

        self.net.execute_scheme(self.train_box.get_batch_episodes,
                                self.valid_box.get_batch_episodes)
        self.net.save_modules(self.folder_modules, tag='{}'.format(v_size))

        print('Saving base models')
        tag = 'base-{}'.format(v_size)
        self.net.save_modules(self.folder_base_models, tag=tag)

        print('Recording videos')
        pae_losseses = []
        pf_losseses = []
        for j in range(GIFS_NO):
            losses = self.net.draw_pred_gif(
                self.valid_box.get_n_random_episodes_full,
                p=1.0,
                use_stepper=False,
                use_pf=True,
                sim_config=sim_config,
                folder_plots=self.folder_gifs,
                tag='{}-{}'.format(val, j),
                normalize=True)
            pae_losseses.append(losses['pae_losses'])
            pf_losseses.append(losses['pf_losses'])

        baseline_level = self.net.plot_losses(
            folder_plots=self.folder_plots,
            tag=val,
            image_getter=self.valid_box.get_batch_episodes)

        av_pae_losses = np.mean(np.array(pae_losseses), axis=0)
        av_pf_losses = np.mean(np.array(pf_losseses), axis=0)
        self.write_av_losses(av_pae_losses, av_pf_losses, baseline_level, val)

        # get numericals
        self.x.append(val)
        self.train_errors.append(
            self.get_errors(self.train_box.get_batch_episodes))
        self.valid_errors.append(
            self.get_errors(self.valid_box.get_batch_episodes))

    def write_av_losses(self, pae_loss, pf_loss, baseline_level, tag):
        plt.clf()

        plt.plot(pae_loss)
        plt.plot(pf_loss)
        baseline = np.ones(len(pae_loss)) * baseline_level
        plt.plot(baseline, 'g--')

        plt.title('Loss')
        plt.ylabel('loss')
        plt.xlabel('timestep')
        plt.legend(['PAE', 'PF', 'baseline'])
        fpath = '{}/av_time_losses-{}.png'.format(self.folder_plots, tag)
        plt.savefig(fpath)

    def write_losses(self):
        plt.clf()

        results = pd.DataFrame({
            self.ctrl_var: self.x,
            'train_error': self.train_errors,
            'valid_error': self.valid_errors
        })
        fpath = '{}/errors.csv'.format(self.folder_numerical)
        results.to_csv(fpath)

        plt.scatter(self.x, self.train_errors)
        plt.scatter(self.x, self.valid_errors)

        plt.title('Loss')
        plt.ylabel('loss')
        plt.xlabel(self.ctrl_var)
        plt.legend(['train', 'valid'])
        fpath = '{}/run-summary.png'.format(self.folder_plots)
        plt.savefig(fpath)

    def get_errors(self, data_getter, test_iters=20):
        error_cum = 0
        for j in range(test_iters):
            error_cum += self.net.train_batch_pred_ae(data_getter,
                                                      p=1.0,
                                                      test=True)

        error = error_cum / test_iters
        return error

    def generate_data(self):
        print('Generating data')

        rec = Record(**self.train_config)
        rec.run()
        fpath_train = '{}/train.pt'.format(self.folder_data)
        rec.write(fpath_train)
        self.train_box = DataContainer(fpath_train,
                                       batch_size=BATCH_SIZE,
                                       ep_len_read=EP_LEN)
        self.train_box.populate_images()

        rec = Record(**self.valid_config)
        rec.run()
        fpath_valid = '{}/test.pt'.format(self.folder_data)
        rec.write(fpath_valid)
        self.valid_box = DataContainer(fpath_valid,
                                       batch_size=BATCH_SIZE,
                                       ep_len_read=EP_LEN)
        self.valid_box.populate_images()

    def generate_report(self):
        html_doc = "<html><head><title>{0}</title></head>\n<body>\n".format(
            self.exp_name)
        html_doc += "<h2>{0}</h2>\n".format(self.exp_name)
        html_doc += "Simulation configuration: {0}\n".format(self.sim_conf)
        html_doc += "Controlled variable: {0}\n".format(self.ctrl_var)
        html_doc += "Range: {0}\n".format(self.var_vals)

        html_doc += "<h3>Loss for different values of {}</h3>\n".format(
            self.ctrl_var)
        html_doc += "<center><img src=\"plots/run-summary.png\" width=\"800\"></center>".format(
            self.folder_plots)

        for val in self.var_vals:
            html_doc += "<h3>{0} set to {1}</h3>\n".format(self.ctrl_var, val)
            html_doc += "<center><img src=\"plots/loss-{}.png\" width=\"800\"></center>\n".format(
                val)

            html_doc += "<h3>Loss over time for {} set to {}</h3>\n".format(
                self.ctrl_var, val)
            html_doc += "<center><img src=\"plots/av_time_losses-{}.png\" width=\"800\"></center>\n".format(
                val)

            table = "<center><table style=\"text-align: center;\" style=\"margin: 0px auto;\" border=\"1\">" \
                    "<tr>" \
                    "<td>{0}</td>" \
                    "<td>percept</td>" \
                    "<td>ground truth</td>" \
                    "<td>prediction</td>" \
                    "<td>particle filter</td>" \
                    "</tr>\n".format(self.ctrl_var)

            for i in range(GIFS_NO):
                new_row =   "<tr>" \
                                "<td>{2}</td>" \
                                "<td><img src=\"gifs/percepts-{2}-{3}.gif\" width=\"140\"></td>" \
                                "<td><img src=\"gifs/truths-{2}-{3}.gif\" width=\"140\"></td>" \
                                "<td><img src=\"gifs/pae_preds-{2}-{3}.gif\" width=\"140\"></td>" \
                                "<td><img src=\"gifs/pf_preds-{2}-{3}.gif\" width=\"140\"></td>" \
                            "</tr>\n".format(self.ctrl_var, self.folder_gifs, val, i)
                table += new_row

            table += "</table></center>\n"
            html_doc += table

        html_doc += "</body>"

        with open('{}/report.html'.format(self.folder_top), 'w') as rep:
            rep.write(html_doc)
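The driver code for this class is not included in the snippet. A plausible usage, with an illustrative controlled variable and value range (not taken from the project), would be:

# Hypothetical driver for the Experiment class above.
if __name__ == '__main__':
    exp = Experiment(ctrl_var='v_size', var_vals=[64, 128, 256], exp_name='v_size_sweep')
    exp.run()
    exp.write_losses()
    exp.generate_report()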
Code example #8
File: hydranet.py  Project: iramusa/nn-plays
            frame = np.concatenate(table, axis=1)

            # print(frame.shape)
            width = frame.shape[1]
            row = np.ones((1, width))
            frame = np.concatenate([header, frame, row], axis=0)

            frames.append(frame)

        fpath = '{}/predictions-{}.gif'.format(folder_plots, tag)
        imageio.mimsave(fpath, frames)

if __name__ == '__main__':
    # train_box = DataContainer('data-balls/pass-train.pt', batch_size=32, ep_len_read=EP_LEN)
    # test_box = DataContainer('data-balls/pass-valid.pt', batch_size=32, ep_len_read=EP_LEN)
    # train_box = DataContainer('data-balls/mixed-train.pt', batch_size=32, ep_len_read=EP_LEN)
    # test_box = DataContainer('data-balls/mixed-valid.pt', batch_size=32, ep_len_read=EP_LEN)
    train_box = DataContainer('data-balls/bounce-train.pt', batch_size=BATCH_SIZE, ep_len_read=EP_LEN)
    test_box = DataContainer('data-balls/bounce-valid.pt', batch_size=BATCH_SIZE, ep_len_read=EP_LEN)
    train_box.populate_images()
    test_box.populate_images()

    hydra = HydraNet()
    # hydra.load_modules()
    hydra.load_modules(tag='base')
    hydra.execute_scheme(train_box.get_batch_episodes, test_box.get_batch_episodes)
    hydra.plot_losses()
    hydra.draw_pred_gif(test_box.get_n_random_episodes_full, use_stepper=False, use_pf=False)


Code example #9
def batch_to_sequence(batch_eps, fpath, normalise=False):
    """Write a batch of episodes out as an animated gif.

    :param batch_eps: array in format (timesteps, batch_size, im_channels, im_height, im_width)
    :param fpath: output path of the gif
    :param normalise: currently unused
    :return: None
    """
    batch_eps = [batch_eps[:, i, ...] for i in range(batch_eps.shape[1])]
    batch_eps = np.concatenate(batch_eps, axis=-1)

    im_seq = []
    for i in range(batch_eps.shape[0]):
        im_seq.append(batch_eps[i, 0, :, :])

    imageio.mimsave(fpath, im_seq)


if __name__ == "__main__":

    data_test = DataContainer('data-balls/simple-test.pt',
                              batch_size=16,
                              ep_len_read=40)
    data_test.populate_images()
    batch_eps = data_test.get_batch_episodes()
    print(batch_eps.shape)

    batch_eps = batch_eps.transpose((1, 0, 4, 2, 3))

    batch_to_sequence(batch_eps, 'test.gif')
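The transpose in the __main__ block reorders the container's native episode layout into the (timesteps, batch_size, channels, height, width) format that batch_to_sequence expects. A toy illustration of the axis mapping, with the native layout inferred from the transpose rather than documented here:

import numpy as np

# Assumed native layout of get_batch_episodes(): (batch, time, height, width, channels).
dummy = np.zeros((16, 40, 28, 28, 1), dtype=np.float32)
reordered = dummy.transpose((1, 0, 4, 2, 3))
print(reordered.shape)  # (40, 16, 1, 28, 28) == (T, B, C, H, W)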
Code example #10
input = Variable(input)
label = Variable(label)
noise = Variable(noise)
fixed_noise = Variable(fixed_noise)

# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))

# data_train = DataContainer('data-balls/bounce-train.pt', batch_size=opt.batchSize, ep_len_read=EP_LEN)
# data_train.populate_images()

if __name__ == "__main__":

    data_test = DataContainer('data-balls/bounce-valid.pt',
                              batch_size=opt.batchSize,
                              ep_len_read=EP_LEN)
    data_test.populate_images()

    for epoch in range(opt.niter):
        for i in range(1000):
            x = data_test.get_n_random_images(opt.batchSize)
            x = x.transpose((0, 3, 1, 2))
            x = torch.FloatTensor(x)

            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            # train with real
            netD.zero_grad()
            real_cpu = x
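The snippet is cut off at this point. For context only, a generic completion of the discriminator and generator updates that the comment block describes (the standard DCGAN recipe, not the project's original continuation; criterion and the pre-sized noise and label tensors are assumptions):

            # --- illustrative completion, not the original code ---
            criterion = nn.BCELoss()          # assumed; matches the log-loss in the comment

            input.data.resize_(real_cpu.size()).copy_(real_cpu)
            label.data.fill_(1)               # real label
            output = netD(input)
            errD_real = criterion(output, label)
            errD_real.backward()

            # train with fake
            noise.data.normal_(0, 1)
            fake = netG(noise)
            label.data.fill_(0)               # fake label
            output = netD(fake.detach())
            errD_fake = criterion(output, label)
            errD_fake.backward()
            optimizerD.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            netG.zero_grad()
            label.data.fill_(1)               # generator wants D to output "real"
            output = netD(fake)
            errG = criterion(output, label)
            errG.backward()
            optimizerG.step()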