示例#1
0
                                   legend=legend,
                                   xlimits=xlimits,
                                   ylimits=ylimits,
                                   alpha=1.)

        # plot_scatter(ax, samps=z ,xlimits=xlimits,ylimits=ylimits)
        # plot_kde(ax,samps=z,xlimits=xlimits,ylimits=ylimits,cmap='Blues')
        # plot_kde(ax,samps=z,xlimits=xlimits,ylimits=ylimits,cmap='Greens')

        # Overlay the variational posterior q(z|x) for this sample as green contours.
        mean, logvar = model.q_dist.get_mean_logvar(samp_torch)
        # Gaussian log-density at grid points zs, evaluated with the (CPU-moved)
        # mean/log-variance of q; zs is supplied by the contour plotter.
        func = lambda zs: lognormal4(torch.Tensor(zs),
                                     torch.squeeze(mean.data.cpu()),
                                     torch.squeeze(logvar.data.cpu()))
        plot_isocontours(ax,
                         func,
                         cmap='Greens',
                         xlimits=xlimits,
                         ylimits=ylimits)

        # func = lambda zs: lognormal4(torch.Tensor(zs), torch.zeros(2), torch.zeros(2))
        # plot_isocontours(ax, func, cmap='Blues', alpha=.3,xlimits=xlimits,ylimits=ylimits)

        # #Plot prob
        # col +=1
        # ax = plt.subplot2grid((rows,cols), (samp_i,col), frameon=False)
        # Ws, logpW, logqW = model.sample_W()  #_ , [1], [1]
        # # func = lambda zs: lognormal4(torch.Tensor(zs), torch.squeeze(mean.data), torch.squeeze(logvar.data))
        # # plot_isocontours(ax, func, cmap='Reds')
        # # func = lambda zs: log_bernoulli(model.decode(Ws, Variable(torch.unsqueeze(zs,1))), Variable(torch.unsqueeze(samp,0)))+ Variable(torch.unsqueeze(lognormal4(torch.Tensor(zs), torch.zeros(2), torch.zeros(2)), 1))
        # plot_isocontours2_exp_norm_logspace(ax, func, cmap='Greens', legend=legend)
        # if samp_i==0:  ax.annotate('p(z|x,W1)', xytext=(.1, 1.1), xy=(0, 1), textcoords='axes fraction')
示例#2
0
    # models = []
    # model_names = []
    
    # Transparency used when overlaying the true posterior on each model's KDE.
    alpha=.2
    rows = len(posteriors)
    columns = len(models) +1 #+1 for posteriors

    fig = plt.figure(figsize=(6+columns,4+rows), facecolor='white')

    # One grid row per target posterior; leftmost column shows the true
    # posterior, remaining columns show each model's approximation.
    for p_i in range(len(posteriors)):

        print '\nPosterior', p_i, posterior_names[p_i]

        # NOTE(review): ttp / posteriors / posterior_names come from outside
        # this chunk; posterior_class presumably wraps a log-density callable
        # exposed as run_log_post — confirm against its definition.
        posterior = ttp.posterior_class(posteriors[p_i])
        ax = plt.subplot2grid((rows,columns), (p_i,0), frameon=False)#, colspan=3)
        plot_isocontours(ax, posterior.run_log_post, cmap='Blues')
        if p_i == 0: ax.annotate('Posterior', xytext=(.3, 1.1), xy=(0, 1), textcoords='axes fraction')

        for q_i in range(len(models)):

            print model_names[q_i]
            ax = plt.subplot2grid((rows,columns), (p_i,q_i+1), frameon=False)#, colspan=3)
            # Instantiate a fresh model for this posterior, train it, then
            # KDE-plot 1000 of its samples.
            model = models[q_i](posteriors[p_i])
            # model.train(10000, save_to=home+'/Documents/tmp/vars.ckpt')
            # NOTE(review): 9999000 steps with no checkpoint path looks like a
            # debug/placeholder value — confirm intended step count.
            model.train(9999000, save_to='')
            samps = model.sample(1000)
            plot_kde(ax, samps, cmap='Reds')
            # Overlay the true posterior contours for visual comparison.
            plot_isocontours(ax, posterior.run_log_post, cmap='Blues', alpha=alpha)
            if p_i == 0: ax.annotate(model_names[q_i], xytext=(.38, 1.1), xy=(0, 1), textcoords='axes fraction')

    # plt.show()
        #Plot prob: posterior over z for this sample in the next grid row.
        row +=1
        ax = plt.subplot2grid((rows,cols), (row, samp_i+1), frameon=False)

        # log p(z|x) for this sample; the helper's name suggests it
        # exponentiates and normalizes over the plotting grid — confirm.
        func = lambda zs: model.logposterior_func(samp_torch,zs)
        # plot_isocontours2_exp_norm(ax, func, cmap='Greys', legend=legend,xlimits=xlimits,ylimits=ylimits,alpha=.2)
        plot_isocontours2_exp_norm(ax, func, cmap='Blues', legend=legend,xlimits=xlimits,ylimits=ylimits,alpha=1.)

        # plot_scatter(ax, samps=z ,xlimits=xlimits,ylimits=ylimits)
        # plot_kde(ax,samps=z,xlimits=xlimits,ylimits=ylimits,cmap='Blues')
        # plot_kde(ax,samps=z,xlimits=xlimits,ylimits=ylimits,cmap='Greens')

        # Overlay the variational q(z|x) (green Gaussian contours) on the same axes.
        mean, logvar = model.q_dist.get_mean_logvar(samp_torch)
        func = lambda zs: lognormal4(torch.Tensor(zs), torch.squeeze(mean.data.cpu()), torch.squeeze(logvar.data.cpu()))
        plot_isocontours(ax, func, cmap='Greens',xlimits=xlimits,ylimits=ylimits)



        # func = lambda zs: lognormal4(torch.Tensor(zs), torch.zeros(2), torch.zeros(2))
        # plot_isocontours(ax, func, cmap='Blues', alpha=.3,xlimits=xlimits,ylimits=ylimits)




        # #Plot prob
        # col +=1
        # ax = plt.subplot2grid((rows,cols), (samp_i,col), frameon=False)
        # Ws, logpW, logqW = model.sample_W()  #_ , [1], [1]   
        # # func = lambda zs: lognormal4(torch.Tensor(zs), torch.squeeze(mean.data), torch.squeeze(logvar.data))
        # # plot_isocontours(ax, func, cmap='Reds')

            # #Plot prior
            # col +=1
            # ax = plt.subplot2grid((rows,cols), (samp_i,col), frameon=False)
            # func = lambda zs: lognormal4(torch.Tensor(zs), torch.zeros(2), torch.zeros(2))
            # plot_isocontours(ax, func, cmap='Blues')
            # if samp_i==0:  ax.annotate('Prior p(z)', xytext=(.3, 1.1), xy=(0, 1), textcoords='axes fraction')

            #Plot q: variational q(z|x) (red), standard-normal prior p(z)
            #(blue), and a single-W unnormalized posterior (green), all on
            #fixed [-val, val] axes.
            col +=1
            val = 3  # half-width of this panel's plotting window
            ax = plt.subplot2grid((rows,cols), (samp_i,col), frameon=False)
            mean, logvar = model.encode(Variable(torch.unsqueeze(samp,0)))
            func = lambda zs: lognormal4(torch.Tensor(zs), torch.squeeze(mean.data), torch.squeeze(logvar.data))
            plot_isocontours(ax, func, cmap='Reds', xlimits=[-val, val], ylimits=[-val, val])
            if samp_i==0:  ax.annotate('p(z)\nq(z|x)\np(z|x)', xytext=(.3, 1.1), xy=(0, 1), textcoords='axes fraction')
            # Prior p(z): zero mean, zero log-variance (unit Gaussian).
            func = lambda zs: lognormal4(torch.Tensor(zs), torch.zeros(2), torch.zeros(2))
            plot_isocontours(ax, func, cmap='Blues', alpha=.3, xlimits=[-val, val], ylimits=[-val, val])
            
            Ws, logpW, logqW = model.sample_W()  #_ , [1], [1]   
            # log p(x|z,W) + log p(z): unnormalized log posterior over z for
            # this one sampled W; plotted exp-normalized below.
            func = lambda zs: log_bernoulli(model.decode(Ws, Variable(torch.unsqueeze(zs,1))), Variable(torch.unsqueeze(samp,0)))+ Variable(torch.unsqueeze(lognormal4(torch.Tensor(zs), torch.zeros(2), torch.zeros(2)), 1))
            plot_isocontours2_exp_norm(ax, func, cmap='Greens', legend=legend, xlimits=[-val, val], ylimits=[-val, val])

            # #Plot logprior
            # col +=1
            # ax = plt.subplot2grid((rows,cols), (samp_i,col), frameon=False)
            # func = lambda zs: lognormal4(torch.Tensor(zs), torch.zeros(2), torch.zeros(2))
            # plot_isocontoursNoExp(ax, func, cmap='Blues', legend=legend)
            # if samp_i==0:  ax.annotate('Prior\nlogp(z)', xytext=(.3, 1.1), xy=(0, 1), textcoords='axes fraction')
示例#5
0
                                   xlimits=xlimits,
                                   ylimits=ylimits)  #,alpha=.2)
        # plot_isocontours2_exp_norm(ax, func, cmap='Blues', legend=legend,xlimits=xlimits,ylimits=ylimits,alpha=1.)

        # plot_scatter(ax, samps=z ,xlimits=xlimits,ylimits=ylimits)
        # plot_kde(ax,samps=z,xlimits=xlimits,ylimits=ylimits,cmap='Blues')
        # plot_kde(ax,samps=z,xlimits=xlimits,ylimits=ylimits,cmap='Greens')

        # Variational q(z|x): Gaussian contours at the encoder's (CPU-moved)
        # mean/log-variance for this sample.
        mean, logvar = model.q_dist.get_mean_logvar(samp_torch)
        func = lambda zs: lognormal4(torch.Tensor(zs),
                                     torch.squeeze(mean.data.cpu()),
                                     torch.squeeze(logvar.data.cpu()))
        # plot_isocontours(ax, func, cmap='Greens',xlimits=xlimits,ylimits=ylimits)
        plot_isocontours(ax,
                         func,
                         cmap='Blues',
                         xlimits=xlimits,
                         ylimits=ylimits)

        # func = lambda zs: lognormal4(torch.Tensor(zs), torch.zeros(2), torch.zeros(2))
        # plot_isocontours(ax, func, cmap='Blues', alpha=.3,xlimits=xlimits,ylimits=ylimits)

        # #Plot prob
        # col +=1
        # ax = plt.subplot2grid((rows,cols), (samp_i,col), frameon=False)
        # Ws, logpW, logqW = model.sample_W()  #_ , [1], [1]
        # # func = lambda zs: lognormal4(torch.Tensor(zs), torch.squeeze(mean.data), torch.squeeze(logvar.data))
        # # plot_isocontours(ax, func, cmap='Reds')
        # # func = lambda zs: log_bernoulli(model.decode(Ws, Variable(torch.unsqueeze(zs,1))), Variable(torch.unsqueeze(samp,0)))+ Variable(torch.unsqueeze(lognormal4(torch.Tensor(zs), torch.zeros(2), torch.zeros(2)), 1))
        # plot_isocontours2_exp_norm_logspace(ax, func, cmap='Greens', legend=legend)
        # if samp_i==0:  ax.annotate('p(z|x,W1)', xytext=(.1, 1.1), xy=(0, 1), textcoords='axes fraction')
示例#6
0
        fig = plt.figure(figsize=(5 + columns, 3 + rows), facecolor='white')

        # for p_i in range(len(disrtibutions)):

        #######################
        #ROW 1

        p_i = 0  # only the first row is drawn; the loop above is disabled

        ######
        #COL 1: contours of the target distribution p(X)
        ax = plt.subplot2grid((rows, columns), (p_i, 0),
                              frameon=False)  #, colspan=3)
        #Plot distribution

        plot_isocontours(ax, dist.run_log_post, cmap='Blues')
        if p_i == 0:
            ax.annotate('p(X)',
                        xytext=(.4, 1.1),
                        xy=(0, 1),
                        textcoords='axes fraction')

        ######
        #COL 2: dataset samples from p(X); the plotting call for this panel
        #continues past this excerpt.
        ax = plt.subplot2grid((rows, columns), (p_i, 1),
                              frameon=False)  #, colspan=3)

        if p_i == 0:
            ax.annotate('Dataset Samples',
                        xytext=(.2, 1.1),
示例#7
0
            if samp_i==0:  ax.annotate('Sample', xytext=(.3, 1.1), xy=(0, 1), textcoords='axes fraction')


            # #Plot prior
            # col +=1
            # ax = plt.subplot2grid((rows,cols), (samp_i,col), frameon=False)
            # func = lambda zs: lognormal4(torch.Tensor(zs), torch.zeros(2), torch.zeros(2))
            # plot_isocontours(ax, func, cmap='Blues')
            # if samp_i==0:  ax.annotate('Prior p(z)', xytext=(.3, 1.1), xy=(0, 1), textcoords='axes fraction')

            #Plot q: encoder's q(z|x) (red) with the standard-normal prior
            #p(z) (blue) overlaid on shared axis limits.
            col +=1
            ax = plt.subplot2grid((rows,cols), (samp_i,col), frameon=False)
            mean, logvar = model.encode(Variable(torch.unsqueeze(samp,0)))
            func = lambda zs: lognormal4(torch.Tensor(zs), torch.squeeze(mean.data), torch.squeeze(logvar.data))
            plot_isocontours(ax, func, cmap='Reds',xlimits=xlimits,ylimits=ylimits)
            if samp_i==0:  ax.annotate('p(z)\nq(z|x)', xytext=(.3, 1.1), xy=(0, 1), textcoords='axes fraction')
            # Prior p(z): zero mean, zero log-variance (unit Gaussian).
            func = lambda zs: lognormal4(torch.Tensor(zs), torch.zeros(2), torch.zeros(2))
            plot_isocontours(ax, func, cmap='Blues', alpha=.3,xlimits=xlimits,ylimits=ylimits)


            # #Plot logprior
            # col +=1
            # ax = plt.subplot2grid((rows,cols), (samp_i,col), frameon=False)
            # func = lambda zs: lognormal4(torch.Tensor(zs), torch.zeros(2), torch.zeros(2))
            # plot_isocontoursNoExp(ax, func, cmap='Blues', legend=legend)
            # if samp_i==0:  ax.annotate('Prior\nlogp(z)', xytext=(.3, 1.1), xy=(0, 1), textcoords='axes fraction')

            # #Plot logq
            # col +=1
            # ax = plt.subplot2grid((rows,cols), (samp_i,col), frameon=False)
示例#8
0
            if samp_i==0:  ax.annotate('Sample', xytext=(.3, 1.1), xy=(0, 1), textcoords='axes fraction')


            # #Plot prior
            # col +=1
            # ax = plt.subplot2grid((rows,cols), (samp_i,col), frameon=False)
            # func = lambda zs: lognormal4(torch.Tensor(zs), torch.zeros(2), torch.zeros(2))
            # plot_isocontours(ax, func, cmap='Blues')
            # if samp_i==0:  ax.annotate('Prior p(z)', xytext=(.3, 1.1), xy=(0, 1), textcoords='axes fraction')

            #Plot q: encoder's q(z|x) (red) with the standard-normal prior
            #p(z) (blue) overlaid, using the plotter's default axis limits.
            col +=1
            ax = plt.subplot2grid((rows,cols), (samp_i,col), frameon=False)
            mean, logvar = model.encode(Variable(torch.unsqueeze(samp,0)))
            func = lambda zs: lognormal4(torch.Tensor(zs), torch.squeeze(mean.data), torch.squeeze(logvar.data))
            plot_isocontours(ax, func, cmap='Reds')
            if samp_i==0:  ax.annotate('p(z)\nq(z|x)', xytext=(.3, 1.1), xy=(0, 1), textcoords='axes fraction')
            # Prior p(z): zero mean, zero log-variance (unit Gaussian).
            func = lambda zs: lognormal4(torch.Tensor(zs), torch.zeros(2), torch.zeros(2))
            plot_isocontours(ax, func, cmap='Blues', alpha=.3)


            # #Plot logprior
            # col +=1
            # ax = plt.subplot2grid((rows,cols), (samp_i,col), frameon=False)
            # func = lambda zs: lognormal4(torch.Tensor(zs), torch.zeros(2), torch.zeros(2))
            # plot_isocontoursNoExp(ax, func, cmap='Blues', legend=legend)
            # if samp_i==0:  ax.annotate('Prior\nlogp(z)', xytext=(.3, 1.1), xy=(0, 1), textcoords='axes fraction')

            # #Plot logq
            # col +=1
            # ax = plt.subplot2grid((rows,cols), (samp_i,col), frameon=False)