コード例 #1
0
ファイル: main.py プロジェクト: spicyyou/Eyecon
def main():
    """Build, compile and train the multimodal model, then plot the run.

    Reads paths and hyperparameters from the module-level `constants`
    object, trains on GPU 0, checkpoints the best model by validation
    loss, and finally hands the fit history to plot_model().
    """
    frame = make_df(constants.PATH_JSON)
    ds_train, ds_valid = data_generator(constants.PATH_IMG, frame)
    net_in, net_out = multimodal_multistream_model()

    net = Model(inputs=net_in, outputs=net_out)
    net.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=constants.LEARNING_RATE),
        loss=tf.losses.BinaryCrossentropy(),
        metrics=['accuracy'],
    )
    net.summary()

    # Keep only the best full model (lowest validation loss) on disk.
    ckpt_cb = tf.keras.callbacks.ModelCheckpoint(
        filepath=constants.PATH_MODEL_SAVE,
        monitor='val_loss',
        save_weights_only=False,
        save_best_only=True,
        verbose=2,
    )
    # Abort training once validation loss has not improved for 10 epochs.
    stop_cb = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)

    with tf.device('/device:GPU:0'):
        history = net.fit(
            ds_train,
            epochs=100,
            validation_data=ds_valid,
            callbacks=[stop_cb, ckpt_cb],
        )

    plot_model(history)
コード例 #2
0
    def test_plot_model(self):
        """
        Test that a plotted model image is saved to disk as a '.png'.

        Fix: previously `test_ext` was only assigned when the plots
        directory was non-empty, so an empty directory raised
        UnboundLocalError at the final assertEqual instead of failing
        cleanly.  Assert the file exists first, then inspect its
        extension.
        """
        mock_model = self.mock_model
        plot_model(mock_model, self.test_plots_dir)
        files = os.listdir(self.test_plots_dir)

        # Saves a model plot (fail here, with a clear message, if not).
        self.assertTrue(files, "no model plot saved")
        test_ext = os.path.splitext(files[0])[1]
        self.assertEqual(".png", test_ext, "model plot not saved as '.png'")
コード例 #3
0
ファイル: plot_mcmc_old.py プロジェクト: nhmc/LAE
def main(args):
    """Make diagnostic and posterior plots from a saved MCMC run.

    args must be a length-1 sequence holding the filename of a pickled
    samples dict (keys used here: 'chain', 'lnprob', 'accept').  All
    figures are written beneath fig/.  Relies on module-level names
    defined elsewhere in this file (P, loadobj, parse_config, plot_*,
    find_min_interval, np, pl, scipy, minimize, ln_likelihood).
    Python 2 code (print statements, xrange).
    """
    # model.cfg in the working directory overrides default.cfg shipped
    # next to this script.
    path = os.path.abspath(__file__).rsplit('/', 1)[0]
    defaults = parse_config(path + '/default.cfg')
    opt = parse_config('model.cfg', defaults)
    print pprint.pformat(opt)
    print '### Read parameters from model.cfg ###'

    # Exactly one positional argument: the samples filename.
    filename, = args
    samples = loadobj(filename)

    mean_accept =  samples['accept'].mean()
    print 'Mean acceptance fraction', mean_accept
    # chain is shaped (nwalkers, nsamples, npar).
    nwalkers, nsamples, npar = samples['chain'].shape

    # All output figures go under fig/.
    if not os.path.lexists('fig/'):
        os.mkdir('fig')

    # Burn-in runs (by filename convention) only get diagnostic plots.
    if filename.startswith('samples_burn'):

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood.
        i = samples['lnprob'].ravel().argmax()
        P['ml'] = samples['chain'].reshape(-1, npar)[i]

        print 'Plotting burn-in sample posteriors'
        # bins for plotting posterior histograms, spanning the prior
        # range P['min']..P['max'] for each parameter.
        P['bins'] = [np.linspace(lo, hi, opt.Nhistbins) for
                     lo,hi in zip(P['min'], P['max'])]

        fig,axes = plot_posteriors_burn(samples['chain'], P, npar=opt.npar)
        fig.suptitle('%i samples of %i walkers' % (
            nsamples, nwalkers), fontsize=14)
        fig.savefig('fig/posterior_burnin.' + opt.plotformat)

        print 'Plotting traces'
        fig, nwplot = plot_trace(samples['chain'])
        fig.suptitle('Chain traces for %i of %i walkers' % (nwplot,nwalkers))
        fig.savefig('fig/traces.' + opt.plotformat)

        if opt.autocorr:
            print 'Plotting autocorrelation'
            fig, axes = plot_autocorr(samples['chain'])
            fig.suptitle('Autocorrelation for %i walkers with %i samples. '
                         '(Mean acceptance fraction %.2f)' %
                         (nwalkers, nsamples, mean_accept), fontsize=14)
            fig.savefig('fig/autocorr.' + opt.plotformat)

    else:
        # make a chain of independent samples: keep every Nt-th of the
        # first Ns*Nt samples from each walker, then flatten.
        Ns, Nt = opt.Nsamp, opt.Nthin
        assert Ns * Nt <= nsamples
        chain = samples['chain'][:,0:Ns*Nt:Nt,:].reshape(-1, npar)


        # bins for plotting posterior histograms, padded 10% beyond the
        # sampled range of each parameter.
        P['bins'] = []
        for i in xrange(len(P['names'])):
            x0, x1 = chain[:,i].min(), chain[:,i].max()
            dx = x1 - x0
            lo = x0 - 0.1*dx
            hi = x1 + 0.1*dx
            P['bins'].append( np.linspace(lo, hi, opt.Nhistbins) )


        # 1-sigma and 2-sigma credible levels.
        levels = 0.6827, 0.9545
        # Smallest intervals containing each level, per parameter.
        P['p1sig'] = [find_min_interval(chain[:, i], levels[0]) for i
                      in range(npar)]
        P['p2sig'] = [find_min_interval(chain[:, i], levels[1]) for i
                      in range(npar)]

        # if hasattr(P, 'nuisance') and any(P.nuisance):
        #     print 'marginalising over nuisance parameters'
        #     marginalised_chain = chain[:, [i for i in range(npar)
        #                                    if not P.nuisance[i]]]
        #     print chain.shape, marginalised_chain.shape
        #     ijoint_sig = get_levels(marginalised_chain, levels)

        # Indices of the top 68.27% / 95.45% of thinned samples ranked
        # by ln-probability: the joint 1/2-sigma sample sets.
        lnprob = samples['lnprob'][:,0:Ns*Nt:Nt].ravel()
        isort = lnprob.argsort()
        P['ijoint_sig'] = [isort[int((1-l)*len(lnprob)):] for l in levels]

        # the joint 1 and 2 sigma regions, simultaneously estimating
        # all parameters: per-parameter extent of those sample sets.
        P['p1sig_joint'] = []
        P['p2sig_joint'] = []
        for i in range(npar):
            lo = chain[P['ijoint_sig'][0], i].min()
            hi = chain[P['ijoint_sig'][0], i].max()
            P['p1sig_joint'].append((lo, hi))
            lo = chain[P['ijoint_sig'][1], i].min()
            hi = chain[P['ijoint_sig'][1], i].max()
            P['p2sig_joint'].append((lo, hi))

        P['median'] = np.median(chain, axis=0)

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood (full, un-thinned chain).
        i = samples['lnprob'].ravel().argmax()
        P['ml'] = samples['chain'].reshape(-1, npar)[i]

        if opt.find_maximum_likelihood:
            # Optionally refine the chain-based ML estimate numerically.
            if not scipy:
                raise ImportError('Scipy minimize not available')
            print 'Finding maximum likelihood parameter values'
            P['ml'] = minimize(lambda *x: -ln_likelihood(*x), P['ml'])
            print 'done'

        if opt.plotposteriors:
            print 'Plotting sample posteriors'
            fig, axes = plot_posteriors(chain, P, npar=opt.npar)
            fig.suptitle('%i of %i samples, %i walkers, thinning %i' % (
                Ns, nsamples, nwalkers, Nt), fontsize=14)
            fig.savefig('fig/posterior_mcmc.' + opt.plotformat)

    if opt.plotdata:
        print 'Plotting the maximum likelihood model and data'
        # Imported lazily: model may be expensive or unavailable.
        from model import plot_model
        fig = plot_model(P['ml'])
        fig.savefig('fig/model.' + opt.plotformat)

    # Parameter summary only makes sense after the main (non-burn) run.
    if opt.printpar and not filename.startswith('samples_burn'):
        from model import print_par
        print_par(P)

    if opt.display:
        print 'Displaying...'
        pl.show()

    print 'Done!'
コード例 #4
0
ファイル: test.py プロジェクト: nhmc/LAE
# If you get an error when running emcee in parallel, you often get an
# unhelpful error message, making debugging very difficult. To check
# everything is working before you start running emcee, use this test
# file.

from model import \
     ln_likelihood, P, x, ydata, ysigma, get_initial_positions, plot_model

# Smoke test: one set of initial positions should plot without error.
p0 = get_initial_positions(1)
plot_model(p0)
コード例 #5
0
ファイル: plot_mcmc.py プロジェクト: nhmc/LAE
def main(args):
    """Make diagnostic and posterior plots from a saved MCMC run.

    args must be a length-1 sequence holding the filename of a pickled
    samples dict (keys used here: 'chain', 'lnprob', 'accept').  All
    figures are written beneath fig/.  Relies on module-level names
    defined elsewhere in this file (P, loadobj, parse_config, plot_*,
    find_min_interval, np, pl, scipy, minimize, ln_likelihood).
    Python 2 code (print statements, xrange).
    """
    # model.cfg in the working directory overrides default.cfg shipped
    # next to this script.
    path = os.path.abspath(__file__).rsplit("/", 1)[0]
    defaults = parse_config(path + "/default.cfg")
    opt = parse_config("model.cfg", defaults)
    print pprint.pformat(opt)
    print "### Read parameters from model.cfg ###"

    # Exactly one positional argument: the samples filename.
    filename, = args
    samples = loadobj(filename)

    mean_accept = samples["accept"].mean()
    print "Mean acceptance fraction", mean_accept
    # chain is shaped (nwalkers, nsamples, npar).
    nwalkers, nsamples, npar = samples["chain"].shape

    # All output figures go under fig/.
    if not os.path.lexists("fig/"):
        os.mkdir("fig")

    # Burn-in runs (by filename convention) only get diagnostic plots.
    if filename.startswith("samples_burn"):

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood.
        i = samples["lnprob"].ravel().argmax()
        P["ml"] = samples["chain"].reshape(-1, npar)[i]

        print "Plotting burn-in sample posteriors"
        # bins for plotting posterior histograms, spanning the prior
        # range P['min']..P['max'] for each parameter.
        P["bins"] = [np.linspace(lo, hi, opt.Nhistbins) for lo, hi in zip(P["min"], P["max"])]

        fig, axes = plot_posteriors_burn(samples["chain"], P, npar=opt.npar)
        fig.suptitle("%i samples of %i walkers" % (nsamples, nwalkers), fontsize=14)
        fig.savefig("fig/posterior_burnin." + opt.plotformat)

        print "Plotting traces"
        fig, nwplot = plot_trace(samples["chain"])
        fig.suptitle("Chain traces for %i of %i walkers" % (nwplot, nwalkers))
        fig.savefig("fig/traces." + opt.plotformat)

        if opt.autocorr:
            print "Plotting autocorrelation"
            fig, axes = plot_autocorr(samples["chain"])
            fig.suptitle(
                "Autocorrelation for %i walkers with %i samples. "
                "(Mean acceptance fraction %.2f)" % (nwalkers, nsamples, mean_accept),
                fontsize=14,
            )
            fig.savefig("fig/autocorr." + opt.plotformat)

    else:
        # make a chain of independent samples: keep every Nt-th of the
        # first Ns*Nt samples from each walker, then flatten.
        Ns, Nt = opt.Nsamp, opt.Nthin
        assert Ns * Nt <= nsamples
        chain = samples["chain"][:, 0 : Ns * Nt : Nt, :].reshape(-1, npar)

        # bins for plotting posterior histograms, padded 10% beyond the
        # sampled range of each parameter.
        P["bins"] = []
        for i in xrange(len(P["names"])):
            x0, x1 = chain[:, i].min(), chain[:, i].max()
            dx = x1 - x0
            lo = x0 - 0.1 * dx
            hi = x1 + 0.1 * dx
            P["bins"].append(np.linspace(lo, hi, opt.Nhistbins))

        # 1-sigma and 2-sigma credible levels.
        levels = 0.6827, 0.9545
        # Smallest intervals containing each level, per parameter.
        P["p1sig"] = [find_min_interval(chain[:, i], levels[0]) for i in range(npar)]
        P["p2sig"] = [find_min_interval(chain[:, i], levels[1]) for i in range(npar)]

        # if hasattr(P, 'nuisance') and any(P.nuisance):
        #     print 'marginalising over nuisance parameters'
        #     marginalised_chain = chain[:, [i for i in range(npar)
        #                                    if not P.nuisance[i]]]
        #     print chain.shape, marginalised_chain.shape
        #     ijoint_sig = get_levels(marginalised_chain, levels)

        # Indices of the top 68.27% / 95.45% of thinned samples ranked
        # by ln-probability: the joint 1/2-sigma sample sets.
        lnprob = samples["lnprob"][:, 0 : Ns * Nt : Nt].ravel()
        isort = lnprob.argsort()
        P["ijoint_sig"] = [isort[int((1 - l) * len(lnprob)) :] for l in levels]

        # the joint 1 and 2 sigma regions, simultaneously estimating
        # all parameters: per-parameter extent of those sample sets.
        P["p1sig_joint"] = []
        P["p2sig_joint"] = []
        for i in range(npar):
            lo = chain[P["ijoint_sig"][0], i].min()
            hi = chain[P["ijoint_sig"][0], i].max()
            P["p1sig_joint"].append((lo, hi))
            lo = chain[P["ijoint_sig"][1], i].min()
            hi = chain[P["ijoint_sig"][1], i].max()
            P["p2sig_joint"].append((lo, hi))

        P["median"] = np.median(chain, axis=0)

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood (full, un-thinned chain).
        i = samples["lnprob"].ravel().argmax()
        P["ml"] = samples["chain"].reshape(-1, npar)[i]

        if opt.find_maximum_likelihood:
            # Optionally refine the chain-based ML estimate numerically.
            if not scipy:
                raise ImportError("Scipy minimize not available")
            print "Finding maximum likelihood parameter values"
            P["ml"] = minimize(lambda *x: -ln_likelihood(*x), P["ml"])
            print "done"

        if opt.plotposteriors:
            print "Plotting sample posteriors"
            # NOTE(review): keyword is nplot= here; confirm this matches
            # plot_posteriors' signature.
            fig, axes = plot_posteriors(chain, P, nplot=opt.npar)
            fig.suptitle("%i of %i samples, %i walkers, thinning %i" % (Ns, nsamples, nwalkers, Nt), fontsize=14)
            fig.savefig("fig/posterior_mcmc." + opt.plotformat, dpi=200)

    if opt.plotdata:
        print "Plotting the maximum likelihood model and data"
        # Imported lazily: model may be expensive or unavailable.
        from model import plot_model

        # Overlay either nsamp_plot evenly-spaced samples from the full
        # chain, or just the median parameter set.
        if opt.nsamp_plot > 1:
            chain = samples["chain"].reshape(-1, npar)
            step = int(len(chain) / opt.nsamp_plot)
            samp = chain[::step]
            fig = plot_model(samp)
        else:
            fig = plot_model([P["median"]])
        # NOTE(review): fig is never saved in this branch — presumably
        # shown via opt.display below; confirm this is intentional.

    # Parameter summary only makes sense after the main (non-burn) run.
    if opt.printpar and not filename.startswith("samples_burn"):
        from model import print_par

        print_par(P)

    if opt.display:
        print "Displaying..."
        pl.show()

    print "Done!"
コード例 #6
0
ファイル: plot_mcmc.py プロジェクト: nhmc/LAE
def main(args):
    """Make diagnostic and posterior plots from a saved MCMC run.

    args must be a length-1 sequence holding the filename of a pickled
    samples dict (keys used here: 'chain', 'lnprob', 'accept').  All
    figures are written beneath fig/.  Relies on module-level names
    defined elsewhere in this file (P, loadobj, parse_config, plot_*,
    find_min_interval, np, pl, scipy, minimize, ln_likelihood).
    Python 2 code (print statements, xrange).
    """
    # model.cfg in the working directory overrides default.cfg shipped
    # next to this script.
    path = os.path.abspath(__file__).rsplit('/', 1)[0]
    defaults = parse_config(path + '/default.cfg')
    opt = parse_config('model.cfg', defaults)
    print pprint.pformat(opt)
    print '### Read parameters from model.cfg ###'

    # Exactly one positional argument: the samples filename.
    filename, = args
    samples = loadobj(filename)

    mean_accept = samples['accept'].mean()
    print 'Mean acceptance fraction', mean_accept
    # chain is shaped (nwalkers, nsamples, npar).
    nwalkers, nsamples, npar = samples['chain'].shape

    # All output figures go under fig/.
    if not os.path.lexists('fig/'):
        os.mkdir('fig')

    # Burn-in runs (by filename convention) only get diagnostic plots.
    if filename.startswith('samples_burn'):

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood.
        i = samples['lnprob'].ravel().argmax()
        P['ml'] = samples['chain'].reshape(-1, npar)[i]

        print 'Plotting burn-in sample posteriors'
        # bins for plotting posterior histograms, spanning the prior
        # range P['min']..P['max'] for each parameter.
        P['bins'] = [
            np.linspace(lo, hi, opt.Nhistbins)
            for lo, hi in zip(P['min'], P['max'])
        ]

        fig, axes = plot_posteriors_burn(samples['chain'], P, npar=opt.npar)
        fig.suptitle('%i samples of %i walkers' % (nsamples, nwalkers),
                     fontsize=14)
        fig.savefig('fig/posterior_burnin.' + opt.plotformat)

        print 'Plotting traces'
        fig, nwplot = plot_trace(samples['chain'])
        fig.suptitle('Chain traces for %i of %i walkers' % (nwplot, nwalkers))
        fig.savefig('fig/traces.' + opt.plotformat)

        if opt.autocorr:
            print 'Plotting autocorrelation'
            fig, axes = plot_autocorr(samples['chain'])
            fig.suptitle('Autocorrelation for %i walkers with %i samples. '
                         '(Mean acceptance fraction %.2f)' %
                         (nwalkers, nsamples, mean_accept),
                         fontsize=14)
            fig.savefig('fig/autocorr.' + opt.plotformat)

    else:
        # make a chain of independent samples: keep every Nt-th of the
        # first Ns*Nt samples from each walker, then flatten.
        Ns, Nt = opt.Nsamp, opt.Nthin
        assert Ns * Nt <= nsamples
        chain = samples['chain'][:, 0:Ns * Nt:Nt, :].reshape(-1, npar)

        # bins for plotting posterior histograms, padded 10% beyond the
        # sampled range of each parameter.
        P['bins'] = []
        for i in xrange(len(P['names'])):
            x0, x1 = chain[:, i].min(), chain[:, i].max()
            dx = x1 - x0
            lo = x0 - 0.1 * dx
            hi = x1 + 0.1 * dx
            P['bins'].append(np.linspace(lo, hi, opt.Nhistbins))

        # 1-sigma and 2-sigma credible levels.
        levels = 0.6827, 0.9545
        # Smallest intervals containing each level, per parameter.
        P['p1sig'] = [
            find_min_interval(chain[:, i], levels[0]) for i in range(npar)
        ]
        P['p2sig'] = [
            find_min_interval(chain[:, i], levels[1]) for i in range(npar)
        ]

        # if hasattr(P, 'nuisance') and any(P.nuisance):
        #     print 'marginalising over nuisance parameters'
        #     marginalised_chain = chain[:, [i for i in range(npar)
        #                                    if not P.nuisance[i]]]
        #     print chain.shape, marginalised_chain.shape
        #     ijoint_sig = get_levels(marginalised_chain, levels)

        # Indices of the top 68.27% / 95.45% of thinned samples ranked
        # by ln-probability: the joint 1/2-sigma sample sets.
        lnprob = samples['lnprob'][:, 0:Ns * Nt:Nt].ravel()
        isort = lnprob.argsort()
        P['ijoint_sig'] = [isort[int((1 - l) * len(lnprob)):] for l in levels]

        # the joint 1 and 2 sigma regions, simultaneously estimating
        # all parameters: per-parameter extent of those sample sets.
        P['p1sig_joint'] = []
        P['p2sig_joint'] = []
        for i in range(npar):
            lo = chain[P['ijoint_sig'][0], i].min()
            hi = chain[P['ijoint_sig'][0], i].max()
            P['p1sig_joint'].append((lo, hi))
            lo = chain[P['ijoint_sig'][1], i].min()
            hi = chain[P['ijoint_sig'][1], i].max()
            P['p2sig_joint'].append((lo, hi))

        P['median'] = np.median(chain, axis=0)

        # estimate maximum likelihood as the point in the chain with
        # the highest likelihood (full, un-thinned chain).
        i = samples['lnprob'].ravel().argmax()
        P['ml'] = samples['chain'].reshape(-1, npar)[i]

        if opt.find_maximum_likelihood:
            # Optionally refine the chain-based ML estimate numerically.
            if not scipy:
                raise ImportError('Scipy minimize not available')
            print 'Finding maximum likelihood parameter values'
            P['ml'] = minimize(lambda *x: -ln_likelihood(*x), P['ml'])
            print 'done'

        if opt.plotposteriors:
            print 'Plotting sample posteriors'
            # NOTE(review): keyword is nplot= here; confirm this matches
            # plot_posteriors' signature.
            fig, axes = plot_posteriors(chain, P, nplot=opt.npar)
            fig.suptitle('%i of %i samples, %i walkers, thinning %i' %
                         (Ns, nsamples, nwalkers, Nt),
                         fontsize=14)
            fig.savefig('fig/posterior_mcmc.' + opt.plotformat, dpi=200)

    if opt.plotdata:
        print 'Plotting the maximum likelihood model and data'
        # Imported lazily: model may be expensive or unavailable.
        from model import plot_model
        # Overlay either nsamp_plot evenly-spaced samples from the full
        # chain, or just the median parameter set.
        if opt.nsamp_plot > 1:
            chain = samples['chain'].reshape(-1, npar)
            step = int(len(chain) / opt.nsamp_plot)
            samp = chain[::step]
            fig = plot_model(samp)
        else:
            fig = plot_model([P['median']])
        # NOTE(review): fig is never saved in this branch — presumably
        # shown via opt.display below; confirm this is intentional.

    # Parameter summary only makes sense after the main (non-burn) run.
    if opt.printpar and not filename.startswith('samples_burn'):
        from model import print_par
        print_par(P)

    if opt.display:
        print 'Displaying...'
        pl.show()

    print 'Done!'