Code example #1
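Loads a trained model from an experiment folder and displays its weight matrix W as a grid of 28×28 filter images, contrast-scaled by args.contrast.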
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    # Get experiment folder
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    if not os.path.isfile(pjoin(experiment_path, "model.pkl")):
        parser.error(
            'Cannot find model for experiment: {0}!'.format(experiment_path))

    if not os.path.isfile(pjoin(experiment_path, "hyperparams.json")):
        parser.error('Cannot find hyperparams for experiment: {0}!'.format(
            experiment_path))

    # Load the experiment's hyperparameters
    hyperparams = utils.load_dict_from_json_file(
        pjoin(experiment_path, "hyperparams.json"))

    with Timer("Loading model"):
        if hyperparams["model"] == "rbm":
            from iRBM.models.rbm import RBM
            model_class = RBM
        elif hyperparams["model"] == "orbm":
            from iRBM.models.orbm import oRBM
            model_class = oRBM
        elif hyperparams["model"] == "irbm":
            from iRBM.models.irbm import iRBM
            model_class = iRBM

        # Load the actual model.
        model = model_class.load(pjoin(experiment_path, "model.pkl"))

    if hyperparams["dataset"] == "binarized_mnist":
        image_shape = (28, 28)
    elif hyperparams["dataset"] == "caltech101_silhouettes28":
        image_shape = (28, 28)
    else:
        raise ValueError("Unknown dataset: {0}".format(hyperparams["dataset"]))

    weights = model.W.get_value()
    clim = (weights.min(), weights.max())
    data = vizu.concatenate_images(args.contrast * weights,
                                   shape=image_shape,
                                   border_size=1,
                                   clim=clim)
    plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
    plt.tight_layout()
    plt.xticks([])
    plt.yticks([])
    plt.show()
Code example #2
File: eval_model.py Project: vikkamath/iRBM
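Evaluates a trained model: estimates the log partition function lnZ with annealed importance sampling (AIS) unless args.lnZ is supplied, then reports the average NLL (with standard error) on the train, valid, and test sets.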
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    # Get experiment folder
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    if not os.path.isfile(pjoin(experiment_path, "model.pkl")):
        parser.error('Cannot find model for experiment: {0}!'.format(experiment_path))

    if not os.path.isfile(pjoin(experiment_path, "hyperparams.json")):
        parser.error('Cannot find hyperparams for experiment: {0}!'.format(experiment_path))

    # Load the experiment's hyperparameters
    hyperparams = utils.load_dict_from_json_file(pjoin(experiment_path, "hyperparams.json"))

    with Timer("Loading dataset"):
        trainset, validset, testset = dataset.load(hyperparams['dataset'])

    with Timer("Loading model"):
        if hyperparams["model"] == "rbm":
            from iRBM.models.rbm import RBM
            model_class = RBM
        elif hyperparams["model"] == "orbm":
            from iRBM.models.orbm import oRBM
            model_class = oRBM
        elif hyperparams["model"] == "irbm":
            from iRBM.models.irbm import iRBM
            model_class = iRBM

        # Load the actual model.
        model = model_class.load(pjoin(experiment_path, "model.pkl"))

    if args.lnZ is None:
        with Timer("Estimating model's partition function with AIS({0}) and {1} temperatures.".format(args.nb_samples, args.nb_temperatures)):
            lnZ, lnZ_down, lnZ_up = compute_lnZ(model, nb_chains=args.nb_samples, temperatures=np.linspace(0, 1, args.nb_temperatures))
            lnZ_down = lnZ - lnZ_down
            lnZ_up = lnZ + lnZ_up
    else:
        lnZ, lnZ_down, lnZ_up = args.lnZ

    print "-> lnZ: {lnZ_down} <= {lnZ} <= {lnZ_up}".format(lnZ_down=lnZ_down, lnZ=lnZ, lnZ_up=lnZ_up)

    with Timer("\nComputing average NLL on {0} using lnZ={1}.".format(hyperparams['dataset'], lnZ)):
        NLL_train, NLL_valid, NLL_test = compute_AvgStderrNLL(model, lnZ, trainset, validset, testset)

    print "Avg. NLL on trainset: {:.2f} ± {:.2f}".format(NLL_train.avg, NLL_train.stderr)
    print "Avg. NLL on validset: {:.2f} ± {:.2f}".format(NLL_valid.avg, NLL_valid.stderr)
    print "Avg. NLL on testset:  {:.2f} ± {:.2f}".format(NLL_test.avg, NLL_test.stderr)
Code example #3
File: show_filters.py Project: MarcCote/iRBM
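The same filter-visualization script as code example #1.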
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    # Get experiment folder
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    if not os.path.isfile(pjoin(experiment_path, "model.pkl")):
        parser.error('Cannot find model for experiment: {0}!'.format(experiment_path))

    if not os.path.isfile(pjoin(experiment_path, "hyperparams.json")):
        parser.error('Cannot find hyperparams for experiment: {0}!'.format(experiment_path))

    # Load the experiment's hyperparameters
    hyperparams = utils.load_dict_from_json_file(pjoin(experiment_path, "hyperparams.json"))

    with Timer("Loading model"):
        if hyperparams["model"] == "rbm":
            from iRBM.models.rbm import RBM
            model_class = RBM
        elif hyperparams["model"] == "orbm":
            from iRBM.models.orbm import oRBM
            model_class = oRBM
        elif hyperparams["model"] == "irbm":
            from iRBM.models.irbm import iRBM
            model_class = iRBM

        # Load the actual model.
        model = model_class.load(pjoin(experiment_path, "model.pkl"))

    if hyperparams["dataset"] == "binarized_mnist":
        image_shape = (28, 28)
    elif hyperparams["dataset"] == "caltech101_silhouettes28":
        image_shape = (28, 28)
    else:
        raise ValueError("Unknown dataset: {0}".format(hyperparams["dataset"]))

    weights = model.W.get_value()
    clim = (weights.min(), weights.max())
    data = vizu.concatenate_images(args.contrast*weights, shape=image_shape, border_size=1, clim=clim)
    plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
    plt.tight_layout()
    plt.xticks([])
    plt.yticks([])
    plt.show()
Code example #4
File: eval_model.py Project: napsternxg/iRBM
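An extended evaluation script: it caches AIS results in ais_result.json and aborts if the cached settings differ from the current ones, can evaluate an iRBM with a fixed hidden-layer size by borrowing oRBM methods (args.irbm_fixed_size), writes a result.json summary, and optionally plots the AIS samples and the running lnZ estimate (args.view).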
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    # Get experiment folder
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    if not os.path.isfile(pjoin(experiment_path, "model.pkl")):
        parser.error(
            'Cannot find model for experiment: {0}!'.format(experiment_path))

    if not os.path.isfile(pjoin(experiment_path, "hyperparams.json")):
        parser.error('Cannot find hyperparams for experiment: {0}!'.format(
            experiment_path))

    # Load the experiment's hyperparameters
    hyperparams = utils.load_dict_from_json_file(
        pjoin(experiment_path, "hyperparams.json"))

    with Timer("Loading dataset"):
        trainset, validset, testset = dataset.load(
            hyperparams['dataset'], hyperparams.get('dataset_percent', 1.))
        print " (data: {:,}; {:,}; {:,}) ".format(len(trainset), len(validset),
                                                  len(testset)),

    with Timer("Loading model"):
        if hyperparams["model"] == "rbm":
            from iRBM.models.rbm import RBM
            model_class = RBM
        elif hyperparams["model"] == "orbm":
            from iRBM.models.orbm import oRBM
            model_class = oRBM
        elif hyperparams["model"] == "irbm":
            from iRBM.models.irbm import iRBM
            model_class = iRBM

        # Load the actual model.
        model = model_class.load(pjoin(experiment_path, "model.pkl"))

        if args.irbm_fixed_size:
            # Use methods from the oRBM.
            import functools
            from iRBM.models.orbm import oRBM
            setattr(model, "get_base_rate",
                    functools.partial(oRBM.get_base_rate, model))
            setattr(model, "pdf_z_given_v",
                    functools.partial(oRBM.pdf_z_given_v, model))
            setattr(model, "log_z_given_v",
                    functools.partial(oRBM.log_z_given_v, model))
            setattr(model, "free_energy",
                    functools.partial(oRBM.free_energy, model))
            print "({} with {} fixed hidden units)".format(
                hyperparams["model"], model.hidden_size)

        else:
            print "({} with {} hidden units)".format(hyperparams["model"],
                                                     model.hidden_size)

    # Result files.
    if args.irbm_fixed_size:
        experiment_path = pjoin(experiment_path, "irbm_fixed_size")
        try:
            os.makedirs(experiment_path)
        except OSError:
            pass  # Folder already exists.

    ais_result_file = pjoin(experiment_path, "ais_result.json")
    result_file = pjoin(experiment_path, "result.json")

    if args.lnZ is not None:
        lnZ, lnZ_down, lnZ_up = args.lnZ
    else:
        if not os.path.isfile(ais_result_file) or args.force:
            with Timer(
                    "Estimating model's partition function with AIS({0}) and {1} temperatures."
                    .format(args.nb_samples, args.nb_temperatures)):
                ais_results = compute_AIS(model,
                                          M=args.nb_samples,
                                          betas=np.linspace(
                                              0, 1, args.nb_temperatures),
                                          seed=args.seed,
                                          ais_working_dir=experiment_path,
                                          force=args.force)
                ais_results["irbm_fixed_size"] = args.irbm_fixed_size
                utils.save_dict_to_json_file(ais_result_file, ais_results)
        else:
            print "Loading previous AIS results... (use --force to re-run AIS)"
            ais_results = utils.load_dict_from_json_file(ais_result_file)
            print "AIS({0}) with {1} temperatures".format(
                ais_results['nb_samples'], ais_results['nb_temperatures'])

            if ais_results['nb_samples'] != args.nb_samples:
                print "The number of samples specified ({:,}) doesn't match the one found in ais_results.json ({:,}). Aborting.".format(
                    args.nb_samples, ais_results['nb_samples'])
                sys.exit(-1)

            if ais_results['nb_temperatures'] != args.nb_temperatures:
                print "The number of temperatures specified ({:,}) doesn't match the one found in ais_results.json ({:,}). Aborting.".format(
                    args.nb_temperatures, ais_results['nb_temperatures'])
                sys.exit(-1)

            if ais_results['seed'] != args.seed:
                print "The seed specified ({}) doesn't match the one found in ais_results.json ({}). Aborting.".format(
                    args.seed, ais_results['seed'])
                sys.exit(-1)

            if ais_results.get('irbm_fixed_size',
                               False) != args.irbm_fixed_size:
                print "The option '--irbm-fixed' specified ({}) doesn't match the one found in ais_results.json ({}). Aborting.".format(
                    args.irbm_fixed_size, ais_results.get('irbm_fixed_size', False))
                sys.exit(-1)

        lnZ = ais_results['logcummean_Z'][-1]
        logcumstd_Z_down = ais_results['logcumstd_Z_down'][-1]
        logcumstd_Z_up = ais_results['logcumstd_Z_up'][-1]
        lnZ_down = lnZ - logcumstd_Z_down
        lnZ_up = lnZ + logcumstd_Z_up

    print "-> lnZ: {lnZ_down} <= {lnZ} <= {lnZ_up}".format(lnZ_down=lnZ_down,
                                                           lnZ=lnZ,
                                                           lnZ_up=lnZ_up)

    with Timer("\nComputing average NLL on {0} using lnZ={1}.".format(
            hyperparams['dataset'], lnZ)):
        NLL_train, NLL_valid, NLL_test = compute_AvgStderrNLL(
            model, lnZ, trainset, validset, testset)

    print "Avg. NLL on trainset: {:.2f} ± {:.2f}".format(
        NLL_train.avg, NLL_train.stderr)
    print "Avg. NLL on validset: {:.2f} ± {:.2f}".format(
        NLL_valid.avg, NLL_valid.stderr)
    print "Avg. NLL on testset:  {:.2f} ± {:.2f}".format(
        NLL_test.avg, NLL_test.stderr)

    # Save results JSON file.
    if args.lnZ is None:
        result = {
            'lnZ': float(lnZ),
            'lnZ_down': float(lnZ_down),
            'lnZ_up': float(lnZ_up),
            'trainset': [float(NLL_train.avg),
                         float(NLL_train.stderr)],
            'validset': [float(NLL_valid.avg),
                         float(NLL_valid.stderr)],
            'testset': [float(NLL_test.avg),
                        float(NLL_test.stderr)],
            'irbm_fixed_size': args.irbm_fixed_size,
        }
        utils.save_dict_to_json_file(result_file, result)

    if args.view:

        from iRBM.misc import vizu
        import matplotlib.pyplot as plt

        if hyperparams["dataset"] == "binarized_mnist":
            image_shape = (28, 28)
        elif hyperparams["dataset"] == "caltech101_silhouettes28":
            image_shape = (28, 28)
        else:
            raise ValueError("Unknown dataset: {0}".format(
                hyperparams["dataset"]))

        # Display AIS samples.
        data = vizu.concatenate_images(ais_results['last_sample_chain'],
                                       shape=image_shape,
                                       border_size=1,
                                       clim=(0, 1))
        plt.figure()
        plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
        plt.title("AIS samples")

        # Display AIS ~lnZ.
        plt.figure()
        plt.gca().set_xmargin(0.1)
        plt.errorbar(np.arange(ais_results['nb_samples']) + 1,
                     ais_results["logcummean_Z"],
                     yerr=[
                         ais_results['logcumstd_Z_down'],
                         ais_results['logcumstd_Z_up']
                     ],
                     fmt='ob',
                     label='with std ~ln std Z')
        plt.legend()
        plt.ticklabel_format(useOffset=False, axis='y')
        plt.title("~ln mean Z for different number of AIS samples")
        plt.ylabel("~lnZ")
        plt.xlabel("# AIS samples")

        plt.show()
Code example #5
File: view_results.py Project: napsternxg/iRBM
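Collects result.json, hyperparams.json, and status.json from the given experiment folders and renders them as a sortable Texttable, optionally exporting the rows to CSV (args.out).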
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    sort_by = args.sort

    names = []
    results_files = []
    hyperparams_files = []
    status_files = []
    for f in args.results:
        exp_folder = f
        if os.path.isfile(f):
            exp_folder = os.path.dirname(f)

        result_file = pjoin(exp_folder, "result.json")
        hyperparams_file = pjoin(exp_folder, "hyperparams.json")
        status_file = pjoin(exp_folder, "status.json")

        if not os.path.isfile(result_file):
            print 'Skip: {0} is not a file!'.format(result_file)
            continue

        if not os.path.isfile(hyperparams_file):
            print 'Skip: {0} is not a file!'.format(hyperparams_file)
            continue

        if not os.path.isfile(status_file):
            print 'Skip: {0} is not a file!'.format(status_file)
            continue

        name = os.path.basename(exp_folder)
        if 'hyperparams.json' in os.listdir(os.path.abspath(pjoin(exp_folder, os.path.pardir))):
            name = os.path.basename(os.path.abspath(pjoin(exp_folder, os.path.pardir)))

        names.append(name)

        results_files.append(result_file)
        hyperparams_files.append(hyperparams_file)
        status_files.append(status_file)

    if len([no for no in sort_by if no == 0]) > 0:
        parser.error('Column IDs start at 1!')

    # Retrieve headers from hyperparams
    headers_hyperparams = set()
    headers_results = set()
    headers_status = set()

    for hyperparams_file, status_file, results_file in zip(hyperparams_files, status_files, results_files):
        hyperparams = load_dict_from_json_file(hyperparams_file)
        results = load_dict_from_json_file(results_file)
        status = load_dict_from_json_file(status_file)
        headers_hyperparams |= set(hyperparams.keys())
        headers_results |= set(results.keys())
        headers_status |= set(status.keys())

    headers_hyperparams = sorted(list(headers_hyperparams))
    headers_status = sorted(list(headers_status))
    # TODO: when generating result.json split 'trainset' scores in two key:
    #       'trainset' and 'trainset_std' (same goes for validset and testset).
    headers_results |= set(["trainset_std", "validset_std", "testset_std"])
    headers_results = sorted(list(headers_results))
    headers = headers_hyperparams + headers_status + ["name"] + headers_results

    # Build results table
    table = Texttable(max_width=0)
    table.set_deco(Texttable.HEADER)
    table.set_precision(8)
    table.set_cols_dtype(['a'] * len(headers))
    table.set_cols_align(['c'] * len(headers))

    # Headers
    table.header([str(i) + "\n" + h for i, h in enumerate(headers, start=1)])

    if args.only_header:
        print table.draw()
        return

    # Results
    for name, hyperparams_file, status_file, results_file in zip(names, hyperparams_files, status_files, results_files):
        hyperparams = load_dict_from_json_file(hyperparams_file)
        results = load_dict_from_json_file(results_file)
        status = load_dict_from_json_file(status_file)

        # Build results table row (hyperparams columns)
        row = []
        for h in headers_hyperparams:
            value = hyperparams.get(h, '')
            row.append(value)

        for h in headers_status:
            value = status.get(h, '')
            row.append(value)

        row.append(name)

        for h in headers_results:
            if h in ["trainset", "validset", "testset"]:
                value = results.get(h, '')[0]
            elif h in ["trainset_std", "validset_std", "testset_std"]:
                value = results.get(h[:-4], '')[1]
            else:
                value = results.get(h, '')
            row.append(value)

        table.add_row(row)

    # Sort
    for col in reversed(sort_by):
        table._rows = sorted(table._rows, key=sort_nicely(abs(col) - 1), reverse=col < 0)

    if args.out is not None:
        import csv

        results = []
        results.append(headers)
        results.extend(table._rows)

        with open(args.out, 'wb') as csvfile:
            w = csv.writer(csvfile)
            w.writerows(results)

    else:
        print table.draw()
Code example #6
File: eval_model.py Project: MarcCote/iRBM
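A variant of the evaluation script in code example #4 that additionally prints the average free energy F(v) of uniform-random inputs and of the final AIS samples.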
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    # Get experiment folder
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    if not os.path.isfile(pjoin(experiment_path, "model.pkl")):
        parser.error('Cannot find model for experiment: {0}!'.format(experiment_path))

    if not os.path.isfile(pjoin(experiment_path, "hyperparams.json")):
        parser.error('Cannot find hyperparams for experiment: {0}!'.format(experiment_path))

    # Load the experiment's hyperparameters
    hyperparams = utils.load_dict_from_json_file(pjoin(experiment_path, "hyperparams.json"))

    with Timer("Loading dataset"):
        trainset, validset, testset = dataset.load(hyperparams['dataset'], hyperparams.get('dataset_percent', 1.))
        print " (data: {:,}; {:,}; {:,}) ".format(len(trainset), len(validset), len(testset)),

    with Timer("Loading model"):
        if hyperparams["model"] == "rbm":
            from iRBM.models.rbm import RBM
            model_class = RBM
        elif hyperparams["model"] == "orbm":
            from iRBM.models.orbm import oRBM
            model_class = oRBM
        elif hyperparams["model"] == "irbm":
            from iRBM.models.irbm import iRBM
            model_class = iRBM

        # Load the actual model.
        model = model_class.load(pjoin(experiment_path, "model.pkl"))

        if args.irbm_fixed_size:
            # Use methods from the oRBM.
            import functools
            from iRBM.models.orbm import oRBM
            setattr(model, "get_base_rate", functools.partial(oRBM.get_base_rate, model))
            setattr(model, "pdf_z_given_v", functools.partial(oRBM.pdf_z_given_v, model))
            setattr(model, "log_z_given_v", functools.partial(oRBM.log_z_given_v, model))
            setattr(model, "free_energy", functools.partial(oRBM.free_energy, model))
            print "({} with {} fixed hidden units)".format(hyperparams["model"], model.hidden_size)

        else:
            print "({} with {} hidden units)".format(hyperparams["model"], model.hidden_size)

    # Result files.
    if args.irbm_fixed_size:
        experiment_path = pjoin(experiment_path, "irbm_fixed_size")
        try:
            os.makedirs(experiment_path)
        except OSError:
            pass  # Folder already exists.

    ais_result_file = pjoin(experiment_path, "ais_result.json")
    result_file = pjoin(experiment_path, "result.json")

    if args.lnZ is not None:
        lnZ, lnZ_down, lnZ_up = args.lnZ
    else:
        if not os.path.isfile(ais_result_file) or args.force:
            with Timer("Estimating model's partition function with AIS({0}) and {1:,} temperatures.".format(args.nb_samples, args.nb_temperatures)):
                ais_results = compute_AIS(model, M=args.nb_samples, betas=np.linspace(0, 1, args.nb_temperatures), seed=args.seed, ais_working_dir=experiment_path, force=args.force)
                ais_results["irbm_fixed_size"] = args.irbm_fixed_size
                utils.save_dict_to_json_file(ais_result_file, ais_results)
        else:
            print "Loading previous AIS results... (use --force to re-run AIS)"
            ais_results = utils.load_dict_from_json_file(ais_result_file)
            print "AIS({0}) with {1:,} temperatures".format(ais_results['nb_samples'], ais_results['nb_temperatures'])

            if ais_results['nb_samples'] != args.nb_samples:
                print "The number of samples specified ({:,}) doesn't match the one found in ais_results.json ({:,}). Aborting.".format(args.nb_samples, ais_results['nb_samples'])
                sys.exit(-1)

            if ais_results['nb_temperatures'] != args.nb_temperatures:
                print "The number of temperatures specified ({:,}) doesn't match the one found in ais_results.json ({:,}). Aborting.".format(args.nb_temperatures, ais_results['nb_temperatures'])
                sys.exit(-1)

            if ais_results['seed'] != args.seed:
                print "The seed specified ({}) doesn't match the one found in ais_results.json ({}). Aborting.".format(args.seed, ais_results['seed'])
                sys.exit(-1)

            if ais_results.get('irbm_fixed_size', False) != args.irbm_fixed_size:
                print "The option '--irbm-fixed' specified ({}) doesn't match the one found in ais_results.json ({}). Aborting.".format(args.irbm_fixed_size, ais_results['irbm_fixed_size'])
                sys.exit(-1)

        lnZ = ais_results['logcummean_Z'][-1]
        logcumstd_Z_down = ais_results['logcumstd_Z_down'][-1]
        logcumstd_Z_up = ais_results['logcumstd_Z_up'][-1]
        lnZ_down = lnZ - logcumstd_Z_down
        lnZ_up = lnZ + logcumstd_Z_up

    print "-> lnZ: {lnZ_down} <= {lnZ} <= {lnZ_up}".format(lnZ_down=lnZ_down, lnZ=lnZ, lnZ_up=lnZ_up)

    with Timer("\nComputing average NLL on {0} using lnZ={1}.".format(hyperparams['dataset'], lnZ)):
        NLL_train, NLL_valid, NLL_test = compute_AvgStderrNLL(model, lnZ, trainset, validset, testset)

    print "Avg. NLL on trainset: {:.2f} ± {:.2f}".format(NLL_train.avg, NLL_train.stderr)
    print "Avg. NLL on validset: {:.2f} ± {:.2f}".format(NLL_valid.avg, NLL_valid.stderr)
    print "Avg. NLL on testset:  {:.2f} ± {:.2f}".format(NLL_test.avg, NLL_test.stderr)
    print "---"
    Fv_rnd = model.free_energy(np.random.rand(*ais_results['last_sample_chain'].shape)).eval()
    print "Avg. F(v) on {:,} random samples: {:.2f} ± {:.2f}".format(args.nb_samples, Fv_rnd.mean(), Fv_rnd.std())
    Fv_model = model.free_energy(ais_results['last_sample_chain']).eval()
    print "Avg. F(v) on {:,} AIS samples:    {:.2f} ± {:.2f}".format(args.nb_samples, Fv_model.mean(), Fv_model.std())

    # Save results JSON file.
    if args.lnZ is None:
        result = {'lnZ': float(lnZ),
                  'lnZ_down': float(lnZ_down),
                  'lnZ_up': float(lnZ_up),
                  'trainset': [float(NLL_train.avg), float(NLL_train.stderr)],
                  'validset': [float(NLL_valid.avg), float(NLL_valid.stderr)],
                  'testset': [float(NLL_test.avg), float(NLL_test.stderr)],
                  'irbm_fixed_size': args.irbm_fixed_size,
                  }
        utils.save_dict_to_json_file(result_file, result)

    if args.view:

        from iRBM.misc import vizu
        import matplotlib.pyplot as plt

        if hyperparams["dataset"] == "binarized_mnist":
            image_shape = (28, 28)
        elif hyperparams["dataset"] == "caltech101_silhouettes28":
            image_shape = (28, 28)
        else:
            raise ValueError("Unknown dataset: {0}".format(hyperparams["dataset"]))

        # Display AIS samples.
        data = vizu.concatenate_images(ais_results['last_sample_chain'], shape=image_shape, border_size=1, clim=(0, 1))
        plt.figure()
        plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
        plt.title("AIS samples")

        # Display AIS ~lnZ.
        plt.figure()
        plt.gca().set_xmargin(0.1)
        plt.errorbar(np.arange(ais_results['nb_samples'])+1, ais_results["logcummean_Z"],
                     yerr=[ais_results['logcumstd_Z_down'], ais_results['logcumstd_Z_up']],
                     fmt='ob', label='with std ~ln std Z')
        plt.legend()
        plt.ticklabel_format(useOffset=False, axis='y')
        plt.title("~ln mean Z for different number of AIS samples")
        plt.ylabel("~lnZ")
        plt.xlabel("# AIS samples")

        plt.show()
Code example #7
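For consecutive buckets [a, b) of z values, finds the top-10 test inputs maximizing p(a ≤ z < b|v), plots their average bucket probability above an image grid of those inputs, and saves the figure to topk_prob_z_given_x.png; the plotting code after the early return is unreachable, apparently an older variant kept in the file.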
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    # Check that at least one of --view or --save has been given.
    #if not args.view and not args.save:
    #    parser.error("At least one of the following options must be chosen: --view or --save")

    # Get experiment folder
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    if not os.path.isfile(pjoin(experiment_path, "model.pkl")):
        parser.error('Cannot find model for experiment: {0}!'.format(experiment_path))

    if not os.path.isfile(pjoin(experiment_path, "hyperparams.json")):
        parser.error('Cannot find hyperparams for experiment: {0}!'.format(experiment_path))

    # Load the experiment's hyperparameters
    hyperparams = utils.load_dict_from_json_file(pjoin(experiment_path, "hyperparams.json"))

    with Timer("Loading dataset"):
        trainset, validset, testset = dataset.load(hyperparams['dataset'], hyperparams.get('dataset_percent', 1.))
        print " (data: {:,}; {:,}; {:,}) ".format(len(trainset), len(validset), len(testset)),

        if hyperparams["dataset"] == "binarized_mnist":
            image_shape = (28, 28)
        elif hyperparams["dataset"] == "caltech101_silhouettes28":
            image_shape = (28, 28)
        else:
            raise ValueError("Unknown dataset: {0}".format(hyperparams["dataset"]))

    with Timer("Loading model"):
        if hyperparams["model"] == "rbm":
            raise ValueError("RBM doesn't have a p(z|v) distribution.")
        elif hyperparams["model"] == "orbm":
            from iRBM.models.orbm import oRBM
            model_class = oRBM
        elif hyperparams["model"] == "irbm":
            from iRBM.models.irbm import iRBM
            model_class = iRBM

        # Load the actual model.
        model = model_class.load(pjoin(experiment_path, "model.pkl"))

    with Timer("Building function p(z|v)"):
        v = testset.symb_inputs
        pdf_z_given_v = theano.function([v], model.pdf_z_given_v(v))

    min_z = args.start
    max_z = model.hidden_size if args.end is None else args.end
    size = args.bucket_size
    buckets = np.arange(min_z, max_z, size)
    nb_buckets = len(buckets)

    inputs = testset.inputs.get_value()
    probs = pdf_z_given_v(inputs)

    # plt.figure()
    # plt.plot(probs.T)
    # plt.title("p(z|v) for all inputs in the testset")
    # plt.xlabel("z")
    # plt.ylabel("p(z|v)")

    topk = 10
    images = np.zeros((nb_buckets*topk, int(np.prod(image_shape))))
    images_dummy = np.zeros((nb_buckets, int(np.prod(image_shape))))
    for i, start in enumerate(buckets):
        bucket_probs = np.sum(probs[:, start:start+size], axis=1)
        indices = np.argsort(bucket_probs)[::-1]

        for j in range(topk)[::-1]:
            images[j*nb_buckets + i] = inputs[indices[j]]

        # Dummy images are used to proportionally represent, via their intensity, the mean p(a <= z < b|v) of the top-k inputs.
        images_dummy[i] = np.mean(bucket_probs[indices][:topk])

    # Prepend the dummy images so they are displayed on the first row.
    #images = np.r_[images_dummy, images]
    #data = vizu.concatenate_images(images, shape=image_shape, dim=(topk+1, nb_buckets), border_size=0, clim=(0, 1))
    data = vizu.concatenate_images(images, shape=image_shape, dim=(topk, nb_buckets), border_size=0, clim=(0, 1))

    f, (ax1, ax2) = plt.subplots(2, sharex=True, figsize=(24, 9))
    #ax1.set_title("Top-{} inputs maximizing $p(z|\\mathbf{{v}})$ for different values of $z$".format(topk), fontsize=20)
    xticks = image_shape[1]*np.arange(nb_buckets)+image_shape[1]//2
    ax1.plot(xticks, images_dummy[:, 0], linewidth=2)
    ax1.set_ylabel("Avg. $p(a\\leq z < b|\\mathbf{v})$ of " + "top-{}".format(topk), fontsize=20)
    ax1.set_ylim(0, 1)
    ax1.set_adjustable('box-forced')

    ax2.imshow(data, cmap=plt.cm.gray, interpolation='nearest', origin="upper")
    ax2.set_ylabel("Top-{} inputs".format(topk), fontsize=20)
    ax2.set_yticks(image_shape[1]*np.arange(topk)+image_shape[1]/2.)
    ax2.set_yticklabels(map(str, range(1, topk+1)[::-1]))
    ax2.set_anchor('N')
    #ax2.set_ylim(0, image_shape[1]*topk)

    xticks = image_shape[1]*np.arange(nb_buckets+1)
    xticks_labels = map(str, buckets) + [str(buckets[-1]+size)]
    ax2.set_xlabel("$a\\leq z < b$", fontsize=20)
    ax2.set_xticks(xticks)
    ax2.set_xticklabels(xticks_labels, rotation=45)
    ax2.set_xlim(min(xticks), max(xticks))
    ax2.set_adjustable('box-forced')

    # Fine-tune figure; make subplots close to each other and hide x ticks for
    # all but bottom plot.
    f.subplots_adjust(hspace=0.)
    #plt.setp(ax1.get_xticklabels(), visible=False)

    f.tight_layout()
    plt.savefig("topk_prob_z_given_x.png", dpi=300, bbox_inches='tight')
    print "Saving to ./topk_prob_z_given_x.png"
    #plt.show()
    return

    plt.figure()

    plt1 = plt.subplot(2, 1, 2)
    plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
    plt.ylabel("Top-{}".format(topk), fontsize=20)
    #plt.yticks(image_shape[1]*np.arange(topk+1)+image_shape[1]/2., ["Intensity"] + map(str, range(1, topk+1)[::-1]))
    plt.yticks(image_shape[1]*np.arange(topk)+image_shape[1]/2., map(str, range(1, topk+1)[::-1]))
    plt.xlabel("$a\\leq z < b$", fontsize=20)

    xticks = image_shape[1]*np.arange(nb_buckets+1)
    xticks_labels = map(str, buckets) + [str(buckets[-1]+size)]
    plt.xticks(xticks, xticks_labels, rotation=45)
    plt.xlim(min(xticks), max(xticks))
    #plt.xlim(min(xticks), max(xticks))

    plt.subplot(2, 1, 1, sharex=plt1)
    plt.title("Top-{} inputs maximizing $p(z|\\mathbf{{v}})$ for different values of $z$".format(topk), fontsize=20)
    #x = buckets+size//2
    xticks = image_shape[1]*np.arange(nb_buckets)+image_shape[1]//2
    plt.plot(xticks, images_dummy[:, 0])
    plt.ylabel("Mean $p(a\\leq z < b|\\mathbf{v})$", fontsize=20)
    #plt.xticks(xticks, map(str, buckets) + [str(buckets[-1]+size)], rotation=45)
    #plt.xticks(xticks, [])
    #plt.xlim(min(xticks)-image_shape[1]//2, max(xticks)+image_shape[1]//2)


    plt.subplots_adjust(hspace=0.001, left=0., right=1., top=1., bottom=0.)
    plt.tight_layout()
    plt.savefig("test.png", bbox_inches='tight')
    plt.show()
Code example #8
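A simpler variant of the p(z|v) visualization with hard-coded buckets (z from 200 to 900 in steps of 16); a prepended row of 'intensity' images encodes the mean bucket probability of the top-k inputs.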
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    # Check that at least one of --view or --save has been given.
    #if not args.view and not args.save:
    #    parser.error("At least one of the following options must be chosen: --view or --save")

    # Get experiment folder
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    if not os.path.isfile(pjoin(experiment_path, "model.pkl")):
        parser.error(
            'Cannot find model for experiment: {0}!'.format(experiment_path))

    if not os.path.isfile(pjoin(experiment_path, "hyperparams.json")):
        parser.error('Cannot find hyperparams for experiment: {0}!'.format(
            experiment_path))

    # Load the experiment's hyperparameters
    hyperparams = utils.load_dict_from_json_file(
        pjoin(experiment_path, "hyperparams.json"))

    with Timer("Loading dataset"):
        trainset, validset, testset = dataset.load(
            hyperparams['dataset'], hyperparams.get('dataset_percent', 1.))
        print " (data: {:,}; {:,}; {:,}) ".format(len(trainset), len(validset),
                                                  len(testset)),

        if hyperparams["dataset"] == "binarized_mnist":
            image_shape = (28, 28)
        elif hyperparams["dataset"] == "caltech101_silhouettes28":
            image_shape = (28, 28)
        else:
            raise ValueError("Unknown dataset: {0}".format(
                hyperparams["dataset"]))

    with Timer("Loading model"):
        if hyperparams["model"] == "rbm":
            raise ValueError("RBM doesn't have a p(z|v) distribution.")
        elif hyperparams["model"] == "orbm":
            from iRBM.models.orbm import oRBM
            model_class = oRBM
        elif hyperparams["model"] == "irbm":
            from iRBM.models.irbm import iRBM
            model_class = iRBM

        # Load the actual model.
        model = model_class.load(pjoin(experiment_path, "model.pkl"))

    with Timer("Building function p(z|v)"):
        v = testset.symb_inputs
        pdf_z_given_v = theano.function([v], model.pdf_z_given_v(v))

    min_z = 200
    max_z = 900
    size = 16
    buckets = np.arange(min_z, max_z, size)
    nb_buckets = len(buckets)

    inputs = testset.inputs.get_value()
    probs = pdf_z_given_v(inputs)

    # plt.figure()
    # plt.plot(probs.T)
    # plt.title("p(z|v) for all inputs in the testset")
    # plt.xlabel("z")
    # plt.ylabel("p(z|v)")

    topk = 10
    images = np.zeros((nb_buckets * topk, int(np.prod(image_shape))))
    images_dummy = np.zeros((nb_buckets, int(np.prod(image_shape))))
    for i, start in enumerate(buckets):
        bucket_probs = np.sum(probs[:, start:start + size], axis=1)
        indices = np.argsort(bucket_probs)[::-1]

        for j in range(topk)[::-1]:
            images[j * nb_buckets + i] = inputs[indices[j]]

        # Dummy images are used to proportionally represent, via their intensity, the mean p(a <= z < b|v) of the top-k inputs.
        images_dummy[i] = np.mean(bucket_probs[indices][:topk])

    # Prepend the dummy images so they are displayed on the first row.
    images = np.r_[images_dummy, images]
    data = vizu.concatenate_images(images,
                                   shape=image_shape,
                                   dim=(topk + 1, nb_buckets),
                                   border_size=0,
                                   clim=(0, 1))

    plt.figure()
    plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
    plt.title(
        "Top-{} inputs maximizing p(z|v) for different values of z".format(
            topk))
    plt.ylabel("Top-{}".format(topk))
    plt.yticks(image_shape[1] * np.arange(topk + 1) + image_shape[1] / 2.,
               ["Intensity"] + map(str,
                                   range(1, topk + 1)[::-1]))
    plt.xlabel("z")

    xticks_labels = map(str, buckets) + [str(buckets[-1] + size)]
    plt.xticks(image_shape[1] * np.arange(nb_buckets) + image_shape[1] / 2.,
               xticks_labels,
               rotation=45)
    plt.tight_layout()

    plt.show()
Code example #9
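Identical to code example #8 apart from line wrapping.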
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    # Check that at least one of --view or --save has been given.
    #if not args.view and not args.save:
    #    parser.error("At least one of the following options must be chosen: --view or --save")

    # Get experiment folder
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    if not os.path.isfile(pjoin(experiment_path, "model.pkl")):
        parser.error('Cannot find model for experiment: {0}!'.format(experiment_path))

    if not os.path.isfile(pjoin(experiment_path, "hyperparams.json")):
        parser.error('Cannot find hyperparams for experiment: {0}!'.format(experiment_path))

    # Load the experiment's hyperparameters
    hyperparams = utils.load_dict_from_json_file(pjoin(experiment_path, "hyperparams.json"))

    with Timer("Loading dataset"):
        trainset, validset, testset = dataset.load(hyperparams['dataset'], hyperparams.get('dataset_percent', 1.))
        print " (data: {:,}; {:,}; {:,}) ".format(len(trainset), len(validset), len(testset)),

        if hyperparams["dataset"] == "binarized_mnist":
            image_shape = (28, 28)
        elif hyperparams["dataset"] == "caltech101_silhouettes28":
            image_shape = (28, 28)
        else:
            raise ValueError("Unknown dataset: {0}".format(hyperparams["dataset"]))

    with Timer("Loading model"):
        if hyperparams["model"] == "rbm":
            raise ValueError("RBM doesn't have a p(z|v) distribution.")
        elif hyperparams["model"] == "orbm":
            from iRBM.models.orbm import oRBM
            model_class = oRBM
        elif hyperparams["model"] == "irbm":
            from iRBM.models.irbm import iRBM
            model_class = iRBM

        # Load the actual model.
        model = model_class.load(pjoin(experiment_path, "model.pkl"))

    with Timer("Building function p(z|v)"):
        v = testset.symb_inputs
        pdf_z_given_v = theano.function([v], model.pdf_z_given_v(v))

    min_z = 200
    max_z = 900
    size = 16
    buckets = np.arange(min_z, max_z, size)
    nb_buckets = len(buckets)

    inputs = testset.inputs.get_value()
    probs = pdf_z_given_v(inputs)

    # plt.figure()
    # plt.plot(probs.T)
    # plt.title("p(z|v) for all inputs in the testset")
    # plt.xlabel("z")
    # plt.ylabel("p(z|v)")

    topk = 10
    images = np.zeros((nb_buckets*topk, int(np.prod(image_shape))))
    images_dummy = np.zeros((nb_buckets, int(np.prod(image_shape))))
    for i, start in enumerate(buckets):
        bucket_probs = np.sum(probs[:, start:start+size], axis=1)
        indices = np.argsort(bucket_probs)[::-1]

        for j in range(topk)[::-1]:
            images[j*nb_buckets + i] = inputs[indices[j]]

        # Dummy images are used to proportionally represent, via their intensity, the mean p(a <= z < b|v) of the top-k inputs.
        images_dummy[i] = np.mean(bucket_probs[indices][:topk])

    # Prepend the dummy images so they are displayed on the first row.
    images = np.r_[images_dummy, images]
    data = vizu.concatenate_images(images, shape=image_shape, dim=(topk+1, nb_buckets), border_size=0, clim=(0, 1))

    plt.figure()
    plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
    plt.title("Top-{} inputs maximizing p(z|v) for different values of z".format(topk))
    plt.ylabel("Top-{}".format(topk))
    plt.yticks(image_shape[1]*np.arange(topk+1)+image_shape[1]/2., ["Intensity"] + map(str, range(1, topk+1)[::-1]))
    plt.xlabel("z")

    xticks_labels = map(str, buckets) + [str(buckets[-1]+size)]
    plt.xticks(image_shape[1]*np.arange(nb_buckets)+image_shape[1]/2., xticks_labels, rotation=45)
    plt.tight_layout()

    plt.show()
Code example #10
File: sample_model.py Project: MarcCote/iRBM
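Draws samples from a trained model by running args.cdk Gibbs steps from uniform-random binary inputs (optionally taking one full-size RBM step first, args.full_gibbs_step), then saves them (--save) and/or displays them as an image grid (--view).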
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    # Check that at least one of --view or --save has been given.
    if not args.view and not args.save:
        parser.error("At least one of the following options must be chosen: --view or --save")

    # Get experiment folder
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    if not os.path.isfile(pjoin(experiment_path, "model.pkl")):
        parser.error('Cannot find model for experiment: {0}!'.format(experiment_path))

    if not os.path.isfile(pjoin(experiment_path, "hyperparams.json")):
        parser.error('Cannot find hyperparams for experiment: {0}!'.format(experiment_path))

    # Load the experiment's hyperparameters
    hyperparams = utils.load_dict_from_json_file(pjoin(experiment_path, "hyperparams.json"))

    with Timer("Loading model"):
        if hyperparams["model"] == "rbm":
            from iRBM.models.rbm import RBM
            model_class = RBM
        elif hyperparams["model"] == "orbm":
            from iRBM.models.orbm import oRBM
            model_class = oRBM
        elif hyperparams["model"] == "irbm":
            from iRBM.models.irbm import iRBM
            model_class = iRBM

        # Load the actual model.
        model = model_class.load(pjoin(experiment_path, "model.pkl"))

    rng = np.random.RandomState(args.seed)

    # Sample from uniform
    # TODO: sample from a Bernoulli distribution parametrized by the visible biases
    chain_start = (rng.rand(args.nb_samples, model.input_size) > 0.5).astype(theano.config.floatX)

    with Timer("Building sampling function"):
        v0 = theano.shared(np.asarray(chain_start, dtype=theano.config.floatX))
        v1 = model.gibbs_step(v0)
        gibbs_step = theano.function([], updates={v0: v1})

        if args.full_gibbs_step:
            print "Using z=K"
            # Use z=K for first Gibbs step.
            from iRBM.models.rbm import RBM
            h0 = RBM.sample_h_given_v(model, v0)
            v1 = RBM.sample_v_given_h(model, h0)
            v0.set_value(v1.eval())

    with Timer("Sampling"):
        for k in range(args.cdk):
            gibbs_step()

    samples = v0.get_value()

    if args.save:
        np.savez(args.out, samples)

    if args.view:
        if hyperparams["dataset"] == "binarized_mnist":
            image_shape = (28, 28)
        elif hyperparams["dataset"] == "caltech101_silhouettes28":
            image_shape = (28, 28)
        else:
            raise ValueError("Unknown dataset: {0}".format(hyperparams["dataset"]))

        data = vizu.concatenate_images(samples, shape=image_shape, border_size=1, clim=(0, 1))
        plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
        plt.show()
Code example #11
File: view_results.py Project: napsternxg/iRBM
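Identical to code example #5 apart from line wrapping.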
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    sort_by = args.sort

    names = []
    results_files = []
    hyperparams_files = []
    status_files = []
    for f in args.results:
        exp_folder = f
        if os.path.isfile(f):
            exp_folder = os.path.dirname(f)

        result_file = pjoin(exp_folder, "result.json")
        hyperparams_file = pjoin(exp_folder, "hyperparams.json")
        status_file = pjoin(exp_folder, "status.json")

        if not os.path.isfile(result_file):
            print 'Skip: {0} is not a file!'.format(result_file)
            continue

        if not os.path.isfile(hyperparams_file):
            print 'Skip: {0} is not a file!'.format(hyperparams_file)
            continue

        if not os.path.isfile(status_file):
            print 'Skip: {0} is not a file!'.format(status_file)
            continue

        name = os.path.basename(exp_folder)
        if 'hyperparams.json' in os.listdir(
                os.path.abspath(pjoin(exp_folder, os.path.pardir))):
            name = os.path.basename(
                os.path.abspath(pjoin(exp_folder, os.path.pardir)))

        names.append(name)

        results_files.append(result_file)
        hyperparams_files.append(hyperparams_file)
        status_files.append(status_file)

    if len([no for no in sort_by if no == 0]) > 0:
        parser.error('Column IDs start at 1!')

    # Retrieve headers from hyperparams
    headers_hyperparams = set()
    headers_results = set()
    headers_status = set()

    for hyperparams_file, status_file, results_file in zip(
            hyperparams_files, status_files, results_files):
        hyperparams = load_dict_from_json_file(hyperparams_file)
        results = load_dict_from_json_file(results_file)
        status = load_dict_from_json_file(status_file)
        headers_hyperparams |= set(hyperparams.keys())
        headers_results |= set(results.keys())
        headers_status |= set(status.keys())

    headers_hyperparams = sorted(list(headers_hyperparams))
    headers_status = sorted(list(headers_status))
    # TODO: when generating result.json split 'trainset' scores in two key:
    #       'trainset' and 'trainset_std' (same goes for validset and testset).
    headers_results |= set(["trainset_std", "validset_std", "testset_std"])
    headers_results = sorted(list(headers_results))
    headers = headers_hyperparams + headers_status + ["name"] + headers_results

    # Build results table
    table = Texttable(max_width=0)
    table.set_deco(Texttable.HEADER)
    table.set_precision(8)
    table.set_cols_dtype(['a'] * len(headers))
    table.set_cols_align(['c'] * len(headers))

    # Headers
    table.header([str(i) + "\n" + h for i, h in enumerate(headers, start=1)])

    if args.only_header:
        print table.draw()
        return

    # Results
    for name, hyperparams_file, status_file, results_file in zip(
            names, hyperparams_files, status_files, results_files):
        hyperparams = load_dict_from_json_file(hyperparams_file)
        results = load_dict_from_json_file(results_file)
        status = load_dict_from_json_file(status_file)

        # Build results table row (hyperparams columns)
        row = []
        for h in headers_hyperparams:
            value = hyperparams.get(h, '')
            row.append(value)

        for h in headers_status:
            value = status.get(h, '')
            row.append(value)

        row.append(name)

        for h in headers_results:
            if h in ["trainset", "validset", "testset"]:
                value = results.get(h, '')[0]
            elif h in ["trainset_std", "validset_std", "testset_std"]:
                value = results.get(h[:-4], '')[1]
            else:
                value = results.get(h, '')
            row.append(value)

        table.add_row(row)

    # Sort
    for col in reversed(sort_by):
        table._rows = sorted(table._rows,
                             key=sort_nicely(abs(col) - 1),
                             reverse=col < 0)

    if args.out is not None:
        import csv

        results = []
        results.append(headers)
        results.extend(table._rows)

        with open(args.out, 'wb') as csvfile:
            w = csv.writer(csvfile)
            w.writerows(results)

    else:
        print table.draw()
Code example #12
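Trains a model: creates (or resumes) an experiment folder named by a UID derived from the hyperparameters, builds the model and a Trainer with stopping criteria and progress-saving tasks, then trains and saves the final model.pkl.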
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    # Extract the experiment's hyperparameters
    hyperparams = dict(vars(args))
    # Remove hyperparams that should not be part of the hash
    del hyperparams['nb_epochs']
    del hyperparams['max_epoch']
    del hyperparams['keep']
    del hyperparams['force']
    del hyperparams['name']

    # Get/generate experiment name
    experiment_name = args.name
    if experiment_name is None:
        experiment_name = utils.generate_uid_from_string(repr(hyperparams))

    # Create experiment folder
    experiment_path = pjoin(".", "experiments", experiment_name)
    resuming = False
    if os.path.isdir(experiment_path) and not args.force:
        resuming = True
        print "### Resuming experiment ({0}). ###\n".format(experiment_name)
        # Check if provided hyperparams match those in the experiment folder
        hyperparams_loaded = utils.load_dict_from_json_file(
            pjoin(experiment_path, "hyperparams.json"))
        if hyperparams != hyperparams_loaded:
            print "The arguments provided are different than the one saved. Use --force if you are certain.\nQuitting."
            exit(1)
    else:
        if os.path.isdir(experiment_path):
            shutil.rmtree(experiment_path)

        os.makedirs(experiment_path)
        utils.save_dict_to_json_file(
            pjoin(experiment_path, "hyperparams.json"), hyperparams)

    with Timer("Loading dataset"):
        trainset, validset, testset = dataset.load(args.dataset,
                                                   args.dataset_percent)
        print " (data: {:,}; {:,}; {:,}) ".format(len(trainset), len(validset),
                                                  len(testset)),

    with Timer("\nCreating model"):
        model = model_factory(args.model,
                              input_size=trainset.input_size,
                              hyperparams=hyperparams)

    starting_epoch = 1
    if resuming:
        with Timer("\nLoading model"):
            status = utils.load_dict_from_json_file(
                pjoin(experiment_path, "status.json"))
            starting_epoch = status['no_epoch'] + 1
            model = model.load(pjoin(experiment_path, "model.pkl"))

    ### Build trainer ###
    with Timer("\nBuilding trainer"):
        trainer = Trainer(model,
                          trainset,
                          batch_size=hyperparams['batch_size'],
                          starting_epoch=starting_epoch)

        # Add stopping criteria
        ending_epoch = args.max_epoch if args.max_epoch is not None else starting_epoch + args.nb_epochs - 1
        # Stop when max number of epochs is reached.
        trainer.add_stopping_criterion(tasks.MaxEpochStopping(ending_epoch))

        # Print the time each training epoch took
        trainer.add_task(tasks.PrintEpochDuration())
        avg_reconstruction_error = tasks.AverageReconstructionError(
            model.CD.chain_start, model.CD.chain_end, len(trainset))
        trainer.add_task(
            tasks.Print(avg_reconstruction_error,
                        msg="Avg. reconstruction error: {0:.1f}"))

        if args.model == 'irbm':
            trainer.add_task(
                irbm.GrowiRBM(model,
                              shrinkable=args.shrinkable,
                              nb_neurons_to_add=args.nb_neurons_to_add))

        # Save training progression
        trainer.add_task(
            tasks.SaveProgression(model, experiment_path, each_epoch=50))
        if args.keep is not None:
            trainer.add_task(
                tasks.KeepProgression(model,
                                      experiment_path,
                                      each_epoch=args.keep))

        trainer.build()

    print "\nWill train {0} from epoch {1} to epoch {2}.".format(
        args.model, starting_epoch, ending_epoch)
    trainer.train()

    with Timer("\nSaving"):
        # Save final model
        model.save(pjoin(experiment_path, "model.pkl"))
Code example #13
File: train_model.py Project: MarcCote/iRBM
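Identical to code example #12 apart from line wrapping.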
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    # Extract the experiment's hyperparameters
    hyperparams = dict(vars(args))
    # Remove hyperparams that should not be part of the hash
    del hyperparams['nb_epochs']
    del hyperparams['max_epoch']
    del hyperparams['keep']
    del hyperparams['force']
    del hyperparams['name']

    # Get/generate experiment name
    experiment_name = args.name
    if experiment_name is None:
        experiment_name = utils.generate_uid_from_string(repr(hyperparams))

    # Create experiment folder
    experiment_path = pjoin(".", "experiments", experiment_name)
    resuming = False
    if os.path.isdir(experiment_path) and not args.force:
        resuming = True
        print "### Resuming experiment ({0}). ###\n".format(experiment_name)
        # Check if provided hyperparams match those in the experiment folder
        hyperparams_loaded = utils.load_dict_from_json_file(pjoin(experiment_path, "hyperparams.json"))
        if hyperparams != hyperparams_loaded:
            print "The arguments provided are different than the one saved. Use --force if you are certain.\nQuitting."
            exit(1)
    else:
        if os.path.isdir(experiment_path):
            shutil.rmtree(experiment_path)

        os.makedirs(experiment_path)
        utils.save_dict_to_json_file(pjoin(experiment_path, "hyperparams.json"), hyperparams)

    with Timer("Loading dataset"):
        trainset, validset, testset = dataset.load(args.dataset, args.dataset_percent)
        print " (data: {:,}; {:,}; {:,}) ".format(len(trainset), len(validset), len(testset)),

    with Timer("\nCreating model"):
        model = model_factory(args.model, input_size=trainset.input_size, hyperparams=hyperparams)

    starting_epoch = 1
    if resuming:
        with Timer("\nLoading model"):
            status = utils.load_dict_from_json_file(pjoin(experiment_path, "status.json"))
            starting_epoch = status['no_epoch'] + 1
            model = model.load(pjoin(experiment_path, "model.pkl"))

    ### Build trainer ###
    with Timer("\nBuilding trainer"):
        trainer = Trainer(model, trainset, batch_size=hyperparams['batch_size'], starting_epoch=starting_epoch)

        # Add stopping criteria
        ending_epoch = args.max_epoch if args.max_epoch is not None else starting_epoch + args.nb_epochs - 1
        # Stop when max number of epochs is reached.
        trainer.add_stopping_criterion(tasks.MaxEpochStopping(ending_epoch))

        # Print the time each training epoch took
        trainer.add_task(tasks.PrintEpochDuration())
        avg_reconstruction_error = tasks.AverageReconstructionError(model.CD.chain_start, model.CD.chain_end, len(trainset))
        trainer.add_task(tasks.Print(avg_reconstruction_error, msg="Avg. reconstruction error: {0:.1f}"))

        if args.model == 'irbm':
            trainer.add_task(irbm.GrowiRBM(model, shrinkable=args.shrinkable, nb_neurons_to_add=args.nb_neurons_to_add))

        # Save training progression
        trainer.add_task(tasks.SaveProgression(model, experiment_path, each_epoch=50))
        if args.keep is not None:
            trainer.add_task(tasks.KeepProgression(model, experiment_path, each_epoch=args.keep))

        trainer.build()

    print "\nWill train {0} from epoch {1} to epoch {2}.".format(args.model, starting_epoch, ending_epoch)
    trainer.train()

    with Timer("\nSaving"):
        # Save final model
        model.save(pjoin(experiment_path, "model.pkl"))
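
A hypothetical invocation of train_model.py, assuming the argument parser exposes flags matching the argparse attribute names used above; the exact flag spellings, and which arguments are positional, are not shown in this excerpt:

# Train an iRBM on binarized MNIST for 100 epochs (flag names assumed); rerunning
# the same command resumes from the auto-generated experiment folder.
python train_model.py --model irbm --dataset binarized_mnist --batch_size 64 --nb_epochs 100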
Code example #14
def _compute_AIS(model, M=100, betas=BETAS, batch_size=None, seed=1234, ais_working_dir=".", force=False):
    ais_results_json = pjoin(ais_working_dir, "ais_results.part.json")

    if batch_size is None:
        batch_size = M

    # We will be executing M AIS runs in total.
    last_sample_chain = np.zeros((M, model.input_size), dtype=config.floatX)
    M_log_w_ais = np.zeros(M, dtype=np.float64)

    model.set_rng_seed(seed)

    ais_results = {}
    if os.path.isfile(ais_results_json) and not force:
        print "Resuming AIS using info from {}".format(ais_results_json)
        ais_results = utils.load_dict_from_json_file(ais_results_json)
        # JSON can only store lists; convert the arrays back to ndarrays.
        M_log_w_ais = np.asarray(ais_results['M_log_w_ais'], dtype=np.float64)
        last_sample_chain = np.asarray(ais_results['last_sample_chain'], dtype=config.floatX)
        lnZ_trivial = ais_results['lnZ_trivial']

    # Iterate through all AIS runs.
    for i in range(0, M, batch_size):
        if i <= ais_results.get('batch_id', -1):
            continue

        model.set_rng_seed(seed+i)
        actual_size = min(M - i, batch_size)
        print "AIS run: {}/{} (using batch size of {})".format(i, M, batch_size)
        ais_partial_results = _compute_AIS_samples(model, M=actual_size, betas=betas)

        M_log_w_ais[i:i+actual_size] = ais_partial_results['M_log_w_ais']
        last_sample_chain[i:i+actual_size] = ais_partial_results['last_sample_chain']
        lnZ_trivial = ais_partial_results['lnZ_trivial']

        # Save partial results
        if os.path.isfile(ais_results_json):
            shutil.copy(ais_results_json, ais_results_json[:-4] + "old.json")

        ais_results = {'batch_id': i,
                       'M': M,
                       'batch_size': batch_size,
                       'last_sample_chain': last_sample_chain,
                       'M_log_w_ais': M_log_w_ais,
                       'lnZ_trivial': lnZ_trivial}
        utils.save_dict_to_json_file(ais_results_json, ais_results)

    # We compute the mean of the estimated `r_AIS`
    Ms = np.arange(1, M+1)
    log_sum_w_ais = np.logaddexp.accumulate(M_log_w_ais)
    logcummean_Z = log_sum_w_ais - np.log(Ms)

    # We compute the standard deviation of the estimated `r_AIS`
    logstd_AIS = np.zeros_like(M_log_w_ais)
    for k in Ms[1:]:
        m = np.max(M_log_w_ais[:k])
        logstd_AIS[k-1] = np.log(np.std(np.exp(M_log_w_ais[:k]-m), ddof=1)) - np.log(np.sqrt(k))
        logstd_AIS[k-1] += m

    logstd_AIS[0] = np.nan  # Standard deviation of only one sample does not exist.
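    # (Entry k-1 of logstd_AIS equals ln(std(w_1..w_k, ddof=1) / sqrt(k)), the log
    # standard error of the AIS weights, computed stably in log-space by factoring
    # out m = max ln(w) before exponentiating.)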

    # The authors report AIS error using ln(Ẑ ± 3\sigma)
    m = max(np.nanmax(logstd_AIS), np.nanmax(logcummean_Z))
    logcumstd_Z_up = np.log(np.exp(logcummean_Z-m) + 3*np.exp(logstd_AIS-m)) + m - logcummean_Z
    logcumstd_Z_down = -(np.log(np.exp(logcummean_Z-m) - 3*np.exp(logstd_AIS-m)) + m) + logcummean_Z
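    # In plain terms: logcumstd_Z_up = ln(Z_hat + 3*sigma) - ln(Z_hat) and
    # logcumstd_Z_down = ln(Z_hat) - ln(Z_hat - 3*sigma), both evaluated in
    # log-space by factoring out the shared maximum `m` before exponentiating.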

    # Compute the standard deviation of ln(Z)
    std_lnZ = np.array([np.std(M_log_w_ais[:k], ddof=1) for k in Ms[1:]])
    std_lnZ = np.r_[np.nan, std_lnZ]  # Standard deviation of only one sample does not exist.

    return {"logcummean_Z": logcummean_Z.astype(config.floatX),
            "logcumstd_Z_down": logcumstd_Z_down.astype(config.floatX),
            "logcumstd_Z_up": logcumstd_Z_up.astype(config.floatX),
            "logcumstd_Z": logstd_AIS.astype(config.floatX),
            "M_log_w_ais": M_log_w_ais,
            "lnZ_trivial": lnZ_trivial,
            "std_lnZ": std_lnZ,
            "last_sample_chain": last_sample_chain,
            "batch_size": batch_size,
            "seed": seed,
            "nb_temperatures": len(betas),
            "nb_samples": M
            }
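
A minimal, self-contained NumPy sketch of the running log-mean computed above: np.logaddexp.accumulate yields ln(sum of the first k weights) directly from the log-weights, so the overflow-prone exp/cumsum is never formed (the toy log-weights below are made up):

import numpy as np

# Toy log-importance-weights from M = 4 fictitious AIS runs.
M_log_w_ais = np.array([10.0, 10.5, 9.8, 10.2])
Ms = np.arange(1, len(M_log_w_ais) + 1)

# Running log-sum of the weights, then divide by k in log-space.
log_sum_w_ais = np.logaddexp.accumulate(M_log_w_ais)  # ln(sum_{i<=k} w_i)
logcummean_Z = log_sum_w_ais - np.log(Ms)             # ln((1/k) * sum_{i<=k} w_i)

# Sanity check against the naive computation (fine here, but it overflows
# once the log-weights grow large).
assert np.allclose(logcummean_Z, np.log(np.cumsum(np.exp(M_log_w_ais)) / Ms))
print(logcummean_Z)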
Code example #15
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    # Check that at least one of --view or --save has been given.
    #if not args.view and not args.save:
    #    parser.error("At least one of the following options must be chosen: --view or --save")

    # Get experiment folder
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    if not os.path.isfile(pjoin(experiment_path, "model.pkl")):
        parser.error(
            'Cannot find model for experiment: {0}!'.format(experiment_path))

    if not os.path.isfile(pjoin(experiment_path, "hyperparams.json")):
        parser.error('Cannot find hyperparams for experiment: {0}!'.format(
            experiment_path))

    # Load the experiment's hyperparameters
    hyperparams = utils.load_dict_from_json_file(
        pjoin(experiment_path, "hyperparams.json"))

    with Timer("Loading dataset"):
        trainset, validset, testset = dataset.load(
            hyperparams['dataset'], hyperparams.get('dataset_percent', 1.))
        print " (data: {:,}; {:,}; {:,}) ".format(len(trainset), len(validset),
                                                  len(testset)),

        if hyperparams["dataset"] == "binarized_mnist":
            image_shape = (28, 28)
        elif hyperparams["dataset"] == "caltech101_silhouettes28":
            image_shape = (28, 28)
        else:
            raise ValueError("Unknown dataset: {0}".format(
                hyperparams["dataset"]))

    with Timer("Loading model"):
        if hyperparams["model"] == "rbm":
            raise ValueError("RBM doesn't have a p(z|v) distribution.")
        elif hyperparams["model"] == "orbm":
            from iRBM.models.orbm import oRBM
            model_class = oRBM
        elif hyperparams["model"] == "irbm":
            from iRBM.models.irbm import iRBM
            model_class = iRBM

        # Load the actual model.
        model = model_class.load(pjoin(experiment_path, "model.pkl"))

    with Timer("Building function p(z|v)"):
        v = testset.symb_inputs
        pdf_z_given_v = theano.function([v], model.pdf_z_given_v(v))

    min_z = args.start
    max_z = model.hidden_size if args.end is None else args.end
    size = args.bucket_size
    buckets = np.arange(min_z, max_z, size)
    nb_buckets = len(buckets)

    inputs = testset.inputs.get_value()
    probs = pdf_z_given_v(inputs)

    # plt.figure()
    # plt.plot(probs.T)
    # plt.title("p(z|v) for all inputs in the testset")
    # plt.xlabel("z")
    # plt.ylabel("p(z|v)")

    topk = 10
    images = np.zeros((nb_buckets * topk, int(np.prod(image_shape))))
    images_dummy = np.zeros((nb_buckets, int(np.prod(image_shape))))
    for i, start in enumerate(buckets):
        bucket_probs = np.sum(probs[:, start:start + size], axis=1)
        indices = np.argsort(bucket_probs)[::-1]

        for j in range(topk)[::-1]:
            # Fill row j, column i of the (topk, nb_buckets) image grid.
            images[j * nb_buckets + i] = inputs[indices[j]]

        # Dummy images are used to proportionally represent, via their intensity, the mean p(a <= z < b|v) of the top-k inputs.
        images_dummy[i] = np.mean(bucket_probs[indices][:topk])

    # Prepend the dummy images so they are displayed on the first row.
    #images = np.r_[images_dummy, images]
    #data = vizu.concatenate_images(images, shape=image_shape, dim=(topk+1, nb_buckets), border_size=0, clim=(0, 1))
    data = vizu.concatenate_images(images,
                                   shape=image_shape,
                                   dim=(topk, nb_buckets),
                                   border_size=0,
                                   clim=(0, 1))

    f, (ax1, ax2) = plt.subplots(2, sharex=True, figsize=(24, 9))
    #ax1.set_title("Top-{} inputs maximizing $p(z|\\mathbf{{v}})$ for different values of $z$".format(topk), fontsize=20)
    xticks = image_shape[1] * np.arange(nb_buckets) + image_shape[1] // 2
    ax1.plot(xticks, images_dummy[:, 0], linewidth=2)
    ax1.set_ylabel("Avg. $p(a\\leq z < b|\\mathbf{v})$ of " +
                   "top-{}".format(topk),
                   fontsize=20)
    ax1.set_ylim(0, 1)
    ax1.set_adjustable('box-forced')

    ax2.imshow(data, cmap=plt.cm.gray, interpolation='nearest', origin="upper")
    ax2.set_ylabel("Top-{} inputs".format(topk), fontsize=20)
    ax2.set_yticks(image_shape[1] * np.arange(topk) + image_shape[1] / 2.)
    ax2.set_yticklabels(map(str, range(1, topk + 1)[::-1]))
    ax2.set_anchor('N')
    #ax2.set_ylim(0, image_shape[1]*topk)

    xticks = image_shape[1] * np.arange(nb_buckets + 1)
    xticks_labels = map(str, buckets) + [str(buckets[-1] + size)]
    ax2.set_xlabel("$a\\leq z < b$", fontsize=20)
    ax2.set_xticks(xticks)
    ax2.set_xticklabels(xticks_labels, rotation=45)
    ax2.set_xlim(min(xticks), max(xticks))
    ax2.set_adjustable('box-forced')

    # Fine-tune figure; make subplots close to each other and hide x ticks for
    # all but bottom plot.
    f.subplots_adjust(hspace=0.)
    #plt.setp(ax1.get_xticklabels(), visible=False)

    f.tight_layout()
    plt.savefig("topk_prob_z_given_x.png", dpi=300, bbox_inches='tight')
    print "Saving to ./topk_prob_z_given_x.png"
    #plt.show()
    return  # NOTE: everything below this return is unreachable (older version of the figure).

    plt.figure()

    plt1 = plt.subplot(2, 1, 2)
    plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
    plt.ylabel("Top-{}".format(topk), fontsize=20)
    #plt.yticks(image_shape[1]*np.arange(topk+1)+image_shape[1]/2., ["Intensity"] + map(str, range(1, topk+1)[::-1]))
    plt.yticks(image_shape[1] * np.arange(topk) + image_shape[1] / 2.,
               map(str,
                   range(1, topk + 1)[::-1]))
    plt.xlabel("$a\\leq z < b$", fontsize=20)

    xticks = image_shape[1] * np.arange(nb_buckets + 1)
    xticks_labels = map(str, buckets) + [str(buckets[-1] + size)]
    plt.xticks(xticks, xticks_labels, rotation=45)
    plt.xlim(min(xticks), max(xticks))
    #plt.xlim(min(xticks), max(xticks))

    plt.subplot(2, 1, 1, sharex=plt1)
    plt.title(
        "Top-{} inputs maximizing $p(z|\\mathbf{{v}})$ for different values of $z$"
        .format(topk),
        fontsize=20)
    #x = buckets+size//2
    xticks = image_shape[1] * np.arange(nb_buckets) + image_shape[1] // 2
    plt.plot(xticks, images_dummy[:, 0])
    plt.ylabel("Mean $p(a\\leq z < b|\\mathbf{v})$", fontsize=20)
    #plt.xticks(xticks, map(str, buckets) + [str(buckets[-1]+size)], rotation=45)
    #plt.xticks(xticks, [])
    #plt.xlim(min(xticks)-image_shape[1]//2, max(xticks)+image_shape[1]//2)

    plt.subplots_adjust(hspace=0.001, left=0., right=1., top=1., bottom=0.)
    plt.tight_layout()
    plt.savefig("test.png", bbox_inches='tight')
    plt.show()
Code example #16
File: sample_model.py  Project: orapradeep/iRBM
def main():
    parser = buildArgsParser()
    args = parser.parse_args()

    # Check that at least one of --view or --save has been given.
    if not args.view and not args.save:
        parser.error(
            "At least one the following options must be chosen: --view or --save"
        )

    # Get experiment folder
    experiment_path = args.name
    if not os.path.isdir(experiment_path):
        # If not a directory, it must be the name of the experiment.
        experiment_path = pjoin(".", "experiments", args.name)

    if not os.path.isdir(experiment_path):
        parser.error('Cannot find experiment: {0}!'.format(args.name))

    if not os.path.isfile(pjoin(experiment_path, "model.pkl")):
        parser.error(
            'Cannot find model for experiment: {0}!'.format(experiment_path))

    if not os.path.isfile(pjoin(experiment_path, "hyperparams.json")):
        parser.error('Cannot find hyperparams for experiment: {0}!'.format(
            experiment_path))

    # Load the experiment's hyperparameters
    hyperparams = utils.load_dict_from_json_file(
        pjoin(experiment_path, "hyperparams.json"))

    with Timer("Loading model"):
        if hyperparams["model"] == "rbm":
            from iRBM.models.rbm import RBM
            model_class = RBM
        elif hyperparams["model"] == "orbm":
            from iRBM.models.orbm import oRBM
            model_class = oRBM
        elif hyperparams["model"] == "irbm":
            from iRBM.models.irbm import iRBM
            model_class = iRBM

        # Load the actual model.
        model = model_class.load(pjoin(experiment_path, "model.pkl"))

    rng = np.random.RandomState(args.seed)

    # Initialize the chain with uniform Bernoulli(0.5) noise.
    # TODO: sample from a Bernoulli distribution parametrized with the visible biases.
    chain_start = (rng.rand(args.nb_samples, model.input_size) > 0.5).astype(
        theano.config.floatX)
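    # A possible implementation of the TODO above (a sketch only; it assumes the
    # visible biases are stored in a Theano shared variable `model.b`, which may
    # not match the actual attribute name in iRBM):
    #   p_v = 1. / (1. + np.exp(-model.b.get_value()))  # sigmoid of the visible biases
    #   chain_start = (rng.rand(args.nb_samples, model.input_size) < p_v).astype(theano.config.floatX)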

    with Timer("Building sampling function"):
        v0 = theano.shared(np.asarray(chain_start, dtype=theano.config.floatX))
        v1 = model.gibbs_step(v0)
        gibbs_step = theano.function([], updates={v0: v1})

        if args.full_gibbs_step:
            print "Using z=K"
            # Use z=K for first Gibbs step.
            from iRBM.models.rbm import RBM
            h0 = RBM.sample_h_given_v(model, v0)
            v1 = RBM.sample_v_given_h(model, h0)
            v0.set_value(v1.eval())

    with Timer("Sampling"):
        for k in range(args.cdk):
            gibbs_step()

    samples = v0.get_value()

    if args.save:
        np.savez(args.out, samples)

    if args.view:
        if hyperparams["dataset"] == "binarized_mnist":
            image_shape = (28, 28)
        elif hyperparams["dataset"] == "caltech101_silhouettes28":
            image_shape = (28, 28)
        else:
            raise ValueError("Unknown dataset: {0}".format(
                hyperparams["dataset"]))

        data = vizu.concatenate_images(samples,
                                       shape=image_shape,
                                       border_size=1,
                                       clim=(0, 1))
        plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
        plt.show()
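
A hypothetical invocation of sample_model.py, assuming the parser exposes flags matching the argparse attribute names used above; the exact spellings, and which arguments are positional, are not shown in this excerpt:

# Draw 100 samples with 1,000 Gibbs steps, display them, and save them to disk
# (flag and argument names assumed).
python sample_model.py my_experiment --nb_samples 100 --cdk 1000 --seed 1234 --view --save --out samples.npz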