    n_in = hd2
    n_out = outdim
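    # Glorot/Xavier-style uniform init half-width: sqrt(6. / (fan_in + fan_out)).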
    irange_out = numpy.sqrt(6. / (n_in + n_out))

    state.hyper_parameters = {
        'trainfile': 'train_gray_uvd_rot_1562_31.h5',
        'N': 10 * 64,
        'batch_size': 64 * 2,
        'c1': c1,
        'kernel_c1': kernel_c1,
        'pool_c1': pool_c1,
        'c2': c2,
        'kernel_c2': kernel_c2,
        'pool_c2': pool_c2,
        'irange_c1': irange_c1,
        'irange_c2': irange_c2,
        'irange_hd1': irange_hd1,
        'irange_hd2': irange_hd2,
        'irange_out': irange_out,
        'hd1': hd1,
        'hd2': hd2,
        'output_dim': outdim,
        'lamda': lamda,
        'decay': decay,
        'max_epochs': 100,
        'save_best_path': save_best_path
    }

    yaml_template = state.yaml_template
    hyper_parameters = expand(flatten(state.hyper_parameters), dict_type=ydict)
    # This will be the complete yaml string that should be executed
    final_yaml_str = yaml_template % hyper_parameters
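
For context, a minimal sketch of the template-fill pattern these fragments end with; the template string below is a hypothetical one-field stand-in for the full pylearn2 YAML templates used in these examples:

    # Hypothetical one-field template; real templates are full pylearn2 YAML.
    yaml_template = "!obj:pylearn2.train.Train { batch_size: %(batch_size)i }"
    hyper_parameters = {"batch_size": 128}
    final_yaml_str = yaml_template % hyper_parameters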
Example #2
def load_experiments(args):
    dataset_name = args.dataset_name
    db = sql.db("postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s"
                % {"user": args.user,
                   "host": args.host,
                   "port": args.port,
                   "database": args.database,
                   "table": args.table,
                   })

    logger.info("Getting dataset info for %s%s"
                % (dataset_name, ", transposed" if args.transposed else ""))
    data_path = serial.preprocess("${PYLEARN2_NI_PATH}/" + args.dataset_name)

    if args.transposed:
        logger.info("Data in transpose...")
        mri = MRI.MRI_Transposed(dataset_name=args.dataset_name,
                                 unit_normalize=True,
                                 even_input=True,
                                 apply_mask=True)
        input_dim = mri.X.shape[1]
        variance_map_file = path.join(data_path, "transposed_variance_map.npy")
    else:
        mask_file = path.join(data_path, "mask.npy")
        mask = np.load(mask_file)
        input_dim = (mask == 1).sum()
        if input_dim % 2 == 1:
            input_dim -= 1
        mri = MRI.MRI_Standard(which_set="full",
                               dataset_name=args.dataset_name,
                               unit_normalize=True,
                               even_input=True,
                               apply_mask=True)
        variance_map_file = path.join(data_path, "variance_map.npy")

    mri_nifti.save_variance_map(mri, variance_map_file)

    for items in nested_generator(layer_depth_generator("encoder.layer_depths", 
                                                        xrange(4, 6), 5),
                                  hidden_generator("encoder.nhid", 4),
                                  float_generator("weight_decay.coeffs.z", 3, 0.1, 0.001, log_scale=True)):
        # logger.info("Adding NICE experiment with hyperparameters %s" % (items, ))
        state = DD()

        experiment_hyperparams = nice_experiment.default_hyperparams(input_dim)
        if args.transposed:
            experiment_hyperparams["data_class"] = "MRI_Transposed"
        if args.logistic:
            experiment_hyperparams["prior"]["__builder__"] =\
                "nice.pylearn2.models.nice.StandardLogistic"

        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            entry[split_keys[-1]] = value
        experiment_hyperparams["dataset_name"] = dataset_name
        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   [tuple(v) if isinstance(v, list) else v for v in flatten(experiment_hyperparams).values()])))

        user = path.expandvars("$USER")
        save_path = serial.preprocess("/export/mialab/users/%s/pylearn2_outs/%d"
                                      % (user, h))

        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
            }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams

        sql.insert_job(
            nice_experiment.experiment,
            flatten(state),
            db
            )

    db.createView("%s_view" % args.table)
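
The dotted-key loop above ("encoder.nhid" and the like) recurs in every example here. A standalone Python 3 sketch of the same pattern, with illustrative names only:

    # Walk all parent keys of a dotted key, then assign at the leaf.
    def set_dotted(d, key, value):
        entry = d
        split_keys = key.split(".")
        for k in split_keys[:-1]:
            entry = entry[k]
        entry[split_keys[-1]] = value

    hyperparams = {"encoder": {"nhid": 100, "layer_depths": [3, 3]}}
    set_dotted(hyperparams, "encoder.nhid", 200)
    assert hyperparams["encoder"]["nhid"] == 200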
Example #3
}

    '''

    state.hyper_parameters = {
        'trainfile': 'train_gray_uvd_rot_1562_31.h5',
        'testfile': 'test_gray_uvd_rot_1562_31.h5',
        'N': 12 * 64,
        'n': 2 * 64,
        'batch_size': 64,
        'c1': c1,
        'kernel_c1': kernel_c1,
        'pool_c1': pool_c1,
        'c2': c2,
        'kernel_c2': kernel_c2,
        'pool_c2': pool_c2,
        'irange_c1': irange_c1,
        'irange_c2': irange_c2,
        'irange_hd1': irange_hd1,
        'irange_hd2': irange_hd2,
        'irange_out': irange_out,
        'hd1': 2592,
        'hd2': 36,
        'output_dim': constants.NUM_JNTS * 3,
        'lamda': lamda,
        'decay': decay,
        'max_epochs': 50,
        'save_best_path': save_best_path
    }

    yaml_template = state.yaml_template
    hyper_parameters = expand(flatten(state.hyper_parameters), dict_type=ydict)
    # This will be the complete yaml string that should be executed
    final_yaml_str = yaml_template % hyper_parameters
Example #4
def load_experiments(args):

    dataset_name = args.dataset_name
    db = sql.db(
        "postgres://%(user)s@%(host)s:"
        "%(port)d/%(database)s?table=%(table)s" % {
            "user": args.user,
            "host": args.host,
            "port": args.port,
            "database": args.database,
            "table": args.table,
        })
    input_handler = InputHandler()

    for items in jg.nested_generator(
            jg.list_generator(
                "encoder.layer_depths",
                [[3, 5, 5, 5, 3], [5, 5, 5, 5, 5], [2, 4, 4, 2]]),
            jg.list_generator("variance_normalize", [False, 2]),
            jg.float_generator("weight_decay.coeff",
                               4,
                               0.1,
                               0.0001,
                               log_scale=True),
            jg.list_generator("prior.__builder__", [
                "nice.pylearn2.models.nice.StandardNormal",
                "nice.pylearn2.models.nice.StandardLogistic"
            ])):

        logger.info("Adding NICE experiment across hyperparameters %s" %
                    (items, ))
        state = DD()

        experiment_hyperparams = nice_experiment.default_hyperparams()

        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            assert split_keys[-1] in entry,\
                ("Key not found in hyperparams: %s, "
                 "found: %s" % (split_keys[-1], entry.keys()))
            entry[split_keys[-1]] = value
        experiment_hyperparams["dataset_name"] = dataset_name
        input_dim, variance_map_file = input_handler.get_input_params(
            args, experiment_hyperparams)
        logger.info("%s\n%s\n" % (input_dim, variance_map_file))
        experiment_hyperparams["nvis"] = input_dim
        experiment_hyperparams["encoder"]["nvis"] = input_dim

        h = abs(hash(frozenset(
            flatten(experiment_hyperparams).keys() +\
            [tuple(v) if isinstance(v, list)
             else v for v in flatten(experiment_hyperparams).values()])))

        user = path.expandvars("$USER")
        save_path = serial.preprocess(
            "/export/mialab/users/%s/pylearn2_outs/%d" % (user, h))

        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
        }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams

        sql.insert_job(nice_experiment.experiment, flatten(state), db)

    db.createView("%s_view" % args.table)
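
The jg.* generators above each yield (key, value) settings, and jg.nested_generator yields every combination. A toy analogue (not the jobman API; the names here are illustrative):

    from itertools import product

    def list_gen(key, values):
        # One (key, value) pair per candidate value.
        return [(key, v) for v in values]

    def nested(*gens):
        # Cartesian product over the per-key candidate lists.
        for combo in product(*gens):
            yield list(combo)

    for items in nested(list_gen("nhid", [50, 100]),
                        list_gen("learning_rate", [0.01, 0.001])):
        print(items)  # e.g. [('nhid', 50), ('learning_rate', 0.01)]

jg.float_generator(name, num, start, end, log_scale=True) presumably yields num log-spaced values between start and end, roughly numpy.logspace over that range.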
Example #5
def load_experiments(args):
    dataset_name = args.dataset_name

    # Load the database and table.
    db = sql.db(
        "postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s" %
        {
            "user": args.user,
            "host": args.host,
            "port": args.port,
            "database": args.database,
            "table": args.table,
        })

    # Don't worry about this yet.
    input_handler = InputHandler()

    # For generating models, we use a special set of jobman generators, made
    # for convenience.
    for items in jg.nested_generator(
            jg.float_generator("learning_rate",
                               3,
                               0.01,
                               0.0001,
                               log_scale=True),
            jg.list_generator("nhid", [50, 100, 200, 300]),
    ):

        logger.info("Adding RBM experiment across hyperparameters %s" %
                    (items, ))
        state = DD()

        # Load experiment hyperparams from experiment
        experiment_hyperparams = experiment.default_hyperparams()

        # Set them with values in our loop.
        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            assert split_keys[-1] in entry, (
                "Key not found in hyperparams: %s" % split_keys[-1])
            entry[split_keys[-1]] = value

        # Set the dataset name
        experiment_hyperparams["dataset_name"] = dataset_name

        # Get the input dim and variance map. Don't worry about variance maps right now,
        # they aren't used here.
        input_dim, variance_map_file = input_handler.get_input_params(
            args, experiment_hyperparams)
        logger.info("%s\n%s\n" % (input_dim, variance_map_file))

        # Set the input dimensionality by the data
        experiment_hyperparams["nvis"] = input_dim

        # Set the minimum learning rate relative to the initial learning rate.
        experiment_hyperparams[
            "min_lr"] = experiment_hyperparams["learning_rate"] / 10

        # Make a unique hash for experiments. Remember that lists, dicts, and other data
        # types may not be hashable, so you may need to do some special processing. In
        # this case we convert the lists to tuples.
        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   [tuple(v) if isinstance(v, list)
                                    else v
                                    for v in flatten(experiment_hyperparams).values()])))

        # Save path for the experiments. In this case we are sharing a directory in my
        # export directory so IT can blame me.
        save_path = serial.preprocess(
            "/export/mialab/users/dhjelm/pylearn2_outs/rbm_demo/%d" % h)

        # We save file params separately as they aren't model specific.
        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
        }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams
        user = path.expandvars("$USER")
        state.created_by = user

        # Finally we add the experiment to the table.
        sql.insert_job(experiment.experiment, flatten(state), db)

    # A view can be used when querying the database using psql. May not be needed in future.
    db.createView("%s_view" % args.table)
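
A Python 3 sketch of the experiment-hash trick above (the originals are Python 2, where dict.keys() + dict.values() concatenates lists directly). Note that Python 3 salts str hashes per process, so the value is only stable within one run:

    def experiment_hash(flat_hyperparams):
        # Lists are unhashable, so convert them to tuples first.
        items = list(flat_hyperparams.keys()) + [
            tuple(v) if isinstance(v, list) else v
            for v in flat_hyperparams.values()]
        return abs(hash(frozenset(items)))

    h = experiment_hash({"learning_rate": 0.01, "nhid": [50, 100]})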
Example #6
        for (lr_mult, lr_num, lr_denum) in [
                (1, 100, 20000),
                (1, 500, 100000),
                (1, 1000, 200000),
                (1, 2000, 400000),
                (1, 4000, 800000),
                (2, 2000, 400000)]:

            for batch_size in [100]:
                for nh1 in [500]:
                    for nh2 in [500]:
                        for rbm1_l2 in [1e-1, 1e-2, 1e-3, 1e-4]:
                            state.hyper_parameters = {
                                'seed1': seed,
                                'seed2': seed + 1,
                                'lr_mult': lr_mult,
                                'lr_num': lr_num,
                                'lr_denum': lr_denum,
                                'nh1': nh1,
                                'nh2': nh2,
                                'batch_size': batch_size,
                                'rbm1_l2': rbm1_l2,
                            }
                            
                            sql.insert_job(
                                    experiment.train_experiment,
                                    flatten(state),
                                    db,
                                    force_dup=True)
Example #7
for (pos_mf_steps, pos_sample_steps) in [(5, 0), (0, 5)]:
    for iscale_bias in [0]:
        state.hyper_parameters = {
            'binarize': 1,
            'nu1': 500,
            'nu2': 1000,
            'lr': lr,
            'enable_natural': 0,
            'enable_natural_diag': 1,
            'enable_centering': 1,
            'enable_warm_start': 0,
            'mlbiases': 1,
            'pos_mf_steps': pos_mf_steps,
            'pos_sample_steps': pos_sample_steps,
            'neg_sample_steps': 5,
            'iscale_w1': 0.,
            'iscale_w2': 0.,
            'iscale_bias0': iscale_bias,
            'iscale_bias1': iscale_bias,
            'iscale_bias2': iscale_bias,
            'l1_W1': 0.,
            'l1_W2': 0.,
            'batch_size': batch_size,
            'rtol': 0.00001,
            'damp': 0.1,
            'maxit': 80,
            'switch_threshold': 0,
            'switch_at': 0,
            'ais_interval': 10,
        }

        sql.insert_job(
            experiment.train_experiment,
            flatten(state),
            db,
            force_dup=True)
Example #8
                },
                !obj:deep_tempering.scripts.likelihood.log_swap_callback.pylearn2_log_swap_callback {
                    "interval": 1000,
                },
            ]
        }
    """

    for seed in [123141, 71629, 92735, 1230, 34789]:
        for lr in [1e-1, 1e-2, 1e-3, 1e-4, 1e-5]:
            for batch_size in [5]:
                for nh1 in [10]:
                    for nh2 in [10, 50]:
                        state.hyper_parameters = {
                            'seed1': seed,
                            'seed2': seed + 1,
                            'seed3': seed + 2,
                            'lr_start': lr,
                            'lr_end': lr,
                            'nh1': nh1,
                            'nh2': nh2,
                            'nh3': nh2,
                            'batch_size': batch_size,
                        }
                        
                        sql.insert_job(
                                experiment.train_experiment,
                                flatten(state),
                                db,
                                force_dup=True)
Example #9
def load_experiments(args):
    dataset_name = args.dataset_name
    db = sql.db(
        "postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s" %
        {
            "user": args.user,
            "host": args.host,
            "port": args.port,
            "database": args.database,
            "table": args.table,
        })

    logger.info("Getting dataset info for %s%s" %
                (dataset_name, ", transposed" if args.transposed else ""))
    data_path = serial.preprocess("${PYLEARN2_NI_PATH}/" + args.dataset_name)

    if args.transposed:
        logger.info("Data in transpose...")
        mri = MRI.MRI_Transposed(dataset_name=args.dataset_name,
                                 unit_normalize=True,
                                 even_input=True,
                                 apply_mask=True)
        input_dim = mri.X.shape[1]
        variance_map_file = path.join(data_path, "transposed_variance_map.npy")
    else:
        mask_file = path.join(data_path, "mask.npy")
        mask = np.load(mask_file)
        input_dim = (mask == 1).sum()
        if input_dim % 2 == 1:
            input_dim -= 1
        mri = MRI.MRI_Standard(which_set="full",
                               dataset_name=args.dataset_name,
                               unit_normalize=True,
                               even_input=True,
                               apply_mask=True)
        variance_map_file = path.join(data_path, "variance_map.npy")

    mri_nifti.save_variance_map(mri, variance_map_file)

    for items in nested_generator(
            layer_depth_generator("encoder.layer_depths", xrange(4, 6), 5),
            hidden_generator("encoder.nhid", 4),
            float_generator("weight_decay.coeffs.z",
                            3,
                            0.1,
                            0.001,
                            log_scale=True)):
        # logger.info("Adding NICE experiment with hyperparameters %s" % (items, ))
        state = DD()

        experiment_hyperparams = nice_experiment.default_hyperparams(input_dim)
        if args.transposed:
            experiment_hyperparams["data_class"] = "MRI_Transposed"
        if args.logistic:
            experiment_hyperparams["prior"]["__builder__"] =\
                "nice.pylearn2.models.nice.StandardLogistic"

        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            entry[split_keys[-1]] = value
        experiment_hyperparams["dataset_name"] = dataset_name
        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   [tuple(v) if isinstance(v, list) else v for v in flatten(experiment_hyperparams).values()])))

        user = path.expandvars("$USER")
        save_path = serial.preprocess(
            "/export/mialab/users/%s/pylearn2_outs/%d" % (user, h))

        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
        }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams

        sql.insert_job(nice_experiment.experiment, flatten(state), db)

    db.createView("%s_view" % args.table)
Example #10
    '''

    state.hyper_parameters = {
        'trainfile': 'train_gray_uvd_rot_1562_31.h5',
        'testfile': 'test_gray_uvd_rot_1562_31.h5',
        'N': 12 * 64,
        'n': 2 * 64,
        'batch_size': 64,
        'c1': c1,
        'kernel_c1': kernel_c1,
        'pool_c1': pool_c1,
        'c2': c2,
        'kernel_c2': kernel_c2,
        'pool_c2': pool_c2,
        'irange_c1': irange_c1,
        'irange_c2': irange_c2,
        'irange_hd1': irange_hd1,
        'irange_hd2': irange_hd2,
        'irange_out': irange_out,
        'hd1': 2592,
        'hd2': 36,
        'output_dim': constants.NUM_JNTS * 3,
        'lamda': lamda,
        'decay': decay,
        'max_epochs': 50,
        'save_best_path': save_best_path
    }

    yaml_template = state.yaml_template
    hyper_parameters = expand(flatten(state.hyper_parameters), dict_type=ydict)
    """

    batch_size = 5
    nh1 = 10
    nh2 = 10
    for seed in [123141, 71629, 92735, 1230, 34789]:
        for lr1 in [1e-3, 1e-4, 1e-5]:
            for lr2_mult in [1., 10., 100.]:

                lr2 = lr1 * lr2_mult

                state.hyper_parameters = {
                    'seed1': seed,
                    'seed2': seed + 1,
                    'lr_start1': lr1,
                    'lr_end1': lr1,
                    'lr_start2': lr2,
                    'lr_end2': lr2,
                    'lr_start3': lr2,
                    'lr_end3': lr2,
                    'nh1': nh1,
                    'nh2': nh2,
                    'batch_size': batch_size,
                }
                
                sql.insert_job(
                        experiment.train_experiment,
                        flatten(state),
                        db,
                        force_dup=True)
Example #12
                for iscale_bias in [0]:
                    for damp in [0.1]:
                        state.hyper_parameters = {
                            'binarize': 1,
                            'nu1': 400,
                            'nu2': 100,
                            'lr': lr,
                            'enable_natural': 1,
                            'enable_natural_diag': 0,
                            'enable_centering': 1,
                            'enable_warm_start': 0,
                            'mlbiases': 1,
                            'pos_mf_steps': pos_mf_steps,
                            'pos_sample_steps': pos_sample_steps,
                            'neg_sample_steps': 5,
                            'iscale_w1': 0.,
                            'iscale_w2': 0.,
                            'iscale_bias0': iscale_bias,
                            'iscale_bias1': iscale_bias,
                            'iscale_bias2': iscale_bias,
                            'l1_W1': 0.,
                            'l1_W2': 0.,
                            'batch_size': batch_size,
                            'rtol': 0.00001,
                            'damp': damp,
                            'maxit': 80,
                            'switch_threshold': 0,
                            'switch_at': 0,
                            'ais_interval': 1,
                        }

                        sql.insert_job(experiment.train_experiment,
                                       flatten(state),
                                       db,
                                       force_dup=True)
Example #13
def load_experiments(args):
    dataset_name = args.dataset_name
    db = sql.db("postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s"
                % {"user": args.user,
                   "host": args.host,
                   "port": args.port,
                   "database": args.database,
                   "table": args.table,
                   })

    logger.info("Getting dataset info for %s" % dataset_name)
    data_path = serial.preprocess("${PYLEARN2_NI_PATH}/" + dataset_name)
    mask_file = path.join(data_path, "mask.npy")
    mask = np.load(mask_file)
    input_dim = (mask == 1).sum()
    if input_dim % 2 == 1:
        input_dim -= 1
    mri = MRI.MRI_Standard(which_set="full",
                           dataset_name=dataset_name,
                           unit_normalize=True,
                           even_input=True,
                           apply_mask=True)
    variance_map_file = path.join(data_path, "variance_map.npy")
    mri_nifti.save_variance_map(mri, variance_map_file)

    for items in jg.nested_generator(jg.hidden_generator("nhid1", 1),
                                     jg.hidden_generator("nhid2", 1),
                                     ):

        state = DD()
        experiment_hyperparams = mlp_experiment.default_hyperparams(input_dim)

        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            entry[split_keys[-1]] = value
        
        experiment_hyperparams["dataset_name"] = dataset_name

        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   flatten(experiment_hyperparams).values())))

        user = path.expandvars("$USER")
        save_path = serial.preprocess("/export/mialab/users/%s/pylearn2_outs/%d"
                                      % (user, h))

        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
            }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams
        state.pid = 0

        sql.insert_job(
            mlp_experiment.experiment,
            flatten(state),
            db
            )

    db.createView("%s_view" % args.table)
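
The mask handling above counts in-mask voxels and rounds the count down to an even number, matching the even_input=True flag; the same pattern appears in the examples below. A brief standalone sketch; the file path is hypothetical:

    import numpy as np

    mask = np.load("mask.npy")          # hypothetical path
    input_dim = int((mask == 1).sum())
    input_dim -= input_dim % 2          # same effect as the if/-= 1 above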
Example #14
    n_in = hd2
    n_out = outdim
    irange_out = numpy.sqrt(6. / (n_in + n_out))

    state.hyper_parameters = {
        'trainfile': 'train_gray_uvd_rot_1562_31.h5',
        'N': 10 * 64,
        'batch_size': 64 * 2,
        'c1': c1,
        'kernel_c1': kernel_c1,
        'pool_c1': pool_c1,
        'c2': c2,
        'kernel_c2': kernel_c2,
        'pool_c2': pool_c2,
        'irange_c1': irange_c1,
        'irange_c2': irange_c2,
        'irange_hd1': irange_hd1,
        'irange_hd2': irange_hd2,
        'irange_out': irange_out,
        'hd1': hd1,
        'hd2': hd2,
        'output_dim': outdim,
        'lamda': lamda,
        'decay': decay,
        'max_epochs': 100,
        'save_best_path': save_best_path
    }

    yaml_template = state.yaml_template
    hyper_parameters = expand(flatten(state.hyper_parameters), dict_type=ydict)
    # This will be the complete yaml string that should be executed
    final_yaml_str = yaml_template % hyper_parameters
Example #15
def load_experiments(args):
    dataset_name = args.dataset_name
    db = sql.db(
        "postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s" %
        {
            "user": args.user,
            "host": args.host,
            "port": args.port,
            "database": args.database,
            "table": args.table,
        })

    logger.info("Getting dataset info for %s" % dataset_name)
    data_path = serial.preprocess("${PYLEARN2_NI_PATH}/" + dataset_name)
    mask_file = path.join(data_path, "mask.npy")
    mask = np.load(mask_file)
    input_dim = (mask == 1).sum()
    if input_dim % 2 == 1:
        input_dim -= 1
    mri = MRI.MRI_Standard(which_set="full",
                           dataset_name=dataset_name,
                           unit_normalize=True,
                           even_input=True,
                           apply_mask=True)
    variance_map_file = path.join(data_path, "variance_map.npy")
    mri_nifti.save_variance_map(mri, variance_map_file)

    for items in jg.nested_generator(
            jg.hidden_generator("nhid1", 1),
            jg.hidden_generator("nhid2", 1),
    ):

        state = DD()
        experiment_hyperparams = mlp_experiment.default_hyperparams(input_dim)

        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            entry[split_keys[-1]] = value

        experiment_hyperparams["dataset_name"] = dataset_name

        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   flatten(experiment_hyperparams).values())))

        user = path.expandvars("$USER")
        save_path = serial.preprocess(
            "/export/mialab/users/%s/pylearn2_outs/%d" % (user, h))

        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
        }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams
        state.pid = 0

        sql.insert_job(mlp_experiment.experiment, flatten(state), db)

    db.createView("%s_view" % args.table)
                            "which_set": 'train',
                            "one_hot": 1.,
                        },
            },
            "callbacks": [
                !obj:deep_tempering.scripts.likelihood.exactll_callback.pylearn2_exactll_callback {
                    "interval": 1000,
                    "trainset": *data,
                }
            ]
        }
    """

    for seed in [123141, 71629, 92735, 1230, 34789]:
        for lr in [1e-1, 1e-2, 1e-3, 1e-4, 1e-5]:
            for batch_size in [5]:
                for nh in [10]:
                    state.hyper_parameters = {
                        'seed': seed,
                        'lr_start': lr,
                        'lr_end': lr,
                        'nh1': nh,
                        'batch_size': batch_size,
                    }

                    sql.insert_job(
                        experiment.train_experiment,
                        flatten(state),
                        db,
                        force_dup=True)
Example #17
                        method_name: contraction_penalty
                    },
                    coefficient: %(coefficient)f
                }
            ],
            "termination_criterion" : %(term_crit)s,
        }
    }
    '''

    state.hyper_parameters = {
        "file": "${PYLEARN2_DATA_PATH}/UTLC/pca/sylvester_train_x_pca32.npy",
        "nvis": 32,
        "nhid": 6,
        "learning_rate": 0.1,
        "batch_size": 10,
        "coefficient": 0.5,
        "term_crit": {
            "__builder__": "pylearn2.training_algorithms.sgd.EpochCounter",
            "max_epochs": 2
        }
    }

    state.extract_results = "pylearn2.scripts.jobman.tester.result_extractor"

    sql.insert_job(
            experiment.train_experiment,
            flatten(state),
            db,
            force_dup=True)
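
The "__builder__" entries above presumably expand into pylearn2 !obj: constructor tags when the flattened state is rendered to YAML; a hedged illustration using the term_crit dict from this example:

    # Illustration only: how a __builder__ entry could render as pylearn2 YAML.
    term_crit = {
        "__builder__": "pylearn2.training_algorithms.sgd.EpochCounter",
        "max_epochs": 2,
    }
    yaml_fragment = "!obj:%s { max_epochs: %d }" % (
        term_crit["__builder__"], term_crit["max_epochs"])
    # "!obj:pylearn2.training_algorithms.sgd.EpochCounter { max_epochs: 2 }"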
Example #18
def load_experiments(args):
    dataset_name = args.dataset_name

    # Load the database and table.
    db = sql.db("postgres://%(user)s@%(host)s:%(port)d/%(database)s?table=%(table)s"
                % {"user": args.user,
                   "host": args.host,
                   "port": args.port,
                   "database": args.database,
                   "table": args.table,
                   })

    # Don't worry about this yet.
    input_handler = InputHandler()

    # For generating models, we use a special set of jobman generators, made
    # for convenience.
    for items in jg.nested_generator(
        jg.float_generator("learning_rate", 3, 0.01, 0.0001, log_scale=True),
        jg.list_generator("nhid", [50, 100, 200, 300]),
        ):

        logger.info("Adding RBM experiment across hyperparameters %s" % (items, ))
        state = DD()

        # Load experiment hyperparams from experiment
        experiment_hyperparams = experiment.default_hyperparams()

        # Set them with values in our loop.
        for key, value in items:
            split_keys = key.split(".")
            entry = experiment_hyperparams
            for k in split_keys[:-1]:
                entry = entry[k]
            assert split_keys[-1] in entry, ("Key not found in hyperparams: %s"
                                             % split_keys[-1])
            entry[split_keys[-1]] = value

        # Set the dataset name
        experiment_hyperparams["dataset_name"] = dataset_name

        # Get the input dim and variance map. Don't worry about variance maps right now,
        # they aren't used here.
        input_dim, variance_map_file = input_handler.get_input_params(args,
                                                                      experiment_hyperparams)
        logger.info("%s\n%s\n" % (input_dim, variance_map_file))

        # Set the input dimensionality by the data
        experiment_hyperparams["nvis"] = input_dim

        # Set the minimum learning rate relative to the initial learning rate.
        experiment_hyperparams["min_lr"] = experiment_hyperparams["learning_rate"] / 10

        # Make a unique hash for experiments. Remember that lists, dicts, and other data
        # types may not be hashable, so you may need to do some special processing. In
        # this case we convert the lists to tuples.
        h = abs(hash(frozenset(flatten(experiment_hyperparams).keys() +\
                                   [tuple(v) if isinstance(v, list)
                                    else v 
                                    for v in flatten(experiment_hyperparams).values()])))

        # Save path for the experiments. In this case we are sharing a directory in my 
        # export directory so IT can blame me.
        save_path = serial.preprocess("/export/mialab/users/dhjelm/pylearn2_outs/rbm_demo/%d"
                                      % h)

        # We save file params separately as they aren't model specific.
        file_params = {
            "save_path": save_path,
            "variance_map_file": variance_map_file,
            }

        state.file_parameters = file_params
        state.hyper_parameters = experiment_hyperparams
        user = path.expandvars("$USER")
        state.created_by = user

        # Finally we add the experiment to the table.
        sql.insert_job(
            experiment.experiment,
            flatten(state),
            db
            )

    # A view can be used when querying the database using psql. May not be needed in future.
    db.createView("%s_view" % args.table)