Example 1
def jobman_insert_random(n_jobs,
                         table_name="emotiw_mlp_audio_fixed_pool2_mixed_grbmx2"
                         ):

    JOBDB = 'postgresql://[email protected]/gulcehrc_db?table=' + table_name
    EXPERIMENT_PATH = "experiment_cg_2layer_sigm_hyper2_fixed2_pool2_save_mixed_grbmx2.jobman_entrypoint"
    nlr = 45
    learning_rates = numpy.logspace(numpy.log10(0.0008), numpy.log10(0.09),
                                    nlr)
    max_col_norms = [1.9835, 1.8256, 1.2124, 0.98791]
    jobs = []

    for _ in range(n_jobs):

        job = DD()
        id_lr = numpy.random.random_integers(0, nlr - 1)
        rnd_maxcn = numpy.random.random_integers(0, len(max_col_norms) - 1)
        job.n_hiddens = numpy.random.random_integers(
            2, 5) * 100 + 2 * numpy.random.random_integers(0, 15)
        job.n_layers = 2
        job.learning_rate = learning_rates[id_lr]
        job.momentum = 10.**numpy.random.uniform(-1, -0)
        job.hidden_dropout = numpy.random.uniform(low=0.1, high=0.2)
        job.layer_dropout = 0
        job.topN_pooling = 1
        job.no_final_dropout = 1
        job.l2 = numpy.random.random_integers(1, 20) * 1e-3
        job.rmsprop = 1
        job.normalize_acts = 0
        job.enable_standardization = 0
        job.response_normalize = 0
        job.rbm_epochs = 15
        job.rho = 0.94
        job.validerror = 0.0
        job.loss = 0.0
        job.epoch = 0
        job.epoch_time = 0
        job.use_nesterov = 1
        job.trainerror = 0.0
        job.features = "full.pca"
        job.max_col_norm = max_col_norms[rnd_maxcn]
        job.example_dropout = numpy.random.randint(60, 200)
        job.tag = "relu_nlayers_dbn"

        jobs.append(job)
        print job

    answer = raw_input("Submit %d jobs?[y/N] " % len(jobs))

    if answer == "y":
        numpy.random.shuffle(jobs)

        db = jobman.sql.db(JOBDB)
        for job in jobs:
            job.update({jobman.sql.EXPERIMENT: EXPERIMENT_PATH})
            jobman.sql.insert_dict(job, db)

        print "inserted %d jobs" % len(jobs)
        print "To run: jobdispatch --condor --mem=3G --gpu --env=THEANO_FLAGS='floatX=float32, device=gpu' --repeat_jobs=%d jobman sql -n 1 '%s' ." % (
            len(jobs), JOBDB)
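
These listings show only the insertion function; the imports are omitted. A minimal sketch of the assumed preamble and a call, matching the names used above (DD is jobman's attribute-access dictionary, usually imported from jobman.tools):

# Assumed preamble for the snippets in this listing (not part of the original source).
import numpy
import jobman
import jobman.sql
from jobman.tools import DD

if __name__ == "__main__":
    # Draw 50 random hyperparameter configurations and optionally insert them.
    jobman_insert_random(50)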
Example 2
def jobman_insert_random(n_jobs):
    JOBDB = 'postgres://[email protected]/dauphiya_db/emotiw_mlp_audio'
    EXPERIMENT_PATH = "experiment.jobman_entrypoint"

    jobs = []
    for _ in range(n_jobs):
        job = DD()

        job.n_hiddens = numpy.random.randint(8, 512)
        job.n_layers = numpy.random.randint(1, 4)
        job.learning_rate = 10.**numpy.random.uniform(-3, -0)
        job.momentum = 10.**numpy.random.uniform(-1, -0)
        job.features = ["minimal.pca", "full.pca"][numpy.random.binomial(1, 0.5)]
        job.example_dropout = numpy.random.randint(16, 200)
        job.rbm_learning_rate = 10.**numpy.random.uniform(-3, -0)
        job.rbm_epochs = numpy.random.randint(8, 100)
        job.tag = "pretrain"

        jobs.append(job)
        print job

    answer = raw_input("Submit %d jobs?[y/N] " % len(jobs))
    if answer == "y":
        numpy.random.shuffle(jobs)

        db = jobman.sql.db(JOBDB)
        for job in jobs:
            job.update({jobman.sql.EXPERIMENT: EXPERIMENT_PATH})
            jobman.sql.insert_dict(job, db)

        print "inserted %d jobs" % len(jobs)
        print "To run: jobdispatch --condor --repeat_jobs=%d jobman sql -n 1 'postgres://[email protected]/dauphiya_db/emotiw_mlp_audio' ." % len(jobs)
Example 3
def jobman_insert_random(n_jobs):
    JOBDB = 'postgres://[email protected]/dauphiya_db/emotiw_mlp_audio'
    EXPERIMENT_PATH = "experiment.jobman_entrypoint"

    jobs = []
    for _ in range(n_jobs):
        job = DD()

        job.n_hiddens = numpy.random.randint(8, 512)
        job.n_layers = numpy.random.randint(1, 4)
        job.learning_rate = 10.**numpy.random.uniform(-3, -0)
        job.momentum = 10.**numpy.random.uniform(-1, -0)
        job.features = ["minimal.pca",
                        "full.pca"][numpy.random.binomial(1, 0.5)]
        job.example_dropout = numpy.random.randint(16, 200)
        job.rbm_learning_rate = 10.**numpy.random.uniform(-3, -0)
        job.rbm_epochs = numpy.random.randint(8, 100)
        job.tag = "pretrain"

        jobs.append(job)
        print job

    answer = raw_input("Submit %d jobs?[y/N] " % len(jobs))
    if answer == "y":
        numpy.random.shuffle(jobs)

        db = jobman.sql.db(JOBDB)
        for job in jobs:
            job.update({jobman.sql.EXPERIMENT: EXPERIMENT_PATH})
            jobman.sql.insert_dict(job, db)

        print "inserted %d jobs" % len(jobs)
        print "To run: jobdispatch --condor --repeat_jobs=%d jobman sql -n 1 'postgres://[email protected]/dauphiya_db/emotiw_mlp_audio' ." % len(
            jobs)
Example 4
def jobman_insert_random(n_jobs, table_name="emotiw_mlp_audio_fixed_pool2_mixed_grbmx2"):

    JOBDB = 'postgresql://[email protected]/gulcehrc_db?table=' + table_name
    EXPERIMENT_PATH = "experiment_cg_2layer_sigm_hyper2_fixed2_pool2_save_mixed_grbmx2.jobman_entrypoint"
    nlr = 45
    learning_rates = numpy.logspace(numpy.log10(0.0008), numpy.log10(0.09), nlr)
    max_col_norms = [1.9835, 1.8256, 1.2124, 0.98791]
    jobs = []

    for _ in range(n_jobs):

        job = DD()
        id_lr = numpy.random.random_integers(0, nlr-1)
        rnd_maxcn = numpy.random.random_integers(0, len(max_col_norms)-1)
        job.n_hiddens = numpy.random.random_integers(2,5) * 100 + 2 * numpy.random.random_integers(0,15)
        job.n_layers = 2
        job.learning_rate = learning_rates[id_lr]
        job.momentum = 10.**numpy.random.uniform(-1, -0)
        job.hidden_dropout = numpy.random.uniform(low=0.1, high=0.2)
        job.layer_dropout = 0
        job.topN_pooling = 1
        job.no_final_dropout = 1
        job.l2 = numpy.random.random_integers(1, 20) * 1e-3
        job.rmsprop = 1
        job.normalize_acts = 0
        job.enable_standardization = 0
        job.response_normalize = 0
        job.rbm_epochs = 15
        job.rho = 0.94
        job.validerror = 0.0
        job.loss = 0.0
        job.epoch = 0
        job.epoch_time = 0
        job.use_nesterov = 1
        job.trainerror = 0.0
        job.features = "full.pca"
        job.max_col_norm = max_col_norms[rnd_maxcn]
        job.example_dropout = numpy.random.randint(60, 200)
        job.tag = "relu_nlayers_dbn"

        jobs.append(job)
        print job

    answer = raw_input("Submit %d jobs?[y/N] " % len(jobs))

    if answer == "y":
        numpy.random.shuffle(jobs)

        db = jobman.sql.db(JOBDB)
        for job in jobs:
            job.update({jobman.sql.EXPERIMENT: EXPERIMENT_PATH})
            jobman.sql.insert_dict(job, db)

        print "inserted %d jobs" % len(jobs)
        print "To run: jobdispatch --condor --mem=3G --gpu --env=THEANO_FLAGS='floatX=float32, device=gpu' --repeat_jobs=%d jobman sql -n 1 '%s' ." % (len(jobs), JOBDB)
def jobman_insert_random(n_jobs, table_name="emotiw_mlp_audio_sigm_fixed_pool2_mixed_norbm3"):

    JOBDB = 'postgresql://[email protected]/gulcehrc_db?table=' + table_name
    EXPERIMENT_PATH = "experiment_cg_2layer_sigm_hyper2_fixed2_pool2_save_mixed_norbm3.jobman_entrypoint"
    nlr = 45
    learning_rates = numpy.logspace(numpy.log10(0.0008), numpy.log10(0.1), nlr)
    max_col_norms = [1.8256, 1.5679, 1.2124, 0.98791]
    rhos = [0.96, 0.92, 0.88]
    jobs = []

    for _ in range(n_jobs):

        job = DD()
        id_lr = numpy.random.random_integers(0, nlr-1)
        rnd_maxcn = numpy.random.random_integers(0, len(max_col_norms)-1)
        rnd_rho = numpy.random.random_integers(0, len(rhos)-1)
        job.n_hiddens = numpy.random.randint(80, 500)
        job.n_layers = numpy.random.random_integers(1, 2)
        job.learning_rate = learning_rates[id_lr]
        job.momentum = 10.**numpy.random.uniform(-1, -0)
        job.rmsprop = 1
        job.rho = rhos[rnd_rho]
        job.validerror = 0.0
        job.loss = 0.0
        job.seed = 1938471
        job.rbm_epochs = 0
        job.epoch = 0
        job.epoch_time = 0
        job.use_nesterov = 1
        job.trainerror = 0.0
        job.features = "full.pca"
        job.max_col_norm = max_col_norms[rnd_maxcn]
        job.example_dropout = numpy.random.randint(60, 200)
        job.tag = "sigm_norm_const_fixed_pool2_norbm3"

        jobs.append(job)
        print job

    answer = raw_input("Submit %d jobs?[y/N] " % len(jobs))

    if answer == "y":
        numpy.random.shuffle(jobs)

        db = jobman.sql.db(JOBDB)
        for job in jobs:
            job.update({jobman.sql.EXPERIMENT: EXPERIMENT_PATH})
            jobman.sql.insert_dict(job, db)

        print "inserted %d jobs" % len(jobs)
        print "To run: jobdispatch --condor --mem=3G --gpu --env=THEANO_FLAGS='floatX=float32, device=gpu' --repeat_jobs=%d jobman sql -n 1 '%s' ." % (len(jobs), JOBDB)
Example 6
def jobman_insert_random(n_jobs, table_name="emotiw_mlp_audio_tanh"):
    JOBDB = 'postgresql://[email protected]/gulcehrc_db?table=' + table_name

    EXPERIMENT_PATH = "experiment_cg.jobman_entrypoint"
    nlr = 50
    learning_rates = numpy.logspace(numpy.log10(0.001), numpy.log10(0.3), nlr)

    jobs = []
    for _ in range(n_jobs):
        job = DD()
        id_lr = numpy.random.random_integers(0, nlr - 1)
        job.n_hiddens = numpy.random.randint(100, 800)
        job.n_layers = numpy.random.randint(1, 4)
        job.learning_rate = learning_rates[id_lr]
        job.momentum = 10.**numpy.random.uniform(-1, -0)
        job.rmsprop = numpy.random.binomial(1, 0.5)
        job.validerror = 0.0
        job.loss = 0.0
        job.epoch = 0
        job.epoch_time = 0
        job.trainerror = 0.0
        job.features = "full.pca"
        job.max_col_norm = 1.8456
        job.example_dropout = numpy.random.randint(16, 200)
        job.rbm_learning_rate = 10.**numpy.random.uniform(-3, -0)
        job.rbm_epochs = 0  #numpy.random.randint(8, 100)
        job.tag = "tanh_norm_const"

        jobs.append(job)
        print job

    answer = raw_input("Submit %d jobs?[y/N] " % len(jobs))
    if answer == "y":
        numpy.random.shuffle(jobs)

        db = jobman.sql.db(JOBDB)
        for job in jobs:
            job.update({jobman.sql.EXPERIMENT: EXPERIMENT_PATH})
            jobman.sql.insert_dict(job, db)

        print "inserted %d jobs" % len(jobs)
        print "To run: jobdispatch --condor --mem=3G --gpu --env=THEANO_FLAGS='floatX=float32, device=gpu' --repeat_jobs=%d jobman sql -n 1 '%s' ." % (
            len(jobs), JOBDB)
Example 7
def jobman_insert_random(n_jobs, table_name="emotiw_mlp_audio_tanh"):
    JOBDB = 'postgresql://[email protected]/gulcehrc_db?table=' + table_name

    EXPERIMENT_PATH = "experiment_cg.jobman_entrypoint"
    nlr = 50
    learning_rates = numpy.logspace(numpy.log10(0.001), numpy.log10(0.3), nlr)

    jobs = []
    for _ in range(n_jobs):
        job = DD()
        id_lr = numpy.random.random_integers(0, nlr-1)
        job.n_hiddens = numpy.random.randint(100, 800)
        job.n_layers = numpy.random.randint(1, 4)
        job.learning_rate = learning_rates[id_lr]
        job.momentum = 10.**numpy.random.uniform(-1, -0)
        job.rmsprop = numpy.random.binomial(1, 0.5)
        job.validerror = 0.0
        job.loss = 0.0
        job.epoch = 0
        job.epoch_time = 0
        job.trainerror = 0.0
        job.features = "full.pca"
        job.max_col_norm = 1.8456
        job.example_dropout = numpy.random.randint(16, 200)
        job.rbm_learning_rate = 10.**numpy.random.uniform(-3, -0)
        job.rbm_epochs = 0 #numpy.random.randint(8, 100)
        job.tag = "tanh_norm_const"

        jobs.append(job)
        print job

    answer = raw_input("Submit %d jobs?[y/N] " % len(jobs))
    if answer == "y":
        numpy.random.shuffle(jobs)

        db = jobman.sql.db(JOBDB)
        for job in jobs:
            job.update({jobman.sql.EXPERIMENT: EXPERIMENT_PATH})
            jobman.sql.insert_dict(job, db)

        print "inserted %d jobs" % len(jobs)
        print "To run: jobdispatch --condor --mem=3G --gpu --env=THEANO_FLAGS='floatX=float32, device=gpu' --repeat_jobs=%d jobman sql -n 1 '%s' ." % (len(jobs), JOBDB)