def config(module_config, params):
    c = dict()
    c['embdim'] = 300
    c['inp_e_dropout'] = 1 / 2
    c['inp_w_dropout'] = 0
    c['e_add_flags'] = True

    c['ptscorer'] = B.mlp_ptscorer
    c['mlpsum'] = 'sum'
    c['Ddim'] = 1

    c['opt'] = 'adam'
    c['loss'] = pearsonobj
    c['batch_size'] = 64
    c['nb_epoch'] = 16

    c['fix_layers'] = []

    module_config(c)

    for p in params:
        k, v = p.split('=')
        c[k] = eval(v)

    ps, h = hash_params(c)
    return c, ps, h
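
Each config() variant in this listing ends with the same command-line override hook: every element of params is a 'key=value' string, the right-hand side is passed through eval(), and the result overwrites the default in c before the configuration is hashed. A minimal, standalone illustration of just that loop (the parameter values here are invented for the example):

# Standalone sketch of the override loop shared by the config() variants below.
# eval() means the value can be any Python expression, not only a literal.
params = ['batch_size=128', 'Ddim=2', 'inp_e_dropout=1/3']
c = {'batch_size': 64, 'Ddim': 1, 'inp_e_dropout': 1/2}

for p in params:
    k, v = p.split('=')
    c[k] = eval(v)

# c is now {'batch_size': 128, 'Ddim': 2, 'inp_e_dropout': 0.333...}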
Example #2
def config(module_config, params):
    c = dict()
    c['embdim'] = 300
    c['inp_e_dropout'] = 1/2
    c['inp_w_dropout'] = 0
    c['e_add_flags'] = True

    c['ptscorer'] = B.mlp_ptscorer
    c['mlpsum'] = 'sum'
    c['Ddim'] = 1

    c['opt'] = 'adam'
    c['loss'] = ranknet  # XXX: binary_crossentropy back?
    c['balance_class'] = True  # seems essential
    c['batch_size'] = 64
    c['nb_epoch'] = 16
    c['epoch_fract'] = 1/4  # XXX: or much smaller?

    c['fix_layers'] = []

    module_config(c)

    for p in params:
        k, v = p.split('=')
        c[k] = eval(v)

    ps, h = hash_params(c)
    return c, ps, h
Example #3
def config(module_config, params):
    c = dict()
    c['embdim'] = 300
    c['inp_e_dropout'] = 1/2
    c['inp_w_dropout'] = 0
    c['e_add_flags'] = True

    c['ptscorer'] = B.mlp_ptscorer
    c['mlpsum'] = 'sum'
    c['Ddim'] = 1

    c['opt'] = 'adam'
    c['loss'] = pearsonobj
    c['batch_size'] = 64
    c['nb_epoch'] = 16

    c['fix_layers'] = []

    module_config(c)

    for p in params:
        k, v = p.split('=')
        c[k] = eval(v)

    ps, h = hash_params(c)
    return c, ps, h
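
hash_params(c) is imported from the project's utilities and is not shown in this listing; from how it is used (ps is printed as a readable parameter summary, h is formatted with %x into a run id), a purely hypothetical stand-in could look like this (the real implementation presumably also normalizes callable values such as the ptscorer):

import hashlib

def hash_params(c):
    # Hypothetical stand-in for the project's hash_params(): a printable,
    # key-sorted dump of the config plus an integer hash of that dump,
    # matching how (ps, h) are consumed in the examples here.
    ps = ', '.join('%s=%s' % (k, c[k]) for k in sorted(c))
    h = int(hashlib.md5(ps.encode('utf8')).hexdigest()[:8], 16)
    return ps, h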
Example #4
def config(model_config, task_config, params):
    c = default_config(model_config, task_config)

    for p in params:
        k, v = p.split('=')
        c[k] = eval(v)

    ps, h = hash_params(c)

    return c, ps, h
Example #5
File: train.py  Project: lizihan021/Hotpot
def config(model_config, task_config, params):
    c = default_config(model_config,
                       task_config)  # by default, model_config > task_config

    for p in params:
        keyword, value = p.split('=')
        c[keyword] = eval(value)

    ps, h = hash_params(c)

    return c, ps, h
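
default_config() is likewise defined elsewhere; the inline comment "by default, model_config > task_config" indicates that the task defaults are applied first and the model config afterwards, so the model's settings win on conflicts. A hypothetical sketch of that precedence (the baseline keys are assumptions, not the project's actual defaults):

def default_config(model_config, task_config):
    # Hypothetical sketch: shared baseline, then task defaults, then the
    # model config, so model settings take precedence over task settings.
    c = dict()
    c['embdim'] = 300         # assumed shared baseline
    c['inp_e_dropout'] = 1/2  # assumed shared baseline
    task_config(c)            # task-level defaults
    model_config(c)           # model-level overrides ('model_config > task_config')
    return c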
Example #6
def config(model_config, task_config, params):
    c = default_config(model_config, task_config)

    for p in params:
        k, v = p.split('=')
        c[k] = eval(v)

    ps, h = hash_params(c)

    # Post-(ps, h) munging of c: only adjust settings that are redundant
    # with what is already user-visible, so ps and h remain accurate.
    if c['prescoring'] is not None:
        prescoring_setup(c, task_config)

    return c, ps, h
Example #7
def config(module_config, params):
    c = dict()
    c['embdim'] = 300
    c['ptscorer'] = B.mlp_ptscorer
    c['mlpsum'] = 'sum'
    c['Ddim'] = 1
    c['loss'] = ranknet
    module_config(c)

    for p in params:
        k, v = p.split('=')
        c[k] = eval(v)

    ps, h = hash_params(c)
    return c, ps, h
Example #8
def config(model_config, task_config, params):
    c = default_config(model_config, task_config)

    for p in params:
        k, v = p.split('=')
        c[k] = eval(v)

    ps, h = hash_params(c)

    # Post-(ps, h) munging of c: only adjust settings that are redundant
    # with what is already user-visible, so ps and h remain accurate.
    if c['prescoring'] is not None:
        prescoring_setup(c, task_config)

    return c, ps, h
Example #9
def config(module_config, params):
    c = dict()
    c['embdim'] = 300
    c['ptscorer'] = B.mlp_ptscorer
    c['mlpsum'] = 'sum'
    c['Ddim'] = 1
    c['loss'] = ranknet
    module_config(c)

    for p in params:
        k, v = p.split('=')
        c[k] = eval(v)

    ps, h = hash_params(c)
    return c, ps, h
Example #10
def config(module_config, params):
    c = dict()
    c['embdim'] = 300
    c['inp_e_dropout'] = 3/4
    c['inp_w_dropout'] = 0
    c['e_add_flags'] = True

    c['ptscorer'] = B.dot_ptscorer
    c['mlpsum'] = 'sum'
    c['Ddim'] = 2

    c['loss'] = pearsonobj  # ...or 'categorical_crossentropy'
    module_config(c)

    for p in params:
        k, v = p.split('=')
        c[k] = eval(v)

    ps, h = hash_params(c)
    return c, ps, h
Example #11
def config(module_config, params):
    c = dict()
    c['embdim'] = 50
    c['inp_e_dropout'] = 1 / 2
    c['e_add_flags'] = True

    c['ptscorer'] = B.mlp_ptscorer
    c['mlpsum'] = 'sum'
    c['Ddim'] = 1

    c['loss'] = 'binary_crossentropy'
    c['nb_epoch'] = 2
    module_config(c)

    for p in params:
        k, v = p.split('=')
        c[k] = eval(v)

    ps, h = hash_params(c)
    return c, ps, h
Example #12
def config(module_config, params):
    c = dict()
    c['embdim'] = 300
    c['inp_e_dropout'] = 1/2
    c['e_add_flags'] = True

    c['ptscorer'] = B.mlp_ptscorer
    c['mlpsum'] = 'sum'
    c['Ddim'] = 1

    c['batch_size'] = 192
    c['loss'] = 'binary_crossentropy'
    module_config(c)

    for p in params:
        k, v = p.split('=')
        c[k] = eval(v)

    ps, h = hash_params(c)
    return c, ps, h
Example #13
def config(module_config, params):
    c = dict()
    c['embdim'] = 300
    c['inp_e_dropout'] = 3 / 4
    c['inp_w_dropout'] = 0
    c['e_add_flags'] = True

    c['ptscorer'] = B.dot_ptscorer
    c['mlpsum'] = 'sum'
    c['Ddim'] = 2

    c['loss'] = pearsonobj  # ...or 'categorical_crossentropy'
    c['batch_size'] = 160
    c['nb_epoch'] = 32
    module_config(c)

    for p in params:
        k, v = p.split('=')
        c[k] = eval(v)

    ps, h = hash_params(c)
    return c, ps, h
Example #14
def config(module_config, params):
    c = dict()
    c['embdim'] = 300
    c['inp_e_dropout'] = 1/2
    c['inp_w_dropout'] = 0
    c['e_add_flags'] = True

    c['ptscorer'] = B.mlp_ptscorer
    c['mlpsum'] = 'sum'
    c['Ddim'] = 1

    c['loss'] = ranknet
    c['balance_class'] = False
    c['batch_size'] = 160
    c['nb_epoch'] = 16
    c['epoch_fract'] = 1/4
    module_config(c)

    for p in params:
        k, v = p.split('=')
        c[k] = eval(v)

    ps, h = hash_params(c)
    return c, ps, h
Example #15
def config(module_config, params):
    c = dict()
    c['embdim'] = 300
    c['inp_e_dropout'] = 1/2
    c['inp_w_dropout'] = 0
    c['e_add_flags'] = True

    c['ptscorer'] = B.mlp_ptscorer
    c['mlpsum'] = 'sum'
    c['Ddim'] = 2

    c['loss'] = 'categorical_crossentropy'
    c['balance_class'] = False
    c['batch_size'] = 160
    c['nb_epoch'] = 32
    module_config(c)

    for p in params:
        print(p)
        k, v = p.split('=')
        c[k] = eval(v)

    ps, h = hash_params(c)
    return c, ps, h
Example #16
        task.load_vocab(conf['vocabf'])
    task.load_data(trainf, valf)

    tuneargs = dict()
    for p in params:
        k, v = p.split('=')
        v = eval(v)
        if isinstance(v, list) or isinstance(v, dict):
            tuneargs[k] = v

    rs = RandomSearch(modelname+'_'+taskname+'_log.txt', **tuneargs)

    for ps, h, pardict in rs():
        # final config for this run
        conf, ps, h = config(model_module.config, task.config, [])
        for k, v in pardict.items():
            conf[k] = v
        ps, h = hash_params(conf)
        task.set_conf(conf)

        runid = '%s-%s-%x' % (taskname, modelname, h)
        print()
        print(' ...... %s .................... %s' % (runid, ps))

        try:
            model, res = train_and_eval(runid, model_module.prep_model, task, conf)
            rs.report(ps, h, res[1])
        except Exception as e:
            print(e)
            time.sleep(1)
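
Note that only list- or dict-valued parameters are forwarded to the random search as a tuning space; scalar values are silently dropped in this variant. A small standalone illustration of how tuneargs ends up (parameter names and values invented for the example):

tuneargs = dict()
for p in ['dropout=[0, 1/3, 1/2]', 'Ddim=[1, 2]', 'batch_size=64']:
    k, v = p.split('=')
    v = eval(v)
    if isinstance(v, (list, dict)):
        tuneargs[k] = v

# batch_size=64 is a scalar and is ignored here:
# tuneargs == {'dropout': [0, 0.3333..., 0.5], 'Ddim': [1, 2]}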
Example #17
        k, v = p.split('=')
        v = eval(v)
        if isinstance(v, list) or isinstance(v, dict):
            tuneargs[k] = v
        elif isinstance(v, int) or isinstance(v, str):
            tuneargs[k] = [v]

    # rs = RandomSearch(modelname+'_'+taskname+'_log.txt', **tuneargs)
    # Permutation does not support dict argument like "cdim"
    rs = PermutationSearch(modelname + '_' + taskname + '_log.txt', **tuneargs)

    for ps, h, pardict in rs():
        # final config for this run
        conf, ps, h = config(model_module.config, task.config, [])
        for k, v in pardict.items():
            conf[k] = v
        ps, h = hash_params(conf)
        task.set_conf(conf)

        runid = '%s-%s-%x' % (taskname, modelname, h)
        print()
        print(' ...... %s .................... %s' % (runid, ps))

        try:
            model, res = train_and_eval(runid, model_module.prep_model, task,
                                        conf)
            rs.report(ps, h, res[1])
        except Exception as e:
            print(e)
            time.sleep(1)
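
Unlike the random-search snippet above, this variant keeps scalar values as single-element lists, so fixed parameters still appear in every generated permutation. A brief illustration (names and values invented):

tuneargs = dict()
for p in ['Ddim=[1, 2]', 'batch_size=64', "mlpsum='sum'"]:
    k, v = p.split('=')
    v = eval(v)
    if isinstance(v, list) or isinstance(v, dict):
        tuneargs[k] = v
    elif isinstance(v, int) or isinstance(v, str):
        tuneargs[k] = [v]

# tuneargs == {'Ddim': [1, 2], 'batch_size': [64], 'mlpsum': ['sum']}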