Example #1
0
def setup(optdict, settings):
    """Configure the compiled program and build a parallel GD learner.

    optdict  -- parsed command-line options; optdict['prog'] is the program.
    settings -- dict supplying 'maxDepth', 'epochs', 'para', 'rate', 'batch'.
    Returns the configured learner.
    """
    program = optdict['prog']

    # Sparse weight vector: only the constants appearing in the unary
    # "rule(i)" predicate carry learnable weights.
    ruleWeights = program.db.vector(declare.asMode("rule(i)"))
    program.setRuleWeights(ruleWeights)

    # cap the recursion depth of inference
    program.maxDepth = settings['maxDepth']

    # Parallel fixed-rate gradient descent with L2 regularization; epoch
    # count, worker count, learning rate, and mini-batch size all come
    # from the settings dict.
    return plearn.ParallelFixedRateGDLearner(
        program,
        epochs=settings['epochs'],
        parallel=settings['para'],
        rate=settings['rate'],
        miniBatchSize=settings['batch'],
        regularizer=learn.L2Regularizer())
Example #2
0
def setExptParams():
  """Parse command-line arguments and assemble experiment parameters.

  usage: [targetPredicate] [epochs]
  Returns (params, epochs), where params is the dict an Expt consumes.
  """
  argv = sys.argv
  pred = argv[1] if len(argv) > 1 else 'hypernym'
  epochs = int(argv[2]) if len(argv) > 2 else 30
  # comline.parseCommandLine wires up the db, program, and datasets;
  # cached/serialized forms sit left of '|', raw sources on the right
  optdict, args = comline.parseCommandLine([
      '--logging', 'warn',
      '--db', 'inputs/wnet.db|inputs/wnet.cfacts',
      '--prog','inputs/wnet-learned.ppr', '--proppr',
      '--train','inputs/wnet-train.dset|inputs/wnet-train.exam',
      '--test', 'inputs/wnet-test.dset|inputs/wnet-valid.exam'])

  prog = optdict['prog']
  # sparse weight vector: just the constants in the unary "rule(i)" predicate
  prog.setRuleWeights(prog.db.vector(declare.asMode("rule(i)")))
  # 'ALL' means no single target mode is selected
  targetMode = None if pred == 'ALL' else 'i_%s/io' % pred
  learner = plearn.ParallelFixedRateGDLearner(
      prog, epochs=epochs, parallel=40, regularizer=learn.L2Regularizer())
  params = {'prog':prog,
            'trainData':optdict['trainData'],
            'testData':optdict['testData'],
            'targetMode':targetMode,
            'savedTestPredictions':'tmp-cache/%s-test.solutions.txt' % pred,
            'savedTrainExamples':'tmp-cache/wnet-train.examples',
            'savedTestExamples':'tmp-cache/wnet-test.examples',
            'learner':learner}
  return params, epochs
Example #3
0
def setExptParams(num):
    """Assemble the experiment configuration for data fold *num*.

    Loads the fold's db and datasets (cached forms left of '|', raw
    sources right), compiles theory.ppr, and builds a parallel learner.
    """
    db = comline.parseDBSpec('tmp-cache/train-%d.db|inputs/train-%d.cfacts' % (num, num))
    train = comline.parseDatasetSpec('tmp-cache/train-%d.dset|inputs/train-%d.exam'  % (num, num), db)
    test = comline.parseDatasetSpec('tmp-cache/test-%d.dset|inputs/test-%d.exam'  % (num, num), db)
    prog = comline.parseProgSpec("theory.ppr", db, proppr=True)
    prog.setFeatureWeights()
    # 5 worker processes, 10 epochs, L2 regularization
    gdLearner = plearn.ParallelFixedRateGDLearner(
        prog, regularizer=learn.L2Regularizer(), parallel=5, epochs=10)
    return {
        'prog': prog,
        'trainData': train,
        'testData': test,
        'targetMode': 'answer/io',
        'savedModel': 'learned-model.db',
        'learner': gdLearner,
    }
Example #4
0
def setExptParams():
    """Configure the Cora citation-matching experiment.

    Returns the params dict an Expt consumes (program, datasets,
    target mode, output paths, and learner).
    """
    db = comline.parseDBSpec('tmp-cache/cora.db|inputs/cora.cfacts')
    trainData = comline.parseDatasetSpec('tmp-cache/cora-train.dset|inputs/train.examples', db)
    testData = comline.parseDatasetSpec('tmp-cache/cora-test.dset|inputs/test.examples', db)
    prog = comline.parseProgSpec("cora.ppr", db, proppr=True)
    prog.setRuleWeights()
    # mark the kaw/ktw/kvw relations as learnable parameters
    for relation in ('kaw', 'ktw', 'kvw'):
        prog.db.markAsParam(relation, 1)
    # shallow inference only
    prog.maxDepth = 1
    learner = plearn.ParallelFixedRateGDLearner(
        prog, regularizer=learn.L2Regularizer(), parallel=5, epochs=30)
    return {
        'prog': prog,
        'trainData': trainData,
        'testData': testData,
        'targetMode': 'samebib/io',
        'savedModel': 'tmp-cache/cora-trained.db',
        'savedTestPredictions': 'tmp-cache/cora-test.solutions.txt',
        'savedTrainExamples': 'tmp-cache/cora-train.examples',
        'savedTestExamples': 'tmp-cache/cora-test.examples',
        'learner': learner,
    }
Example #5
0
import logging

from tensorlog import comline
from tensorlog import expt
from tensorlog import learn
from tensorlog import plearn

if __name__=="__main__":
    logging.basicConfig(level=logging.INFO)
    logging.info('level is info')

    db = comline.parseDBSpec('tmp-cache/cora.db|inputs/cora.cfacts')
    trainData = comline.parseDatasetSpec('tmp-cache/cora-train.dset|inputs/train.examples', db)
    testData = comline.parseDatasetSpec('tmp-cache/cora-test.dset|inputs/test.examples', db)
    prog = comline.parseProgSpec("cora.ppr",db,proppr=True)
    prog.setRuleWeights()
    prog.db.markAsParam('kaw',1)
    prog.db.markAsParam('ktw',1)
    prog.db.markAsParam('kvw',1)
    prog.maxDepth = 1
#    learner = learn.FixedRateGDLearner(prog,regularizer=learn.L2Regularizer(),epochs=5)
    learner = plearn.ParallelFixedRateGDLearner(prog,regularizer=learn.L2Regularizer(),parallel=5,epochs=5)
    params = {'prog':prog,
              'trainData':trainData, 'testData':testData,
              'targetMode':'samebib/io',
              'savedModel':'tmp-cache/cora-trained.db',
              'savedTestPredictions':'tmp-cache/cora-test.solutions.txt',
              'savedTrainExamples':'tmp-cache/cora-train.examples',
              'savedTestExamples':'tmp-cache/cora-test.examples',
              'learner':learner
    }
    print 'maxdepth',prog.maxDepth
    expt.Expt(params).run()
Example #6
0
        ]))

    prog = optdict['prog']
    # sparse weights: one learnable weight per constant in the unary
    # "ruleid(i)" predicate
    prog.setRuleWeights(weights=prog.db.vector(declare.asMode("ruleid(i)")))
    # processes==0 selects a single-process learner; otherwise fan out
    # across `processes` parallel workers with mini-batches
    if processes==0:
        learner = learn.FixedRateGDLearner(
            prog,
            epochs=epochs,
            #        regularizer=learn.L2Regularizer(),
        )
    else:
        learner = plearn.ParallelFixedRateGDLearner(
            prog,
            epochs=epochs,
            parallel=processes,
            miniBatchSize=100,
            #epochTracer=learn.EpochTracer.defaultPlusAcc,
            # cheap tracer: lower per-epoch reporting overhead
            epochTracer=learn.EpochTracer.cheap,
            regularizer=learn.L2Regularizer(),
        )

    # configure the experiment; outputs land under tmp-cache/
    params = {'prog':prog,
              'trainData':optdict['trainData'], 
              'testData':optdict['testData'],
              'savedModel':'tmp-cache/trained.db',
              'savedTestPredictions':'tmp-cache/valid.solutions.txt',
              'savedTestExamples':'tmp-cache/valid.examples',
              'targetMode':targetMode,
              'learner':learner
    }
Example #7
0
        'inputs/%s-test.dset|inputs/%s-test.exam' % (stem, stem)
    ])

    # prog is shortcut to the output optdict, for convenience.
    prog = optdict['prog']

    # initialize learnable feature weights
    # NOTE(review): the original comment here mentioned the unary predicate
    # rule, but this calls setFeatureWeights() (not setRuleWeights) —
    # presumably all features are weighted; confirm intent.
    prog.setFeatureWeights()

    # use a non-default learner, overriding the tracing function,
    # number of epochs, and regularizer
    #    learner = learn.FixedRateGDLearner(prog,regularizer=learn.L2Regularizer(),traceFun=learn.Learner.cheapTraceFun,epochs=epochs)
    #    learner = plearn.ParallelFixedRateGDLearner(prog,epochs=epochs,parallel=40,regularizer=learn.L2Regularizer())
    # 55 parallel workers, fixed learning rate 20.0, weak L2 (1e-4)
    learner = plearn.ParallelFixedRateGDLearner(
        prog,
        epochs=epochs,
        parallel=55,
        rate=20.0,
        regularizer=learn.L2Regularizer(0.0001))
    #    learner = plearn.ParallelAdaGradLearner(prog,epochs=epochs,parallel=40,regularizer=learn.L2Regularizer())
    #    learner = plearn.ParallelFixedRateGDLearner(prog,epochs=epochs,parallel=40)

    # configure the experiment; outputs are written under tmp-cache/
    params = {
        'prog': prog,
        'trainData': optdict['trainData'],
        'testData': optdict['testData'],
        'savedTestPredictions': 'tmp-cache/%s-test.solutions.txt' % stem,
        'savedTrainExamples': 'tmp-cache/%s-train.examples' % stem,
        'savedTestExamples': 'tmp-cache/%s-test.examples' % stem,
        'savedModel': 'tmp-cache/%s-model.examples' % stem,
        'learner': learner