Example #1
0
File: text.py Project: insperatum/ec
                                                      tasks=tasks,
                                                      bidirectional=True)


if __name__ == "__main__":
    # Build the text-editing task suite and report how many tasks we got.
    tasks = makeTasks()
    eprint("Generated", len(tasks), "tasks")

    # Hold out 20% of the tasks for testing.
    test, train = testTrainSplit(tasks, 0.2)
    eprint("Split tasks into %d/%d test/train" % (len(test), len(train)))

    baseGrammar = Grammar.uniform(primitives)

    # Gather hyperparameters (command-line flags override these defaults)
    # before launching the exploration/compression loop.
    parameters = commandlineArguments(steps=500,
                                      iterations=10,
                                      helmholtzRatio=0.5,
                                      topK=2,
                                      maximumFrontier=2,
                                      structurePenalty=10.,
                                      a=3,
                                      activation="relu",
                                      CPUs=numberOfCPUs(),
                                      featureExtractor=LearnedFeatureExtractor,
                                      pseudoCounts=10.0)
    explorationCompression(baseGrammar,
                           train,
                           testingTasks=test,
                           outputPrefix="experimentOutputs/text",
                           evaluationTimeout=0.0005,
                           **parameters)
Example #2
0
def rational_options(p):
    """Register rational-domain command-line flags on argument parser *p*."""
    p.add_argument(
        "--smooth",
        default=False,
        action="store_true",
        help="smooth likelihood model")


if __name__ == "__main__":
    import time

    # Parse hyperparameters for the rational-function domain; `extras`
    # installs the --smooth flag defined in rational_options above.
    arguments = commandlineArguments(featureExtractor=FeatureExtractor,
                                     iterations=6,
                                     CPUs=numberOfCPUs(),
                                     structurePenalty=1.,
                                     recognitionTimeout=7200,
                                     helmholtzRatio=0.5,
                                     activation="tanh",
                                     maximumFrontier=5,
                                     a=3,
                                     topK=2,
                                     pseudoCounts=30.0,
                                     extras=rational_options)

    # DSL primitives for rational-function discovery (f1 currently
    # commented out).
    primitives = [
        real,
        # f1,
        real_division,
        real_addition,
        real_multiplication
    ]
    baseGrammar = Grammar.uniform(primitives)
    # Fixed seed so sampling/splitting is reproducible across runs.
    random.seed(42)
Example #3
0
            f.close()
            self.mean = []


if __name__ == "__main__":
    # Build the geometry task suite and report its size.
    tasks = makeTasks()
    eprint("Generated", len(tasks), "tasks")

    # 50/50 split between held-out test tasks and training tasks.
    test, train = testTrainSplit(tasks, 0.5)
    eprint("Split tasks into %d/%d test/train" % (len(test), len(train)))

    baseGrammar = Grammar.uniform(primitives)

    # Collect hyperparameters up front so the launch call stays readable.
    parameters = commandlineArguments(steps=200,
                                      a=1,
                                      iterations=100,
                                      useRecognitionModel=True,
                                      helmholtzRatio=0.5,
                                      helmholtzBatch=200,
                                      featureExtractor=GeomFeatureCNN,
                                      topK=2,
                                      maximumFrontier=1000,
                                      CPUs=numberOfCPUs(),
                                      pseudoCounts=10.0)
    explorationCompression(baseGrammar, train,
                           testingTasks=test,
                           outputPrefix="experimentOutputs/geom",
                           compressor="rust",
                           evaluationTimeout=0.01,
                           **parameters)
Example #4
0
File: list.py Project: insperatum/ec
    parser.add_argument("--split", metavar="TRAIN_RATIO",
        type=float,
        help="split test/train")
    parser.add_argument("-H", "--hidden", type=int,
        default=16,
        help="number of hidden units")
    parser.add_argument("--filter_task_args", 
                        default=False,
                        action="store_true",
                        help="only use tasks with 1 arguments")


if __name__ == "__main__":
    # Parse EC hyperparameters; `extras` installs the list-domain flags
    # (--split, --hidden, --filter_task_args, ...) defined in list_options.
    args = commandlineArguments(
        frontierSize=None, activation='sigmoid', iterations=10,
        a=3, maximumFrontier=10, topK=2, pseudoCounts=10.0,
        helmholtzRatio=0.5, structurePenalty=1.,
        CPUs=numberOfCPUs(),
        extras=list_options)

    # Load the task set named by --dataset (popped so it is not forwarded on).
    tasks = retrieveTasks(args.pop("dataset"))


    
    # TODO(review): dead filter below — presumably meant to keep only
    # arity-1 tasks (note it tests `tasks.request`, not `t.request`);
    # confirm before re-enabling.
   # tasks = [ t for t in tasks 
              #if tasks.request == 1]



    # Cap the number of tasks at --maxTasks.
    maxTasks = args.pop("maxTasks")
    if len(tasks) > maxTasks:
Example #5
0
        fn = "/tmp/logo_primitive_%d.png" % len(matrix)
        eprint("\tExported to", fn)
        scipy.misc.imsave(fn, r)

    matrix = montageMatrix(matrix)
    scipy.misc.imsave(export, matrix)


if __name__ == "__main__":
    # Hyperparameter defaults for the LOGO graphics domain; `extras`
    # contributes additional flags (read below via args.pop).
    args = commandlineArguments(structurePenalty=1.5,
                                recognitionTimeout=3600,
                                a=3,
                                topK=2,
                                iterations=10,
                                useRecognitionModel=True,
                                helmholtzRatio=0.5,
                                featureExtractor=LogoFeatureCNN,
                                maximumFrontier=5,
                                CPUs=numberOfCPUs(),
                                pseudoCounts=30.0,
                                activation="tanh",
                                extras=list_options)
    # --visualize CHECKPOINT: render the primitives of the final grammar of a
    # saved checkpoint, then exit without training.
    visualizeCheckpoint = args.pop("visualize")
    if visualizeCheckpoint is not None:
        # NOTE(review): pickle.load on a user-supplied path — only open
        # checkpoints from trusted sources.
        with open(visualizeCheckpoint, 'rb') as handle:
            primitives = pickle.load(handle).grammars[-1].primitives
        visualizePrimitives(primitives)
        import sys
        sys.exit(0)

    dreamCheckpoint = args.pop("dreamCheckpoint")
Example #6
0
        )
    parser.add_argument("--random-seed", 
        type=int, 
        default=0
        )
    parser.add_argument("--checkpoint-analysis",
        default=None,
        type=str)

if __name__ == "__main__":
    # EC command line arguments; `extras` installs the puddleworld flags
    # (--random-seed, --checkpoint-analysis, ...).
    args = commandlineArguments(
        enumerationTimeout=10, 
        activation='tanh', 
        iterations=1, 
        recognitionTimeout=3600,
        a=3, maximumFrontier=10, topK=2, pseudoCounts=30.0,
        helmholtzRatio=0.5, structurePenalty=1.,
        CPUs=numberOfCPUs(),
        featureExtractor=InstructionsFeatureExtractor,
        extras=puddleworld_options)

    checkpoint_analysis = args.pop("checkpoint_analysis") # EC checkpoints need to be run out of their calling files, so this is here.

    # Run the EC learner.  (This was a stray string literal — a no-op
    # statement, not a docstring — so it is now a real comment.)
    if checkpoint_analysis is None:
        # Set up a timestamped output directory for this run.
        random.seed(args.pop("random_seed"))
        timestamp = datetime.datetime.now().isoformat()
        outputDirectory = "experimentOutputs/puddleworld/%s"%timestamp
        # os.makedirs replaces os.system("mkdir -p ..."): no shell, portable,
        # and errors surface as exceptions instead of being ignored.
        os.makedirs(outputDirectory, exist_ok=True)
Example #7
0
    # Rejection-sample random circuits until we have 2*NUMBEROFTASKS distinct
    # ones (duplicates are discarded by the membership test).
    while len(circuits) < NUMBEROFTASKS * 2:
        inputs = sampleDistribution(inputDistribution)
        gates = sampleDistribution(gateDistribution)
        newTask = Circuit(numberOfInputs=inputs, numberOfGates=gates)
        if newTask not in circuits:
            circuits.append(newTask)
    eprint("Sampled %d circuits with %d unique functions" %
           (len(circuits), len({t.signature
                                for t in circuits})))
    # First half becomes the training tasks, second half the held-out tests.
    tasks = [t.task() for t in circuits[:NUMBEROFTASKS]]
    testing = [t.task() for t in circuits[NUMBEROFTASKS:]]

    baseGrammar = Grammar.uniform(primitives)
    explorationCompression(baseGrammar,
                           tasks,
                           testingTasks=testing,
                           outputPrefix="experimentOutputs/circuit",
                           evaluationTimeout=None,
                           **commandlineArguments(
                               iterations=10,
                               aic=1.,
                               structurePenalty=1,
                               CPUs=numberOfCPUs(),
                               featureExtractor=DeepFeatureExtractor,
                               topK=2,
                               maximumFrontier=100,
                               helmholtzRatio=0.5,
                               a=2,
                               activation="relu",
                               pseudoCounts=5.))
Example #8
0
             S=5),
    makeTask("series capacitors",
             arrow(tlist(tpositive), tpositive),
             lambda cs: sum(c**(-1) for c in cs)**(-1),
             N=20,
             S=5),
]

if __name__ == "__main__":
    # Uniform grammar over real arithmetic plus a few useful constants.
    baseGrammar = Grammar.uniform([
        real, f0, f1, fpi, real_power, real_subtraction, real_addition,
        real_multiplication
    ])

    eprint("Got %d equation discovery tasks..." % len(tasks))

    # Gather hyperparameters first, then launch the EC loop with no
    # held-out testing tasks.
    parameters = commandlineArguments(iterations=10,
                                      CPUs=numberOfCPUs(),
                                      structurePenalty=1.,
                                      helmholtzRatio=0.5,
                                      a=3,
                                      maximumFrontier=10000,
                                      topK=2,
                                      featureExtractor=None,
                                      pseudoCounts=10.0)
    explorationCompression(baseGrammar,
                           tasks,
                           outputPrefix="experimentOutputs/scientificLaws",
                           evaluationTimeout=0.1,
                           testingTasks=[],
                           **parameters)
Example #9
0
        return e

    def invented(self, e):
        # Unwrap an invented primitive and re-visit its underlying body.
        return e.body.visit(self)

    def abstraction(self, e):
        # Rebuild the abstraction node around the visited body.
        return Abstraction(e.body.visit(self))

    def application(self, e):
        # Visit operator and operand in order, then reassemble the node.
        operator = e.f.visit(self)
        operand = e.x.visit(self)
        return Application(operator, operand)

    def index(self, e):
        # Variable indices carry no parameters; return them unchanged.
        return e


RandomParameterization.single = RandomParameterization()

if __name__ == "__main__":
    # Grammar, task statistics, and the feature extractor built from them.
    baseGrammar = Grammar.uniform(primitives)
    statistics = Task.standardizeTasks(tasks)
    featureExtractor = makeFeatureExtractor(statistics)

    # Collect hyperparameters, then run EC with no held-out test set.
    parameters = commandlineArguments(frontierSize=10**2,
                                      iterations=5,
                                      featureExtractor=featureExtractor,
                                      pseudoCounts=10.0)
    explorationCompression(
        baseGrammar,
        tasks,
        outputPrefix="experimentOutputs/continuousPolynomial",
        **parameters)
Example #10
0
        for t in train:
            l = t.logLikelihood(e)
            eprint(t, l)
            biggest = min(biggest,l)
        eprint(biggest)
        assert False

    if False:
        with timing("best first enumeration"): baseGrammar.bestFirstEnumeration(arrow(tint,tint))
        with timing("depth first search"):
            print len(list(enumeration(baseGrammar, Context.EMPTY, [], arrow(tint,tint),
                                       maximumDepth = 99,
                                       upperBound = 13,
                                       lowerBound = 0)))
        assert False
    
    explorationCompression(baseGrammar, train,
                           outputPrefix = "experimentOutputs/regression",
                           evaluationTimeout = None,
                           testingTasks = test,
                           **commandlineArguments(frontierSize = 10**2,
                                                  iterations = 10,
                                                  CPUs = numberOfCPUs(),
                                                  structurePenalty = 1.,
                                                  helmholtzRatio = 0.5,
                                                  a = 1,#arity
                                                  maximumFrontier = 1000,
                                                  topK = 2,
                                                  featureExtractor = DeepFeatureExtractor,
                                                  pseudoCounts = 10.0))
Example #11
0
File: text.py Project: lcary/ec-backup
        "--compete",
        nargs='+',
        default=None,
        type=str,
        help=
        "Do a simulated sygus competition (1hr+8cpus/problem) on the sygus tasks, restoring from provided checkpoint(s). If multiple checkpoints are provided, then we ensemble the models."
    )


if __name__ == "__main__":
    # Hyperparameters for the text-editing domain; `extras` installs
    # domain flags such as --doChallenge (popped below).
    arguments = commandlineArguments(recognitionTimeout=7200,
                                     iterations=10,
                                     helmholtzRatio=0.5,
                                     topK=2,
                                     maximumFrontier=5,
                                     structurePenalty=10.,
                                     a=3,
                                     activation="tanh",
                                     CPUs=numberOfCPUs(),
                                     featureExtractor=LearnedFeatureExtractor,
                                     pseudoCounts=30.0,
                                     extras=text_options)
    doChallenge = arguments.pop('doChallenge')

    tasks = makeTasks()
    eprint("Generated", len(tasks), "tasks")

    # No task is forced into the training split.
    for t in tasks:
        t.mustTrain = False

    # NOTE(review): ratio 1. presumably routes every task to one side of the
    # split — confirm against testTrainSplit's definition.
    test, train = testTrainSplit(tasks, 1.)
    eprint("Split tasks into %d/%d test/train" % (len(test), len(train)))
Example #12
0
        f1,
        fpi,
        real_power,
        real_subtraction,
        real_addition,
        real_division,
        real_multiplication] + [
            Program.parse(n)
            for n in ["map","fold",
                      "empty","cons","car","cdr",
                      "zip"]]
    baseGrammar = Grammar.uniform(equationPrimitives)

    eprint("Got %d equation discovery tasks..." % len(tasks))

    # Launch EC: OCaml compressor backend, dummy feature extractor (no
    # learned recognition features), and a very large frontier cap.
    explorationCompression(baseGrammar, tasks,
                           outputPrefix="experimentOutputs/scientificLaws",
                           evaluationTimeout=0.1,
                           testingTasks=[],
                           **commandlineArguments(
                               compressor="ocaml",
                               featureExtractor=DummyFeatureExtractor,
                               iterations=10,
                               CPUs=numberOfCPUs(),
                               structurePenalty=0.5,
                               helmholtzRatio=0.5,
                               a=3,
                               maximumFrontier=10000,
                               topK=2,
                               pseudoCounts=10.0))