Example #1
    def __init__(self, infer_func, param_path, place=None, parallel=False):
        """
        :param infer_func: a function that will return predict Variable
        :param param_path: the path where the inference model is saved by fluid.io.save_params
        :param place: place to do the inference
        :param parallel: use parallel_executor to run the inference, it will use multi CPU/GPU.
        """
        self.param_path = param_path
        self.scope = core.Scope()
        self.parallel = parallel
        self.place = check_and_get_place(place)

        self.inference_program = framework.Program()
        with framework.program_guard(self.inference_program):
            with unique_name.guard():
                self.predict_var = infer_func()

        with self._prog_and_scope_guard():
            # load params from param_path into scope
            io.load_params(executor.Executor(self.place), param_path)

        if parallel:
            with self._prog_and_scope_guard():
                self.exe = parallel_executor.ParallelExecutor(
                    use_cuda=isinstance(self.place, core.CUDAPlace),
                    loss_name=self.predict_var.name)
        else:
            self.exe = executor.Executor(self.place)
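The constructor above builds the inference program once, loads the saved parameters into a fresh scope, and then picks either a plain Executor or a ParallelExecutor. A minimal usage sketch, assuming the class is exposed as Inferencer and that the network is defined with paddle.fluid layers (both assumptions; only the constructor body is shown above):

    import paddle.fluid as fluid

    def infer_func():
        # Build the inference network and return the predict Variable.
        x = fluid.layers.data(name='x', shape=[13], dtype='float32')
        return fluid.layers.fc(input=x, size=1, act=None)

    # Hypothetical call: the class name and parameter values are illustrative.
    inferencer = Inferencer(infer_func=infer_func,
                            param_path='./saved_params',
                            place=fluid.CPUPlace(),
                            parallel=False)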
Example #2
    def __init__(self, program_func, optimizer, param_path=None, place=None):
        # 1. we need to generate a framework.Program by calling
        # program_func. Reference: fluid.program_guard in
        # test_word2vec.py
        self.scope = core.Scope()

        self.startup_program = framework.Program()
        self.train_program = framework.Program()

        with framework.program_guard(self.train_program, self.startup_program):
            loss = program_func()
            if not isinstance(optimizer, opt_module.Optimizer):
                raise TypeError(
                    "The optimizer should be an instance of Optimizer")

            optimize_ops, params_grads = optimizer.minimize(loss)

        self.place = Trainer._check_and_get_place(place)

        self.dist_transpile_if_necessary(optimize_ops, params_grads)

        # 2. move the default_main_program to self.program and run the
        # default_startup program on an empty core.Scope()
        # Run startup program
        with self._prog_and_scope_guard():
            exe = executor.Executor(place)
            exe.run(self.startup_program)

        if param_path:
            # load params from param_path into scope
            io.load_persistables(exe, dirname=param_path)
Example #3
    def Collect_Data(self):

        '''
        Most important part!
        When the user hits "submit", this function collects all the data given as input,
        initializes the Executor class, which knows how to handle that data further,
        and calls the get_data function from the executor, which uses smart_slice and visualization to create the output.
        '''
        ###Run the other class###
        exec_ = ex.Executor()

        #Get Time
        StartingDateTime = str(ui.StartingDate.date().toPyDate())
        EndingDateTime = str(ui.EndingDate.date().toPyDate())
        StartingDateTime = StartingDateTime.replace('-','')
        EndingDateTime = EndingDateTime.replace('-','')
        StartingTime = 0
        EndingTime = 0
        if ui.HourlyFlag.isChecked():
            StartingTime = str(ui.StartingTime.time().toPyTime())
            EndingTime = str(ui.EndingTime.time().toPyTime())
            StartingTime = StartingTime[0:2]
            EndingTime = EndingTime[0:2]
        
        #Get hourly/daily, recent/hist
        hourly_daily = ('hourly' if ui.HourlyFlag.isChecked() else 'daily')
        recent_hist = ('historical' if ui.HistoricalFlag.isChecked() else 'recent')
        #Get parameter and station (selected in drop-down menu)
        parameter = ui.ParametersList.currentText()
        station = ui.StationsList.currentText()

        ###pass info!###
        exec_.get_data(hourly_daily, recent_hist, parameter, station, StartingDateTime, EndingDateTime, StartingTime, EndingTime)
Example #4
    def train(self,
              num_epochs,
              event_handler,
              reader=None,
              parallel=False,
              feed_order=None):
        """
        Train the model.

        Args:
            num_epochs: The number of epoch. An epoch will process all data in reader
            event_handler: The event handler. A function with type (ev:Event)->void
            reader:
            parallel: True if use multi-CPUs or multi-GPUs
            feed_order: Feeding order of reader. None will following the defining
                order in program

        Returns:

        """
        if parallel:
            raise NotImplementedError(
                "Parallel Executor version of trainer is not implemented")

        training_role = os.getenv("PADDLE_TRAINING_ROLE", "")
        if training_role == "PSERVER":
            with self._prog_and_scope_guard():
                exe = executor.Executor(self.place)
                exe.run()
                return

        self._train_by_executor(num_epochs, event_handler, reader, feed_order)
Example #5
    def Collect_Data(self):
        exec_ = ex.Executor()

        #Get Time
        StartingDateTime = str(ui.StartingDate.date().toPyDate())
        EndingDateTime = str(ui.EndingDate.date().toPyDate())
        StartingDateTime = StartingDateTime.replace('-', '')
        EndingDateTime = EndingDateTime.replace('-', '')

        if ui.HourlyFlag.isChecked():
            StartingTime = str(ui.StartingTime.time().toPyTime())
            EndingTime = str(ui.EndingTime.time().toPyTime())
            StartingDateTime += StartingTime[0:2]
            EndingDateTime += EndingTime[0:2]

        #Get h/d, r/h
        hourly_daily = ('h' if ui.HourlyFlag.isChecked() else 'd')
        recent_hist = ('h' if ui.HistoricalFlag.isChecked() else 'r')
        #get parameter and station (selected in drop-down menu)
        parameter = ui.ParametersList.currentText()
        station = ui.StationsList.currentText()

        #pass info!
        exec_.get_data(hourly_daily, recent_hist, parameter, station,
                       StartingDateTime, EndingDateTime)
Example #6
    def train(self, num_epochs, event_handler, reader=None, feed_order=None):
        """
        Start the train loop to train the model.

        Args:
            num_epochs(int): The number of epoch. An epoch will process all data in reader
            event_handler(callable): The event handler. A function with type (ev:Event)->void
            reader(callable): A reader creator object. See also
                :ref:`api_guide_python_reader` .
            feed_order(list): Feeding order of reader. None will following the defining
                order in program

        Returns:
            None
        """
        training_role = os.getenv("PADDLE_TRAINING_ROLE", "")
        if training_role == "PSERVER":
            with self._prog_and_scope_guard():
                exe = executor.Executor(self.place)
                exe.run()
                return
        if self.parallel:
            self._train_by_parallel_executor(num_epochs, event_handler, reader,
                                             feed_order)
        else:
            self._train_by_executor(num_epochs, event_handler, reader,
                                    feed_order)
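Given the signature above, a short usage sketch of the call site. The trainer, train_reader, and feed names are placeholders, and the event attribute names are assumptions based on how EndStepEvent is constructed in the executor loop of Example #29:

    def event_handler(event):
        # Report progress at the end of every step; the epoch/step attribute
        # names are assumed from the EndStepEvent(epoch_id, step_id) construction.
        if isinstance(event, EndStepEvent):
            print("epoch %d, step %d done" % (event.epoch, event.step))

    trainer.train(num_epochs=10,
                  event_handler=event_handler,
                  reader=train_reader,
                  feed_order=['x', 'y'])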
Example #7
    def test_exec_suite(self):
        dst = io.StringIO()
        verb = formatter.TextFormatter.SUCCESS
        fmt = formatter.TextFormatter(destination=dst, verbosity=verb)
        pv = PvCheck(executor.Executor(), fmt)

        sections = [
            Section(".TEST", ["echo1"]),
            Section(".ARGS", ["[OUT]\nfoo"]),
            Section("OUT", ["foo"]),
            Section(".TEST", ["echo2"]),
            Section(".ARGS", ["[OUT]\nbar"]),
            Section("OUT", ["foo"]),
            Section(".TEST", ["echo3"]),
            Section(".ARGS", ["[OUT]\nfoo"]),
            Section("OUT", ["foo"]),
            Section("NOTFOUND", ["notfound"])
        ]
        failures = pv.exec_suite(TestSuite(sections), ["echo"])
        exp = """OUT: OK
OUT: line 1 is wrong  (expected 'foo', got 'bar')
OUT: OK
NOTFOUND: missing section
"""
        self.assertEqual(failures, 2)
        self.assertEqual(dst.getvalue(), exp)
Example #8
    def train(self, num_epochs, event_handler, reader=None, feed_order=None):
        """
        Train the model.

        Args:
            num_epochs: The number of epoch. An epoch will process all data in reader
            event_handler: The event handler. A function with type (ev:Event)->void
            reader:
            feed_order: Feeding order of reader. None will following the defining
                order in program

        Returns:

        """
        training_role = os.getenv("PADDLE_TRAINING_ROLE", "")
        if training_role == "PSERVER":
            with self._prog_and_scope_guard():
                exe = executor.Executor(self.place)
                exe.run()
                return
        if self.parallel:
            self._train_by_parallel_executor(num_epochs, event_handler, reader,
                                             feed_order)
        else:
            self._train_by_executor(num_epochs, event_handler, reader,
                                    feed_order)
Example #9
def main():
    ''' Attempts to dump an M95160W EEPROM using an FT2232H. '''
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(process)d - [%(levelname)s] %(message)s',
    )
    log = logging.getLogger()

    # NOTE: Enabling debug logging has an impact on clock jitter!
    # log.setLevel(logging.DEBUG)

    # We're using queues to communicate with the main execution process -
    # which is responsible for doing the actual bit banging. This is in
    # order to (hopefully) reduce clock jitter.
    log.debug("Setting up requests queue")
    request = multiprocessing.Queue()
    log.debug("Setting up response queue")
    response = multiprocessing.Queue()

    # Kick off the bit banger.
    log.debug("Setting up bit banger")
    banger = executor.Executor(request, response)
    banger.start()

    # Push in a READ message - per page 16 of ST 022580 Rev 8.
    log.info("Sending READ starting from address 0x0000")

    # Start the READ from 0x0000. Per page 23 of ST 022580 Rev 8, if we
    # continue to drive CS low - which we do as part of the banger - then
    # "the internal address register is incremented automatically". This
    # allows us to read the ENTIRE contents of the EEPROM with a "single READ
    # instruction". Just bang in a read, keep CS low, and keep reading until
    # we've had our fill.
    size = 16384
    operation = [0, 0, 0, 0, 0, 0, 1, 1]
    operation.extend([0b0] * 16)
    request.put({
        "bits": operation,
        "size": size,
    })

    # ...and fetch the response!
    log.info("Starting read, will output to %s", 'eeprom.bin')
    with open('eeprom.bin', 'wb') as fout:
        read = 0

        while read < size:
            payload = response.get()
            log.debug(
                "Response from EEPROM was: %s (%s)",
                "{0:08b}".format(bits_to_bytes(payload)),
                "0x{0:02x}".format(bits_to_bytes(payload)),
            )
            read += len(payload)
            fout.write(struct.pack('B', bits_to_bytes(payload)))

    # Done, so report and shut down the bit banger.
    log.info("Read %s bits of EEPROM!", read)
    banger.terminate()
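The snippet relies on a bits_to_bytes helper that is not shown. A minimal sketch of what such a helper might look like, assuming the payload is an MSB-first list of 0/1 integers (the name comes from the calls above; the bit order is an assumption):

    def bits_to_bytes(bits):
        # Collapse a list of 0/1 integers into a single integer value, MSB first.
        value = 0
        for bit in bits:
            value = (value << 1) | bit
        return value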
Example #10
 def __init__(self, v, e, activation_func, problem_type, epochs,
              learning_rate):
     self.node_collection = v
     self.topo_node_list = executor.Executor(
         copy.copy(self.node_collection.node_list()), e).topoSort()
     self.func = activators.Activator_factory(
         activation_func).get_activator()
     self.problem_type = problem_type
     self.epochs = epochs
     self.learning_rate = learning_rate
Example #11
 def run_target(self):
     # Trigger linearize to remove complicated expressions
     q = executor.Executor(self.cmd, self.cur_input, self.tmp_dir, bitmap=self.bitmap, argv=["-l", "1"])
     ret = q.run(self.state.timeout)
     logger.debug("Total=%d s, Emulation=%d s, Solver=%d s, Return=%d"
                  % (ret.total_time,
                     ret.emulation_time,
                     ret.solving_time,
                     ret.returncode))
     return q, ret
Example #12
 def __init__(self, v, e, dataset, batch_size, learning_rate, epochs, loss,
              optimizer):
     self.node_collection = v
     self.topo_node_list = executor.Executor(
         copy.copy(self.node_collection.node_list()), e).topoSort()
     self.train_loader, self.test_loader = torch_datasets.Dataset(
         dataset, batch_size).get_dataset()
     self.learning_rate = learning_rate
     self.epochs = epochs
     self.criterion = torch_loss.Loss(loss).loss
     self.optimizer = torch_optim.Optimizer(optimizer).optimizer
     self.model = self.build_model()
Example #13
    def _save_checkpoint(self, epoch_id, step_id):
        assert self.checkpoint_cfg

        if epoch_id % self.checkpoint_cfg.epoch_interval == 0 and step_id % self.checkpoint_cfg.step_interval == 0:
            exe = executor.Executor(self.place)
            io.save_checkpoint(
                executor=exe,
                checkpoint_dir=self.checkpoint_cfg.checkpoint_dir,
                trainer_id=self.trainer_id,
                trainer_args=self._get_checkpoint_save_args(epoch_id, step_id),
                main_program=self.train_program,
                max_num_checkpoints=self.checkpoint_cfg.max_num_checkpoints)
Example #14
    def test_execute_program(self):

        exe = executor.Executor()

        file_path = "D:\\longTimeProgram1.py"

        with open(file_path) as source_file:
            content = source_file.read()

        test, result = exe.execute_program(content)

        self.assertEqual(test, 1)
Example #15
    def save_params(self, param_path):
        """
        Save all parameters into :code:`param_path`.

        Args:
            param_path(str): The path to save parameters.

        Returns:
            None
        """
        with self._prog_and_scope_guard():
            exe = executor.Executor(self.place)
            io.save_persistables(exe, dirname=param_path)
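Calling it is a one-liner; the trainer variable and the path are placeholders:

    # Persist all persistable variables of the trained program under ./saved_params.
    trainer.save_params('./saved_params')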
Example #16
 def __init__(self, launcher, replay_file, interactive, target, retry_count, msg_log, faithful, cont_after_succ, cont_after_branch):
     self.launcher = launcher
     self.replay_file = replay_file
     self.interactive = interactive
     self.target = target
     self.states = {}
     self.retry_count = retry_count
     self.executor = executor.Executor(launcher)
     self.executor.setup_err_handler()
     self.msg_log = msg_log
     self.faithful = faithful
     self.ops = []
     self.cont_after_succ = cont_after_succ
     self.cont_after_branch = cont_after_branch
Example #17
 def __init__(self, v, e, activation_func, problem_type, epochs,
              learning_rate):
     self.node_collection = v
     self.topo_node_list = executor.Executor(
         copy.copy(self.node_collection.node_list()), e).topoSort()
     self.func = activators.Activator_factory(
         activation_func).get_activator()
     self.problem_type = problem_type
     self.epochs = epochs
     self.learning_rate = learning_rate
     self.loss = []
     self.accuracy = []
     for node in self.get_not_input_nodelist():
         node.set_biasNode()
Example #18
 def __init__(self, batch_number, num_of_operations, launcher):
     self.batch_number = batch_number
     self.num_of_operations = num_of_operations
     self.launcher = launcher
     self.states = {}
     self.target_queue = Queue.PriorityQueue()
     self.cur_steps = []
     self.batch_no = -1
     self.replay_fail_count = 0
     self.target_restart_count = 0
     self.curr_target = None
     self.restart_count = 0
     self.executor = executor.Executor(launcher)
     self.executor.setup_err_handler()
Example #19
    def __init__(self, infer_func, param_path, place=None, parallel=False):
        self.param_path = param_path
        self.scope = core.Scope()
        self.parallel = parallel
        self.place = check_and_get_place(place)

        self.inference_program = framework.Program()
        with framework.program_guard(self.inference_program):
            with unique_name.guard():
                self.predict_var = infer_func()

        with self._prog_and_scope_guard():
            # load params from param_path into scope
            io.load_params(executor.Executor(self.place), param_path)

        if parallel:
            with self._prog_and_scope_guard():
                self.exe = parallel_executor.ParallelExecutor(
                    use_cuda=isinstance(self.place, core.CUDAPlace),
                    loss_name=self.predict_var.name)
        else:
            self.exe = executor.Executor(self.place)

        self.inference_program = self.inference_program.clone(for_test=True)
Example #20
    def _test_by_executor(self, reader, feed_order, fetch_list):
        with executor.scope_guard(self.scope):
            feed_var_list = build_feed_var_list(self.test_program, feed_order)
            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
            exe = executor.Executor(self.place)
            accumulated = len(fetch_list) * [0]
            count = 0
            for data in reader():
                outs = exe.run(program=self.test_program,
                               feed=feeder.feed(data),
                               fetch_list=fetch_list)
                accumulated = [x[0] + x[1][0] for x in zip(accumulated, outs)]
                count += 1

            return [x / count for x in accumulated]
Example #21
def execute_file(file):
    audio_file = audio.Audio(file)
    e = executor.Executor()

    audio_data = audio_file.read()
    if audio_data[0] == translator.python_startcode:
        python_code = t.audio_to_python(audio_data)

        # Remove the audio file path from argv before executing the code.
        if file in argv:
            argv.remove(file)

        exec(python_code)
    elif audio_data[0] == translator.executor_startcode:
        executable_data = t.audio_to_executor(audio_data)
        e.execute(executable_data)
Example #22
    def __init__(self,
                 train_func,
                 optimizer,
                 param_path=None,
                 place=None,
                 parallel=False):
        self.__stop = False
        self.parallel = parallel
        # 1. we need to generate a framework.Program by calling
        # program_func. Reference: fluid.program_guard in
        # test_word2vec.py
        if not isinstance(optimizer, opt_module.Optimizer):
            raise TypeError("The optimizer should be an instance of Optimizer")

        self.scope = core.Scope()

        self.startup_program = framework.Program()
        self.train_program = framework.Program()

        with framework.program_guard(self.train_program, self.startup_program):
            program_func_outs = train_func()
            self.train_func_outputs = program_func_outs if isinstance(
                program_func_outs, list) else [program_func_outs]
            self.test_program = self.train_program.clone()
            if not isinstance(optimizer, opt_module.Optimizer):
                raise TypeError(
                    "The optimizer should be an instance of Optimizer")
            # The first element of program_func_outs is the loss.
            loss = self.train_func_outputs[0]
            optimize_ops, params_grads = optimizer.minimize(loss)

        self.place = check_and_get_place(place)

        self._dist_transpile_if_necessary(optimize_ops, params_grads)

        # 2. move the default_main_program to self.program and run the
        # default_startup program on an empty core.Scope()
        # Run startup program
        with self._prog_and_scope_guard():
            exe = executor.Executor(place)
            exe.run(self.startup_program)

        if param_path:
            # load params from param_path into scope
            io.load_persistables(exe, dirname=param_path)
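As with the Inferencer constructor in Example #1, a short construction sketch. The Trainer class name, the layer calls, and the optimizer choice are assumptions based only on the signature and the Optimizer type check shown above:

    import paddle.fluid as fluid

    def train_func():
        # Build the training network; the loss Variable must come first in the outputs.
        x = fluid.layers.data(name='x', shape=[13], dtype='float32')
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        pred = fluid.layers.fc(input=x, size=1, act=None)
        cost = fluid.layers.square_error_cost(input=pred, label=y)
        return fluid.layers.mean(cost)

    # Hypothetical call matching the signature above.
    trainer = Trainer(train_func=train_func,
                      optimizer=fluid.optimizer.SGD(learning_rate=0.001),
                      place=fluid.CPUPlace())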
Example #23
    def test_exec_single_test(self):
        dst = io.StringIO()
        fmt = formatter.TextFormatter(destination=dst)
        pv = PvCheck(executor.Executor(), fmt)

        test = TestCase(
            "echo",
            [Section(".ARGS", ["[OUT]\nfoo"]),
             Section("OUT", ["foo"])])

        ok = pv.exec_single_test(test, ["echo"])
        exp = """TEST: echo
COMMAND LINE:
echo [OUT]
foo
OUT: OK
"""
        self.assertTrue(ok)
        self.assertEqual(dst.getvalue(), exp)
Example #24
    def run(self):

        taskCounter = 0
        total = len(self.spoutList)
        while (self.numRequests != 0):

            taskToSchedule = task.Task("Task " + str(taskCounter), self.env)
            taskCounter = taskCounter + 1

            spoutNode = makeChoice(total, self.spoutList)
            print("Workload %s sends out %s to %s at time %d" %
                  (self.id, taskToSchedule.getID(), spoutNode.getName(),
                   self.env.now))

            executorProcess = executor.Executor(self.env, spoutNode,
                                                taskToSchedule)
            self.env.process(executorProcess.run())

            yield self.env.timeout(1)
Example #25
    def _train_by_executor(self, num_epochs, event_handler, reader, feed_order):
        """
        Train by Executor and single device.

        Args:
            num_epochs: The number of epochs to train for.
            event_handler: The event handler callback.
            reader: A reader creator object that yields the training data.
            feed_order: Feeding order of the reader.

        Returns:
            None
        """
        with self._prog_and_scope_guard():
            feed_var_list = build_feed_var_list(self.train_program, feed_order)
            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
            exe = executor.Executor(self.place)
            reader = feeder.decorate_reader(reader, multi_devices=False)
            self._train_by_any_executor(event_handler, exe, num_epochs, reader)
Example #26
def main():
    algorithm1 = '''
        a = sin(30);
        position = {"x": 1, "y": 2};
        pose = {"x": 0, "y": 1};
        if (pose.x - position.x > 0.01 || pose.y - position.y > 0.01)
        {
            skip
        } 
        else 
        {
            skip
        };
        angles = {"yaw": 0, "pitch":3.14159/4, "roll":0};
        position.y = 2*2 + sin(30);
        # receive(m_Idle){(msg_MoveToPosition, position, { m_MoveToPosition(position) })};
        # send(id_arm, msg_Rotate, angles)
    '''
    visitor = nv.Executor('comp1')
    visitor.execute(algorithm1)
Example #27
    def __init__(self, network_func, param_path=None, place=None):
        # 1. we need to generate a framework.Program by calling
        # network_func. Reference: fluid.program_guard in test_word2vec.py

        # 2. move the default_main_program to self.program.

        # 3. run the default_startup program.

        # 4. load params from param_path into scope
        self.scope = core.Scope()
        self.place = place
        self.startup_program = framework.Program()
        # TODO: generate the startup_program with network_func

        exe = executor.Executor(place)
        exe.run(self.startup_program, scope=self.scope)

        if param_path:
            # load params from param_path into scope
            io.load_persistables(exe, dirname=param_path)
Example #28
    def start(self):
        print 'Starting dellve worker ... OK'

        # Create executor
        self._executor = executor.Executor(self._benchmarks)

        # Start executor
        self._executor.start()

        # Start server
        self._server.start()

        # Wait forever...
        self._stop.wait()

        # Stop benchmark
        self._executor.stop_benchmark()

        # Join executor process
        self._executor.join()

        # Stop server
        self._server.stop()
Example #29
    def _train_by_executor(self, num_epochs, event_handler, reader, feed_order):
        """
        Train by Executor and single device.

        Args:
            num_epochs: The number of epochs to train for.
            event_handler: The event handler callback.
            reader: A reader creator object that yields the training data.
            feed_order: Feeding order of the reader.

        Returns:
            None
        """
        with self._prog_and_scope_guard():
            exe = executor.Executor(self.place)
            if feed_order is None:
                feed_var_list = [
                    var
                    for var in self.train_program.global_block(
                    ).vars.itervalues()
                    if hasattr(var, 'is_data') and var.is_data
                ]
            else:
                feed_var_list = [
                    self.train_program.global_block().var(var_name)
                    for var_name in feed_order
                ]

            feeder = data_feeder.DataFeeder(
                feed_list=feed_var_list, place=self.place)
            for epoch_id in range(num_epochs):
                event_handler(BeginEpochEvent(epoch_id))
                for step_id, data in enumerate(reader()):
                    event_handler(BeginStepEvent(epoch_id, step_id))
                    exe.run(feed=feeder.feed(data), fetch_list=[])
                    event_handler(EndStepEvent(epoch_id, step_id))
                event_handler(EndEpochEvent(epoch_id))
Example #30
def main(uri, use_gui=True):
    multiprocessing.freeze_support()
    # get the existing QApplication instance, or creating a new one if
    # necessary.
    app = QtGui.QApplication.instance()
    if app is None:
        app = QtGui.QApplication(sys.argv)

#    validate(json_args)

    # Check to see if the URI exists in the current directory.  If not, assume
    # it exists in the directory where this module exists.
    if not os.path.exists(uri):
        file_path = os.path.dirname(os.path.abspath(__file__))
        uri = os.path.join(file_path, os.path.basename(uri))

        # If the URI still doesn't exist, raise a helpful exception.
        if not os.path.exists(uri):
            raise Exception('Can\'t find the file %s.'%uri)

    window = base_widgets.MainWindow(ModelUI, uri)
    if use_gui:
        window.show()
        result = app.exec_()
    else:
        orig_args = json.loads(open(json_args).read())
        args = getFlatDefaultArgumentsDictionary(orig_args)
        thread = executor.Executor()
        thread.addOperation('model', args, orig_args['targetScript'])

        thread.start()

        while thread.isAlive() or thread.hasMessages():
            message = thread.getMessage()
            if message is not None:
                print(message.rstrip())
            time.sleep(0.005)