Example 1
def test_pipespeed():
    c, d = processing.Pipe()
    cond = processing.Condition()
    elapsed = 0
    iterations = 1

    while elapsed < delta:
        iterations *= 2

        p = processing.Process(target=pipe_func, args=(d, cond, iterations))
        cond.acquire()
        p.start()
        cond.wait()
        cond.release()

        result = None
        t = _timer()

        while result != 'STOP':
            result = c.recv()

        elapsed = _timer() - t
        p.join()

    print iterations, 'objects passed through connection in', elapsed, 'seconds'
    print 'average number/sec:', iterations / elapsed
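The helpers pipe_func, _timer and delta are defined elsewhere in the original test module and are not shown here. A minimal sketch of a pipe_func that is compatible with the loop above (an assumption, not the original code) could look like this:

def pipe_func(conn, cond, iterations):
    # Tell the waiting parent that the child has started.
    cond.acquire()
    cond.notify()
    cond.release()
    # Push `iterations` objects through the connection, then a sentinel.
    for i in xrange(iterations):
        conn.send('hello')
    conn.send('STOP')
    conn.close()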
Example 2
    def create(cls, operation_spec, result_directory, debug=False, input_paths=[]):
        """
        A factory method that creates a WEKA operation based on the
        information given in the operation specification operation_spec
        """
        assert(operation_spec["type"] == "weka_classification")
        # Determine all parameter combinations that should be tested
        parameter_settings = cls._get_parameter_space(operation_spec)
        
        # Read the command template from a file
        template_file = open(os.path.join(pySPACE.configuration.spec_dir,
                                               "operations",
                                               "weka_templates",
                                               operation_spec["template"]),
                             'r')
        command_template = template_file.read()
        template_file.close() 

        # number of processes
        if "runs" in operation_spec:
            number_processes = len(input_paths) * len(parameter_settings) * \
                           operation_spec["runs"]
        else: # approximate the number of processes 
            runs = []
            for dataset_dir in input_paths:
                collection = BaseDataset.load(dataset_dir)
                runs.append(collection.meta_data["runs"])
            runs = max(runs)
            number_processes = len(input_paths) * len(parameter_settings) * \
                               runs
        
        if debug == True:
            # To better debug creation of processes we don't limit the queue 
            # and create all processes before executing them
            processes = processing.Queue()
            cls._createProcesses(processes, result_directory, operation_spec, 
                                 parameter_settings, input_paths,
                                 command_template)
            # create and return the weka operation object
            return cls(processes, operation_spec, result_directory, 
                       number_processes)
        else:
            # Create all processes by calling a recursive helper method in
            # another thread so that already created processes can be executed in
            # parallel. Therefore a queue is used whose size is limited to
            # guarantee that not too many objects are created (because this
            # costs memory). However, the actual number of 100 is arbitrary and
            # might be reviewed.
            processes = processing.Queue(100)
            create_process = processing.Process(target=cls._createProcesses,
                             args=( processes, result_directory, operation_spec, 
                                    parameter_settings, input_paths,
                                    command_template))
            create_process.start()            
            # create and return the weka operation object
            return cls(processes, operation_spec, result_directory, 
                       number_processes, create_process)        
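cls._createProcesses is not part of this excerpt. The comments describe a producer/consumer pattern built on a bounded queue: the creating process blocks in put() once 100 process objects are pending, while the operation consumes them. A small, self-contained illustration of that pattern with the processing module (not pySPACE code; all names here are chosen for the sketch) is:

import processing

def produce(queue, n_items):
    # put() blocks whenever the queue already holds 100 unconsumed items,
    # so at most 100 objects exist ahead of the consumer.
    for i in range(n_items):
        queue.put(i)
    queue.put('STOP')  # sentinel: production finished

def consume_all(queue):
    item = queue.get()
    while item != 'STOP':
        # ... execute or schedule the item here ...
        item = queue.get()

if __name__ == '__main__':
    queue = processing.Queue(100)
    producer = processing.Process(target=produce, args=(queue, 1000))
    producer.start()
    consume_all(queue)
    producer.join()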
Example 3
def test_join_timeout():
    p = processing.Process(target=join_timeout_func)
    p.start()

    print 'waiting for process to finish'

    while 1:
        p.join(timeout=1)
        if not p.isAlive():
            break
        print '.',
        sys.stdout.flush()
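join_timeout_func is not listed; anything that outlives a few one-second join timeouts will do. A minimal sketch (assumed, not the original):

import time

def join_timeout_func():
    # Sleep longer than a single join(timeout=1) so the parent loops and prints dots.
    print '\tchild sleeping'
    time.sleep(5.5)
    print '\tchild terminating'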
Example 4
    def create(cls,
               operation_spec,
               result_directory,
               debug=False,
               input_paths=[]):
        """ A factory method that creates an Analysis operation based on the 
        information given in the operation specification operation_spec
        """
        assert (operation_spec["type"] == "analysis")
        input_path = operation_spec["input_path"]
        summary = BaseDataset.load(
            os.path.join(pySPACE.configuration.storage, input_path))
        data_dict = summary.data

        # Determine the parameters that should be analyzed
        parameters = operation_spec["parameters"]

        # Determine the metrics that should be plotted
        metrics = operation_spec["metrics"]

        # Determine how many processes will be created
        number_parameter_values = [
            len(set(data_dict[param])) for param in parameters
        ]
        number_processes = cls._numberOfProcesses(0,
                                                  number_parameter_values) + 1

        if debug == True:
            # To better debug creation of processes we don't limit the queue
            # and create all processes before executing them
            processes = processing.Queue()
            cls._createProcesses(processes, result_directory, data_dict,
                                 parameters, metrics, True)
            return cls(processes, operation_spec, result_directory,
                       number_processes)
        else:
            # Create all plot processes by calling a recursive helper method in
            # another thread so that already created processes can be executed
            # although creation of processes is not finished yet. Therefore a
            # queue is used whose size is limited to guarantee that not too many
            # objects are created (since this costs memory). However, the actual
            # number of 100 is arbitrary and might be changed according to the
            # system at hand.
            processes = processing.Queue(100)
            create_process = processing.Process(
                target=cls._createProcesses,
                args=(processes, result_directory, data_dict, parameters,
                      metrics, True))
            create_process.start()
            # create and return the operation object
            return cls(processes, operation_spec, result_directory,
                       number_processes, create_process)
Example 5
def test_sharedvalues():
    values = [('i', 10), ('h', -2), ('d', 1.25)]
    arrays = [('i', range(100)), ('d', [0.25 * i for i in range(100)]),
              ('H', range(1000))]

    shared_values = [processing.Value(id, v) for id, v in values]
    shared_arrays = [processing.Array(id, a) for id, a in arrays]

    p = processing.Process(target=sharedvalues_func,
                           args=(values, arrays, shared_values, shared_arrays))
    p.start()
    p.join()

    assert p.getExitCode() == 0
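sharedvalues_func is expected to verify that each shared object holds the same data as its plain template and to exit with code 0. A sketch consistent with that usage (an assumption):

def sharedvalues_func(values, arrays, shared_values, shared_arrays):
    # A failing assert gives the child a non-zero exit code,
    # which the parent checks via getExitCode().
    for (typecode, value), shared in zip(values, shared_values):
        assert shared.value == value
    for (typecode, array), shared in zip(arrays, shared_arrays):
        for i in range(len(array)):
            assert shared[i] == array[i]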
Example 6
def test_semaphore():
    sema = processing.Semaphore(3)
    mutex = processing.RLock()
    running = processing.Value('i', 0)

    processes = [
        processing.Process(target=semaphore_func, args=(sema, mutex, running))
        for i in range(10)
    ]

    for p in processes:
        p.start()

    for p in processes:
        p.join()
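semaphore_func is not shown; its role is to let at most three of the ten children run their critical section at the same time. A possible sketch (assumed):

import time, random

def semaphore_func(sema, mutex, running):
    # The semaphore admits at most 3 children at once.
    sema.acquire()

    mutex.acquire()
    running.value += 1
    print running.value, 'tasks are running'
    mutex.release()

    time.sleep(random.random() * 2)

    mutex.acquire()
    running.value -= 1
    print running.value, 'tasks are running'
    mutex.release()

    sema.release()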
Example 7
def test_queue():
    q = processing.Queue()

    p = processing.Process(target=queue_func, args=(q, ))
    p.start()

    o = None
    while o != 'STOP':
        try:
            o = q.get(timeout=0.3)
            print o,
            sys.stdout.flush()
        except Empty:
            print 'TIMEOUT'

    print
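queue_func is the producer feeding the queue; any function that sometimes pauses longer than the consumer's 0.3 s timeout and ends with a 'STOP' sentinel fits. A sketch (assumed):

import time, random

def queue_func(queue):
    # Random pauses, some longer than 0.3 s, so the parent occasionally
    # prints 'TIMEOUT' before the next item arrives.
    for i in range(30):
        time.sleep(0.5 * random.random())
        queue.put(i * i)
    queue.put('STOP')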
Example 8
def test_value():
    TASKS = 10
    running = processing.Value('i', TASKS)
    mutex = processing.Lock()

    for i in range(TASKS):
        processing.Process(target=value_func, args=(running, mutex)).start()

    while running.value > 0:
        time.sleep(0.08)
        mutex.acquire()
        print running.value,
        sys.stdout.flush()
        mutex.release()

    print
    print 'No more running processes'
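value_func is assumed to decrement the shared counter exactly once per child, so the main loop ends after TASKS decrements. A sketch:

import time, random

def value_func(running, mutex):
    # Pretend to work, then decrement the shared counter under the lock.
    time.sleep(random.random() * 4)
    mutex.acquire()
    running.value -= 1
    mutex.release()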
Example 9
def test_event():
    event = processing.Event()

    processes = [
        processing.Process(target=event_func, args=(event, )) for i in range(5)
    ]

    for p in processes:
        p.start()

    print 'main is sleeping'
    time.sleep(2)

    print 'main is setting event'
    event.set()

    for p in processes:
        p.join()
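event_func simply has to block on the event until the main process sets it about two seconds later. A minimal sketch (assumed):

def event_func(event):
    print '\tchild is waiting'
    event.wait()
    print '\tchild has woken up'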
Example 10
    def submit(self, plan):
        """
    Starts a subprocess to manage the submission, and returns quickly.
    """
        tmp_dir = tempfile.mkdtemp(dir="/tmp", prefix="jobsub-")
        state = ServerSubmissionState(submission_state=State.SUBMITTED,
                                      tmp_dir=tmp_dir)
        state.save()  # Early save to get an "id"

        process = processing.Process(target=run_plan,
                                     args=(state.id, plan, tmp_dir),
                                     name=plan.name)
        process.setDaemon(True)
        process.start()

        state.pid = process.getPid()
        state.submission_state = State.RUNNING
        state.save()

        return SubmissionHandle(id=state.id)
Example 11
def test_condition():
    cond = processing.Condition()

    p = processing.Process(target=condition_func, args=(cond, ))
    print cond

    cond.acquire()
    print cond
    cond.acquire()
    print cond

    p.start()

    print 'main is waiting'
    cond.wait()
    print 'main has woken up'

    print cond
    cond.release()
    print cond
    cond.release()

    p.join()
    print cond
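condition_func must acquire the condition (which only succeeds once the parent's wait() releases the underlying lock) and then notify. A sketch (assumed, not the original):

import time

def condition_func(cond):
    # The parent holds the lock until it calls wait(), which releases it
    # and lets this acquire() succeed.
    cond.acquire()
    print '\tchild has acquired the condition'
    time.sleep(2)
    print '\tchild is notifying'
    cond.notify()
    cond.release()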
Example 12
File: mmlf.py Project: pyspace/test
    def create(cls,
               operation_spec,
               result_directory,
               debug=False,
               input_paths=[]):
        """
        A factory method that creates an MMLF operation based on the
        information given in the operation specification operation_spec
        """
        assert (operation_spec["type"] == "mmlf")

        # The generic world configuration YAML file
        world_conf = """
worldPackage : %s
environment:
%s
agent:
%s
monitor:
    policyLogFrequency : 100000
%s
"""
        # Create directory for the experiment
        world_name = operation_spec['world_name']
        world_path = "%s/config/%s" % (result_directory, world_name)
        if not os.path.exists(world_path):
            os.makedirs(world_path)

        # Determine all parameter combinations that should be tested
        parameter_settings = cls._get_parameter_space(operation_spec)

        # If the operation spec defines parameters for a generalized domain:
        if "generalized_domain" in operation_spec:
            # We have to test each parameter setting in each instantiation of
            # the generalized domain. This can be achieved by computing the
            # crossproduct of parameter settings and domain settings
            augmented_parameter_settings = []
            for parameter_setting in parameter_settings:
                for domain_parameter_setting in operation_spec[
                        "generalized_domain"]:
                    instantiation = dict(parameter_setting)
                    instantiation.update(domain_parameter_setting)
                    augmented_parameter_settings.append(instantiation)
            parameter_settings = augmented_parameter_settings

        # Create and remember all worlds for the given parameter_settings
        world_pathes = []

        # for all parameter setting
        for parameter_setting in parameter_settings:
            # Add 4 blanks to all lines in templates
            environment_template = operation_spec['environment_template']
            environment_template = \
                    "\n".join("    " + line
                                for line in environment_template.split("\n"))
            agent_template = operation_spec['agent_template']
            agent_template = \
                    "\n".join("    " + line
                                for line in agent_template.split("\n"))
            monitor_conf = \
                operation_spec['monitor_conf'] if 'monitor_conf' in operation_spec else ""
            monitor_conf = \
                    "\n".join("    " + line
                                for line in monitor_conf.split("\n"))

            # Instantiate the templates
            environment_conf = environment_template
            agent_conf = agent_template
            for parameter, value in parameter_setting.iteritems():
                environment_conf = environment_conf.replace(
                    parameter, str(value))
                agent_conf = agent_conf.replace(parameter, str(value))

            def get_parameter_str(parameter_name):
                return "".join(subpart[:4]
                               for subpart in parameter_name.split("_"))

            configuration_str = "{" + "}{".join([
                "%s:%s" % (get_parameter_str(parameter), str(value)[:6])
                for parameter, value in parameter_setting.iteritems()
            ]) + "}"
            configuration_str = configuration_str.replace('_', '')

            world_file_name = "world_%s.yaml" % configuration_str

            open(os.path.join(world_path, world_file_name), 'w').write(
                world_conf %
                (world_name, environment_conf, agent_conf, monitor_conf))
            world_pathes.append(os.path.join(world_path, world_file_name))

        number_processes = len(world_pathes) * int(operation_spec["runs"])

        if debug is True:
            # To better debug creation of processes we don't limit the queue
            # and create all processes before executing them
            processes = processing.Queue()
            cls._createProcesses(processes, world_pathes, operation_spec,
                                 result_directory)
            return cls(processes, operation_spec, result_directory, world_name,
                       number_processes)
        else:
            # Create all processes by calling a recursive helper method
            # in another thread so that already created processes can be
            # executed although creation of processes is not finished yet.
            # Therefore a queue is used whose size is limited to guarantee that
            # not too many objects are created (since this costs memory).
            # However, the actual number of 100 is arbitrary and might be
            # changed according to the system at hand.
            processes = processing.Queue(100)
            create_process = processing.Process(target=cls._createProcesses,
                                                args=(processes, world_pathes,
                                                      operation_spec,
                                                      result_directory))
            create_process.start()
            # create and return the operation object
            return cls(processes, operation_spec, result_directory, world_name,
                       number_processes, create_process)
Example 13
        models = create_model(flux)

        # Processing support?
        if has_processing:

            # Wait until one thread has finished
            while len(processing.activeChildren()) >= max_threads:
                time.sleep(60)

            # Set arguments
            args = (loge, emin, emax, models)
            kwargs = {}

            # Generate pull distribution
            p = processing.Process(target=create_pull,
                                   args=args,
                                   kwargs=kwargs)
            p.start()
            print("Process emin=%.4f emax=%.4f started." % (emin, emax))

            # Wait some time
            time.sleep(1)

        # ... no
        else:
            create_pull(loge, emin, emax, models)

    # Processing support
    if has_processing:

        # Wait until all threads finished
Example 14
    def create(cls,
               operation_spec,
               result_directory,
               debug=False,
               input_paths=[]):
        """
        A factory method that creates an Analysis operation based on the
        information given in the operation specification operation_spec.
        If debug is TRUE the creation of the Analysis Processes will not
        be in a separated thread.
        """
        assert (operation_spec["type"] == "comp_analysis")
        input_path = operation_spec["input_path"]
        summary = BaseDataset.load(
            os.path.join(pySPACE.configuration.storage, input_path))
        data_dict = summary.data
        ## Done

        # Determine the parameters that should be analyzed
        parameters = operation_spec["parameters"]

        # Determine dependent parameters, which don't get extra resolution
        try:
            dep_par = operation_spec["dep_par"]
        except KeyError:
            dep_par = []

        # Determine the metrics that should be plotted
        spec_metrics = operation_spec["metrics"]
        metrics = []
        for metric in spec_metrics:
            if data_dict.has_key(metric):
                metrics.append(metric)
            else:
                import warnings
                warnings.warn('The metric "' + metric +
                              '" is not contained in the results csv file.')
        if len(metrics) == 0:
            warnings.warn(
                'No metric available from spec file, default to first dict entry.'
            )
            metrics.append(data_dict.keys()[0])

        # Determine how many processes will be created
        number_parameter_values = [
            len(set(data_dict[param])) for param in parameters
        ]
        number_processes = cls._numberOfProcesses(0,
                                                  number_parameter_values) + 1

        logscale = False
        if operation_spec.has_key('logscale'):
            logscale = operation_spec['logscale']

        markertype = 'x'
        if operation_spec.has_key('markertype'):
            markertype = operation_spec['markertype']

        if debug == True:
            # To better debug creation of processes we don't limit the queue
            # and create all processes before executing them
            processes = processing.Queue()
            cls._createProcesses(processes, result_directory, data_dict,
                                 parameters, dep_par, metrics, logscale,
                                 markertype, True)
            return cls(processes, operation_spec, result_directory,
                       number_processes)
        else:
            # Create all plot processes by calling a recursive helper method in
            # another thread so that already created processes can be executed
            # although creation of processes is not finished yet. Therefore a
            # queue is used whose size is limited to guarantee that not too many
            # objects are created (since this costs memory). However, the actual
            # number of 100 is arbitrary and might be reviewed.
            processes = processing.Queue(100)
            create_process = processing.Process(
                target=cls._createProcesses,
                args=(processes, result_directory, data_dict, parameters,
                      dep_par, metrics, logscale, markertype, True))
            create_process.start()
            # create and return the comp_analysis operation object
            return cls(processes, operation_spec, result_directory,
                       number_processes, create_process)
Example 15
from PIL import Image
import processing
import utils
import sys
import numpy as np

sys.setrecursionlimit(10000)

filename = '54.png'
img = Image.open(filename).convert('L')
w, h = img.size
pixels = img.load()
pixels = np.reshape([pixels[i, j] for j in range(h) for i in range(w)], (w, h))
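# Note: `processing` here appears to be a project-local image-processing module
# (its Process takes width, height and pixel data), not the process library
# used in the other examples.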
process = processing.Process(w, h, pixels)
img = process.make()
Example 16
    def __startWorker(self):
    #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
        self.t = pc.Process(target=self._worker)
        self.t.setDaemon(True)
        self.t.start()
Example 17
    def startProcess(self, target, **kwargs):
        process = processing.Process(target=target, **kwargs)
        self.processes.append(process)
        process.start()
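A hypothetical container class showing how startProcess could be used; only startProcess itself comes from the snippet, the rest is assumed:

class ProcessRunner(object):
    def __init__(self):
        self.processes = []

    def startProcess(self, target, **kwargs):
        process = processing.Process(target=target, **kwargs)
        self.processes.append(process)
        process.start()

    def joinAll(self):
        # Wait for every started process to finish.
        for process in self.processes:
            process.join()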
Example 18
    def create(cls,
               operation_spec,
               result_directory,
               debug=False,
               input_paths=[]):
        """ A factory method that creates the processes which form an operation
        based on the  information given in the operation specification, *operation_spec*.

        In debug mode this is done in serial. In the other default mode,
        at the moment 4 processes are created in parallel and can be immediately
        executed. So generation of processes and execution are made in parallel.
        This kind of process creation is done independently from the backend.

        For huge parameter spaces this is necessary!
        Otherwise numerous processes are created and corresponding data is loaded
        but the concept of spreading the computation to different processors
        can not really be used, because process creation is blocking only
        one processor and memory space, but nothing more is done,
        till the processes are all created.

        .. todo:: Use :class:`~pySPACE.resources.dataset_defs.dummy.DummyDataset`
                  for empty data, when no input_path is given.
        """
        assert (operation_spec["type"] == "node_chain")

        # Determine all parameter combinations that should be tested
        parameter_settings = cls._get_parameter_space(operation_spec)

        ## Use node_chain parameter if no templates are given ##
        if not operation_spec.has_key("templates"):
            if operation_spec.has_key("node_chain"):
                operation_spec["templates"] = [
                    operation_spec.pop("node_chain")
                ]


#                    extract_key_str(operation_spec["base_file"],
#                                    keyword="node_chain")]
#                operation_spec.pop("node_chain")
            else:
                warnings.warn(
                    "Specify parameter 'templates' or 'node_chain' in your operation spec!"
                )
        elif operation_spec.has_key("node_chain"):
            operation_spec.pop("node_chain")
            warnings.warn(
                "node_chain parameter is ignored. Templates are used.")
        # load files in templates as dictionaries
        elif type(operation_spec["templates"][0]) == str:
            operation_spec["template_files"] = \
                copy.deepcopy(operation_spec["templates"])
            for i in range(len(operation_spec["templates"])):
                rel_node_chain_file = operation_spec["templates"][i]
                abs_node_chain_file_name = os.sep.join([
                    pySPACE.configuration.spec_dir, "node_chains",
                    rel_node_chain_file
                ])
                with open(abs_node_chain_file_name, "r") as read_file:
                    node_chain = read_file.read()
                    #node_chain = yaml.load(read_file)
                operation_spec["templates"][i] = node_chain

        storage = pySPACE.configuration.storage
        if not input_paths:
            raise Exception("No input datasets found in input_path %s in %s!" %
                            (operation_spec["input_path"], storage))

        # Get relative path
        rel_input_paths = [name[len(storage):] for name in input_paths]

        # Determine approximate number of runs
        if "runs" in operation_spec:
            runs = operation_spec["runs"]
        else:
            runs = []
            for dataset_dir in rel_input_paths:
                abs_collection_path = \
                        pySPACE.configuration.storage + os.sep \
                            + dataset_dir
                collection_runs = \
                        BaseDataset.load_meta_data(abs_collection_path).get('runs',1)
                runs.append(collection_runs)
            runs = max(runs)

        # Determine splits
        dataset_dir = rel_input_paths[0]
        abs_collection_path = \
                pySPACE.configuration.storage + os.sep + dataset_dir

        splits = BaseDataset.load_meta_data(abs_collection_path).get(
            'splits', 1)

        # Determine how many processes will be created
        number_processes = len(operation_spec["templates"]) * \
                           len(parameter_settings) * len(rel_input_paths) * \
                           runs * splits

        if debug:
            # To better debug creation of processes we don't limit the queue
            # and create all processes before executing them
            processes = processing.Queue()
            cls._createProcesses(processes, result_directory, operation_spec,
                                 parameter_settings, rel_input_paths)
            # create and return the operation object
            return cls(processes, operation_spec, result_directory,
                       number_processes)
        else:
            # Create all processes by calling a recursive helper method in
            # another thread so that already created processes can be executed in
            # parallel. Therefore a queue is used whose size is limited to
            # guarantee that not too many objects are created (because this
            # costs memory). However, the actual number of 4 is arbitrary and
            # might be changed according to the system at hand.
            processes = processing.Queue(4)
            create_process = \
                    processing.Process(target=cls._createProcesses,
                                       args=(processes, result_directory,
                                             operation_spec, parameter_settings,
                                             rel_input_paths))
            create_process.start()
            # create and return the operation object
            return cls(processes, operation_spec, result_directory,
                       number_processes, create_process)