Example #1
    def __init__(self, compute, cluster=False, depends=None, server=False):
        """
        Class to submit and run task(s); the same class gathers results at the end of a compute.
 
        Parameters
        ----------
        compute        : function
                         Compute function to be run.
        cluster        : bool
                         Set it to True to distribute your job across a cluster or multiple CPU cores of a computer. When set to True, make sure that dispynode.py is running; see https://pgiri.github.io/dispy/dispynode.html . Set it to False to run your tasks locally on a single CPU core of a computer.
        depends        : list
                         List of dependencies (e.g. modules); defaults to an empty list. Use this only when the cluster flag is set to True.
        server         : bool
                         Set it to True to start the server (start_server) in a background thread.

        """
        self.cluster = cluster
        self.compute = compute
        self.depends = depends if depends is not None else []
        self.server = server
        self.results = []
        self.jobs = []
        if self.cluster:
            self.job_cluster = dispy.JobCluster(self.compute,
                                                depends=self.depends)
        if self.server:
            self.server_thread = threading.Thread(target=start_server)
            self.server_thread.start()
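
For reference, a minimal sketch of the bare dispy pattern this wrapper builds on (the compute function double and its arguments are placeholders): create a JobCluster, submit jobs, then call each job object to collect its result.

import dispy

def double(n):
    return 2 * n

if __name__ == '__main__':
    cluster = dispy.JobCluster(double)  # dispynode.py must be running somewhere
    jobs = [cluster.submit(i) for i in range(4)]
    for job in jobs:
        print(job())  # job() blocks until the job finishes
    cluster.close()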
Example #2
def generate_compute_clusters(cluster_ip_addresses, func_name, dependency_list):
    '''
    Generate clusters based on a given list of IP addresses

    Args:
        cluster_ip_addresses: a list of IP addresses
        func_name: the function (computation) to run on the clusters
        dependency_list: the dependencies for running the current function

    Returns:
        cluster_list: a list of clusters as dispy objects

    '''
    import sys
    import dispy

    try:
        cluster_list = []

        for ip_address in cluster_ip_addresses:
            cur_cluster = dispy.JobCluster(func_name,
                                           nodes=[ip_address],
                                           depends=dependency_list,
                                           loglevel=dispy.logger.DEBUG)
            cluster_list.append(cur_cluster)
        return cluster_list
    except:
        print("Unexpected error: {}".format(sys.exc_info()))
        raise
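
A possible call site for this helper, sketched with placeholder IPs and a placeholder compute function:

def square(n):  # placeholder compute function
    return n * n

clusters = generate_compute_clusters(['192.168.0.10', '192.168.0.11'], square, [])
jobs = [cluster.submit(n) for n, cluster in enumerate(clusters)]
results = [job() for job in jobs]  # each job() blocks until that job finishes
for cluster in clusters:
    cluster.close()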
Example #3
def start_dispy_cluster(method, nodes):
    import dispy, dispy.httpd, logging

    cluster = dispy.JobCluster(method, nodes=nodes, loglevel=logging.DEBUG, ping_interval=1000)

    http_server = dispy.httpd.DispyHTTPServer(cluster)

    return cluster, http_server
Example #4
    def create_cluster(self,
                       function,
                       deps=(),
                       callback=None,
                       progress_bar=None,
                       verbose=False,
                       modules=(),
                       on_nodes_changed=None,
                       node_setup=None,
                       node_cleanup=None):

        self.jobs = []
        self.callback = callback
        self.function = function
        self.verbose = verbose
        self.close = True
        self.on_nodes_changed = on_nodes_changed
        self.node_cleanup = node_cleanup

        if not self.run_parallel:
            if node_setup:
                node_setup()
            _thread.start_new_thread(self._job_runner_thread, ())
        else:
            # DDG: NodeAllocate is used to pass the arguments to setup during node initialization
            self.cluster = dispy.JobCluster(
                function,
                [
                    dispy.NodeAllocate(node.ip_addr,
                                       setup_args=() if node_setup else
                                       (modules, )) for node in self.nodes
                ],
                list(deps),
                callback,
                self.cluster_status,
                pulse_interval=60,
                # Note: exceptions raised in setup seem to be swallowed and
                # never shown.
                setup=node_setup or setup,
                cleanup=node_cleanup or True,
                loglevel=dispy.logger.CRITICAL,
                # If communication to a node is lost, its jobs will be
                # automatically rescheduled to another one (but jobs must be
                # reentrant: if the disconnected node is actually still alive,
                # e.g. during a temporary netsplit, a rescheduled job may run
                # multiple times, possibly in parallel).
                reentrant=True,
                ip_addr=self.ip_address)

            self.http_server = dispy.httpd.DispyHTTPServer(self.cluster,
                                                           poll_sec=2)

            # wait for all nodes to be created
            print(" >> Waiting %d seconds to initialize all nodes... " % DELAY)
            time.sleep(DELAY)

        self.progress_bar = progress_bar
Example #5
def main(args):

    if (len(args) < 2):
        print("Need at least 1 file")
        exit(1)

    jobs = []

    print("launching cluster")

    cluster = dispy.JobCluster(
        Grayscaler.grayscaleImage,
        nodes=['192.168.1.22', '192.168.1.20', '192.168.1.6'],
        depends=[Grayscaler],
        loglevel=logging.DEBUG,
        ip_addr='192.168.1.20')

    http_server = dispy.httpd.DispyHTTPServer(cluster)

    #sleep just in case a host is slow to respond
    time.sleep(10)

    #expand image files into a list of jobs

    print("Submitting jobs")

    for file in args[1:]:
        data = Image.open(file)
        newjob = cluster.submit(Grayscaler(data))
        jobs.append(newjob)

    print("Jobs submitted")

    for job in jobs:
        job()  # waits for this job to finish
        if job.status != dispy.DispyJob.Finished:
            print('job %s failed: %s' % (job.id, job.exception))
        else:
            print('job %s finished: %s' % (job.id, job.result))

            job.result.save(("output/output%d.jpg" % job.id), "JPEG")

    print("Waiting on job completion")

    cluster.print_status()

    if (http_server):
        http_server.shutdown()

    if (cluster):
        cluster.close()
Example #6
def start_dispy_cluster(method, nodes):
    """
    Start a new Dispy cluster on 'nodes' to execute the method 'method'

    :param method: function to be executed on each cluster node
    :param nodes: list of node names or IPs.
    :return: the dispy cluster instance and the http_server for monitoring
    """

    cluster = dispy.JobCluster(method, nodes=nodes, loglevel=logging.DEBUG, ping_interval=1000)

    http_server = dispy.httpd.DispyHTTPServer(cluster)

    return cluster, http_server
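
Once all jobs are done, the returned pair needs an explicit teardown; a small sketch of the matching shutdown sequence (the helper name stop_dispy_cluster is hypothetical, but the two calls mirror the cleanup used in the other examples):

def stop_dispy_cluster(cluster, http_server):
    # stop the monitoring HTTP server first, then release the nodes
    http_server.shutdown()
    cluster.close()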
Example #7
def _dispy_create_cluster(status_cb: _DispyCallbackType,
                          nodes: Iterable[NodeAllocate]) -> dispy.JobCluster:
    """Create a dispy job cluster to run word counting jobs."""
    import wordcounter

    # Code required by the computation. Will be sent to each node.
    dependencies = [
        wordcounter, wordcounter.word_tokenizer, wordcounter.word_counter
    ]
    return dispy.JobCluster(_computation,
                            nodes=nodes,
                            ip_addr="127.0.0.1",
                            cluster_status=status_cb,
                            depends=dependencies,
                            reentrant=True)
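
For reference, a sketch of what the status_cb passed here might look like; dispy invokes a cluster_status callback with a status code, the node, and the job (signature assumed from dispy's documented protocol):

import dispy

def status_cb(status, node, job):
    # called by dispy whenever a node or job changes state
    if status == dispy.DispyNode.Initialized:
        print('node %s is ready with %s CPUs' % (node.ip_addr, node.avail_cpus))
    elif status == dispy.DispyJob.Finished:
        print('job %s finished: %s' % (job.id, job.result))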
Example #8
def eval_weights_dispy(oldweights, newweights):
    # pool = multiprocessing.Pool(processes=multiprocessing.cpu_count())
    def callback(job):
        nonlocal finished
        finished += 1
        print(f'\rSimulations finished: {finished}', end='')

    cluster = dispy.JobCluster(run_game,
                               nodes=DISPY_IPS,
                               depends=[
                                   ai, cfunc_caller, helpers, heuristics,
                                   machine_code, __file__, Othello_Core,
                                   play_random
                               ],
                               callback=callback,
                               ip_addr='198.38.22.6',
                               pulse_interval=2,
                               reentrant=True,
                               ping_interval=10)
    # ping_interval=10, loglevel=dispy.logger.DEBUG)
    cluster.print_status()
    intermediate_arr = []
    results = []
    finished = 0

    for _ in range(NUM_SIMULATIONS):
        # intermediate_arr.append(pool.apply_async(run_game, callback=callback))
        intermediate_arr.append(cluster.submit(oldweights, newweights))
    # time.sleep(5)
    cluster.print_status()
    for i, job in enumerate(intermediate_arr):
        # results.append(res.get())
        # print(f'\rSimulations finished: {i}', end='')
        results.append(job())
        if job.status == dispy.DispyJob.Finished:
            # print("Successful job!")
            pass
        else:
            print(job.exception)
    print()
    print(results)
    print(
        f"Won {sum(x>0 for x in results)} games ({sum(x>0 for x in results)/NUM_SIMULATIONS*100}%)"
    )
    print(
        f'Lost {sum(x<0 for x in results)} games ({sum(x<0 for x in results)/NUM_SIMULATIONS*100}%)'
    )
    cluster.print_status()
Example #9
def run(numJobs, pswLength):
    cluster = dispy.JobCluster(crackPassword, nodes=ip_nodes)

    unit = int((10**pswLength) / numJobs) + 1
    jobs = []

    for i in range(0, numJobs):
        print(i * unit, "-", (i + 1) * unit)
        jobs.append(cluster.submit(i * unit, (i + 1) * unit))

    cluster.wait()
    cluster.stats()

    for j in jobs:
        if j.result is not None:
            return j.result
Example #10
    def __init__(self, password, nodes, partitioning=1):
        self._nodes = nodes
        self._password = password
        self._cluster = dispy.JobCluster(nodes=self._nodes,
                                         computation=compute,
                                         callback=self.job_callback)
        self._jobs = []
        self._solution_report = {}
        self._finished = False
        self._partitioning = len(self._password) - partitioning
        self._finished_time = 0

        if partitioning == 0:
            raise Exception('Partitioning has to be larger than 0')

        if partitioning >= len(self._password):
            raise Exception(
                'Partitioning has to be smaller than password length')
Example #11
def run(nTimes, nThrows):

    cluster = dispy.JobCluster(drop, nodes=ip_nodes)

    jobs = []

    for i in range(nTimes):
        jobs.append(cluster.submit(nThrows))

    cluster.wait()
    cluster.stats()

    prob = 0
    for j in jobs:
        prob += j.result

    # Buffon's needle: with needle length equal to the line spacing, the
    # crossing probability is 2/pi, so pi ~= 2 * total_throws / total_crossings.
    pi = (2.0 * nTimes * nThrows) / prob

    return pi
Example #12
def run(nTimes, nThrows):

    cluster = dispy.JobCluster(twoDiceRollSum, nodes=ip_nodes)

    jobs = []

    for i in range(nTimes):
        jobs.append(cluster.submit(nThrows))

    cluster.wait()
    cluster.stats()

    result = [0] * 11

    for j in jobs:
        for i in range(0, 11):
            result[i] += j.result[i]

    return result
Example #13
    def disGeneration_Multi(map_size=12, max_step=200, g_loop=200, chrom_size=243, island=4):
        cluster = dispy.JobCluster(
            disUtil_Multi,
            depends=[Creature],
            nodes=["*"],
            secret='Z1207'
            )
        for i in range(m.generation_max):
            jobs = []
            for no, individual in enumerate(m.individuals):
                c = Creature(map_size, max_step, g_loop, chrom_size)
                job = cluster.submit(c, no, individual, m.strategyMX)
                jobs.append(job)

            for job in jobs:
                idx, score = job()
                m.fitness[idx] = score
            m.fitness_func(i)
            if max(m.fitness) > (map_size-2)*(map_size-2)*10/2-10:
                break
Example #14
def run(nTimes, nThrows):

    cluster = dispy.JobCluster(leibniz, nodes=ip_nodes)

    jobs = []

    for i in range(nTimes):
        jobs.append(cluster.submit(nThrows * i, nThrows * (i + 1)))

    cluster.wait()
    cluster.stats()

    # Leibniz series: pi/4 = 1 - 1/3 + 1/5 - 1/7 + ...; each job sums one slice
    raw = Decimal(0.0)
    #raw = 0.0
    for j in jobs:
        raw += j.result

    pi = 4 * raw

    return pi
Example #15
    def do_startcluster(self, args):
        """Start the dispy cluster with all Nodes in the network. 
        """
        if self.cluster:
            print("Cluster already running.")

        elif self.network.nodes:
            dependencies = fission.manager.get_dependencies()  # [network]
            nodes = [node.ipv4_addr for node in self.network.nodes]
            self.cluster = dispy.JobCluster(
                pipe_wrapper_no_sync,
                nodes=nodes,
                depends=dependencies,
                cluster_status=self.network.status_callback,
                pulse_interval=1,
                ping_interval=10,
                ip_addr="192.168.4.1")
            # setup=daemon.setup, cleanup=daemon.cleanup)
            time.sleep(2)
        else:
            print("Please use 'setnodes' or 'setxml' first.")
Example #16
    def __init__(self, func, nodes, limit=0, port=7000):
        """Initialize the pool object with the function to run.

        Args:
            func: function to run on the cluster
            nodes (list): list of str, containing IP addresses
            limit: if given, the pool limits the number of concurrent submits.
                This argument does not affect map.
            port: port passed to dispy.JobCluster (default 7000)

        """
        lan_ip = get_local_ip()
        import dispy
        self.func = func
        self.cluster = dispy.JobCluster(func,
                                        ip_addr=lan_ip,
                                        ext_ip_addr=lan_ip,
                                        nodes=nodes,
                                        port=port)
        self.threads = []
        self.pool_sema = None
        if limit > 0:
            self.pool_sema = threading.Semaphore(limit)
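
The pool_sema created above suggests a throttled-submit pattern; a minimal sketch of how a submit wrapper might gate in-flight jobs with it (submit_throttled is hypothetical, not part of the class shown):

def submit_throttled(cluster, sema, *args):
    # block until one of the 'limit' slots is free, then run the job
    sema.acquire()
    try:
        job = cluster.submit(*args)
        return job()  # wait for the result before freeing the slot
    finally:
        sema.release()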
Example #17
    def create_cluster(self, function, deps=(), callback=None, progress_bar=None, verbose=False, modules=()):

        self.jobs = []
        self.callback = callback
        self.function = function
        self.verbose = verbose
        self.close = True

        if self.run_parallel:

            # DDG: NodeAllocate is used to pass the arguments to setup during node initialization
            self.cluster = dispy.JobCluster(function, [dispy.NodeAllocate(node.ip_addr, setup_args=(modules,))
                                                       for node in self.nodes], list(deps),
                                            callback, self.cluster_status, pulse_interval=60, setup=setup,
                                            loglevel=dispy.logger.CRITICAL, reentrant=True, ip_addr=self.ip_address)

            self.http_server = dispy.httpd.DispyHTTPServer(self.cluster, poll_sec=2)

            # wait for all nodes to be created
            time.sleep(DELAY)

        self.progress_bar = progress_bar
Example #18
def run(wordLength, packetSize):
    cluster = dispy.JobCluster(hackHash, nodes=ip_nodes)
    jobs = []

    maxWordNumber = base**wordLength

    for i in range(0, maxWordNumber - packetSize, packetSize):
        jobs.append(
            cluster.submit(i, i + packetSize, base, alphabet, pw, wordLength))

    # account for the fact that the packet size may not evenly divide the total number of words to search
    rest = maxWordNumber % packetSize
    if rest != 0:
        jobs.append(
            cluster.submit(maxWordNumber - rest, maxWordNumber, base, alphabet,
                           pw, wordLength))

    cluster.wait()
    cluster.stats()

    for j in jobs:
        if j.result is not None:
            return j.result
Example #19
def run(nNode, nStart, nEnd):
    cluster = dispy.JobCluster(count_collatz_iter, nodes=ip_nodes)

    jobs = []
    last = 0

    # split [nStart, nEnd) into nNode contiguous ranges, one job each
    step = (nEnd - nStart) // nNode
    jobs.append(cluster.submit(nStart, nStart + step))
    for i in range(1, nNode - 1):
        jobs.append(cluster.submit(nStart + i * step, nStart + (i + 1) * step))
        last = i
    jobs.append(cluster.submit(nStart + (last + 1) * step, nEnd))

    cluster.wait()
    cluster.stats()

    results = [0] * 1000

    for j in jobs:
        for i in range(0, 1000):
            results[i] += j.result[i]

    return results
Example #20
    input_factor = {
        'F' + str(i):
        {(i - 1) * (workerVarNum * masterVarNum) + (j - 1) * masterVarNum + k:
         [j + masterVarNum + (i - 1) * workerVarNum, k, 'EQU', 0.9]
         for j in range(1, workerVarNum + 1)
         for k in range(1, masterVarNum + 1)}
        for i in range(1, workerNum + 1)
    }

    if localMode:
        worker_map = {str(i): "127.0.0.1" for i in range(1, workerNum + 1)}
        master_ip = "127.0.0.1"
        cluster_init_worker = dispy.JobCluster(init_worker,
                                               nodes=list(
                                                   set(worker_map.values())),
                                               ip_addr=master_ip,
                                               reentrant=True)
        cluster_gibbs_worker = dispy.JobCluster(gibbs_worker,
                                                nodes=list(
                                                    set(worker_map.values())),
                                                ip_addr=master_ip,
                                                reentrant=True)
    else:
        worker_map = {}
        with open("master_ip.conf", 'r') as f:
            master_ip = f.readline()
        with open("worker_ips.conf", 'r') as f:
            for idx, line in enumerate(f.readlines()):
                if idx >= workerNum: break
                worker_map[str(idx + 1)] = line.strip()
        help="algorithm to use; 0=naive, 1=Fermat, 2=Miller Rabin")
    args = parser.parse_args()

    lower_limit = args.lower_limit
    upper_limit = args.upper_limit
    primality = args.primality
    server_nodes = '172.16.0.*'

    # use Condition variable to protect access to pending_jobs, as
    # 'job_callback' is executed in another thread
    jobs_cond = threading.Condition()

    # choose your algorithm
    if (primality == 0):
        cluster = dispy.JobCluster(naivePrimalityTest,
                                   nodes=server_nodes,
                                   callback=job_callback,
                                   loglevel=logging.INFO)
        print('Naive primality test selected')
    elif (primality == 1):
        cluster = dispy.JobCluster(FermatPrimalityTest,
                                   nodes=server_nodes,
                                   callback=job_callback,
                                   loglevel=logging.INFO)
        print('Fermat primality test selected')
    elif (primality == 2):
        cluster = dispy.JobCluster(MillerRabinPrimalityTest,
                                   nodes=server_nodes,
                                   callback=job_callback,
                                   loglevel=logging.INFO)
        print('Miller-Rabin primality test selected')
Example #22
        equation.Solve()

        fittedTarget = equation.CalculateAllDataFittingTarget(equation.solvedCoefficients)
        if fittedTarget > 1.0E290: # error too large
            return None
    except:
        return None

    return [fittedTarget, equation.GetDisplayName(), equation.solvedCoefficients, equationString, inExtendedVersionString]



print()
print('Creating dispy JobCluster')
cluster = dispy.JobCluster(SetParametersAndFit)

jobs = []

# this example has named equations only, for simplicity it has no polyrationals or polyfunctions
for submodule in inspect.getmembers(pyeq2.Models_2D):
    if inspect.ismodule(submodule[1]):
        for equationClass in inspect.getmembers(submodule[1]):
            if inspect.isclass(equationClass[1]):
                
                # ignore these special classes for simplicity
                if equationClass[1].splineFlag or \
                   equationClass[1].userSelectablePolynomialFlag or \
                   equationClass[1].userCustomizablePolynomialFlag or \
                   equationClass[1].userSelectablePolyfunctionalFlag or \
                   equationClass[1].userSelectableRationalFlag or \
Example #23
def main(args):

    #    from Grayscaler import Grayscaler

    if (len(args) < 2):
        print("Need at least 1 file")
        exit(1)

    #todo: config file
    cluster_nodes = [
        '192.168.1.22', '192.168.1.20', '192.168.1.6', '192.168.1.21'
    ]
    client_ip = '192.168.1.20'
    pulse_interval = 300
    node_secret = "derpy"

    jobs = []

    #works
    cluster_dependencies = [("%s/Grayscaler.py" % srcDir)]
    ###################

    #cluster_dependencies = [ Grayscaler ]

    print("launching cluster with dependencies %s" % cluster_dependencies)

    cluster = dispy.JobCluster(Grayscaler.grayscaleImage,
                               cluster_status=cluster_status_cb,
                               nodes=cluster_nodes,
                               depends=cluster_dependencies,
                               loglevel=logging.DEBUG,
                               ip_addr=client_ip,
                               pulse_interval=pulse_interval,
                               secret=node_secret)

    http_server = dispy.httpd.DispyHTTPServer(cluster)

    #sleep just in case a host is slow to respond
    print("Sleeping, buying time for sluggish nodes to report...")
    time.sleep(5)

    #expand image files into a list of jobs

    print("Submitting jobs")

    images = []

    for file in args[1:]:

        #image filename
        # image = ClusterImage(file)
        #
        # pixelRows = image.getPixelRows()
        #
        # for pixelRow in pixelRows:
        #     job = cluster.submit( Grayscaler( pixelRow ) )
        #
        #
        #
        #     pass

        #need image object that has map of job ids to result rows

        #for an image file, map the resultant job id to a result matrix

        newjob = cluster.submit(Grayscaler(Image.open(file)))

        #jobs.append(newjob)

#############################

#print("Job submitted: %d" % len(jobs) )

# finishedJobs =0
# while( finishedJobs < len(jobs) ):
#     print("Sleeping %d seconds before next status update" % 3)
#     finishedJobs = 0
#     time.sleep(3)
#
#     for job in jobs:
#         #print( "Job status %d: %s" % job.id, job.status)
#
#         # Created = 5
#         # Running = 6
#         # ProvisionalResult = 7
#         # Cancelled = 8
#         # Terminated = 9
#         # Abandoned = 10
#         # Finished = 11
#
#         if(job.status == dispy.DispyJob.Cancelled or job.status == dispy.DispyJob.Terminated or job.status == dispy.DispyJob.Finished):
#             #TODO block print of ids => results
#             finishedJobs += 1
#
#         print("Job id: %s ==> status: %s" % (job.id, job.status) )
#
#
#     cluster.print_status()
#
# print("Job loop finished")

################################################
#wait for cluster operation to complete
#cluster.wait does not wait for callbacks to finish
    quit = False
    while not (quit or cluster.wait(10)):
        cluster.print_status()

    ################################################

    #TODO: compare finished job count to expected

    print("Shutting down...")
    time.sleep(5)

    if (http_server):
        http_server.shutdown()

    if (cluster):
        cluster.close()

    ###############

    #save results
    print("Save loop")

Example #24
if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument("encrypted_message",
                        type=str,
                        help="The message you want to decrypt")
    parser.add_argument("key_length", type=int, help="length of the key")
    args = parser.parse_args()

    # fetch IP address of the client (avoid shadowing the socket module)
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8", 80))

    cluster = dispy.JobCluster(delegate,
                               ip_addr="192.168.0.142",
                               nodes="192.168.0.*",
                               depends=[
                                   'quadgram_analysis.py', QuadgramAnalyzer,
                                   "cipher.py", cipher.decrypt,
                                   cipher.decrypt_unknown_key
                               ])
    import time
    start_time = time.time()
    run_jobs()
    print(f"--- {time.time() - start_time} seconds ---")

    cluster.print_status()
    cluster.close()
Example #25
    time.sleep(n)
    return (dispy_node_name + ': func1', n)


def func2(n):
    import time
    time.sleep(n)
    return (dispy_node_name + ': func2', n)


if __name__ == '__main__':
    import dispy, random, time
    # above functions can be sent with 'depends' so they are available for jobs
    # always; instead, here, the required function is sent with 'dispy_job_depends'
    # to illustrate how to send functions with 'submit' (dynamically)
    cluster = dispy.JobCluster(delegate, loglevel=dispy.logger.DEBUG)
    jobs = []
    for i in range(4):
        # run above functions (computations) alternately
        if i % 2 == 0:
            func = func1
        else:
            func = func2
        # send function with 'dispy_job_depends'; this function is specific to
        # this job - it is discarded when job is over
        job = cluster.submit(func.__name__,
                             random.randint(5, 10),
                             dispy_job_depends=[func])
        if not job:
            print('Failed to create job %s' % i)
            continue
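
The snippet is truncated here; collecting the results would presumably follow the usual dispy pattern, sketched below (consistent with the (dispy_node_name, n) tuples the functions above return):

        jobs.append(job)
    for job in jobs:
        host, n = job()  # blocks until the job finishes
        print('%s slept %s seconds' % (host, n))
    cluster.print_status()
    cluster.close()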
Example #26
            results = True
            break
    return results


if __name__ == '__main__':
    import dispy, socket, time
    import numpy as np

    # fetch the IP address of the client
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("10.0.0.2", 80))
    # doesn't matter if IP cannot be reached, only that the socket exists

    cluster = dispy.JobCluster(
        binSearch,
        ip_addr=s.getsockname()[0],
        nodes=['10.0.0.3', '10.0.0.4', '10.0.0.5', '10.0.0.6'])
    data = np.random.randint(0, 4000, 500000)
    numJobs = 16

    jobData = np.array_split(data, numJobs)
    jobs = []
    results = []

    searchTarget = 50

    for D in jobData:
        job = cluster.submit(searchTarget, D)
        jobs.append(job)

    for job in jobs:
Example #27

equationString = 'pyeq2.Models_2D.Polynomial.Linear'

# see the pyeq2.IModel.fittingTargetDictionary
fittingTargetString = 'SSQABS'

textData = '''
1.0   1.1
2.0   2.2
3.0   3.4159
'''

print()
print('Creating dispy JobCluster')
cluster = dispy.JobCluster(fitEquationUsingDispyCluster)

print('Submitting job to the cluster')
job = cluster.submit(equationString, fittingTargetString, 'Default', textData)

print('Waiting on job completion and collecting results')
results = job()

print()
if job.exception:  # can also use job.status
    print('Remote Exception in job!')
    print()
    print(str(job.exception))
else:
    equationString = 'equation = ' + results[
        1] + '("' + fittingTargetString + '")'
Example #28
           ):  #loop while either player can still win

        if whoseTurn == 0:  #human's turn

            choice = temp.recv_human_move(myBoard)
            myBoard[choice] = -1
            whoseTurn = 1

            listOne = []
            listZero = []
            listMinusOne = []

        else:  #computer's turn

            jobs = []
            cluster = dispy.JobCluster(processNodes, depends=[])
            print("Waiting for system to find its move ........")

            #processes = []

            for i in range(0, 9):
                if myBoard[i] == 0:

                    newnode = GameNode()
                    newnode.board = copy.deepcopy(myBoard)

                    newnode.board[i] = 1
                    #tempProcess = mp.Process(target=processNodes, args=(newnode, output))
                    #processes.append(tempProcess)
                    job = cluster.submit(newnode)
                    job.id = i  # optionally associate an ID to job (if needed later)
Example #29
    return_array_24 = []
    return_array_25 = []
    return_array_26 = []
    return_array_27 = []
    return_array_28 = []
    return_array_29 = []
    return_array_30 = []
    return_array_31 = []
    return_array_32 = []

    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8", 80))

    # setup the dispy cluster
    cluster = dispy.JobCluster(median,
                               loglevel=dispy.logger.CRITICAL,
                               ip_addr=s.getsockname()[0],
                               nodes='192.168.1.*')

    # initialise the array of jobs
    jobs = []
    # for each core, create a new dispy job which runs the median function over a segment of the input images
    for split in range(cores):
        job = cluster.submit(split_array1[split], split_array2[split],
                             split_array3[split], len(split_array1[split]),
                             len(split_array1[split][0]), split)
        jobs.append(job)
    cluster.wait()  # waits until all jobs finish

    # for each completed job
    for job in jobs:
Example #30
    STRATEGY_MUTATION_PROB = 0.01  #Sets tag mutation rate. note 0.01 = 1%
    TAG_MUTATION_PROB = 0.01  #Acts per individual tag! equivalent to 0.01=1% here
    TAG_LENGTHS_TO_COMPUTE = [4, 32]  #Creates a series of data for each tag length
    ROUNDS_GENERATIONS = range(200, 500, 50)  #Sets how many generations of each quantity are computed
    #    ex. 300 gens are computed here [300,500] two separate evolutions are computed
    #    one for 300 and one for 500... data added into the same series
    SAMPLES_PER_GEN_COUNT = 1  #For any given generation size, how many times that generation count should be redone
    PAYOFF_CONSTANTS = [1.9, 1.0, 0.002, 0.001]  #Sets the payoff constants for the prisoner's dilemma: [T,R,P,S] where T>R>P>S and 2R>T+S>2P
    # ============== (END) Settings ===================================================================================================================

    #Distributes to the cluster... currently just takes advantage of the entire cpu core count... not networked yet...
    cluster = dispy.JobCluster(compute, depends=[TagMediatedEvolution])
    jobs = []

    #Run the calculation
    #Stores all the separate series of data to be plotted with different colors and names on the graph
    dataSeries = []
    for l in TAG_LENGTHS_TO_COMPUTE:
        # this is 1 series to be added to all the series...
        allData = {
            'x': [],
            'y': [],
            'g': [],
            'name': "Tag Quant={0}".format(l)
        }
        for g in ROUNDS_GENERATIONS:
            for s in range(SAMPLES_PER_GEN_COUNT):  #Collect multiple samples