# assumed imports: the snippets below omit their import block; the rsa/ru
# aliases suggest radical.synapse.atoms and radical.utils respectively
import time
import pprint

import radical.utils         as ru
import radical.synapse.atoms as rsa


def synaptic(load_compute, load_memory, load_input, load_output):

    start = time.time()

    # create containers for different system workload types
    atoms = dict()
    atoms['c'] = rsa.Compute()
    atoms['m'] = rsa.Memory()
    atoms['i'] = rsa.Storage()
    atoms['o'] = rsa.Storage()

    # the atoms below are executed concurrently (in their own threads)
    atoms['c'].run(info={'n': load_compute})
    atoms['m'].run(info={'n': load_memory})
    atoms['i'].run(info={'size': load_input, 'mode': 'r', 'tgt': '/tmp/src'})
    atoms['o'].run(info={'size': load_output, 'mode': 'w'})

    # wait for all atom threads to be done
    info_c = atoms['c'].wait()
    info_m = atoms['m'].wait()
    info_i = atoms['i'].wait()
    info_o = atoms['o'].wait()

    # copy 'ru.<key> : <value>' lines reported on the atoms' output into the info dicts
    for info in [info_c, info_m, info_i, info_o]:
        for line in info['out']:
            l = ru.ReString(line)
            if l // r'^(ru.\S+)\s+:\s+(\S+)$':
                info[l.get()[0]] = l.get()[1]

    print "------------------------------"
    pprint.pprint(info_i)
    print "------------------------------"
    pprint.pprint(info_o)
    print "------------------------------"
Example No. 2
def synaptic (x, y, z, load_compute, load_memory, load_storage) :

    load_instances = 1

    # load_id = 'EMU.%04d' % x
    # print 'synaptic: %s %s %s' % (x, y, z)
    # print '%8s: %s %s %s' % (load_id, load_compute, load_memory, load_storage)

    start = time.time()

    # create containers for different system workload types
    atoms = dict()
    atoms['c'] = rsa.Compute ()
    atoms['m'] = rsa.Memory  ()
    atoms['s'] = rsa.Storage ()

    # the atoms below are executed concurrently (in their own threads)
    atoms['c'].run (info={'n'   : load_compute})
    atoms['m'].run (info={'n'   : load_memory})
    atoms['s'].run (info={'n'   : load_storage,
                          'tgt' : '%(tmp)s/synapse_storage.tmp.%(pid)s'})

    # wait for all atom threads to be done
    info_c = atoms['c'].wait ()
    info_m = atoms['m'].wait ()
    info_s = atoms['s'].wait ()

    # copy 'ru.<key> : <value>' lines reported on the atoms' output into the info dicts
    for info in [info_c, info_m, info_s] :
        for line in info['out'] :
            l = ru.ReString (line)
            if  l // r'^(ru.\S+)\s+:\s+(\S+)$' :
                info[l.get()[0]] = l.get()[1]

    return {'c':info_c, 'm':info_m, 's':info_s}
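
A minimal sketch of consuming the dict returned by this variant; the argument
values are placeholders (x, y, z only appear in the commented-out diagnostics),
and pprint is assumed to be imported as in the first example:

infos = synaptic(0, 0, 0, load_compute=1, load_memory=1, load_storage=1)
for name in ('c', 'm', 's'):
    pprint.pprint(infos[name])   # per-atom info, including the parsed 'ru.*' keys
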
Example No. 3
def func():
    c = rsa.Compute()
    m = rsa.Memory()
    s = rsa.Storage()

    # atoms can be re-used for several run/wait cycles before being stopped
    for _ in range(3):
        c.run({'n': 1000})
        m.run({'n': 1000})
        s.run({'n': 1000})
        info_c = c.wait()
        info_m = m.wait()
        info_s = s.wait()

    c.stop()
    m.stop()
    s.stop()
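
The explicit stop() calls above suggest the atoms keep their workers alive
across run/wait cycles; a defensive variant of the same loop, using only the
run/wait/stop calls shown in these examples and a hypothetical func_safe()
name, would release the workers even if a run fails:

def func_safe():
    atoms = [rsa.Compute(), rsa.Memory(), rsa.Storage()]
    try:
        for _ in range(3):
            for atom in atoms:
                atom.run({'n': 1000})
            for atom in atoms:
                atom.wait()
    finally:
        for atom in atoms:
            atom.stop()   # always release the atoms' workers
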
Example No. 4
    start = time.time()

    load_id = str(os.environ.get('SYNAPSE_ID', 'X'))
    load_instances = int(os.environ.get('SYNAPSE_INSTANCES', 1))
    load_compute = int(os.environ.get('SYNAPSE_COMPUTE_GFLOPS', 0))
    load_memory = int(os.environ.get('SYNAPSE_MEMORY_GBYTES', 0))
    load_storage = int(os.environ.get('SYNAPSE_STORAGE_GBYTES', 0))

    apps = list()

    # create containers for different system workload types
    for i in range(0, load_instances):

        app = dict()
        app['c'] = rsa.Compute()
        app['m'] = rsa.Memory()
        app['s'] = rsa.Storage()
        # app['n'] = rsa.Network ()

        apps.append(app)

    # run load (this spawns threads as quickly as possible)
    for app in apps:

        # the atoms below are executed concurrently (in their own threads)
        app['c'].run(info={'n': load_compute})  # consume 'load_compute' GFLOP of CPU cycles
        app['m'].run(info={'n': load_memory})   # allocate 'load_memory' GByte of memory
        app['s'].run(
            info={
                'n': load_storage,  # write 'load_storage' GByte to disk
                'tgt': '%(tmp)s/synapse_storage.tmp.%(pid)s'