def write_config_file(node_id, config_file, ns, lns, hosts_file_ns, hosts_file_lns):
    """Write the node config file for node `node_id`.

    One tab-separated line per node in the system:
        id  is-name-server(yes/no)  hostname  port  latency  100.0  100.0
    NS nodes get ids 0..ns-1, LNS nodes get ids ns..ns+lns-1.  Latency is
    the PlanetLab latency from `node_id` to the listed node (get_pl_latency).
    Ports are assigned in 50-port strides starting above 44001; hostnames
    are reused round-robin when there are more nodes than hosts.
    """
    from read_array_from_file import read_col_from_file

    hosts = read_col_from_file(hosts_file_ns)
    host_count = 0
    port_number = 44001
    port_per_node = 50  # each node owns its own 50-port range
    # 'with' guarantees the file is closed even if a latency lookup raises
    with open(config_file, 'w') as fw:
        # name servers: ids 0 .. ns-1
        for i in range(ns):
            port_number += port_per_node
            latency = get_pl_latency(node_id, i)
            s = '\t'.join([str(i), 'yes', hosts[host_count], str(port_number),
                           str(latency), '100.0', '100.0'])
            fw.write(s + '\n')
            host_count = (host_count + 1) % len(hosts)  # round-robin

        # local name servers: ids ns .. ns+lns-1
        hosts = read_col_from_file(hosts_file_lns)
        host_count = 0
        for i in range(lns):
            port_number += port_per_node
            latency = get_pl_latency(node_id, i + ns)
            s = '\t'.join([str(i + ns), 'no', hosts[host_count], str(port_number),
                           str(latency), '100.0', '100.0'])
            fw.write(s + '\n')
            host_count = (host_count + 1) % len(hosts)
示例#2
0
def run_all_lns(gns_folder):
    from read_array_from_file import read_col_from_file
    ns_hostnames = read_col_from_file(exp_config.ns_file)
    num_ns = len(ns_hostnames)
    tmp_cmd_file = '/tmp/local-name-server.sh'
    from read_array_from_file import read_col_from_file
    cmds = []
    pl_lns = read_col_from_file(exp_config.lns_file)

    update_trace_param = ''#exp_config.update_trace_url
    lookup_trace_param = '' #exp_config.lookup_trace_url
    for i, lns in enumerate(pl_lns):
        node_id = str(i + num_ns)

        update_trace_param = 'update' # + node_id # may be nod id

        lookup_trace_param = 'lookup' # + node_id

        cmd = 'ssh -i ' + exp_config.ssh_key + '  -oConnectTimeout=60 -oStrictHostKeyChecking=no -l ' + exp_config.user + ' ' + lns + ' "mkdir -p ' + \
            gns_folder + '; cd ' + gns_folder + '; python local-name-server.py  --lookupTrace ' \
            + lookup_trace_param + ' --updateTrace ' + update_trace_param + ' --id ' + node_id + '"'
        print cmd
        cmds.append(cmd)
    write_array(cmds, tmp_cmd_file, p=True)
    os.system('parallel -a ' + tmp_cmd_file)
def generate_ec2_config_file(load):
    global pl_latencies
    
    # config files
    os.system('mkdir -p ' + config_dir+ '; rm -rf ' + config_dir + '/*')
    from read_array_from_file import read_col_from_file
    ns_hostnames = read_col_from_file(hosts_file_ns)
    ns = len(ns_hostnames)
    lns_hostnames = read_col_from_file(hosts_file_lns)
    lns = len(lns_hostnames)
    assert len(ns_hostnames) == ns
    assert len(lns_hostnames) == lns
    pl_latencies = read_pl_latencies(pl_latency_folder)
    for i in range(ns + lns):
        if i < ns:
            config_file1 = os.path.join(config_dir, config_file + '_' + ns_hostnames[i])
        else:
            config_file1 = os.path.join(config_dir, config_file + '_' + lns_hostnames[i - ns])
        write_config_file(i, config_file1, ns, lns, hosts_file_ns, hosts_file_lns)
    print 'Written config files. ' + config_dir + ' count = ' + str(ns + lns)
    
    # workload
    if exp_config.experiment_run_time > 0:
        if exp_config.gen_workload:
            generate_workload(ns, lns, ns_hostnames, lns_hostnames, load)
            print 'Generate full workload. Nodes = ', lns
        elif exp_config.gen_test_workload:
            generate_test_workload(ns, lns_hostnames[ns - ns]) # id of lns is 'ns'
            print 'Generated single LNS test workload.'
    else :
        os.system('rm -rf lookupLocal updateLocal')
def randomize_trace_file(filename):
    """Shuffle the lines of `filename` and rewrite it in place."""
    from write_array_to_file import write_array

    lines = read_col_from_file(filename)
    random.shuffle(lines)
    write_array(lines, filename, p=False)
def main():
    first_mobile_name = 10000000 #int(sys.argv[1])
    num_mobiles = 100000000 #int(sys.argv[2])
    num_lookups = 500000000
    num_updates = 500000000
    hosts_file = sys.argv[1]
    trace_folder = sys.argv[2]

    script_location = '/home/abhigyan/gnrs/ec2_scripts/gen_mobile_trace.py'
    
    splits = 10
    split_size = num_mobiles/splits
    from read_array_from_file import read_col_from_file
    hosts = read_col_from_file(hosts_file)
    print hosts
    print 'Num hosts:', len(hosts)
    #assert len(hosts) >= splits
    for i in range(splits):
        first = first_mobile_name + split_size * i
        split_trace_folder = os.path.join(trace_folder, 'part' + str(i))
        script_folder = os.path.split(script_location)[0]
        
        cmd = 'ssh ' + hosts[i % len(hosts)] + ' "cd ' + script_folder + '; nohup ' +  script_location + ' ' + split_trace_folder + ' ' + str(first) + ' ' + str(split_size) + ' ' + str(num_lookups/splits) + ' ' + str(num_updates/splits) + ' > output 2> output < /dev/null &"'
        print cmd
        os.system(cmd)
        print 'submitted split', i
示例#6
0
def randomize_trace_file(filename):
    """Randomly reorder the lines of `filename`, overwriting the file."""
    from write_array_to_file import write_array

    entries = read_col_from_file(filename)
    random.shuffle(entries)
    write_array(entries, filename, p=False)
def write_config_file(node_id, config_file, ns, lns, hosts_file_ns,
                      hosts_file_lns):
    """Write the routing config file for node `node_id`.

    One tab-separated line per node:
        id  is-name-server(yes/no)  host  port  latency  100.0  100.0
    NS ids are 0..ns-1, LNS ids ns..ns+lns-1.  Latency is taken from the
    pl_latencies table (latency from node_id to each node).  Ports are
    assigned in 50-port strides above 44001; hosts repeat round-robin.
    """
    from read_array_from_file import read_col_from_file

    fw = open(config_file, 'w')
    port = 44001
    stride = 50
    # (count, id-offset, ns-flag, hosts file) — NS section, then LNS section
    sections = [(ns, 0, 'yes', hosts_file_ns),
                (lns, ns, 'no', hosts_file_lns)]
    for count, offset, flag, hosts_file in sections:
        hostnames = read_col_from_file(hosts_file)
        for k in range(count):
            port += stride
            # latency from node_id to node (k + offset)
            delay = pl_latencies[node_id][k + offset]
            row = [str(k + offset), flag, hostnames[k % len(hostnames)],
                   str(port), str(delay), '100.0', '100.0']
            fw.write('\t'.join(row) + '\n')
    fw.close()
def generate_multinode_config_file(load):
    print 'start'
    from read_array_from_file import read_col_from_file
    ns_hostnames = read_col_from_file(hosts_file_ns)
    ns = len(ns_hostnames)
    lns_hostnames = read_col_from_file(hosts_file_lns)
    lns = len(lns_hostnames)
    if exp_config.gen_workload == 'locality':
        print 'Generating locality-based workload'
        #sys.exit(2)
        generate_workload(ns, lns, ns_hostnames, lns_hostnames, load)
        print 'Generated'
        #print 'Generate full workload. Nodes = ', lns
    elif exp_config.gen_workload == 'test':
        for i in range(lns):
            generate_test_workload(
                ns + i, lns_hostnames[ns + i - ns])  # id of lns is 'ns'
        print 'Generated single LNS test workload'
    print 'after workload'

    # generate EC2 config file:
    if exp_config.gen_config == False:
        return
#    global pl_latencies
    compute_latency_between_nodes(hosts_file_ns, hosts_file_lns,
                                  hosts_file_ns_geo, hosts_file_lns_geo)
    # config files
    os.system('mkdir -p ' + config_dir + '; rm -rf ' + config_dir + '/*')

    #assert len(ns_hostnames) == ns
    #assert len(lns_hostnames) == lns
    #pl_latencies = read_pl_latencies(pl_latency_folder)
    for i in range(ns + lns):
        if i < ns:
            config_file1 = os.path.join(config_dir,
                                        config_file + '_' + ns_hostnames[i])
        else:
            config_file1 = os.path.join(
                config_dir, config_file + '_' + lns_hostnames[i - ns])
        write_config_file(i, config_file1, ns, lns, hosts_file_ns,
                          hosts_file_lns)
    print 'Written config files. ' + config_dir + ' count = ' + str(ns + lns)
def generate_workload(ns, lns, ns_hostnames, lns_hostnames, load):
    # now workload
    from read_array_from_file import read_col_from_file
    lns_names = read_col_from_file(pl_lns_workload)
    lns_count = 0
    
    os.system('rm -rf ' + lookup_trace + '; mkdir -p ' + lookup_trace)
    os.system('rm -rf ' + update_trace + '; mkdir -p ' + update_trace)
    # generate trace for load = load
    
    lookup_temp = '/home/abhigyan/gnrs/lookupTrace' + str(load)
    update_temp = '/home/abhigyan/gnrs/updateTrace' + str(load)

    from trace_generator import trace_generator
    trace_generator(load, lookup_temp, update_temp, other_data)
    print 'after trace here .....'
    #os.system('/home/abhigyan/gnrs/trace_generator.py ' + str(load))
    # trace generator outputs in following folders
    
    #generate_beehive_trace(load)
    no_updates = False
    for i in range(lns):
        #id = str(i + ns)
        node_id = lns_hostnames[i]
        host = lns_names[lns_count]
        lookup_input = lookup_temp + '/lookup_' + host
        lookup_output = os.path.join(lookup_trace, 'lookup_' + node_id)
        os.system('cp ' + lookup_input + ' ' + lookup_output)
#        if os.path.exists(lookup_temp + '/lookup_' + host):
#            #print 'cp ' + lookup_temp + '/lookup_' + host + ' lookupLocal/' + id
#            output_file = os.path.join(lookup_trace, 'lookup_' + id)
#            os.system('cp ' + lookup_temp + '/lookup_' + host + ' ' + id)
#        else:
#            #print 'rm lookupLocal/' + id + '; touch  lookupLocal/' + id
#            os.system('rm lookupLocal/' + id + '; touch  lookupLocal/' + id)
        update_input = update_temp + '/update_' + host
        update_output = os.path.join(update_trace, 'update_' + node_id)
        os.system('cp ' + update_input + ' ' + update_output)
        
#        if no_updates == False and os.path.exists(update_temp + '/update_' + host):
#            os.system('cp ' + update_temp + '/update_' + host + ' updateLocal/' + id)
#        else :
#            os.system('rm -rf updateLocal/' + id + '; touch  updateLocal/' + id)
        #if os.path.exists('workloadTrace/workload_' + host):
        #    os.system('cp workloadTrace/workload_' + host + ' workloadLocal/' + id)
        #else :
        #    os.system('rm workloadLocal/' + id + '; touch  workloadLocal/' + id)
        lns_count = (lns_count + 1) % len(lns_names)

    # delete folders
    os.system('rm -rf ' + lookup_temp + ' ' + update_temp)
    print 'Lookup trace:', lookup_trace
    print 'Update trace:', update_trace
def generate_workload(ns, lns, ns_hostnames, lns_hostnames, load):
    # now workload
    from read_array_from_file import read_col_from_file
    lns_names = read_col_from_file(pl_lns_workload)
    lns_count = 0

    os.system('rm -rf ' + lookup_trace + '; mkdir -p ' + lookup_trace)
    os.system('rm -rf ' + update_trace + '; mkdir -p ' + update_trace)
    # generate trace for load = load

    lookup_temp = '/home/abhigyan/gnrs/lookupTrace' + str(load)
    update_temp = '/home/abhigyan/gnrs/updateTrace' + str(load)

    from trace_generator import trace_generator
    trace_generator(load, lookup_temp, update_temp, other_data)
    print 'after trace here .....'
    #os.system('/home/abhigyan/gnrs/trace_generator.py ' + str(load))
    # trace generator outputs in following folders

    #generate_beehive_trace(load)
    no_updates = False
    for i in range(lns):
        #id = str(i + ns)
        node_id = lns_hostnames[i]
        host = lns_names[lns_count]
        lookup_input = lookup_temp + '/lookup_' + host
        lookup_output = os.path.join(lookup_trace, 'lookup_' + node_id)
        os.system('cp ' + lookup_input + ' ' + lookup_output)
        #        if os.path.exists(lookup_temp + '/lookup_' + host):
        #            #print 'cp ' + lookup_temp + '/lookup_' + host + ' lookupLocal/' + id
        #            output_file = os.path.join(lookup_trace, 'lookup_' + id)
        #            os.system('cp ' + lookup_temp + '/lookup_' + host + ' ' + id)
        #        else:
        #            #print 'rm lookupLocal/' + id + '; touch  lookupLocal/' + id
        #            os.system('rm lookupLocal/' + id + '; touch  lookupLocal/' + id)
        update_input = update_temp + '/update_' + host
        update_output = os.path.join(update_trace, 'update_' + node_id)
        os.system('cp ' + update_input + ' ' + update_output)

        #        if no_updates == False and os.path.exists(update_temp + '/update_' + host):
        #            os.system('cp ' + update_temp + '/update_' + host + ' updateLocal/' + id)
        #        else :
        #            os.system('rm -rf updateLocal/' + id + '; touch  updateLocal/' + id)
        #if os.path.exists('workloadTrace/workload_' + host):
        #    os.system('cp workloadTrace/workload_' + host + ' workloadLocal/' + id)
        #else :
        #    os.system('rm workloadLocal/' + id + '; touch  workloadLocal/' + id)
        lns_count = (lns_count + 1) % len(lns_names)

    # delete folders
    os.system('rm -rf ' + lookup_temp + ' ' + update_temp)
    print 'Lookup trace:', lookup_trace
    print 'Update trace:', update_trace
示例#11
0
def run_all_lns(gns_folder, num_ns):
    tmp_cmd_file = '/tmp/local-name-server.sh'
    from read_array_from_file import read_col_from_file
    cmds = []
    pl_lns = read_col_from_file('pl_lns')
    for i, lns in enumerate(pl_lns):
        node_id = str(i + num_ns)
        cmd = 'ssh -i auspice.pem -oConnectTimeout=60 -oStrictHostKeyChecking=no -l ec2-user ' + lns + ' "mkdir -p ' + \
            gns_folder + '; cd ' + gns_folder + '; python /home/ec2-user/local-name-server.py  --lookupTrace ' \
            '/home/ec2-user/lookup_' + lns + ' --updateTrace /home/ec2-user/update_' + lns + ' --id ' + node_id + '"'
        print cmd
        cmds.append(cmd)
    write_array(cmds, tmp_cmd_file, p=True)
    os.system('parallel -a ' + tmp_cmd_file)
def generate_ec2_config_file(load):
    print 'start'
    from read_array_from_file import read_col_from_file
    ns_hostnames = read_col_from_file(hosts_file_ns)
    ns = len(ns_hostnames)
    lns_hostnames = read_col_from_file(hosts_file_lns)
    lns = len(lns_hostnames)
    if exp_config.gen_workload == 'locality':
        print 'Generating locality-based workload'
        #sys.exit(2)
        generate_workload(ns, lns, ns_hostnames, lns_hostnames, load)
        print 'Generated'
        #print 'Generate full workload. Nodes = ', lns
    elif exp_config.gen_workload == 'test':
        for i in range(lns):
            generate_test_workload(ns + i, lns_hostnames[ns + i - ns]) # id of lns is 'ns'
        print 'Generated single LNS test workload'
    print 'after workload'

    # generate EC2 config file:
    if exp_config.gen_config == False:
        return
#    global pl_latencies
    compute_latency_between_nodes(hosts_file_ns, hosts_file_lns, hosts_file_ns_geo, hosts_file_lns_geo)
    # config files
    os.system('mkdir -p ' + config_dir+ '; rm -rf ' + config_dir + '/*')
    
    #assert len(ns_hostnames) == ns
    #assert len(lns_hostnames) == lns
    #pl_latencies = read_pl_latencies(pl_latency_folder)
    for i in range(ns + lns):
        if i < ns:
            config_file1 = os.path.join(config_dir, config_file + '_' + ns_hostnames[i])
        else:
            config_file1 = os.path.join(config_dir, config_file + '_' + lns_hostnames[i - ns])
        write_config_file(i, config_file1, ns, lns, hosts_file_ns, hosts_file_lns)
    print 'Written config files. ' + config_dir + ' count = ' + str(ns + lns)
示例#13
0
def run_all_ns(gns_folder):
    tmp_cmd_file = '/tmp/name-server.sh'

    from read_array_from_file import read_col_from_file
    cmds = []
    pl_ns = read_col_from_file(exp_config.ns_file)
    for i, ns in enumerate(pl_ns):

        node_id = str(i)
        cmd = 'ssh -i ' + exp_config.ssh_key + ' -oConnectTimeout=60 -oStrictHostKeyChecking=no -l ' + exp_config.user + ' ' + ns + ' "mkdir -p ' + \
              gns_folder + '; cd ' + gns_folder + '; python name-server.py --id ' + node_id + '"'
        print cmd
        cmds.append(cmd)

    write_array(cmds, tmp_cmd_file, p=True)

    os.system('parallel -a ' + tmp_cmd_file)
def main():
    orig_folder = sys.argv[1] #  use gnrs/ec2_data/pl_data_new_100K or *_1M
    final_folder = sys.argv[2] # 
    final_requests = int(sys.argv[3]) 
    orig_requests = 1000000
    print 'Initial number of requests is 1M'
    num_names = 10000 ## hardcoded    
    num_names_final = 10000000    #int(sys.argv[3])
    scale = num_names_final/num_names  ## number of times we will multiply a name
    
    sample_ratio = 1.0 * final_requests / (orig_requests * scale)
    
    os.system('mkdir -p ' + final_folder)
    
    files = os.listdir(orig_folder)
    count = 0
    import random
    for f in files:
        print count, f
        orig_file = os.path.join(orig_folder, f)
        output_file = os.path.join(final_folder, f)
        from read_array_from_file import read_col_from_file
        values = read_col_from_file(orig_file)
        from random import shuffle
        shuffle(values)
        values = values[:int(sample_ratio * len(values))]
        fw = open(output_file, 'w')
        for val in values:
            val = int(val)
            for n in range(scale):
                new_val = num_names * n + val
                fw.write(str(new_val))
                fw.write('\n')
                count += 1
        fw.close()
    print 'Num values', count
示例#15
0
def main():
    orig_folder = sys.argv[1]  #  use gnrs/ec2_data/pl_data_new_100K or *_1M
    final_folder = sys.argv[2]  #
    final_requests = int(sys.argv[3])
    orig_requests = 1000000
    print 'Initial number of requests is 1M'
    num_names = 10000  ## hardcoded
    num_names_final = 10000000  #int(sys.argv[3])
    scale = num_names_final / num_names  ## number of times we will multiply a name

    sample_ratio = 1.0 * final_requests / (orig_requests * scale)

    os.system('mkdir -p ' + final_folder)

    files = os.listdir(orig_folder)
    count = 0
    import random
    for f in files:
        print count, f
        orig_file = os.path.join(orig_folder, f)
        output_file = os.path.join(final_folder, f)
        from read_array_from_file import read_col_from_file
        values = read_col_from_file(orig_file)
        from random import shuffle
        shuffle(values)
        values = values[:int(sample_ratio * len(values))]
        fw = open(output_file, 'w')
        for val in values:
            val = int(val)
            for n in range(scale):
                new_val = num_names * n + val
                fw.write(str(new_val))
                fw.write('\n')
                count += 1
        fw.close()
    print 'Num values', count