Example #1
def get_net_bytes(rxbytes, txbytes, rxbytes_per_s, txbytes_per_s, cpu_util):
    SAMPLE_INTERVAL = 1.0
    # psutil blocks for SAMPLE_INTERVAL seconds while measuring per-CPU utilization.
    util = psutil.cpu_percent(interval=SAMPLE_INTERVAL, percpu=True)
    cpu_util.append(util)
    # Append the cumulative interface byte counters, then derive per-second
    # rates from the difference between the last two readings.
    rxbytes.append(int(ifcfg.default_interface()['rxbytes']))
    txbytes.append(int(ifcfg.default_interface()['txbytes']))
    rxbytes_per_s.append((rxbytes[-1] - rxbytes[-2]) / SAMPLE_INTERVAL)
    txbytes_per_s.append((txbytes[-1] - txbytes[-2]) / SAMPLE_INTERVAL)
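Usage note: the sampler reads rxbytes[-2], so the byte lists must be seeded with one initial reading before the first call. A minimal driver sketch (the five-sample loop is illustrative):

import ifcfg
import psutil

iface = ifcfg.default_interface()
rxbytes = [int(iface['rxbytes'])]   # seed so rxbytes[-2] exists on the first call
txbytes = [int(iface['txbytes'])]
rxbytes_per_s, txbytes_per_s, cpu_util = [], [], []

for _ in range(5):
    # psutil.cpu_percent(interval=1.0) blocks inside get_net_bytes,
    # so each iteration takes roughly one SAMPLE_INTERVAL.
    get_net_bytes(rxbytes, txbytes, rxbytes_per_s, txbytes_per_s, cpu_util)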
Example #2
def get_net_bytes(rxbytes, txbytes):
    SAMPLE_INTERVAL = 1.0
    cpu_util = psutil.cpu_percent(interval=SAMPLE_INTERVAL, percpu=True)
    rxbytes.append(int(ifcfg.default_interface()['rxbytes']))
    txbytes.append(int(ifcfg.default_interface()['txbytes']))
    rxbytes_per_s = (rxbytes[-1] - rxbytes[-2]) / SAMPLE_INTERVAL
    txbytes_per_s = (txbytes[-1] - txbytes[-2]) / SAMPLE_INTERVAL
    return cpu_util, rxbytes_per_s, txbytes_per_s
Example #3
def get_net_bytes(rxbytes, txbytes, rxbytes_per_s, txbytes_per_s):
    SAMPLE_INTERVAL = 1.0
    # Re-schedule this function every SAMPLE_INTERVAL seconds for as long
    # as the STOP event is set; clearing the event ends the sampling loop.
    if STOP.is_set():
        threading.Timer(SAMPLE_INTERVAL, get_net_bytes, [rxbytes, txbytes, rxbytes_per_s, txbytes_per_s]).start()
        rxbytes.append(int(ifcfg.default_interface()['rxbytes']))
        txbytes.append(int(ifcfg.default_interface()['txbytes']))
        rxbytes_per_s.append((rxbytes[-1] - rxbytes[-2]) / SAMPLE_INTERVAL)
        txbytes_per_s.append((txbytes[-1] - txbytes[-2]) / SAMPLE_INTERVAL)
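Note the inverted flag semantics: sampling continues while STOP is set and halts once it is cleared. A start/stop sketch consistent with the lambda handlers further down:

import threading

STOP = threading.Event()

iface = ifcfg.default_interface()
rxbytes = [int(iface['rxbytes'])]   # seed with an initial reading
txbytes = [int(iface['txbytes'])]
rxbytes_per_s, txbytes_per_s = [], []

STOP.set()                          # set == keep sampling
get_net_bytes(rxbytes, txbytes, rxbytes_per_s, txbytes_per_s)
# ... do the work being measured ...
STOP.clear()                        # the next timer tick sees the cleared flag and stops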
Example #4
def start_scan(verbose=False):
    default = ifcfg.default_interface()
    ip = default['inet']
    # Convert each dotted-quad octet into a zero-padded 8-bit binary
    # string, keeping the '0b' prefix that the rest of the code expects.
    ip_bin = ['0b' + bin(int(octet))[2:].zfill(8) for octet in ip.split(".")]

    # The prefix length equals the number of 1-bits in the netmask.
    mask = default['netmask'].split(".")
    subnet = sum(bin(int(octet)).count("1") for octet in mask)

    net_addr = []
    netid = calculate_network_id(ip_bin, subnet, net_addr)
    network_id = ".".join(str(int(octet, 2)) for octet in netid)
    interface = default['device']
    ipandsub = network_id + "/" + str(subnet)
    return arp_scan(interface, ipandsub, verbose)
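calculate_network_id is not shown here; judging by the call site it zeroes the host bits of the address. A hypothetical sketch consistent with how its result is consumed (the helper's real implementation may differ):

def calculate_network_id(ip_bin, subnet, net_addr):
    # Join the four octets into a 32-character bit string, keep the first
    # `subnet` bits, and zero the host portion.
    bits = "".join(octet[2:] for octet in ip_bin)      # strip the '0b' prefixes
    network_bits = bits[:subnet] + "0" * (32 - subnet)
    for i in range(4):
        net_addr.append(network_bits[i * 8:(i + 1) * 8])
    return net_addr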
Example #5
def observe_self():
    interface = ifcfg.default_interface()
    db = Db.instance(current_app)
    obs = db.observations.add(interface.ether, interface.ipv4, interface.observation_type, datetime.now())
    if obs is None:
        return abort(500)
    return jsonify(obs)
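ifcfg.default_interface() returns a plain dict, so the attribute access above (interface.ether, interface.ipv4) implies the project wraps the result first. A hypothetical wrapper consistent with this usage (the names and observation_type handling are assumptions):

class Interface:
    def __init__(self, raw, observation_type="self"):
        self.ether = raw.get("ether")   # MAC address
        self.ipv4 = raw.get("inet")     # IPv4 address
        self.observation_type = observation_type

interface = Interface(ifcfg.default_interface())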
Example #6
def test_default_interface(self):
    ifcfg.distro = 'Linux'
    ifcfg.Parser = LinuxParser
    route_output = ifconfig_out.ROUTE_OUTPUT
    res = ifcfg.default_interface(ifconfig=ifconfig_out.LINUX3,
                                  route_output=route_output)
    ok_(res)
Example #7
def test_default_interface(self):
    ifcfg.distro = 'Linux'
    ifcfg.Parser = UnixIPParser
    res = ifcfg.default_interface(
        ifconfig=ip_out.LINUX,
        route_output=ip_out.ROUTE_OUTPUT_IPROUTE
    )
    ok_(res)
Example #8
def test_default_interface(self):
    ifcfg.distro = 'Linux'
    ifcfg.Parser = LinuxParser
    route_output = ifconfig_out.ROUTE_OUTPUT
    res = ifcfg.default_interface(
        ifconfig=ifconfig_out.LINUX3, route_output=route_output
    )
    ok_(res)
Example #9
def find_matching_interfaces(selected, whitelist=None, blacklist_pattern=None):
    if selected == "default":
        dflt = ifcfg.default_interface()
        if dflt:
            return [_filter_interface_attrs(dflt)]
        else:
            sys.stderr.write(
                "No default interface found.  Randomly picking one.\n")
            selected = "random"

    interfaces = list(find_interfaces(whitelist))

    # Loop over interfaces and eliminate blacklist matches.
    # A sensible default pattern is "(u|)tun\d+"; a match for Windows PPP
    # adapters could be added too.

    if blacklist_pattern:
        blacklist_pattern = re.compile(blacklist_pattern)

        interfaces2 = [
            i for i in interfaces if not blacklist_pattern.match(i.dev)
        ]
        # ToDo:  Debug log:  show which interfaces were blacklisted...
        # ToDo:  Check to see if ALL interfaces have been eliminated by this filter.  (recover by passing in NO ip?)
        if len(interfaces) != len(interfaces2):
            sys.stderr.write(
                "Blacklist filter eliminated {} interface devices\n".format(
                    len(interfaces) - len(interfaces2)))
            interfaces = interfaces2

    if not interfaces:
        return InterfaceInfo(
            None, None, dict(_error="No non-blacklisted interfaces found."))
    if selected == "all":
        return interfaces
    elif selected == "random":
        return [random.choice(interfaces)]
    else:
        raise RuntimeError("Unknown selection type of {!r}".format(selected))
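A usage sketch, assuming find_interfaces, _filter_interface_attrs, and InterfaceInfo are defined elsewhere in the same module:

# Prefer the default interface; fall back to a random non-blacklisted
# one, excluding tunnel devices.
matches = find_matching_interfaces("default", blacklist_pattern=r"(u|)tun\d+")
for info in matches:
    print(info)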
Example #10
def lambda_handler(event, context):
    id = int(event['id'])
    n = num_workers = int(event['n'])
    bucket_name = str(event['bucket_name'])
    path = str(event['path'])
    n_tasks = n

    STOP = threading.Event()
    LOGS_PATH = 'map-logs-' + str(n)

    class TimeLog:
        def __init__(self, enabled=True):
            self.enabled = enabled
            self.start = time.time()
            self.prev = self.start
            self.points = []
            self.sizes = []

        def add_point(self, title):
            if not self.enabled:
                return
            now = time.time()
            self.points += [(title, now - self.prev)]
            self.prev = now

    def upload_net_bytes(rclient, rxbytes_per_s, txbytes_per_s, timelogger,
                         reqid):
        #rclient = redis.Redis(host=REDIS_HOSTADDR_PRIV, port=6379, db=0)
        netstats = LOGS_PATH + '/netstats-' + reqid
        rclient.set(
            netstats,
            str({
                'lambda': reqid,
                'started': timelogger.start,
                'rx': rxbytes_per_s,
                'tx': txbytes_per_s
            }).encode('utf-8'))
        print "wrote netstats"
        return

    def get_net_bytes(rxbytes, txbytes, rxbytes_per_s, txbytes_per_s):
        SAMPLE_INTERVAL = 1.0
        # schedule the function to execute every SAMPLE_INTERVAL seconds
        if STOP.is_set():
            threading.Timer(
                SAMPLE_INTERVAL, get_net_bytes,
                [rxbytes, txbytes, rxbytes_per_s, txbytes_per_s]).start()
            rxbytes.append(int(ifcfg.default_interface()['rxbytes']))
            txbytes.append(int(ifcfg.default_interface()['txbytes']))
            rxbytes_per_s.append((rxbytes[-1] - rxbytes[-2]) / SAMPLE_INTERVAL)
            txbytes_per_s.append((txbytes[-1] - txbytes[-2]) / SAMPLE_INTERVAL)

    t0 = time.time()

    #[s3] read from input file: input<id>
    s3 = boto3.resource('s3')
    file_local = '/tmp/input_tmp'
    lines = []
    # each worker reads its share (m = 1000 / n_tasks) of the 1000 input files
    m = 1000 / n_tasks
    for i in xrange(id * m, (id + 1) * m):
        key = path + 'input' + str(i)
        s3.Bucket(bucket_name).download_file(key, file_local)
        with open(file_local, "r") as f:
            lines += f.readlines()  #each line contains a 100b record
        os.remove(file_local)

    t1 = time.time()

    # partition records into one bucket per reduce task
    p_list = [[] for x in xrange(n_tasks)]
    for line in lines:
        # the first two characters of each record form the sort key;
        # printable ASCII spans 32-126, i.e. 95 possible values per character
        key1 = ord(line[0]) - 32
        key2 = ord(line[1]) - 32
        p = n_tasks / 95.0
        index = int(p * (key1 + key2 / 95.0))
        p_list[index].append(line)

    t1_2 = time.time()

    # start collecting network data
    iface = ifcfg.default_interface()
    rxbytes = [int(iface['rxbytes'])]
    txbytes = [int(iface['txbytes'])]
    rxbytes_per_s = []
    txbytes_per_s = []
    STOP.set()
    get_net_bytes(rxbytes, txbytes, rxbytes_per_s, txbytes_per_s)

    t2 = time.time()

    file_tmp = file_local
    for i in xrange(n_tasks):  #hardcode
        with open(file_tmp, "w+") as f:
            f.writelines(p_list[i])
            f.seek(0)
            p_list[i] = f.read()
        os.remove(file_tmp)

    # write intermediate files shuffle<id>-<0>, shuffle<id>-<1>, ..., shuffle<id>-<num_workers-1>
    s3_client = boto3.client('s3')
    for i in range(n_tasks):
        file_name = 'shuffle/' + 'shuffle' + str(id) + '-' + str(i)
        result = s3_client.put_object(Bucket=bucket_name,
                                      Body=p_list[i],
                                      Key=file_name)

    t3 = time.time()

    #upload network data
    timelogger = TimeLog(enabled=True)
    startup_nodes = [{
        "host": "rediscluster.a9ith3.clustercfg.usw2.cache.amazonaws.com",
        "port": "6379"
    }]
    redis_client = StrictRedisCluster(startup_nodes=startup_nodes,
                                      skip_full_coverage_check=True)
    rclient = redis_client
    STOP.clear()
    upload_net_bytes(rclient, rxbytes_per_s, txbytes_per_s, timelogger,
                     str(id))

    # upload log
    log = {'id': id, 't0': t0, 't1': t1, 't1_2': t1_2, 't2': t2, 't3': t3}
    key = 's3-map-logs-100GB-' + str(n) + '-' + str(id)
    redis_client.set(key, pickle.dumps(log))

    log = [t1 - t0, t1_2 - t1, t3 - t2, t2 - t1_2]
    key = 's3-map-results-100GB-' + str(n) + '-' + str(id)
    redis_client.set(key, pickle.dumps(log))

    #return time spent (in sec) writing intermediate files
    #return [t1-t0, t1_2-t1, t3-t2, t2-t1_2] #read input, compute, write shuffle

    r = 'map finished ' + str(id)
    print(r)
    return r
Example #11
def get_address():
    address = ifcfg.default_interface()['inet']
    return address
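default_interface() can return None when no default route is found, in which case the subscript raises a TypeError. A hedged defensive variant (the fallback address is an assumption):

def get_address(fallback="127.0.0.1"):
    default = ifcfg.default_interface()
    if default is None or not default.get('inet'):
        return fallback   # no default route or no IPv4 address
    return default['inet']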
Example #12
def lambda_handler(event, context):
    id = int(event['id'])
    n = num_workers = int(event['n'])
    bucket_name = str(event['bucket_name'])
    n_tasks = n

    STOP = threading.Event()
    LOGS_PATH = 'reduce-logs-' + str(n)

    class TimeLog:
        def __init__(self, enabled=True):
            self.enabled = enabled
            self.start = time.time()
            self.prev = self.start
            self.points = []
            self.sizes = []

        def add_point(self, title):
            if not self.enabled:
                return
            now = time.time()
            self.points += [(title, now - self.prev)]
            self.prev = now

    def upload_net_bytes(rclient, rxbytes_per_s, txbytes_per_s, timelogger,
                         reqid):
        #rclient = redis.Redis(host=REDIS_HOSTADDR_PRIV, port=6379, db=0)
        netstats = LOGS_PATH + '/netstats-' + reqid
        rclient.set(
            netstats,
            str({
                'lambda': reqid,
                'started': timelogger.start,
                'rx': rxbytes_per_s,
                'tx': txbytes_per_s
            }).encode('utf-8'))
        print "wrote netstats"
        return

    def get_net_bytes(rxbytes, txbytes, rxbytes_per_s, txbytes_per_s):
        SAMPLE_INTERVAL = 1.0
        # schedule the function to execute every SAMPLE_INTERVAL seconds
        if STOP.is_set():
            threading.Timer(
                SAMPLE_INTERVAL, get_net_bytes,
                [rxbytes, txbytes, rxbytes_per_s, txbytes_per_s]).start()
            rxbytes.append(int(ifcfg.default_interface()['rxbytes']))
            txbytes.append(int(ifcfg.default_interface()['txbytes']))
            rxbytes_per_s.append((rxbytes[-1] - rxbytes[-2]) / SAMPLE_INTERVAL)
            txbytes_per_s.append((txbytes[-1] - txbytes[-2]) / SAMPLE_INTERVAL)

    # start collecting network data
    iface = ifcfg.default_interface()
    rxbytes = [int(iface['rxbytes'])]
    txbytes = [int(iface['txbytes'])]
    rxbytes_per_s = []
    txbytes_per_s = []
    STOP.set()
    get_net_bytes(rxbytes, txbytes, rxbytes_per_s, txbytes_per_s)

    t0 = time.time()

    # read intermediate files shuffle<0>-<id>, shuffle<1>-<id>, ..., shuffle<num_workers-1>-<id>
    startup_nodes = [{
        "host": "rediscluster.a9ith3.clustercfg.usw2.cache.amazonaws.com",
        "port": "6379"
    }]
    redis_client = StrictRedisCluster(startup_nodes=startup_nodes,
                                      skip_full_coverage_check=True)

    file_list = []
    for i in xrange(n_tasks):
        key = 'shuffle' + str(i) + '-' + str(id)  # output of mapper i destined for this reducer
        body = redis_client.get(key)
        if body is None:
            return -1
        file_list.append(body)

    file_tmp = '/tmp/tmp'
    all_lines = []
    for i in xrange(n_tasks):
        with open(file_tmp, "w+") as f:
            f.write(file_list[i])
            f.seek(0)
            all_lines += f.readlines()
        os.remove(file_tmp)

    t1 = time.time()

    STOP.clear()

    t1_2 = time.time()

    # merge & sort: each record is a 10-byte key, a 2-byte separator,
    # and the payload
    for i in xrange(len(all_lines)):
        all_lines[i] = (all_lines[i][:10], all_lines[i][12:])
    all_lines.sort(key=lambda x: x[0])

    for i in xrange(len(all_lines)):
        all_lines[i] = all_lines[i][0] + "  " + all_lines[i][1]
    t2 = time.time()

    #[s3] write to output file: output<id>
    s3 = boto3.resource('s3')
    file_name = 'output/sorted_output'
    m = 1000 / n_tasks
    size = len(all_lines) / m
    for i in xrange(m):
        with open(file_tmp, "w+") as f:
            start = size * i
            end = start + size
            f.writelines(all_lines[start:end])
            f.seek(0)
            body = f.read()
        key = file_name + str(id * m + i)
        s3.Bucket(bucket_name).upload_file(file_tmp, key)

        os.remove(file_tmp)
    t3 = time.time()

    #upload network data
    timelogger = TimeLog(enabled=True)
    rclient = redis_client
    upload_net_bytes(rclient, rxbytes_per_s, txbytes_per_s, timelogger,
                     str(id))

    # upload log
    log = {'id': id, 't0': t0, 't1': t1, 't1_2': t1_2, 't2': t2, 't3': t3}
    key = 'redis-reduce-logs-100GB-' + str(n) + '-' + str(id)
    redis_client.set(key, pickle.dumps(log))

    log = [t1 - t0, t2 - t1_2, t3 - t2, t1_2 - t1]
    key = 'redis-reduce-results-100GB-' + str(n) + '-' + str(id)
    redis_client.set(key, pickle.dumps(log))

    #return time (in sec) spent reading intermediate files
    #return [t1-t0, t1_2-t1, t3-t2, t2-t1_2] #read shuffle, compute, write output

    r = 'reduce finished ' + str(id)
    print(r)
    return r
Example #13
def get_ifname() -> str:
    return ifcfg.default_interface()["device"]
Example #14
from collections import namedtuple
import socket
from random import randint
import time
import ifcfg
from netaddr import IPAddress
# import ipaddress

# TODO: use cjson if it is faster despite the tuple conversion
from cPickle import loads, dumps
import select

# TODO: use raw tuples if they are faster
Message = namedtuple('Message', ['data', 'from_', 'to_'])

default = ifcfg.default_interface()

bc_addr = (default['broadcast'], randint(20100, 65000))  # superseded by the fixed port below
bc_addr = (default['broadcast'], 20100)  # fixed port so every peer listens on the same address

bc_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

# activate SO_BROADCAST
bc_sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)

# reusing the same address means other processes can listen on the same port
bc_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

bc_sock.bind(bc_addr)

NEVER_EXPIRE = 0
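A minimal send/receive sketch over the socket configured above, serializing with the cPickle functions already imported (the timeout and buffer size are assumptions):

def send(data, to_=None):
    msg = Message(data=data, from_=bc_sock.getsockname(), to_=to_)
    bc_sock.sendto(dumps(msg), bc_addr)   # broadcast to every listener on the port

def receive(timeout=1.0):
    ready, _, _ = select.select([bc_sock], [], [], timeout)
    if not ready:
        return None   # nothing arrived within the timeout
    payload, _addr = bc_sock.recvfrom(65536)
    return loads(payload)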
Example #15
def test_default_interface(self):
    res = ifcfg.default_interface()
    ok_(res)
Example #16
    LIBCLC = check_path_exists(os.environ['LIBCLC'])
    DASHBOARD_TEMPLATES = check_path_exists(os.environ['DASHBOARD_TEMPLATES'])
    DASHBOARD_STATIC = check_path_exists(os.environ['DASHBOARD_STATIC'])
    DATA_CL_INCLUDE = check_path_exists(os.environ['DATA_CL_INCLUDE'])
    AUX_INCLUDE = check_path_exists(os.environ['AUX_INCLUDE'])
    GREWE = check_path_exists(os.environ['GREWE'])
    CLDRIVE = check_path_exists(os.environ['CLDRIVE'], must_exist=False)
    MUTEC = check_path_exists(os.environ['MUTEC'], must_exist=False)
    SRCIROR_SRC = check_path_exists(os.environ['SRCIROR_SRC'],
                                    must_exist=False)
    SRCIROR_IR = check_path_exists(os.environ['SRCIROR_IR'], must_exist=False)
    CSMITH = check_path_exists(os.environ['CSMITH'], must_exist=False)
    CLSMITH = check_path_exists(os.environ['CLSMITH'], must_exist=False)
    CLSMITH_INCLUDE = check_path_exists(os.environ['CLSMITH_INCLUDE'],
                                        must_exist=False)
    INSTCOUNT = check_path_exists(os.environ['INSTCOUNT'])
    AUTOPHASE = check_path_exists(os.environ['AUTOPHASE'])
    MASTER_PORT = int(os.environ.get("MASTER_PORT", 8738))
    MASTER_ADDR = os.environ.get("MASTER_ADDR", "127.0.0.1")
    LOCAL_RANK = int(
        os.environ.get("LOCAL_RANK", os.environ.get("SLURM_LOCALID", 0)))
    WORLD_RANK = int(os.environ.get("RANK", os.environ.get("SLURM_PROCID", 0)))
    WORLD_SIZE = int(
        os.environ.get("WORLD_SIZE", os.environ.get("SLURM_NTASKS", 1)))
    if "GLOO_SOCKET_IFNAME" not in os.environ:
        os.environ["GLOO_SOCKET_IFNAME"] = ifcfg.default_interface()['device']
    if "NCCL_SOCKET_IFNAME" not in os.environ:
        os.environ["NCCL_SOCKET_IFNAME"] = ifcfg.default_interface()['device']
except Exception as e:
    raise e
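GLOO_SOCKET_IFNAME and NCCL_SOCKET_IFNAME tell the Gloo and NCCL backends which network device to bind for collective communication. A sketch of how the values above are typically consumed, assuming PyTorch distributed:

import torch
import torch.distributed as dist

dist.init_process_group(
    backend="nccl" if torch.cuda.is_available() else "gloo",
    init_method="tcp://{}:{}".format(MASTER_ADDR, MASTER_PORT),
    rank=WORLD_RANK,
    world_size=WORLD_SIZE,
)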
Example #17
def get_ip_addr(self):
    try:
        return ifcfg.default_interface()["inet"]
    except Exception:
        logging.warning("no internet connection or no IP address found")
        return "0.0.0.0"
Example #18
import ifcfg
print(ifcfg.interfaces().items())
print(ifcfg.default_interface()['inet'])
Example #19
def lambda_handler(event, context):
    id = int(event['id'])
    n = num_workers = int(event['n'])

    LOGS_PATH = 'logs-' + str(n)
    STOP = threading.Event()

    class TimeLog:
        def __init__(self, enabled=True):
            self.enabled = enabled
            self.start = time.time()
            self.prev = self.start
            self.points = []
            self.sizes = []

        def add_point(self, title):
            if not self.enabled:
                return
            now = time.time()
            self.points += [(title, now - self.prev)]
            self.prev = now

    def upload_net_bytes(rclient, rxbytes_per_s, txbytes_per_s, cpu_util,
                         timelogger, reqid):
        #rclient = redis.Redis(host=REDIS_HOSTADDR_PRIV, port=6379, db=0)
        netstats = LOGS_PATH + '/netstats-' + reqid
        rclient.set(
            netstats,
            str({
                'lambda': reqid,
                'started': timelogger.start,
                'rx': rxbytes_per_s,
                'tx': txbytes_per_s,
                'cpu': cpu_util
            }).encode('utf-8'))
        print "wrote netstats"
        return

    def get_net_bytes(rxbytes, txbytes, rxbytes_per_s, txbytes_per_s,
                      cpu_util):
        SAMPLE_INTERVAL = 1.0
        # schedule the function to execute every SAMPLE_INTERVAL seconds
        if STOP.is_set():
            threading.Timer(
                SAMPLE_INTERVAL, get_net_bytes,
                [rxbytes, txbytes, rxbytes_per_s, txbytes_per_s, cpu_util
                 ]).start()
            rxbytes.append(int(ifcfg.default_interface()['rxbytes']))
            txbytes.append(int(ifcfg.default_interface()['txbytes']))
            rxbytes_per_s.append((rxbytes[-1] - rxbytes[-2]) / SAMPLE_INTERVAL)
            txbytes_per_s.append((txbytes[-1] - txbytes[-2]) / SAMPLE_INTERVAL)
            util = psutil.cpu_percent(interval=1.0)
            cpu_util.append(util)

    # start collecting network data
    iface = ifcfg.default_interface()
    rxbytes = [int(iface['rxbytes'])]
    txbytes = [int(iface['txbytes'])]
    rxbytes_per_s = []
    txbytes_per_s = []
    cpu_util = []
    STOP.set()
    timelogger = TimeLog(enabled=True)
    get_net_bytes(rxbytes, txbytes, rxbytes_per_s, txbytes_per_s, cpu_util)

    # create a file of size (datasize) bytes
    type = event['type']
    iter = int(event['iter'])
    datasize = int(event['datasize'])  #bytes
    file_tmp = '/tmp/file_tmp'
    with open(file_tmp, 'w') as f:
        text = 'a' * datasize
        f.write(text)

    # microbenchmark different storage
    test_redis_cli()
    #test_redis()
    #test_s3()

    # upload network data
    timelogger = TimeLog(enabled=True)
    startup_nodes = [{
        "host": "rediscluster-log.a9ith3.clustercfg.usw2.cache.amazonaws.com",
        "port": "6379"
    }]
    redis_client = StrictRedisCluster(startup_nodes=startup_nodes,
                                      skip_full_coverage_check=True)
    rclient = redis_client
    STOP.clear()
    upload_net_bytes(rclient, rxbytes_per_s, txbytes_per_s, cpu_util,
                     timelogger, str(id))

    os.remove(file_tmp)

    return "fnished"