Example #1
File: db.py Project: zezo010/IoT-IDS
def addAuth(status, time):
    db = init()
    cursor = db.cursor()
    cursor.execute("insert into auth(status, time) values(?, ?)",
                   (status, time))
    db.commit()
    l.default("Added auth data")
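Every db helper in these examples goes through an init() call that the snippets never show. Judging from the ? placeholders, the commit()/lastrowid usage, and the dict-style row access in Example #13, a minimal sqlite3-based sketch could look like this (the database file name is an assumption):

# Hypothetical sketch of the init() helper these db functions rely on; the real path and schema are not shown here.
import sqlite3

def init():
    conn = sqlite3.connect('ids.db')   # assumed file name
    conn.row_factory = sqlite3.Row     # rows behave like dicts, e.g. x['cpu_percent'] in Example #13
    return conn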
Example #2
def online(sid, data):
    l.success(str(data))
    devInfo = db.getDeviceInfo(data['devId'])
    if devInfo['status'] == 0:
        sio.disconnect(sid)
        return
    l.default('Device: {0} is online'.format(data['devId']))
Example #3
def add_data(emb, model, folds, fold, kappa, weight):
    conn = init()
    cursor = conn.cursor()
    log.default("[+] Adding data...")
    cursor.execute(
        "insert into results(emb, model, folds, fold, kappa, weight) values(?, ?, ?, ?, ?, ?)",
        (emb, model, folds, fold, kappa, weight))
    conn.commit()
    log.success("[+] Data added...")
Example #4
def reset():
    d.default('[+] Resetting ')
    subprocess.check_call(['iptables', '-F'])
    subprocess.check_call(['iptables', '-X'])
    subprocess.check_call(['iptables', '-t', 'nat', '-F'])
    subprocess.check_call(['iptables', '-t', 'nat', '-X'])
    subprocess.check_call(['iptables', '-t', 'mangle', '-F'])
    subprocess.check_call(['iptables', '-t', 'mangle', '-X'])
    subprocess.check_call(['iptables', '-P', 'INPUT', 'ACCEPT'])
    subprocess.check_call(['iptables', '-P', 'FORWARD', 'ACCEPT'])
    subprocess.check_call(['iptables', '-P', 'OUTPUT', 'ACCEPT'])
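subprocess.check_call raises CalledProcessError on any non-zero exit status, and flushing iptables needs root privileges, so a caller would typically wrap reset() along these lines (illustrative only, not part of the project):

import subprocess

try:
    reset()
except subprocess.CalledProcessError as err:
    # one of the iptables invocations failed, most often because the process is not running as root
    print('[-] iptables reset failed: {}'.format(err))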
Example #5
def process(pkt, node, timeSeen):
    ip = pkt.getlayer(IP)
    ether = pkt.getlayer(Ether)
    pprint.pprint(pkt)
    d.default('[+] Time: {}'.format(timeSeen))
    # add to db
    id = db.addData(ether.src, ether.dst, ip.src, ip.dst, timeSeen, node, "",
                    "", "icmp")

    # forward data to classify
    cf.classify(pkt, "icmp", node, timeSeen, id)
Example #6
def addData(dstMac, dstIP, srcMac, srcIP, timeSeen, node, sport, dport, proto):
    db = init()
    cursor = db.cursor()
    cpu_percent = func.cpu_percent()
    mem_p = func.memory()[2]
    cursor.execute(
        "insert into data(dstMac, dstIP, srcMac, srcIP, time, node, sport, dport, proto, cpu_percent, mem_p) values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
        (dstMac, dstIP, srcMac, srcIP, timeSeen, node, sport, dport, proto,
         cpu_percent, mem_p))
    db.commit()
    log.default('[-] {} data added'.format(proto))
    return cursor.lastrowid
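addData assumes a data table whose columns match its insert statement and whose integer primary key feeds cursor.lastrowid; a hedged guess at the matching schema (column names come from the query above, the types are assumptions) is:

# Hypothetical DDL for the data table behind addData(); types are inferred from the values inserted.
conn = init()
conn.execute("""
create table if not exists data(
    id integer primary key autoincrement,
    dstMac text, dstIP text, srcMac text, srcIP text,
    time real, node text, sport text, dport text, proto text,
    cpu_percent real, mem_p real
)
""")
conn.commit()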
Example #7
File: db.py Project: zezo010/IoT-IDS
def rsa(length):
    l.default('RSA, Generating key {0}'.format(str(length)))
    start = time.time()
    rsakey = RSA.generate(1024 * length, Random.new().read)  # scale the key size with length, as oaep() does
    pubCipher = rsakey.publickey()
    privCipher = rsakey
    end = time.time()
    db = init()
    cursor = db.cursor()
    cursor.execute("insert into algo(category, len, time) values(?, ?, ?)",
                   ('rsa', length, end - start))
    db.commit()
    l.success('done...')
    print('\n')
Example #8
File: db.py Project: zezo010/IoT-IDS
def oaep(length):
    l.default('OAEP, Generating key {0}'.format(str(length)))
    start = time.time()
    random_generator = Random.new().read
    rsakey = RSA.generate(1024 * length, random_generator)
    pubCipher = PKCS1_OAEP.new(rsakey.publickey())
    privCipher = PKCS1_OAEP.new(rsakey)
    end = time.time()
    db = init()
    cursor = db.cursor()
    cursor.execute("insert into algo(category, len, time) values(?, ?, ?)",
                   ('oaep', length, end - start))
    db.commit()
    l.success('Done...')
    print('\n')
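oaep() only times key generation and discards the ciphers it builds; for reference, the same PKCS1_OAEP objects round-trip data like this (a standalone illustration, not project code):

from Crypto.PublicKey import RSA
from Crypto.Cipher import PKCS1_OAEP

# Build an encrypt/decrypt pair the same way oaep() does and verify a round trip.
key = RSA.generate(2048)
encryptor = PKCS1_OAEP.new(key.publickey())
decryptor = PKCS1_OAEP.new(key)
ciphertext = encryptor.encrypt(b'iot-ids test payload')
assert decryptor.decrypt(ciphertext) == b'iot-ids test payload'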
Example #9
def addP(srcMac, dstMac, srcIP, dstIP, sport, dport, category):
    db = init()
    cursor = db.cursor()
    if category == "icmp":
        cursor.execute(
            "insert into icmp(srcMac, dstMac, srcIP, dstIP, time) values(?, ?, ?, ?, ?)",
            (srcMac, dstMac, srcIP, dstIP, time.time()))
    else:
        # udp and tcp
        cursor.execute(
            "insert into " + category +
            "(srcMac, dstMac, srcIP, dstIP, sport, dport, time) values(?, ?, ?, ?, ?, ?, ?)",
            (srcMac, dstMac, srcIP, dstIP, sport, dport, time.time()))
    db.commit()
    log.default("Added " + category + " Packet details")
    print("\n")
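Because category is spliced into the SQL text (table names cannot be bound as ? parameters in sqlite3), addP trusts its callers to pass only known protocol names; a defensive variant could whitelist the value first (a suggestion, not the project's code):

# Hypothetical guard: restrict the dynamic table name to the protocol tables the schema actually has.
ALLOWED_TABLES = {'icmp', 'udp', 'tcp'}

def safe_table(category):
    if category not in ALLOWED_TABLES:
        raise ValueError('unexpected packet category: {}'.format(category))
    return category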
Example #10
def packetHandler(pkt):
    if pkt.haslayer(ARP) and pkt.haslayer(Padding) and pkt.getlayer(
            Padding).load != fx.defaultPadding and pkt.getlayer(ARP).op == 2:
        lg.default(pformat(pkt))
        arp = pkt.getlayer(ARP)
        ip = arp.psrc
        mac = arp.hwsrc
        tm = time.time()
        padding = pkt.getlayer(Padding)
        decodedPadding = fx.decodePadding(padding.load)
        seq = decodedPadding[2]
        scn = decodedPadding[1]
        binValue = decodedPadding[3]
        category = decodedPadding[0]
        # log data
        db.logData('incoming', ip, mac, seq, tm, scn, binValue, category)
        print('')
Example #11
def addP(srcMac, dstMac, srcIP, dstIP, sport, dport, category, scenario, node, psize):
    db = init()
    cursor = db.cursor()
    additionalParams = list(func.getAdditionalParams())
    if category == "icmp":
        params = [srcMac, dstMac, srcIP, dstIP, time.time(), scenario, node, psize]
        
        params.extend(additionalParams)

        cursor.execute("insert into icmp(srcMac, dstMac, srcIP, dstIP, time, scenario, node, psize, freq_c, freq_min, freq_max, cpu_percent, ctx_switches, interrupts, soft_interrupts, syscalls, mtu, battery, fan, temp_c, temp_h, temp_crit, swap_t, swap_u, swap_f, swap_p, mem_t, mem_a, mem_p, mem_u, mem_f) values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", tuple(params))
    else:
        # udp and tcp
        params = [srcMac, dstMac, srcIP, dstIP, sport, dport, time.time(), scenario, node, psize]
        params.extend(additionalParams)

        cursor.execute("insert into "+ category + "(srcMac, dstMac, srcIP, dstIP, sport, dport, time, scenario, node, psize, freq_c, freq_min, freq_max, cpu_percent, ctx_switches, interrupts, soft_interrupts, syscalls, mtu, battery, fan, temp_c, temp_h, temp_crit, swap_t, swap_u, swap_f, swap_p, mem_t, mem_a, mem_p, mem_u, mem_f) values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", tuple(params))
    db.commit()
    log.default("Added " + category + " Packet details")
    print("\n")
Example #12
def mitm(iface, data):
    log.default('Detecting MITM => ' + str(data['ip']) + ' = ' + str(data['mac']))
    db_data = data['res']
    i_data_ip = data['ip']
    i_data_mac = data['mac']
    i_data_acq = data['acq']
    i_data_metric = data['valid']
    i_data_time = data['time']

    db_data_ip = db_data['ip']
    db_data_mac = db_data['mac']
    db_data_acq = db_data['acq']
    db_data_metric = db_data['valid']
    db_data_time = db_data['last_seen']

    # both macs are the same
    exists, i_db_data = db.find_mac(i_data_mac)  # the MAC is already known, so exists is expected to be True
    
    # checking if host previous_ip is up
    arp_packet = ARP(pdst=db_data_ip)
    ans, un = sr(arp_packet)
    if len(ans.sessions()) >= 1:
        # host is alive
        db.add_detection_time(time.time() - i_data_time)
        log.error('MITM Detected => IP: ' + i_db_data['ip'] + ', MAC: ' + i_db_data['mac'] + ' ::: Spoofing Client ::: IP: ' + i_data_ip + ', MAC: ' + db_data_mac)
        # add mitigation
        # delete incoming arp entry and keep new one
        arp.delete_entry(iface, i_data_ip)
        arp.add_entry(iface, db_data_ip, db_data_mac)
    else:
        # possible DOS;
        metric = arp.metric(db_data_ip)
        if metric != 1:
            # authorized client has been blocked
            # checking last time seen
            if (i_data_time - db_data_time) < arp.ttl(iface):
                # add detection
                db.add_detection_time(time.time() - i_data_time)
                # confirmed dos
                arp.delete_entry(iface, i_data_ip)
                 
                log.error('MITM Detected => IP: ' + i_db_data['ip'] + ', MAC: ' + i_db_data['mac'] + ' ::: Spoofing Client ::: IP: ' + i_data_ip + ', MAC: ' + db_data_mac)
Example #13
def classify(pkt, proto, node, timeSeen, id):
    pprint.pprint(pkt)
    d.default('[-] Classifying: {}, {}, {}, {}'.format(proto, node, timeSeen,
                                                       id))
    pprint.pprint(pkt)
    output = 0
    md = model()
    print(md)
    nPkts = 0
    for x in db.getCData(time.time(), proto, node):
        nPkts += 1
        output += -1 if md.predict(np.array([[x['cpu_percent'], x['mem_p']]
                                             ]))[0] == 0 else 1

    if nPkts >= 4:
        if output == 0:
            d.success('[-] No DoS')
        else:
            d.error('[+] DoS Attack ooooo ')
            db.addDetection(node, proto, time.time() - timeSeen)
            nf.filter(pkt, proto, node, timeSeen, id)
    else:
        d.warning('[-] Can\'t decide: {} pkts'.format(nPkts))
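model() is not shown in these examples, but the predict() call on a two-feature NumPy array suggests a scikit-learn style estimator; a minimal loader sketch (file name and serialization format are assumptions) might be:

import pickle

def model():
    # hypothetical: restore a pre-trained scikit-learn classifier from disk
    with open('dos_model.pkl', 'rb') as fh:   # assumed path
        return pickle.load(fh)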
Example #14
def packetHandler(pkt):
    global mac
    global icmp_pkt_arr
    global udp_pkt_arr
    global tcp_pkt_arr
    global node
    pprint.pprint(pkt)

    if pkt.haslayer(IP):
        ip = pkt.getlayer(IP)
        ether = pkt.getlayer(Ether)
        if ip.dst is not None and func.inSubnet(
                ip.dst) and ether.src != mac:  # ignore traffic coming from the AP itself
            if pkt.lastlayer().haslayer(ICMP):
                if icmp_pkt_arr == 0:
                    icmp_pkt_arr = time.time()
                else:
                    timeSeen = time.time()
                    if timeSeen - icmp_pkt_arr <= func.tau():
                        d.warning('[+] Possible ICMP Flood detected')
                        icmp_pkt_arr = timeSeen
                        icmp.process(pkt, node, timeSeen)
                    else:
                        # reset
                        d.default('[-] Resetting icmp_pkt_arr')
                        icmp_pkt_arr = 0
            if pkt.lastlayer().haslayer(UDP):
                if udp_pkt_arr == 0:
                    udp_pkt_arr = time.time()
                else:
                    timeSeen = time.time()
                    if timeSeen - udp_pkt_arr <= func.tau():
                        d.warning('[+] Possible UDP Flood detected')
                        udp_pkt_arr = timeSeen
                        udp.process(pkt, node, timeSeen)
                    else:
                        # reset
                        d.default('[-] Resetting udp_pkt_arr')
                        udp_pkt_arr = 0

            if pkt.lastlayer().haslayer(TCP):
                if tcp_pkt_arr == 0:
                    tcp_pkt_arr = time.time()
                else:
                    timeSeen = time.time()
                    if timeSeen - tcp_pkt_arr <= func.tau():
                        d.warning('[+] Possible TCP Flood detected')
                        tcp_pkt_arr = timeSeen
                        syn.process(pkt, node, timeSeen)
                    else:
                        # reset
                        d.default('[-] Resetting tcp_pkt_arr')
                        tcp_pkt_arr = 0
Example #15
def process(pkt, label, t_start):
    ip = pkt.getlayer(IP)
    ether = pkt.getlayer(Ether)
    d.default(pprint.pformat(pkt))
    #func.addData(ip.src, ip.dst, "icmp", label)
    func.getDetectionData(pkt, ip.src, ip.dst, "icmp", label, t_start)
Example #16
File: fleet.py Project: zezo010/IoT-IDS
def on_message(data):
    l.default('message received with {0}'.format(str(data)))
    fx.sendDeviceMessage(str(data['fleetId']), data)
Example #17
        >>> ls(ARP)                                                         
            hwtype     : XShortField                         = (1)
            ptype      : XShortEnumField                     = (2048)
            hwlen      : FieldLenField                       = (None)
            plen       : FieldLenField                       = (None)
            op         : ShortEnumField                      = (1)
            hwsrc      : MultipleTypeField                   = (None)
            psrc       : MultipleTypeField                   = (None)
            hwdst      : MultipleTypeField                   = (None)
            pdst       : MultipleTypeField                   = (None)
        """
        newPkt = fx.arpPacket(arp.pdst, mac, arp.psrc, arp.hwsrc, 2, paddingPayload.load)
        fx.sendPacket(iface, newPkt)
        lg.success(pformat(newPkt))
        print('')

if __name__ == '__main__':
    if len(sys.argv) != 3:
        usage()

    global mac
    global iface
    
    iface = sys.argv[1]
    mac = sys.argv[2]

    # sniff for packets
    lg.default('[-] Listening for packets')
    while True:
        sniff(iface=sys.argv[1], count=1, prn=packetHandler)
Example #18
        if ip.dst is not None and func.inSubnet(
                ip.dst) and ether.src != mac:  # ignore traffic coming from the AP itself
            if pkt.haslayer(ICMP):
                icmp.process(pkt, scenario, t_start)

            if pkt.haslayer(UDP):
                udp.process(pkt, scenario, t_start)

            if pkt.haslayer(TCP):
                tcp.process(pkt, scenario, t_start)


# entry
if __name__ == '__main__':
    if len(sys.argv) != 2:
        usage()

    # connect to rpc
    sio.connect(server)

    func.setSubnet(func.host_iface)
    global mac
    global scenario
    scenario = sys.argv[1]
    mac = db.getSubnet()['mac']

    # sniff for packets
    d.default('[+] Analyzing traffic on {}'.format(func.host_iface))
    while True:
        sniff(iface=func.host_iface, count=1, prn=packetHandler)
Example #19
File: ipc.py Project: zezo010/IoT-IDS
def analyze_packet(sid, data):
    d.default('Verifying ' + str(data))
    sio.emit('verify', data)
Example #20
def connect(sid, environ):
    l.default('Client socket opened => {0}'.format(str(sid)))
Example #21
        current = float(icmp['time'])
        if prev != 0:
            icmp_time.append(current - prev)
        prev = current

    logger.success("ICMP timing window: {0}".format(str(avg(icmp_time))))
    db.updatePInterval("icmp", avg(icmp_time))
    print("\n")

    prev = 0
    for udp in db.getP("udp"):
        current = float(udp['time'])
        if prev != 0:
            udp_time.append(current - prev)
        prev = current
    logger.warning("UDP timing window: {0}".format(str(avg(udp_time))))
    db.updatePInterval("udp", avg(udp_time))
    print("\n")


    prev = 0
    for tcp in db.getP("tcp"):
        current = float(tcp['time'])
        if prev != 0:
            tcp_time.append(current - prev)
        prev = current

    logger.default("TCP timing window: {0}".format(str(avg(tcp_time))))
    db.updatePInterval("tcp", avg(tcp_time))
    print("\n")
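avg() is used throughout this snippet but never defined; a minimal version that tolerates an empty sample list (an assumption about the intended behaviour) would be:

def avg(values):
    # hypothetical helper: arithmetic mean of the inter-arrival samples, 0 when nothing was collected
    return sum(values) / len(values) if values else 0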
Example #22
def tcp(sid, data):
    d.default(data)
Example #23
def icmp(sid, data):
    d.default(data)
Example #24
#!/usr/bin/python
"""
Data Analysis
"""

import func as fx
import db
import logger as lg
import matplotlib.pyplot as plt
import numpy as np

categories = [1, 2]
normal = []
mitm = []

lg.default('[-] Generating dataset...')
fileHandler = open(fx.dataset, 'w')
details = 'RTT,Category\n'
for iteration in fx.seqNumbers:
    for category in categories:
        for data in db.getTableScenarioCategory('incoming', iteration,
                                                category):
            for data2 in db.getTableScenarioCategorySeq(
                    'outgoing', data['scenario'], category, data['seq']):
                ctg = category
                rtt = np.abs(data['time'] - data2['time'])
                details += '{},{}\n'.format(rtt, ctg - 1)
                if ctg == 1:
                    normal.append(rtt)
                else:
                    mitm.append(rtt)
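The snippet ends before fileHandler is used; one plausible way to finish it, offered purely as a sketch rather than the project's actual code, is to flush the accumulated CSV rows and close the handle:

# Hypothetical continuation: persist the generated rows and release the file handle.
fileHandler.write(details)
fileHandler.close()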
Example #25
def udp(sid, data):
    d.default(data)
Example #26
def detect(sid, data):
    d.default(data)
Example #27
def auth(sid, data):
    l.default('Sending request to fleet server')
    sio.emit(str(data['fleetId']), data)
Example #28
File: fx.py Project: zezo010/IoT-IDS
def on_connect():
    global devId
    global data
    l.default('Sending request to IoT device')
    sio.emit(devId, data)
Example #29
def train():
    from linear_schedule import Linear

    ledger = defaultdict(lambda: MovingAverage(Reporting.reward_average))

    M.config(file=os.path.join(RUN.log_directory, RUN.log_file))
    M.diff()

    with U.make_session(
            RUN.num_cpu), Logger(RUN.log_directory) as logger, contextify(
                gym.make(G.env_name)) as env:
        env = ScaledFloatFrame(wrap_dqn(env))

        if G.seed is not None:
            env.seed(G.seed)
        logger.log_params(G=vars(G), RUN=vars(RUN), Reporting=vars(Reporting))
        inputs = TrainInputs(action_space=env.action_space,
                             observation_space=env.observation_space)
        trainer = QTrainer(inputs=inputs,
                           action_space=env.action_space,
                           observation_space=env.observation_space)
        if G.prioritized_replay:
            replay_buffer = PrioritizedReplayBuffer(size=G.buffer_size,
                                                    alpha=G.alpha)
        else:
            replay_buffer = ReplayBuffer(size=G.buffer_size)

        class schedules:
            # note: it is important to have this start from the beginning.
            eps = Linear(G.n_timesteps * G.exploration_fraction, 1,
                         G.final_eps)
            if G.prioritized_replay:
                beta = Linear(G.n_timesteps - G.learning_start, G.beta_start,
                              G.beta_end)

        U.initialize()
        trainer.update_target()
        x = np.array(env.reset())
        ep_ind = 0
        M.tic('episode')
        for t_step in range(G.n_timesteps):
            # schedules
            eps = 0 if G.param_noise else schedules.eps[t_step]
            if G.prioritized_replay:
                beta = schedules.beta[t_step - G.learning_start]

            x0 = x
            M.tic('sample', silent=True)
            (action, *_), action_q, q = trainer.runner.act([x], eps)
            x, rew, done, info = env.step(action)
            ledger['action_q_value'].append(action_q.max())
            ledger['action_q_value/mean'].append(action_q.mean())
            ledger['action_q_value/var'].append(action_q.var())
            ledger['q_value'].append(q.max())
            ledger['q_value/mean'].append(q.mean())
            ledger['q_value/var'].append(q.var())
            ledger['timing/sample'].append(M.toc('sample', silent=True))
            # note: adding sample to the buffer is identical between the prioritized and the standard replay strategy.
            replay_buffer.add(s0=x0,
                              action=action,
                              reward=rew,
                              s1=x,
                              done=float(done))

            logger.log(
                t_step, {
                    'q_value': ledger['q_value'].latest,
                    'q_value/mean': ledger['q_value/mean'].latest,
                    'q_value/var': ledger['q_value/var'].latest,
                    'q_value/action': ledger['action_q_value'].latest,
                    'q_value/action/mean':
                    ledger['action_q_value/mean'].latest,
                    'q_value/action/var': ledger['action_q_value/var'].latest
                },
                action=action,
                eps=eps,
                silent=True)

            if G.prioritized_replay:
                logger.log(t_step, beta=beta, silent=True)

            if done:
                ledger['timing/episode'].append(M.split('episode',
                                                        silent=True))
                ep_ind += 1
                x = np.array(env.reset())
                ledger['rewards'].append(info['total_reward'])

                silent = (ep_ind % Reporting.print_interval != 0)
                logger.log(t_step,
                           timestep=t_step,
                           episode=green(ep_ind),
                           total_reward=ledger['rewards'].latest,
                           episode_length=info['timesteps'],
                           silent=silent)
                logger.log(t_step, {
                    'total_reward/mean':
                    yellow(ledger['rewards'].mean, lambda v: f"{v:.1f}"),
                    'total_reward/max':
                    yellow(ledger['rewards'].max, lambda v: f"{v:.1f}"),
                    "time_spent_exploring":
                    default(eps, percent),
                    "timing/episode":
                    green(ledger['timing/episode'].latest, sec),
                    "timing/episode/mean":
                    green(ledger['timing/episode'].mean, sec),
                },
                           silent=silent)
                try:
                    logger.log(t_step, {
                        "timing/sample":
                        default(ledger['timing/sample'].latest, sec),
                        "timing/sample/mean":
                        default(ledger['timing/sample'].mean, sec),
                        "timing/train":
                        default(ledger['timing/train'].latest, sec),
                        "timing/train/mean":
                        green(ledger['timing/train'].mean, sec),
                        "timing/log_histogram":
                        default(ledger['timing/log_histogram'].latest, sec),
                        "timing/log_histogram/mean":
                        default(ledger['timing/log_histogram'].mean, sec)
                    },
                               silent=silent)
                    if G.prioritized_replay:
                        logger.log(t_step, {
                            "timing/update_priorities":
                            default(ledger['timing/update_priorities'].latest,
                                    sec),
                            "timing/update_priorities/mean":
                            default(ledger['timing/update_priorities'].mean,
                                    sec)
                        },
                                   silent=silent)
                except Exception as e:
                    pass
                if G.prioritized_replay:
                    logger.log(
                        t_step,
                        {"replay_beta": default(beta, lambda v: f"{v:.2f}")},
                        silent=silent)

            # note: learn here.
            if t_step >= G.learning_start and t_step % G.learn_interval == 0:
                if G.prioritized_replay:
                    experiences, weights, indices = replay_buffer.sample(
                        G.replay_batch_size, beta)
                    logger.log_histogram(t_step, weights=weights)
                else:
                    experiences, weights = replay_buffer.sample(
                        G.replay_batch_size), None
                M.tic('train', silent=True)
                x0s, actions, rewards, x1s, dones = zip(*experiences)
                td_error_val, loss_val = trainer.train(s0s=x0s,
                                                       actions=actions,
                                                       rewards=rewards,
                                                       s1s=x1s,
                                                       dones=dones,
                                                       sample_weights=weights)
                ledger['timing/train'].append(M.toc('train', silent=True))
                M.tic('log_histogram', silent=True)
                logger.log_histogram(t_step, td_error=td_error_val)
                ledger['timing/log_histogram'].append(
                    M.toc('log_histogram', silent=True))
                if G.prioritized_replay:
                    M.tic('update_priorities', silent=True)
                    new_priorities = np.abs(td_error_val) + eps
                    replay_buffer.update_priorities(indices, new_priorities)
                    ledger['timing/update_priorities'].append(
                        M.toc('update_priorities', silent=True))

            if t_step % G.target_network_update_interval == 0:
                trainer.update_target()

            if t_step % Reporting.checkpoint_interval == 0:
                U.save_state(os.path.join(RUN.log_directory, RUN.checkpoint))
Example #30
#!/usr/bin/python
"""
IPC
"""

import socketio
import eventlet
import logger as d
import func as fx

## socket defaults
sio = socketio.Server()
app = socketio.WSGIApp(sio, static_files={
    '/': {'content_type': 'text/html', 'filename': '../index.html'}
})

# events
@sio.on('connect')
def connect(sid, environ):
    d.success('Client socket opened => {}'.format(sid))

@sio.on('disconnect')
def disconnect(sid):
    d.error('Client socket closed => {}'.format(sid))


# daemon
if __name__ == '__main__':
    d.default('[+] IPC running: {}:{}'.format(fx.host_ip, fx.host_port))
    eventlet.wsgi.server(eventlet.listen((fx.host_ip, fx.host_port)), app)
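The daemon above serves python-socketio over eventlet; a process talking to it would use the matching client API. A minimal sketch (the URL stands in for fx.host_ip and fx.host_port, the 'verify' event is borrowed from analyze_packet() in Example #19, and everything else is an assumption):

import socketio

client = socketio.Client()

@client.event
def connect():
    print('[+] connected to IPC')

@client.on('verify')          # event name seen in Example #19; purely illustrative here
def verify(data):
    print('[+] verify request: {}'.format(data))

client.connect('http://127.0.0.1:5000')   # stands in for fx.host_ip / fx.host_port
client.wait()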