Example no. 1
def sendingLoop(self):
    # Poll the outgoing queue every 3 seconds; each queued item carries the
    # index of its destination worker at position [1, 0], which selects the
    # target address from rp_addresses before handing the item to client().
    while True:
        if not self._sendingQueue.empty():
            print("Sending...")
            tup = self._sendingQueue.get()
            client(rp_addresses[int(tup[1, 0])], tup)
        time.sleep(3)
Example no. 2
def sending_loop(self):
    # Round-robin dispatch: send each queued item to the next worker address,
    # wrap the index after (n - s) workers, and record per-send timings.
    index = 0
    while True:
        if not self._sendingQueue.empty():
            send_time_start = time.time()
            tup = self._sendingQueue.get()
            print("Sending to worker", index)
            client(rp_addresses[index], tup)
            print("Finished Sending")
            index += 1
            print("index: ", index)
            if index == (self._n - self._s):
                print("Index reset")
                index = 0
            # Time stamp
            send_time_end = time.time()
            self._time_send_list.append(send_time_end - send_time_start)
            print("Sending iteration time: ", send_time_end - send_time_start)
        time.sleep(3)
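Both loops above call a client(address, payload) helper that is defined elsewhere in the project and is not part of this excerpt. The following is only a minimal sketch of what such a sender could look like, assuming the payload is picklable and the workers listen on a fixed TCP port; the port number, the length-prefixed framing, and the use of pickle are assumptions, not taken from the original code.

import pickle
import socket
import struct

WORKER_PORT = 65432  # assumed port; the real project may use a different one

def client(address, payload):
    # Hypothetical sender: open a TCP connection to the worker at the given
    # address, ship one pickled, length-prefixed payload, then close.
    blob = pickle.dumps(payload)
    with socket.create_connection((address, WORKER_PORT)) as sock:
        sock.sendall(struct.pack('>I', len(blob)) + blob)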
Example no. 3
def master_server(host, n):
    # Aggregator-side server: accepts connections from the worker nodes,
    # collects one weight vector per node, and returns the results once the
    # event loop is stopped. Relies on the module-level accept()/read()
    # selector callbacks and on the globals recv_time and node_recvd that
    # those callbacks maintain.
    global N, keep_running, sel, numconn, result
    N = n
    numconn = 0  # number of connections that have delivered a vector so far
    sel = selectors.DefaultSelector()
    keep_running = True

    w = np.zeros((N, 784))
    fn = []
    result = types.SimpleNamespace(w=w, fn=fn)
    port = 65432

    server_addr = (host, port)
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # Avoid bind() exception: OSError: [Errno 48] Address already in use
    server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    server.setblocking(False)
    server.bind(server_addr)
    server.listen()

    sel.register(server, selectors.EVENT_READ, accept)  # call accept() on new connections

    while keep_running:
        events = sel.select()
        for key, mask in events:
            callback = key.data  # accept() for the listening socket, read() for worker sockets
            callback(key.fileobj, mask)  # key.fileobj is the socket itself
        # Trigger the resend protocol once more than 1.5 s have passed since recv_time
        if recv_time != 0 and time.time() > recv_time + 1.5:
            for i in range(0, len(node_recvd)):
                aggr_client.client(node_recvd[i], 'resend')

    sel.close()

    return result
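master_server() registers an accept callback for the listening socket, and the key.data comment indicates a companion read callback for worker sockets; neither callback (nor the node_recvd bookkeeping used by the resend path) is included in this excerpt. Below is only a rough sketch of how they might look in the same module, assuming each worker delivers a single pickled namespace with fields w and fn per recv, and that recv_time records when data last arrived; all of that framing is an assumption.

import pickle  # selectors and time are assumed imported alongside master_server

def accept(sock, mask):
    # Accept a new worker connection and register the read() callback for it.
    conn, addr = sock.accept()
    conn.setblocking(False)
    sel.register(conn, selectors.EVENT_READ, read)

def read(conn, mask):
    # Read one message from a worker; an empty read means the peer closed.
    global numconn, recv_time, keep_running
    data = conn.recv(65536)
    if data:
        msg = pickle.loads(data)       # assumes a single pickled payload per recv
        result.w[numconn, :] = msg.w   # store this node's weight vector
        result.fn.extend(msg.fn)       # append its loss-function values
        numconn += 1
        recv_time = time.time()        # timestamp consulted by the resend check
        if numconn == N:               # every node has reported: stop the event loop
            keep_running = False
    else:
        sel.unregister(conn)
        conn.close()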
Example no. 4
def run(K, tau=0, shuff=0, avc=0):

    # node detection
    router_ip = '192.168.0.1'
    host, iplist = nodeDetection.run(router_ip)
    node_dict = {}
    # determine number of nodes
    n = 0
    for ind in iplist:
        node_dict[ind] = n
        n += 1
    N = n
    # initialize data size d, iterations tau, and matrices for w, averages, and loss functions.
    if tau == 0:
        tau = N
    multiplier = 3
    d = multiplier * n  # number of data points per node; this value can be changed as desired
    w = np.zeros(784)
    fnfn = np.zeros(shape=(N + 1, K * tau))
    accs = np.zeros(K)

    ## Start global updates
    for k in range(0, K):  # aggregator as client, k global iterations
        # send global update information to nodes
        # Currently using the same dataset throughout; pass k=k (instead of k=0) below to refresh the data each round
        data = types.SimpleNamespace(
            w=w,
            k=0,
            host=host,
            node_dict=node_dict,
            d=d,
            tau=tau,
            shuff=shuff)  # data_pts=data_pts omitted; this namespace holds the data sent to the nodes

        # Send Data
        #mcast_send.send(data) # alternative to sending to nodes one at a time.
        for i in range(0, N):  ## Send to each node
            aggr_client.client(iplist[i], data)

        # aggregator as server; get ws from nodes
        result = aggr_server.aggr_server(host, N)
        ww = result.w
        for i in range(0, N):
            fnfn[i, k * tau:(k + 1) * tau] = result.fn[i * tau:i * tau + tau]

        # Process the w: combine the per-node weight vectors
        if avc == 1:
            w = med_avg.med(ww)
        elif avc == 2:
            w = med_avg.med_avg(ww)
        else:
            w = med_avg.mean(ww)
        data.w = w  #update w in data

        accs[k] = AccTest.AccTest(w)
    fnfn[-1, :] = med_avg.mean(fnfn[0:-1, :])  # average all loss functions

    # Graph the loss functions
    stypes = ['No', 'Random', 'RoundRobin', 'SegShift']
    avtypes = ['mean', 'median', 'med_avg']
    label = '%s shuffling and %s processing' % (stypes[shuff], avtypes[avc])
    ParsFile.ParsFile(np.transpose(fnfn), label)
    print(accs)
    return w, fnfn, accs
def run(K=5, tau=0, avc=0, d=4, shuff=2, pad_value=1, graph=False):
    router_ip = '192.168.0.1'
    not5 = True
    while not5:
        host, iplist = nodeDetection.run(router_ip)  # host is the aggregator's IP address
        # finds the other nodes connected to the router and returns their TCP IP addresses;
        # may ditch this to implement full multicast including the aggregator
        print('waiting for 5 nodes')
        not5 = (len(iplist) != 5)
        time.sleep(1.0)
    tinit = time.time()
    node_dict = {}
    # assign node numbers
    print('assigning nodes')
    n = 0
    for ind in iplist:
        node_dict[ind] = n
        n += 1
    N = n
    # initialize data size d, iterations tau, and matrices for w, averages, and loss functions.
    print('initializing data')
    if tau == 0:
        tau = N
    w = np.zeros(784)
    fnfn = np.zeros(shape=(K * tau, 5))
    accs = np.zeros(K)
    print('starting global updates')

    # Start global updates
    for k in range(0, K):  # aggregator as client, k global iterations
        # send global update information to nodes
        # Currently using the same dataset throughout. change k=k to refresh data
        # d is the number of data points per node work file (each node actually stores 2*d)
        data = types.SimpleNamespace(
            w=w,
            k=k,
            K=K,
            host=host,
            pad_value=pad_value,
            node_dict=node_dict,
            d=d,
            tau=tau,
            shuff=shuff)  # data_pts=data_pts omitted; this namespace holds the data sent to the nodes

        # Send Data
        # mcast_send.send(data) # alternative to sending to nodes one at a time.
        for i in range(0, N):  # Send to each node
            aggr_client.client(iplist[i], data)

        # aggregator as server; get ws from nodes
        result = aggr_server.aggr_server(host, N)
        ww = result.w
        # store each node's loss-function trace in its own column of fnfn
        for j in range(0, tau):
            for node in range(5):
                fnfn[j + k * (tau - 1), node] = result.fn[j + node * (tau - 1)]

        # Process the w
        if avc == 1:
            w = med_avg.med(ww)
        elif avc == 2:
            w = med_avg.med_avg(ww)
        else:
            w = med_avg.mean(ww)
        data.w = w  # update w in data

        accs[k] = AccTest.AccTest(w)
    tfin = time.time()
    t_total = tfin - tinit
    # Graph the loss functions
    if graph:
        plt.plot(fnfn)
        plt.title('Loss Functions for each Node')
        plt.show()
    print(accs)
    return t_total
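Both run() variants combine the per-node weight vectors in ww (one row per node) with med_avg.mean, med_avg.med, or med_avg.med_avg depending on avc, but the med_avg module itself is not shown. The sketch below gives one plausible coordinate-wise reading, assuming ww is an (N, 784) NumPy array and that med_avg denotes a trimmed mean; the real module may define these differently.

import numpy as np

def mean(ww):
    # Coordinate-wise average of the per-node weight vectors.
    return np.mean(ww, axis=0)

def med(ww):
    # Coordinate-wise median, more robust to a few outlying nodes.
    return np.median(ww, axis=0)

def med_avg(ww, trim=1):
    # One possible reading of "med_avg": drop the trim largest and smallest
    # values in each coordinate, then average what remains (a trimmed mean).
    sorted_w = np.sort(ww, axis=0)
    return np.mean(sorted_w[trim:ww.shape[0] - trim], axis=0)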