Example #1
def sing_run(te):
    print te
    #data, label = make_moons(n_samples=2000, shuffle=True, noise=0.01,random_state = int(time.time()))
    img_data = np.asarray(pd.read_csv("train.csv", sep=',', header=None, low_memory=False))
    # get labels
    
    
    img_data = np.delete(img_data, 0, axis=0)
    labels = np.asarray(img_data[:,0], np.dtype(int))
    img_data = np.delete(img_data, 0, axis=1)

    img_data = img_data.astype(np.float64)
    #img_data, labels = make_moons(n_samples=2000, shuffle=False, noise=0.01,random_state = 4)#int(time.time())

    
    #validation_data = np.ones(shape = (1,786))
    #validation_label = np.ones(shape = (1,1))
    #data,validation_data,label,validation_label = train_test_split(data,label,train_size = .50)
    total_data = [[] for i in range(3)]
    total_label = [[] for i in range(3)]
    for sas in range(len(img_data)):
      if labels[sas] == 8:
        total_data[0].append(img_data[sas])
        total_label[0].append(0)
      if labels[sas] == 1:
        total_data[1].append(img_data[sas])
        total_label[1].append(1)
      if labels[sas] == 7:
        total_data[2].append(img_data[sas])
        total_label[2].append(2)
    print "built lists"

    total_data[0],v,total_label[0],l = train_test_split(total_data[0],total_label[0],train_size = .2)
    validation_data = v[:1000]
    validation_label = l[:1000] 
    total_data[1],v,total_label[1],l = train_test_split(total_data[1],total_label[1],train_size = .2)
    #print validation_data[0]    

    validation_data = np.r_[validation_data,v[:1000]]

    validation_label = np.r_[validation_label,l[:1000]] 
    #for item in validation_label:
      #print item
    #exit(1)
    
    #HERE    
    
    total_data[2],v,total_label[2],l = train_test_split(total_data[2],total_label[2],train_size = .2)
    validation_data = np.r_[validation_data,v[:1000]]
    validation_label = np.r_[validation_label,l[:1000]]  


    #Here, we have a list of three lists, one for each piece of the "moon"
    #total_data, total_label = split(data,label)
    
    #total_data, total_label = randomize(total_data,total_label,4)
    
    #find the minimum between the three sides.
    minim = min(min(len(total_data[0]),len(total_data[1])),len(total_data[2]))

    iters = 800

    #Our neural networks: a centralized baseline, three per-shard nets, and three decentralized peers
    nnTogetherClassic = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin="tanh")
    nnHorn1 = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin="tanh")
    nnHorn2 = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin="tanh")
    nnMiddle = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin="tanh")
    nnDecent = []
    for i in range(3):
      nnDecent.append(nnDif.nn_build(1,[784,20,20,3],eta=eta,nonlin="tanh"))
    #one = accuracy(nnTogetherClassic, validation_data, validation_label, thr=0.5)
    #total_data, total_label = randomize(total_data,total_label,time.time())
    new_total_data,new_total_label = organize_data(total_data, total_label,minim,minim)
    horn1_data,horn1_label = [x for x in iter_minibatches(1,total_data[0],total_label[0])]
    horn2_data,horn2_label = [x for x in iter_minibatches(1,total_data[2],total_label[2])]
    middle_data,middle_label = [x for x in iter_minibatches(1,total_data[1],total_label[1])]
    
    #HERE
    
    centralized_data,centralized_label = [x for x in iter_minibatches(1,total_data[0]+total_data[1]+total_data[2],total_label[0]+total_label[1]+total_label[2])]
    batches_decent_data, batches_decent_label = [x for x in iter_minibatches(3,new_total_data,new_total_label)]

    visitbatches(nnDecent,batches_decent_data,batches_decent_label,validation_data, validation_label,nn1Acc,te, it=iters)
    print "finished decents"

    visitClassicBatches(nnTogetherClassic,batches_decent_data,batches_decent_label,validation_data, validation_label,classAcc,te,it=iters)
    print "finished cents"
    
    visitClassicBatches(nnHorn1,horn1_data,horn1_label,validation_data, validation_label,eights,te,it=iters)
    print "finished horn1"
      
    visitClassicBatches(nnHorn2,horn2_data,horn2_label,validation_data, validation_label,sevens,te,it=iters)
        
    visitClassicBatches(nnMiddle,middle_data,middle_label,validation_data, validation_label,zeros,te,it=iters)
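
The examples on this page all lean on helpers that are not shown here (iter_minibatches, organize_data, split, randomize, accuracy, visitbatches, visitClassicBatches). The recurring pattern d, l = [x for x in iter_minibatches(...)] only unpacks if the generator yields exactly two items, so a minimal sketch consistent with those call sites looks like this (an inference, not the original implementation):

# Minimal sketch of iter_minibatches, inferred from its call sites: it yields
# the list of data batches first, then the list of label batches, which is
# why the two-name unpacking used throughout these examples works.
def iter_minibatches(batch_size, data, labels):
    data_batches = [data[i:i + batch_size]
                    for i in range(0, len(data), batch_size)]
    label_batches = [labels[i:i + batch_size]
                     for i in range(0, len(labels), batch_size)]
    yield data_batches
    yield label_batches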
Example #2
def sing_run(te):
    print te
    #data, label = make_moons(n_samples=2000, shuffle=True, noise=0.01,random_state = int(time.time()))
    img_data = np.asarray(
        pd.read_csv("train.csv", sep=',', header=None, low_memory=False))
    # get labels

    img_data = np.delete(img_data, 0, axis=0)
    labels = np.asarray(img_data[:, 0], np.dtype(int))
    img_data = np.delete(img_data, 0, axis=1)

    img_data = img_data.astype(np.float64)
    #img_data, labels = make_moons(n_samples=2000, shuffle=False, noise=0.01,random_state = 4)#int(time.time())
    total_data = [[] for i in range(3)]
    total_label = [[] for i in range(3)]
    for sas in range(len(img_data)):
        # MNIST labels run 0-9, so every sample lands in the single shard
        # with its own digit as the class label
        total_data[0].append(img_data[sas])
        total_label[0].append(labels[sas])
    print "built lists"
    total_data[1] = [0] * 10  # dummy shards; only shard 0 is populated
    total_data[2] = [0] * 10
    total_data, total_label = randomize(total_data, total_label, time.time())
    total_data[0], v, total_label[0], l = train_test_split(total_data[0],
                                                           total_label[0],
                                                           train_size=.3)
    validation_data = v[:1000]
    validation_label = l[:1000]

    total_data[0] = total_data[0][:300]
    total_label[0] = total_label[0][:300]

    #Here, we have a list of three lists, one for each piece of the "moon"
    #total_data, total_label = split(data,label)

    #total_data, total_label = randomize(total_data,total_label,4)

    #find the minimum between the three sides.
    minim = min(min(len(total_data[0]), len(total_data[1])),
                len(total_data[2]))
    print len(total_data[0])
    iters = 300

    #Our neural networks: a centralized baseline, six per-shard nets, and six decentralized peers
    nnTogetherClassic = nnS.nn_build(1, [784, 20, 20, 10],
                                     eta=eta,
                                     nonlin="tanh")
    nnHorn1 = nnS.nn_build(1, [784, 20, 20, 10], eta=eta, nonlin="tanh")
    nnHorn2 = nnS.nn_build(1, [784, 20, 20, 10], eta=eta, nonlin="tanh")
    nnMiddle = nnS.nn_build(1, [784, 20, 20, 10], eta=eta, nonlin="tanh")
    nnHorn3 = nnS.nn_build(1, [784, 20, 20, 10], eta=eta, nonlin="tanh")
    nnHorn4 = nnS.nn_build(1, [784, 20, 20, 10], eta=eta, nonlin="tanh")
    nnMiddle2 = nnS.nn_build(1, [784, 20, 20, 10], eta=eta, nonlin="tanh")
    nnDecent = []
    for i in range(6):
        nnDecent.append(
            nnDif.nn_build(1, [784, 20, 20, 10], eta=eta, nonlin="tanh"))
    #one = accuracy(nnTogetherClassic, validation_data, validation_label, thr=0.5)
    total_data, total_label = randomize(total_data, total_label, time.time())
    #new_total_data,new_total_label = organize_data(total_data, total_label,minim,minim)
    horn1_data, horn1_label = [
        x for x in iter_minibatches(1, total_data[0][:50], total_label[0][:50])
    ]
    horn2_data, horn2_label = [
        x for x in iter_minibatches(1, total_data[0][50:100],
                                    total_label[0][50:100])
    ]
    middle_data, middle_label = [
        x for x in iter_minibatches(1, total_data[0][100:150],
                                    total_label[0][100:150])
    ]
    # additional per-node slices, evidently intended for nnHorn3, nnHorn4
    # and nnMiddle2 (repeatedly rebinding middle_data would discard them)
    horn3_data, horn3_label = [
        x for x in iter_minibatches(1, total_data[0][150:200],
                                    total_label[0][150:200])
    ]
    horn4_data, horn4_label = [
        x for x in iter_minibatches(1, total_data[0][200:250],
                                    total_label[0][200:250])
    ]
    middle2_data, middle2_label = [
        x for x in iter_minibatches(1, total_data[0][250:300],
                                    total_label[0][250:300])
    ]

    #HERE
    #total_data[1] = [0] * 10
    #total_data[2] = [0] * 10

    #total_data,total_label = randomize(total_data,total_label,time.time())
    centralized_data, centralized_label = [
        x for x in iter_minibatches(1, total_data[0], total_label[0])
    ]
    batches_decent_data, batches_decent_label = [
        x for x in iter_minibatches(6, total_data[0], total_label[0])
    ]
    print len(centralized_data)
    visitbatches(nnDecent,
                 batches_decent_data,
                 batches_decent_label,
                 validation_data,
                 validation_label,
                 nn1Acc,
                 te,
                 it=iters)
    print "finished decents"
    #np.savetxt("decent-remGrad-all.txt",nn1Acc)
    visitClassicBatches(nnTogetherClassic,
                        batches_decent_data,
                        batches_decent_label,
                        validation_data,
                        validation_label,
                        classAcc,
                        te,
                        it=iters)
    print "finished cents"
    #np.savetxt("cent-remGrad-all.txt",classAcc)
    visitClassicBatches(nnHorn1,
                        horn1_data,
                        horn1_label,
                        validation_data,
                        validation_label,
                        eights,
                        te,
                        it=iters)
    print "finished horn1"
    #np.savetxt("eights-remGrad-all.txt",eights)
    visitClassicBatches(nnHorn2,
                        horn2_data,
                        horn2_label,
                        validation_data,
                        validation_label,
                        sevens,
                        te,
                        it=iters)
    #np.savetxt("sevens-remGrad-all.txt",sevens)
    visitClassicBatches(nnMiddle,
                        middle_data,
                        middle_label,
                        validation_data,
                        validation_label,
                        zeros,
                        te,
                        it=iters)
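
organize_data is likewise undefined on this page. Judging from calls such as organize_data(total_data, total_label, minim, minim) and organize_data(total_data, total_label, i, sample_size), a plausible reading, offered here only as an assumption, is that it interleaves the first count samples of each shard in chunks of size chunk:

# Hypothetical sketch of organize_data: interleave the first `count` samples
# of each per-node shard in chunks of `chunk`, so the centralized learner
# sees every node's data in round-robin order.
def organize_data(shard_data, shard_labels, count, chunk):
    out_data, out_labels = [], []
    for start in range(0, count, chunk):
        for s in range(len(shard_data)):
            out_data.extend(shard_data[s][start:start + chunk])
            out_labels.extend(shard_labels[s][start:start + chunk])
    return out_data, out_labels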
Example #3
def sing_run(te):
    print te
    data, label = make_moons(n_samples=2000,
                             shuffle=True,
                             noise=0.01,
                             random_state=int(time.time()))

    data, validation_data, label, validation_label = train_test_split(
        data, label, train_size=.50)

    #Here, we have a list of three lists, one for each piece of the "moon"
    total_data, total_label = split(data, label)

    total_data, total_label = randomize(total_data, total_label, time.time())
    #for item in validation_label:
    #print item
    #exit(1)

    #HERE

    print len(total_data)
    print len(total_data[0])
    horn1_data, horn1_label = [
        x for x in iter_minibatches(1, total_data[0][:sample_size],
                                    total_label[0][:sample_size])
    ]
    horn2_data, horn2_label = [
        x for x in iter_minibatches(1, total_data[2][:sample_size],
                                    total_label[2][:sample_size])
    ]
    middle_data, middle_label = [
        x for x in iter_minibatches(1, total_data[1][:sample_size],
                                    total_label[1][:sample_size])
    ]
    iters = 500

    #These are the total data pools.  We then turn them into mini batches for centralized and decentralized
    new_total_data, new_total_label = organize_data(total_data, total_label,
                                                    sample_size, sample_size)
    print len(new_total_data)
    centralized_data, centralized_label = [
        x for x in iter_minibatches(1, new_total_data, new_total_label)
    ]
    #print str(new_total_data[0]) + " " + str(new_total_label[0])
    #print str(new_total_data[1]) + " " + str(new_total_label[1])
    #print str(new_total_data[2]) + " " + str(new_total_label[2])
    #print str(new_total_data[3]) + " " + str(new_total_label[3])
    #print str(new_total_data[4]) + " " + str(new_total_label[4])
    #print str(new_total_data[5]) + " " + str(new_total_label[5])
    #print "break"
    batches_decent_data, batches_decent_label = [
        x for x in iter_minibatches(3, new_total_data, new_total_label)
    ]

    #Here, we have a list of three lists, one for each piece of the "moon"
    #total_data, total_label = split(data,label)

    #total_data, total_label = randomize(total_data,total_label,4)

    #find the minimum between the three sides.

    minim = min(min(len(total_data[0]), len(total_data[1])),
                len(total_data[2]))
    print minim

    #Our neural networks: a centralized baseline, three per-shard nets, and three decentralized peers
    nnTogetherClassic = nnS.nn_build(1, [2, 6, 6, 1], eta=eta, nonlin="tanh")
    nnHorn1 = nnS.nn_build(1, [2, 6, 6, 1], eta=eta, nonlin="tanh")
    nnHorn2 = nnS.nn_build(1, [2, 6, 6, 1], eta=eta, nonlin="tanh")
    nnMiddle = nnS.nn_build(1, [2, 6, 6, 1], eta=eta, nonlin="tanh")
    nnDecent = []
    for i in range(3):
        nnDecent.append(nnS.nn_build(1, [2, 6, 6, 1], eta=eta, nonlin="tanh"))
    #one = accuracy(nnTogetherClassic, validation_data, validation_label, thr=0.5)
    total_data, total_label = randomize(total_data, total_label, time.time())
    #new_total_data,new_total_label = organize_data(total_data, total_label,minim,minim)

    #HERE

    #centralized_data,centralized_label = [x for x in iter_minibatches(1,total_data[0]+total_data[1]+total_data[2],total_label[0]+total_label[1]+total_label[2])]
    batches_decent_data, batches_decent_label = [
        x for x in iter_minibatches(3, new_total_data, new_total_label)
    ]
    print len(batches_decent_data[0])
    visitbatches(nnDecent,
                 batches_decent_data,
                 batches_decent_label,
                 validation_data,
                 validation_label,
                 nn1Acc,
                 te,
                 it=iters)
    print "finished decents"

    visitClassicBatches(nnTogetherClassic,
                        centralized_data,
                        centralized_label,
                        validation_data,
                        validation_label,
                        classAcc,
                        te,
                        it=iters)
    print "finished cents"

    visitClassicBatches(nnHorn1,
                        horn1_data,
                        horn1_label,
                        validation_data,
                        validation_label,
                        eights,
                        te,
                        it=iters)
    print "finished horn1"

    visitClassicBatches(nnHorn2,
                        horn2_data,
                        horn2_label,
                        validation_data,
                        validation_label,
                        sevens,
                        te,
                        it=iters)

    visitClassicBatches(nnMiddle,
                        middle_data,
                        middle_label,
                        validation_data,
                        validation_label,
                        zeros,
                        te,
                        it=iters)
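
split is described only by the comment about "a list of three lists for each piece of the moon". One simple realization, again an assumption rather than the source's implementation, partitions the make_moons points into left horn, middle, and right horn by x-coordinate:

# Hypothetical sketch of split: partition two-moons points into three spatial
# pieces by x-coordinate. The 0.5 and 1.5 cut points are illustrative only.
def split(data, label):
    total_data = [[], [], []]
    total_label = [[], [], []]
    for point, lab in zip(data, label):
        if point[0] < 0.5:
            idx = 0  # left horn
        elif point[0] > 1.5:
            idx = 2  # right horn
        else:
            idx = 1  # middle
        total_data[idx].append(point)
        total_label[idx].append(lab)
    return total_data, total_label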
Example #4
def sing_run(te):
    print te
    #data, label = make_moons(n_samples=2000, shuffle=True, noise=0.01,random_state = int(time.time()))
    img_data = np.asarray(pd.read_csv("train.csv", sep=',', header=None, low_memory=False))
    # get labels
    
    
    img_data = np.delete(img_data, 0, axis=0)
    labels = np.asarray(img_data[:,0], np.dtype(int))
    img_data = np.delete(img_data, 0, axis=1)

    img_data = img_data.astype(np.float64)
    #img_data, labels = make_moons(n_samples=2000, shuffle=False, noise=0.01,random_state = 4)#int(time.time())
    total_data = [[] for i in range(3)]
    total_label = [[] for i in range(3)]
    for sas in range(len(img_data)):
      if labels[sas] == 0:
        total_data[0].append(img_data[sas])
        total_label[0].append(0)
      if labels[sas] == 3:
        total_data[0].append(img_data[sas])
        total_label[0].append(1)
      if labels[sas] == 8:
        total_data[0].append(img_data[sas])
        total_label[0].append(2)

    print "built lists"
    total_data[1] = [0]*10  # dummy shards; only shard 0 is populated
    total_data[2] = [0]*10
    total_data,total_label = randomize(total_data,total_label,time.time())
    total_data[0],v,total_label[0],l = train_test_split(total_data[0],total_label[0],train_size = .3)
    validation_data = v[:1000]
    validation_label = l[:1000] 
    
    total_data[0] = total_data[0][:120]
    total_label[0] = total_label[0][:120]

    #Here, we have a list of three lists, one for each piece of the "moon"
    #total_data, total_label = split(data,label)
    
    #total_data, total_label = randomize(total_data,total_label,4)
    
    #find the minimum between the three sides.
    minim = min(min(len(total_data[0]),len(total_data[1])),len(total_data[2]))
    print len(total_data[0])
    iters = 2500

    #Our neural networks: a centralized baseline, six per-shard nets, and three decentralized peers
    nnTogetherClassic = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin="tanh")
    nnHorn1 = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin="tanh")
    nnHorn2 = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin="tanh")
    nnMiddle = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin="tanh")
    nnHorn3 = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin="tanh")
    nnHorn4 = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin="tanh")
    nnMiddle2 = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin="tanh")
    nnDecent = []
    for i in range(3):
      nnDecent.append(nnDif.nn_build(1,[784,20,20,3],eta=eta,nonlin="tanh"))
    #one = accuracy(nnTogetherClassic, validation_data, validation_label, thr=0.5)
    #total_data,total_label = randomize(total_data,total_label,time.time())
    #new_total_data,new_total_label = organize_data(total_data, total_label,minim,minim)
    horn1_data,horn1_label = [x for x in iter_minibatches(1,total_data[0][:40],total_label[0][:40])]
    horn2_data,horn2_label = [x for x in iter_minibatches(1,total_data[0][40:80],total_label[0][40:80])]
    middle_data,middle_label = [x for x in iter_minibatches(1,total_data[0][80:120],total_label[0][80:120])]
    #middle_data,middle_label = [x for x in iter_minibatches(1,total_data[0][150:200],total_label[0][150:200])]
    #middle_data,middle_label = [x for x in iter_minibatches(1,total_data[0][200:250],total_label[0][200:250])]
    #middle_data,middle_label = [x for x in iter_minibatches(1,total_data[0][250:300],total_label[0][250:300])]

    #HERE
    #total_data[1] = [0] * 10
    #total_data[2] = [0] * 10
    new_total_data = [total_data[0][:40],total_data[0][40:80],total_data[0][80:120]]
    new_total_label = [total_label[0][:40],total_label[0][40:80],total_label[0][80:120]]
    print len(new_total_data)
    print len(new_total_data[0])
    x_new, y_new = organize_data(new_total_data,new_total_label,40,40)
    #total_data,total_label = randomize(total_data,total_label,time.time())
    centralized_data,centralized_label = [x for x in iter_minibatches(1,x_new,y_new)]
    batches_decent_data, batches_decent_label = [x for x in iter_minibatches(3,x_new,y_new)]
    print len(centralized_data)
    visitbatches(nnDecent,batches_decent_data,batches_decent_label,validation_data, validation_label,nn1Acc,te, it=iters)
    print "finished decents"
    #np.savetxt("decent-remGrad-all.txt",nn1Acc)
    visitClassicBatches(nnTogetherClassic,centralized_data,centralized_label,validation_data, validation_label,classAcc,te,it=iters)
    print "finished cents"
    #np.savetxt("cent-remGrad-all.txt",classAcc)
    visitClassicBatches(nnHorn1,horn1_data,horn1_label,validation_data, validation_label,eights,te,it=iters)
    print "finished horn1"
    #np.savetxt("eights-remGrad-all.txt",eights)    
    visitClassicBatches(nnHorn2,horn2_data,horn2_label,validation_data, validation_label,sevens,te,it=iters)
    #np.savetxt("sevens-remGrad-all.txt",sevens)    
    visitClassicBatches(nnMiddle,middle_data,middle_label,validation_data, validation_label,zeros,te,it=iters)
Example #5
def sing_run(te):
    print te
    #data, label = make_moons(n_samples=2000, shuffle=True, noise=0.01,random_state = int(time.time()))
    img_data = np.asarray(pd.read_csv("train.csv", sep=',', header=None, low_memory=False))
    # get labels
    
    print len(img_data)
    img_data = np.delete(img_data, 0, axis=0)
    labels = np.asarray(img_data[:,0], np.dtype(int))
    img_data = np.delete(img_data, 0, axis=1)

    img_data = img_data.astype(np.float64)
    #img_data, labels = make_moons(n_samples=2000, shuffle=False, noise=0.01,random_state = 4)#int(time.time())
    print len(img_data)
    print len(labels)
    
    #validation_data = np.ones(shape = (1,786))
    #validation_label = np.ones(shape = (1,1))
    #data,validation_data,label,validation_label = train_test_split(data,label,train_size = .50)
    total_data = [[] for i in range(3)]
    total_label = [[] for i in range(3)]
    for sas in range(len(img_data)):
      if labels[sas] == 8:
        total_data[0].append(img_data[sas])
        total_label[0].append(0)
      if labels[sas] == 1:
        total_data[0].append(img_data[sas])
        total_label[0].append(1)
      if labels[sas] == 7:
        total_data[0].append(img_data[sas])
        total_label[0].append(2)
    print "built lists"

    total_data[0],v,total_label[0],l = train_test_split(total_data[0],total_label[0],train_size = .2, random_state = 2)
    validation_data = v[:1000]
    validation_label = l[:1000] 
    # shard 1 is never populated in this example, so splitting it would raise
    #total_data[1],v,total_label[1],l = train_test_split(total_data[1],total_label[1],train_size = .2, random_state = 2)
    #print validation_data[0]    

    #validation_data = np.r_[validation_data,v[:1000]]

    #validation_label = np.r_[validation_label,l[:1000]] 
    #for item in validation_label:
      #print item
    #exit(1)
    
    #HERE    
    
    #total_data[2],v,total_label[2],l = train_test_split(total_data[2],total_label[2],train_size = .2)
    #validation_data = np.r_[validation_data,v[:1000]]
    #validation_label = np.r_[validation_label,l[:1000]]  


    #Here, we have a list of three lists, one for each piece of the "moon"
    #total_data, total_label = split(data,label)
    
    #total_data, total_label = randomize(total_data,total_label,4)
    
    #only the first shard is populated in this example
    minim = len(total_data[0])



    #Our five neural networks
    nnTogetherClassic = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin=nonlin)
    nnHorn1 = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin="tanh")
    nnHorn2 = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin=nonlin)
    nnMiddle = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin=nonlin)
    nnDecent = nnS.nn_build(1,[784,20,20,3],eta=eta,nonlin=nonlin)

    print minim
    data_map = []
    label_map = []
    for i in range(sample_size,minim - minim%sample_size,sample_size):

        groups_data = []
        groups_label = []
        nets = []
        batches = []

        horn1_data,horn1_label = [x for x in iter_minibatches(1,total_data[0],total_label[0])]
        horn2_data,horn2_label = [x for x in iter_minibatches(1,total_data[2],total_label[2])]
        middle_data,middle_label = [x for x in iter_minibatches(1,total_data[1],total_label[1])]
        iters = 500
        
        #These are the total data pools.  We then turn them into mini batches for centralized and decentralized
        new_total_data,new_total_label = organize_data(total_data, total_label,i,sample_size)
        centralized_data,centralized_label = [x for x in iter_minibatches(1,new_total_data,new_total_label)]
        #print str(new_total_data[0]) + " " + str(new_total_label[0])
        #print str(new_total_data[1]) + " " + str(new_total_label[1])
        #print str(new_total_data[2]) + " " + str(new_total_label[2])
        #print str(new_total_data[3]) + " " + str(new_total_label[3])
        #print str(new_total_data[4]) + " " + str(new_total_label[4])
        #print str(new_total_data[5]) + " " + str(new_total_label[5])
        #print "break"
        batches_decent_data, batches_decent_label = [x for x in iter_minibatches(3,new_total_data,new_total_label)]
        #print str(batches_decent_data[0][0]) + " " + str(batches_decent_label[0][0])
        #print str(batches_decent_data[0][1]) + " " + str(batches_decent_label[0][1])
        #print str(batches_decent_data[0][2]) + " " + str(batches_decent_label[0][2])
        #print str(batches_decent_data[1][0]) + " " + str(batches_decent_label[1][0])
        #print str(batches_decent_data[1][1]) + " " + str(batches_decent_label[1][1])
        #print str(batches_decent_data[1][2]) + " " + str(batches_decent_label[1][2])

        print len(batches_decent_data)
        print len(batches_decent_data[0])
        print len(batches_decent_label[0])
        print len(centralized_data)
        #Visit batches in this order:  decentralized, centralized, first horn, second horn
        # and the middle part of the crescent
        visitClassicBatches(nnDecent,batches_decent_data,batches_decent_label, it=iters)
        visitClassicBatches(nnTogetherClassic,centralized_data,centralized_label,it=iters)
        print len(horn1_data[i-sample_size:i])
        #visitClassicBatches(nnHorn1,horn1_data[i-sample_size:i],horn1_label[i-sample_size:i],it=iters)
        #visitClassicBatches(nnHorn2,horn2_data[i-sample_size:i],horn2_label[i-sample_size:i],it=iters)
        #visitClassicBatches(nnMiddle,middle_data[i-sample_size:i],middle_label[i-sample_size:i],it=iters)
        
        #The accuracies of our neural networks
        togetherAcc = accuracy(nnTogetherClassic,validation_data,validation_label,thr=0.5)
        one = accuracy(nnDecent, validation_data, validation_label, thr=0.5)
        #oneAcc = accuracy(nnHorn1, validation_data, validation_label, thr=0.5)
        #twoAcc = accuracy(nnHorn2, validation_data, validation_label, thr=0.5)
        #midAcc = accuracy(nnMiddle, validation_data, validation_label, thr=0.5)
        
        
        
        print "accuracies"
        print one
        print togetherAcc
        #print oneAcc
        #print twoAcc
        #print midAcc

        nn1Acc[te][i/10] = one
        classAcc1[te][i/10] = togetherAcc
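
randomize(total_data, total_label, seed) is called both with fixed seeds and with time.time(). A sketch that shuffles each shard's data and labels in unison, assuming that is all it does:

import random

# Hypothetical sketch of randomize: shuffle each shard's data and labels in
# unison; fixing the seed makes a run reproducible.
def randomize(total_data, total_label, seed):
    rng = random.Random(seed)
    for i in range(len(total_data)):
        paired = list(zip(total_data[i], total_label[i]))
        rng.shuffle(paired)
        if paired:
            d, l = zip(*paired)
            total_data[i] = list(d)
            total_label[i] = list(l)
    return total_data, total_label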
Example #6
def sing_run(te):
    print te
    data, label = make_moons(n_samples=2000,
                             shuffle=True,
                             noise=0.01,
                             random_state=int(time.time()))

    data, validation_data, label, validation_label = train_test_split(
        data, label, train_size=.50)

    #Here, we have a list of three lists, one for each piece of the "moon"
    total_data, total_label = split(data, label)

    total_data, total_label = randomize(total_data, total_label, 4)

    #find the minimum between the three sides.
    minim = min(min(len(total_data[0]), len(total_data[1])),
                len(total_data[2]))

    #Our neural networks: a centralized baseline, three per-shard nets, and three decentralized peers
    nnTogetherClassic = nnS.nn_build(1, [2, 6, 6, 1], eta=eta, nonlin=nonlin)
    nnHorn1 = nnS.nn_build(1, [2, 6, 6, 1], eta=eta, nonlin=nonlin)
    nnHorn2 = nnS.nn_build(1, [2, 6, 6, 1], eta=eta, nonlin=nonlin)
    nnMiddle = nnS.nn_build(1, [2, 6, 6, 1], eta=eta, nonlin=nonlin)

    nnDecent = []
    for i in range(3):
        nnDecent.append(nnS.nn_build(1, [2, 6, 6, 1], eta=eta, nonlin=nonlin))
    print minim
    data_map = []
    label_map = []
    for i in range(sample_size, minim - minim % sample_size, sample_size):

        groups_data = []
        groups_label = []
        nets = []
        batches = []

        horn1_data, horn1_label = [
            x for x in iter_minibatches(1, total_data[0], total_label[0])
        ]
        horn2_data, horn2_label = [
            x for x in iter_minibatches(1, total_data[2], total_label[2])
        ]
        middle_data, middle_label = [
            x for x in iter_minibatches(1, total_data[1], total_label[1])
        ]
        iters = 3000

        #These are the total data pools.  We then turn them into mini batches for centralized and decentralized
        new_total_data, new_total_label = organize_data(
            total_data, total_label, i, sample_size)
        centralized_data, centralized_label = [
            x for x in iter_minibatches(1, new_total_data, new_total_label)
        ]
        #print str(new_total_data[0]) + " " + str(new_total_label[0])
        #print str(new_total_data[1]) + " " + str(new_total_label[1])
        #print str(new_total_data[2]) + " " + str(new_total_label[2])
        #print str(new_total_data[3]) + " " + str(new_total_label[3])
        #print str(new_total_data[4]) + " " + str(new_total_label[4])
        #print str(new_total_data[5]) + " " + str(new_total_label[5])
        #print "break"
        batches_decent_data, batches_decent_label = [
            x for x in iter_minibatches(3, new_total_data, new_total_label)
        ]
        #print str(batches_decent_data[0][0]) + " " + str(batches_decent_label[0][0])
        #print str(batches_decent_data[0][1]) + " " + str(batches_decent_label[0][1])
        #print str(batches_decent_data[0][2]) + " " + str(batches_decent_label[0][2])
        #print str(batches_decent_data[1][0]) + " " + str(batches_decent_label[1][0])
        #print str(batches_decent_data[1][1]) + " " + str(batches_decent_label[1][1])
        #print str(batches_decent_data[1][2]) + " " + str(batches_decent_label[1][2])
        data_map = data_map + batches_decent_data
        label_map = label_map + batches_decent_label
        #Debug tool to visualize data
        plotArrX = []
        plotArrY = []
        plotColor = []

        #Visit batches in this order:  decentralized, centralized, first horn, second horn
        # and the middle part of the crescent
        visitbatches(nnDecent,
                     batches_decent_data,
                     batches_decent_label,
                     it=iters)

        visitClassicBatches(nnTogetherClassic,
                            centralized_data,
                            centralized_label,
                            it=iters)

        visitClassicBatches(nnHorn1,
                            horn1_data[i - sample_size:i],
                            horn1_label[i - sample_size:i],
                            it=iters)
        visitClassicBatches(nnHorn2,
                            horn2_data[i - sample_size:i],
                            horn2_label[i - sample_size:i],
                            it=iters)
        visitClassicBatches(nnMiddle,
                            middle_data[i - sample_size:i],
                            middle_label[i - sample_size:i],
                            it=iters)

        #The accuracies of our neural networks
        togetherAcc = accuracy(nnTogetherClassic,
                               validation_data,
                               validation_label,
                               thr=0.5)
        one = accuracy(nnDecent[0], validation_data, validation_label, thr=0.5)
        oneAcc = accuracy(nnHorn1, validation_data, validation_label, thr=0.5)
        twoAcc = accuracy(nnHorn2, validation_data, validation_label, thr=0.5)
        midAcc = accuracy(nnMiddle, validation_data, validation_label, thr=0.5)
        two = accuracy(nnDecent[1], validation_data, validation_label, thr=0.5)
        three = accuracy(nnDecent[2],
                         validation_data,
                         validation_label,
                         thr=0.5)

        print "accuracies"
        print one
        print two
        print three
        print togetherAcc
        print oneAcc
        print twoAcc
        print midAcc

        nn1Acc[0][te][i / 10] = one
        nn1Acc[1][te][i / 10] = two
        nn1Acc[2][te][i / 10] = three
        classAcc1[te][i / 10] = togetherAcc
        horn1Acc[te][i / 10] = oneAcc
        horn2Acc[te][i / 10] = twoAcc
        middleAcc[te][i / 10] = midAcc
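
accuracy(nn, data, labels, thr=0.5) serves both the one-output moons nets and the 3- or 10-output MNIST nets. The sketch below assumes nnS exposes a forward pass; nn_forward is an invented name standing in for whatever the real module provides:

import numpy as np

# Hypothetical sketch of accuracy. nnS.nn_forward is an assumed entry point
# that returns the network's output vector for one sample.
def accuracy(nn, validation_data, validation_label, thr=0.5):
    correct = 0
    for x, y in zip(validation_data, validation_label):
        out = np.ravel(nnS.nn_forward(nn, x))  # assumed API
        if out.shape[0] == 1:
            pred = 1 if out[0] > thr else 0  # binary moons case
        else:
            pred = int(np.argmax(out))       # multi-class MNIST case
        if pred == y:
            correct += 1
    return float(correct) / len(validation_data)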
Example #7
def sing_run(te):
    print te
    #data, label = make_moons(n_samples=2000, shuffle=True, noise=0.01,random_state = int(time.time()))
    img_data = np.asarray(
        pd.read_csv("train.csv", sep=',', header=None, low_memory=False))
    # get labels

    print len(img_data)
    img_data = np.delete(img_data, 0, axis=0)
    labels = np.asarray(img_data[:, 0], np.dtype(int))
    img_data = np.delete(img_data, 0, axis=1)

    img_data = img_data.astype(np.float64)
    #img_data, labels = make_moons(n_samples=2000, shuffle=False, noise=0.01,random_state = 4)#int(time.time())
    print len(img_data)
    print len(labels)

    #validation_data = np.ones(shape = (1,786))
    #validation_label = np.ones(shape = (1,1))
    #data,validation_data,label,validation_label = train_test_split(data,label,train_size = .50)
    total_data = [[] for i in range(3)]
    total_label = [[] for i in range(3)]
    for sas in range(len(img_data)):
        if labels[sas] == 8:
            total_data[0].append(img_data[sas])
            total_label[0].append(0)
        if labels[sas] == 1:
            total_data[0].append(img_data[sas])
            total_label[0].append(1)
        if labels[sas] == 7:
            total_data[0].append(img_data[sas])
            total_label[0].append(2)
    print "built lists"

    total_data[0], v, total_label[0], l = train_test_split(total_data[0],
                                                           total_label[0],
                                                           train_size=.2,
                                                           random_state=2)
    validation_data = v[:1000]
    validation_label = l[:1000]
    # shard 1 is never populated in this example, so splitting it would raise
    #total_data[1], v, total_label[1], l = train_test_split(total_data[1],
    #                                                       total_label[1],
    #                                                       train_size=.2,
    #                                                       random_state=2)
    #print validation_data[0]

    #validation_data = np.r_[validation_data,v[:1000]]

    #validation_label = np.r_[validation_label,l[:1000]]
    #for item in validation_label:
    #print item
    #exit(1)

    #HERE

    #total_data[2],v,total_label[2],l = train_test_split(total_data[2],total_label[2],train_size = .2)
    #validation_data = np.r_[validation_data,v[:1000]]
    #validation_label = np.r_[validation_label,l[:1000]]

    #Here, we have a list of three lists, one for each piece of the "moon"
    #total_data, total_label = split(data,label)

    #total_data, total_label = randomize(total_data,total_label,4)

    #only the first shard is populated in this example
    minim = len(total_data[0])

    #Our five neural networks
    nnTogetherClassic = nnS.nn_build(1, [784, 20, 20, 3],
                                     eta=eta,
                                     nonlin=nonlin)
    nnHorn1 = nnS.nn_build(1, [784, 20, 20, 3], eta=eta, nonlin="tanh")
    nnHorn2 = nnS.nn_build(1, [784, 20, 20, 3], eta=eta, nonlin=nonlin)
    nnMiddle = nnS.nn_build(1, [784, 20, 20, 3], eta=eta, nonlin=nonlin)
    nnDecent = nnS.nn_build(1, [784, 20, 20, 3], eta=eta, nonlin=nonlin)

    print minim
    data_map = []
    label_map = []
    for i in range(sample_size, minim - minim % sample_size, sample_size):

        groups_data = []
        groups_label = []
        nets = []
        batches = []

        horn1_data, horn1_label = [
            x for x in iter_minibatches(1, total_data[0], total_label[0])
        ]
        horn2_data, horn2_label = [
            x for x in iter_minibatches(1, total_data[2], total_label[2])
        ]
        middle_data, middle_label = [
            x for x in iter_minibatches(1, total_data[1], total_label[1])
        ]
        iters = 500

        #These are the total data pools.  We then turn them into mini batches for centralized and decentralized
        new_total_data, new_total_label = organize_data(
            total_data, total_label, i, sample_size)
        centralized_data, centralized_label = [
            x for x in iter_minibatches(1, new_total_data, new_total_label)
        ]
        #print str(new_total_data[0]) + " " + str(new_total_label[0])
        #print str(new_total_data[1]) + " " + str(new_total_label[1])
        #print str(new_total_data[2]) + " " + str(new_total_label[2])
        #print str(new_total_data[3]) + " " + str(new_total_label[3])
        #print str(new_total_data[4]) + " " + str(new_total_label[4])
        #print str(new_total_data[5]) + " " + str(new_total_label[5])
        #print "break"
        batches_decent_data, batches_decent_label = [
            x for x in iter_minibatches(3, new_total_data, new_total_label)
        ]
        #print str(batches_decent_data[0][0]) + " " + str(batches_decent_label[0][0])
        #print str(batches_decent_data[0][1]) + " " + str(batches_decent_label[0][1])
        #print str(batches_decent_data[0][2]) + " " + str(batches_decent_label[0][2])
        #print str(batches_decent_data[1][0]) + " " + str(batches_decent_label[1][0])
        #print str(batches_decent_data[1][1]) + " " + str(batches_decent_label[1][1])
        #print str(batches_decent_data[1][2]) + " " + str(batches_decent_label[1][2])

        print len(batches_decent_data)
        print len(batches_decent_data[0])
        print len(batches_decent_label[0])
        print len(centralized_data)
        #Visit batches in this order:  decentralized, centralized, first horn, second horn
        # and the middle part of the crescent
        visitClassicBatches(nnDecent,
                            batches_decent_data,
                            batches_decent_label,
                            it=iters)
        visitClassicBatches(nnTogetherClassic,
                            centralized_data,
                            centralized_label,
                            it=iters)
        print len(horn1_data[i - sample_size:i])
        #visitClassicBatches(nnHorn1,horn1_data[i-sample_size:i],horn1_label[i-sample_size:i],it=iters)
        #visitClassicBatches(nnHorn2,horn2_data[i-sample_size:i],horn2_label[i-sample_size:i],it=iters)
        #visitClassicBatches(nnMiddle,middle_data[i-sample_size:i],middle_label[i-sample_size:i],it=iters)

        #The accuracies of our neural networks
        togetherAcc = accuracy(nnTogetherClassic,
                               validation_data,
                               validation_label,
                               thr=0.5)
        one = accuracy(nnDecent, validation_data, validation_label, thr=0.5)
        #oneAcc = accuracy(nnHorn1, validation_data, validation_label, thr=0.5)
        #twoAcc = accuracy(nnHorn2, validation_data, validation_label, thr=0.5)
        #midAcc = accuracy(nnMiddle, validation_data, validation_label, thr=0.5)

        print "accuracies"
        print one
        print togetherAcc
        #print oneAcc
        #print twoAcc
        #print midAcc

        nn1Acc[te][i / 10] = one
        classAcc1[te][i / 10] = togetherAcc
Example #8
def sing_run(te):
    print te
    data, label = make_moons(n_samples=2000, shuffle=True, noise=0.01,random_state = int(time.time()))
    
    data,validation_data,label,validation_label = train_test_split(data,label,train_size = .50)
    
    #Here, we have a list of three lists, one for each piece of the "moon"
    total_data, total_label = split(data,label)
    
    total_data, total_label = randomize(total_data,total_label,4)

    #find the minimum between the three sides.
    minim = min(min(len(total_data[0]),len(total_data[1])),len(total_data[2]))

    


    #Our neural networks: a centralized baseline, three per-shard nets, and three decentralized peers
    nnTogetherClassic = nnS.nn_build(1,[2,6,6,1],eta=eta,nonlin=nonlin)
    nnHorn1 = nnS.nn_build(1,[2,6,6,1],eta=eta,nonlin=nonlin)
    nnHorn2 = nnS.nn_build(1,[2,6,6,1],eta=eta,nonlin=nonlin)
    nnMiddle = nnS.nn_build(1,[2,6,6,1],eta=eta,nonlin=nonlin)

    nnDecent = []
    for i in range(3):
      nnDecent.append(nnS.nn_build(1,[2,6,6,1],eta=eta,nonlin=nonlin))
    print minim
    data_map = []
    label_map = []
    for i in range(sample_size,minim - minim%sample_size,sample_size):

        groups_data = []
        groups_label = []
        nets = []
        batches = []

        horn1_data,horn1_label = [x for x in iter_minibatches(1,total_data[0],total_label[0])]
        horn2_data,horn2_label = [x for x in iter_minibatches(1,total_data[2],total_label[2])]
        middle_data,middle_label = [x for x in iter_minibatches(1,total_data[1],total_label[1])]
        iters = 3000
        
        #These are the total data pools.  We then turn them into mini batches for centralized and decentralized
        new_total_data,new_total_label = organize_data(total_data, total_label,i,sample_size)
        centralized_data,centralized_label = [x for x in iter_minibatches(1,new_total_data,new_total_label)]
        #print str(new_total_data[0]) + " " + str(new_total_label[0])
        #print str(new_total_data[1]) + " " + str(new_total_label[1])
        #print str(new_total_data[2]) + " " + str(new_total_label[2])
        #print str(new_total_data[3]) + " " + str(new_total_label[3])
        #print str(new_total_data[4]) + " " + str(new_total_label[4])
        #print str(new_total_data[5]) + " " + str(new_total_label[5])
        #print "break"
        batches_decent_data, batches_decent_label = [x for x in iter_minibatches(3,new_total_data,new_total_label)]
        #print str(batches_decent_data[0][0]) + " " + str(batches_decent_label[0][0])
        #print str(batches_decent_data[0][1]) + " " + str(batches_decent_label[0][1])
        #print str(batches_decent_data[0][2]) + " " + str(batches_decent_label[0][2])
        #print str(batches_decent_data[1][0]) + " " + str(batches_decent_label[1][0])
        #print str(batches_decent_data[1][1]) + " " + str(batches_decent_label[1][1])
        #print str(batches_decent_data[1][2]) + " " + str(batches_decent_label[1][2])
        data_map = data_map + batches_decent_data
        label_map = label_map + batches_decent_label
        #Debug tool to visualize data
        plotArrX = []
        plotArrY = []
        plotColor = []

        #Visit batches in this order:  decentralized, centralized, first horn, second horn
        # and the middle part of the crescent
        visitbatches(nnDecent,batches_decent_data,batches_decent_label, it=iters)
    
        visitClassicBatches(nnTogetherClassic,centralized_data,centralized_label,it=iters)

        visitClassicBatches(nnHorn1,horn1_data[i-sample_size:i],horn1_label[i-sample_size:i],it=iters)
        visitClassicBatches(nnHorn2,horn2_data[i-sample_size:i],horn2_label[i-sample_size:i],it=iters)
        visitClassicBatches(nnMiddle,middle_data[i-sample_size:i],middle_label[i-sample_size:i],it=iters)
    
        #The accuracies of our neural networks
        togetherAcc = accuracy(nnTogetherClassic,validation_data,validation_label,thr=0.5)
        one = accuracy(nnDecent[0], validation_data, validation_label, thr=0.5)
        oneAcc = accuracy(nnHorn1, validation_data, validation_label, thr=0.5)
        twoAcc = accuracy(nnHorn2, validation_data, validation_label, thr=0.5)
        midAcc = accuracy(nnMiddle, validation_data, validation_label, thr=0.5)
        two = accuracy(nnDecent[1], validation_data, validation_label, thr=0.5)
        three = accuracy(nnDecent[2], validation_data, validation_label, thr=0.5)

        
        
        print "accuracies"
        print one
        print two
        print three
        print togetherAcc
        print oneAcc
        print twoAcc
        print midAcc

        nn1Acc[0][te][i/10] = one
        nn1Acc[1][te][i/10] = two
        nn1Acc[2][te][i/10] = three
        classAcc1[te][i/10] = togetherAcc
        horn1Acc[te][i/10] = oneAcc
        horn2Acc[te][i/10] = twoAcc
        middleAcc[te][i/10] = midAcc
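
visitClassicBatches is called in two shapes: (nn, data, labels, it=...) inside the incremental loops above, and with an extra validation set, accuracy table, and run index te elsewhere. A sketch covering both shapes, with nnS.nn_train as an invented name for one training update:

# Hypothetical sketch of visitClassicBatches. nnS.nn_train is an assumed
# name for a single gradient update on one batch.
def visitClassicBatches(nn, batch_data, batch_label,
                        validation_data=None, validation_label=None,
                        acc=None, te=None, it=100):
    for step in range(it):
        for bd, bl in zip(batch_data, batch_label):
            nnS.nn_train(nn, bd, bl)  # assumed API
        if acc is not None:
            # record this run's learning curve
            acc[te][step] = accuracy(nn, validation_data,
                                     validation_label, thr=0.5)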
Example #9
def sing_run(te):
    print te
    data, label = make_moons(n_samples=2000, shuffle=True, noise=0.01,random_state = int(time.time()))
    
    data,validation_data,label,validation_label = train_test_split(data,label,train_size = .50)
    
    #Here, we have a list of three lists, one for each piece of the "moon"
    total_data, total_label = split(data,label)
    
    total_data, total_label = randomize(total_data,total_label,time.time())
    #for item in validation_label:
      #print item
    #exit(1)
    
    #HERE    
    
    print len(total_data)
    print len(total_data[0])
    horn1_data,horn1_label = [x for x in iter_minibatches(1,total_data[0][:sample_size],total_label[0][:sample_size])]
    horn2_data,horn2_label = [x for x in iter_minibatches(1,total_data[2][:sample_size],total_label[2][:sample_size])]
    middle_data,middle_label = [x for x in iter_minibatches(1,total_data[1][:sample_size],total_label[1][:sample_size])]
    iters = 500
        
    #These are the total data pools.  We then turn them into mini batches for centralized and decentralized
    new_total_data,new_total_label = organize_data(total_data, total_label,sample_size,sample_size)
    print len(new_total_data)
    centralized_data,centralized_label = [x for x in iter_minibatches(1,new_total_data,new_total_label)]
    #print str(new_total_data[0]) + " " + str(new_total_label[0])
    #print str(new_total_data[1]) + " " + str(new_total_label[1])
    #print str(new_total_data[2]) + " " + str(new_total_label[2])
    #print str(new_total_data[3]) + " " + str(new_total_label[3])
    #print str(new_total_data[4]) + " " + str(new_total_label[4])
    #print str(new_total_data[5]) + " " + str(new_total_label[5])
    #print "break"
    batches_decent_data, batches_decent_label = [x for x in iter_minibatches(3,new_total_data,new_total_label)]
    

    #Here, we have a list of three lists, one for each piece of the "moon"
    #total_data, total_label = split(data,label)
    
    #total_data, total_label = randomize(total_data,total_label,4)
    
    #find the minimum between the three sides.
    
    minim = min(min(len(total_data[0]),len(total_data[1])),len(total_data[2]))
    print minim


    #Our neural networks: a centralized baseline, three per-shard nets, and three decentralized peers
    nnTogetherClassic = nnS.nn_build(1,[2,6,6,1],eta=eta,nonlin="tanh")
    nnHorn1 = nnS.nn_build(1,[2,6,6,1],eta=eta,nonlin="tanh")
    nnHorn2 = nnS.nn_build(1,[2,6,6,1],eta=eta,nonlin="tanh")
    nnMiddle = nnS.nn_build(1,[2,6,6,1],eta=eta,nonlin="tanh")
    nnDecent = []
    for i in range(3):
      nnDecent.append(nnS.nn_build(1,[2,6,6,1],eta=eta,nonlin="tanh"))
    #one = accuracy(nnTogetherClassic, validation_data, validation_label, thr=0.5)
    total_data, total_label = randomize(total_data,total_label,time.time())
    #new_total_data,new_total_label = organize_data(total_data, total_label,minim,minim)
   
    #HERE
    
    #centralized_data,centralized_label = [x for x in iter_minibatches(1,total_data[0]+total_data[1]+total_data[2],total_label[0]+total_label[1]+total_label[2])]
    batches_decent_data, batches_decent_label = [x for x in iter_minibatches(3,new_total_data,new_total_label)]
    print len(batches_decent_data[0])
    visitbatches(nnDecent,batches_decent_data,batches_decent_label,validation_data, validation_label,nn1Acc,te, it=iters)
    print "finished decents"

    visitClassicBatches(nnTogetherClassic,centralized_data,centralized_label,validation_data, validation_label,classAcc,te,it=iters)
    print "finished cents"
    
    visitClassicBatches(nnHorn1,horn1_data,horn1_label,validation_data, validation_label,eights,te,it=iters)
    print "finished horn1"
      
    visitClassicBatches(nnHorn2,horn2_data,horn2_label,validation_data, validation_label,sevens,te,it=iters)
        
    visitClassicBatches(nnMiddle,middle_data,middle_label,validation_data, validation_label,zeros,te,it=iters)
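
visitbatches drives the decentralized ensemble (built from nnDif in several examples, with output files named like decent-remGrad-all.txt, hinting at remote-gradient exchange). The sketch below is deliberately rough: each peer trains on its own slice of every batch, and nnDif.nn_train, an invented name, is assumed to handle the gradient exchange internally:

# Hypothetical sketch of visitbatches. Each decentralized batch holds one
# sample per peer (batch size == len(nets)), and nnDif.nn_train is an
# assumed API that updates one peer while sharing gradients with the rest.
def visitbatches(nets, batch_data, batch_label,
                 validation_data=None, validation_label=None,
                 acc=None, te=None, it=100):
    for step in range(it):
        for bd, bl in zip(batch_data, batch_label):
            for k in range(len(nets)):
                nnDif.nn_train(nets[k], [bd[k]], [bl[k]])  # assumed API
        if acc is not None:
            # track the first peer's learning curve, as nn1Acc suggests
            acc[te][step] = accuracy(nets[0], validation_data,
                                     validation_label, thr=0.5)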