Example #1
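# Assumed context for this snippet: pandas imported as pd, plus the project's
# own preprocessing, learn, and test modules (not shown here).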
def main():
    csv_data = pd.read_csv(
        'https://firebasestorage.googleapis.com/v0/b/fir-crud-36cbe.appspot.com/o/Iris.csv?alt=media&token=71bdac3f-96e5-4aae-9b60-78025c1d3330'
    )
    csv_data = preprocessing.normalization(csv_data)  # scale the feature columns
    dfTraining, dfTesting = preprocessing.splitData(csv_data)  # train/test split
    biases = (0.0001, 0.0001)
    weights = learn.learning(dfTraining, 0.5, biases)  # 0.5 is presumably the learning rate
    test.testing(dfTesting, weights, biases)
Example #2
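# Assumed context for this snippet: cv2, datetime, and multiprocessing
# imported, a project-local learning module, and queues q1-q4 wired up by the
# caller (frames, content names, cycle sizes, and prediction output).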
def faceDetect(q1, q2, q3, q4):  # detects faces on screen every 3 seconds
    # the path below must be edited before running
    detector = cv2.CascadeClassifier(
        'C:\\Users\\LG\\Desktop\\facedetection\\opencv-master\\haarcascade_frontalface_default.xml'
    )
    content = {}  # per content category in a cycle, stores the max number of people detected while it aired
    makeD = learning.learning()  # class that runs the training

    numCycle = 0  # how many cycles have arrived
    numCycle_in = []  # list of what is broadcast within each cycle
    cycleNow = 1  # which cycle is currently running
    checkN = 0  # how many content items have been broadcast so far
    timeP = datetime.datetime.now()  # timestamp used to run training once a day

    while True:
        if timeP.strftime('%D') != datetime.datetime.now().strftime(
                '%D'):  # a day has passed, so run the training
            data = makeD.data
            p1 = multiprocessing.Process(target=makeD.predict,
                                         args=(
                                             data,
                                             q4,
                                         ))
            p1.start()
            cycleNow = 1  # reset so the cycles start over from the beginning
            numCycle = 0
            numCycle_in = []
            for k in makeD.data.keys():
                makeD.data[k] = []
            timeP = datetime.datetime.now()

        if q3.qsize():  # a cycle has arrived
            numCycle_in.append(q3.get())  # store how many content items the cycle contains
            numCycle += 1

        if q1.qsize() and q2.qsize():  # run detection when a frame arrives in the queue
            name = q2.get()  # info about the content currently being broadcast
            if not (name in content):
                content[name] = 0

            frame = q1.get()
            gray = cv2.cvtColor(frame,
                                cv2.COLOR_BGR2GRAY)  # the detector only works on grayscale, so convert
            gray = cv2.equalizeHist(gray)  # improves accuracy (reduces extreme contrast)
            faces = detector.detectMultiScale(gray, 1.3, 5)  # find the faces

            for k in content.keys():  # keep the maximum face count
                if name == k and content[name] < len(faces):
                    content[name] = len(faces)

            s = datetime.datetime.now().strftime('%S')  # print the detected face count every unit of time
            print(s + "\t" + str(len(faces)) + " people\n")

            checkN += 1

        if (len(numCycle_in) == cycleNow) and numCycle_in[
                cycleNow - 1] == checkN:  # a full cycle is done; record it per cycle before training
            makeD.makeRow(content, cycleNow)
            content.clear()  # reset for the next cycle
            checkN = 0
            if cycleNow < numCycle:  # a new cycle has already arrived
                cycleNow += 1
Example #3
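# Assumed context for this snippet: pandas objects df, tX, ty, and loansts, a
# boolean mask select_index, xgboost imported as xgb, and a project-local
# module ln that provides learning().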
y = ty["loan_status"][select_index].values
X = tX[select_index].values

# test set
testX = tX[~select_index].values
# test id
testid = df[["member_id"]][~select_index].copy()  # copy so the assignment below doesn't hit a view
# current loan status of test set
test_sts = loansts[~select_index].values


### training models #####
depth = 9
eta = 0.05
rounds = 100
trs, tes, bst = ln.learning(X, y, depth, eta, rounds, 1.0)

print(trs)
print(tes)

dtestX = xgb.DMatrix(testX)
output = bst.predict(dtestX)

#### output results #####
testid["loan_status"] = output
testid["loan_status"] = testid["loan_status"].map(lambda x: "Fully Paid" if x > 0.5 else "Charged Off")
testid.to_csv("prediction.csv")


####### visualization ######
Example #4
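# Assumed context for this snippet: threading imported, plus project-local
# modules mvd (movement detection), spot_control, webservice, djstatus, and
# learning.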
loop_seconds = 10

print("# Initialising Movement Detection ...")
webcam = mvd.diffDetection(loop_seconds - 1)
webcam.grabFrame()

#djstat = djstatus.djstatus()
print("# Initialising Spotify ...")
spotcontrol = spot_control.spot_control(True)

print("# Starting Threads ...")
threading.Thread(target=webservice.run).start()
threading.Thread(target=webcam.run).start()

qlearn = learning.learning()

# getRandomParamter();

print("# Enter Main-Loop ...")

wait = False
while True:

    print("# Get Next Parameters/State ...")
    params = qlearn.getNextParamters()
    # params[0][0] is genre
    #TODO consider reduction to genre+energy
    #duration = spotcontrol.play(params[0], params[1], params[3], params[2], wait)
    duration = spotcontrol.play(params[0], params[1], wait)
    djstatus.clear_vote()
Example #5
def execute(extracting_dic, learning_dic):
    # Unpack each dict as keyword arguments for the corresponding stage
    extracting(**extracting_dic)
    learning(**learning_dic)
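# A hypothetical call, assuming extracting() and learning() accept keyword
# arguments like these (the keys below are invented for illustration):
# execute({"src_dir": "data/raw"}, {"epochs": 10, "lr": 0.01})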
Example #6
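# Assumed context for this snippet: numpy as np, matplotlib.pyplot as plt,
# scipy.interpolate.interp1d, FloatProgress and display from ipywidgets /
# IPython.display, and the project's own learning() function.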
def benchmark(param, param_range=[0, 1, 0.1], learn=[], learn_i=[], test=[], test_i=[], learn_v="auto", test_v="auto", options={}, folder_learn="src/learning/", folder_test="src/tests/", neurons=(100,), curve="interpolate"):
    """
    Effectue un banc de tests avec des paramètres donnés en évaluant la précision du réseau de neurones artificiels.

    :param param: Nom du paramètre à changer (doit être le nom de la variable tel que défini dans la fonction compare)
    :type param: string (paramètre accepté par la fonction compare)
    :param param_range: Tableau tridimensionnel contenant la valeur de début, de fin et le pas
    :type param_range: number[start, end, step]
    :param learn: Liste de noms formattable des échantillons d'apprentissage
    :type learn: string[]
    :param test: Liste de noms formattable des échantillons de test
    :type test: string[]
    :param learn_i: Liste de range (de 1 à n) pour la génération des fichiers du paramètre learn
    :type learn_i: number[]
    :param test_i: Liste de range (de 1 à n) pour la génération des fichiers du paramètre test
    :type test_i: number[]
    :param learn_v: Liste des valeurs des échantillons d'apprentissage (déterminé selon le nom du fichier si non précisé)
    :type learn_v: string[]
    :param test_v: Liste des valeurs des échantillons de test (déterminé selon le nom du fichier si non précisé)
    :type test_v: string[]
    :param folder_learn: Dossier contenant les échantillons d'apprentissage
    :type folder_learn: string
    :param folder_test: Dossier contenant les échantillons de tests
    :type folder_test: string
    :param options: Options de comparaison (voir la documentation de compare, certains paramètres sont requis)
    :type options: object
    :param neurons: Nombre de couches et de neuronnes
    :type neurons: (number, ...)
    """
    # Initialisation
    x = []
    y = []
    # Progress bar
    progress = FloatProgress(min=param_range[0], max=param_range[1]+param_range[2], description="Waiting...")
    display(progress)
    # Retrieve the file names to cut down computation time
    #_learn = learning_files(learn, learn_i)
    #_test = learning_files(test, test_i)
    # Vary the parameter
    for value in np.arange(param_range[0], param_range[1]+param_range[2], param_range[2]):
        # Computation in progress
        progress.value = value
        progress.description = "{p} : {v}".format(p=param, v=value)
        options[param] = value
        x.append(value)
        y.append(learning(
            learn=learn, learn_i=learn_i, test=test, test_i=test_i, learn_v=learn_v, test_v=test_v,
            debug=False, benchmark_only=True, progress=[progress, param_range[2]],
            options=options, folder_learn=folder_learn, folder_test=folder_test, neurons=neurons
        ))
    # Update the progress bar
    progress.value = progress.max
    progress.description = "Done!"
    progress.bar_style = "success"
    # New figure
    plt.figure(figsize=(8, 8), dpi=80, facecolor="w", edgecolor="k")
    ax = plt.subplot(111)
    if (curve == "interpolate") and (len(x) >= 3):
        xi = np.linspace(x[0], x[-1], num=len(x)*10)
        ax.plot(xi, interp1d(x, y, kind="cubic")(xi))
    else:
        ax.plot(x, y)
    ax.set_xlabel("Variation of parameter {x}".format(x=param))
    ax.set_xlim(param_range[0], param_range[1])
    ax.set_ylabel("Accuracy")
    ax.set_ylim(0, 1)
    plt.show()

    return x, y
Example #7
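# Assumed context for this Python 2 snippet: glob imported as g, a MySQL
# driver imported as sql, the project's learn module and serialization helper
# l, plus os and time.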
# Define path and size of the ID of the image
path = "./image/*.jpg"
tam = len(path) - 5  # length of the directory prefix (everything before "*.jpg")

x = g.glob(path)

# Establish the connection with our mysql database
db = sql.connect(host="localhost", user="******", passwd="root", db="gdsa")


nfile = raw_input("Insert the name of the learning file: ")

# if this learning file doesn't exist we create it
if not os.path.isfile(nfile):
    start = time.time()
    dlearn = learn.learning(x, tam, db)
    stop = time.time()
    l.serialization(dlearn, nfile)
    print "Total time to learn: " + str(stop - start) + " seconds"
else:
    dlearn = l.deserialization(nfile)


path = raw_input(
    "Insert the path of the folder of images that you want to classify; the path can be relative or absolute: \n"
)
tam = len(path) + 1
x = g.glob(path + "/*.jpg")

# Image classification
start = time.time()
Example #8
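# Assumed context for this snippet: torch imported; emg (sample array), hist
# (3-D histogram helper), shots_N, emg_chunk_size, channels_N, myRand(),
# targs_learn, and the project's Net class and learning() function all defined
# earlier in the original script.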
    for shot in range(shots_N):
        for t in range(shot * emg_chunk_size, (shot + 1) * emg_chunk_size):
            hist.step(emg[t, :] + [myRand() for x in range(channels_N)])

        one_class_data.append(
            hist.vals.reshape((hist.N * hist.N * hist.N)).copy())

    data_learn.append(one_class_data)
data_learn = torch.tensor(data_learn, dtype=torch.float, requires_grad=False)

# train the network

net = Net(hist.N * hist.N * hist.N)
learning(net=net,
         lr=.6,
         epoches_N=1400,
         data_learn=data_learn,
         targs_learn=targs_learn)
print(net(data_learn[0]))
print(net(data_learn[1]))
print(net(data_learn[2]))
print(net(data_learn[3]))
print(net(data_learn[4]))

# print("test time!")
#print(net(torch.tensor(data_test[0],dtype=torch.float)))
# print('\n\n\n')
#print(net(torch.tensor(data_test[1],dtype=torch.float)))
Example #9
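# Fragment of a Python 2 script (apparently the same project as Example #7):
# x, xl, tam, and db are defined earlier in the original source.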
			ind += 1
		cont += 1
	while cont < len(x):
		xc.append(x[ind])
		if ind == len(x) - 1:
			ind = 0
		else:
			ind += 1
		cont += 1

	nfile = raw_input("Insert the name of the learning file: ")

	# if this learning file doesn't exist we create it
	if not os.path.isfile(nfile):
		start = time.time()
		dlearn = learn.learning(xl, tam, db)
		stop = time.time()
		l.serialization(dlearn, nfile)
		print "Total time to learn: " + str(stop - start) + " seconds"
	else:
		while os.path.isfile(nfile):
			nfile = raw_input("That file already exists. Insert a valid name of the learning file: ")
			if not os.path.isfile(nfile):
				start = time.time()
				dlearn = learn.learning(xl, tam, db)
				stop = time.time()
				l.serialization(dlearn, nfile)
				print "Total time to learn: " + str(stop - start) + " seconds"
				break

	# Image classification
Example #10
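# Assumed context for this snippet: numpy as np, torch, and logging imported,
# a params object with a cuda flag, and project-local Options, Dataset, and
# learning().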
        self.trainset, self.trainsetdata = [[words]], [words]
        device = 'cuda' if params.cuda else 'cpu'
        self.testsetdata = [torch.from_numpy(words.astype(np.float32)).clone().to(device)]

name = 'text'

logger = logging.getLogger('{}Log'.format(name))  # set the logger's output name
logger.setLevel(20)  # set the log level (20 = INFO)
logger.addHandler(logging.StreamHandler())  # also send the log to the console
logging.basicConfig(filename='{}.log'.format(name), format="%(message)s", filemode='w')  # set the log file destination

text_src_path = 'humanRights/English'
# dico,embeddings = read_txt_embeddings(params,source=True)
# src_words = loadFile(text_src_path,embeddings,word2id)
# np.save('words_vec/'+text_src_path, words)
src_words = np.load('words_vec/'+text_src_path+'.npy')

text_tgt_path = 'humanRights/Spanish'
# dico,embeddings = read_txt_embeddings(params,source=False)
# tgt_words = loadFile(text_tgt_path,embeddings,dico.word2id)
# np.save('words_vec/'+text_tgt_path, tgt_words)
tgt_words = np.load('words_vec/'+text_tgt_path+'.npy')

M = [len(src_words), len(tgt_words)]  # sequence lengths
print(M)
options = Options()
src_data = Dataset(src_words)
tgt_data = Dataset(tgt_words)

src_data, tgt_data = learning(params,src_data,tgt_data,options)
Example #11
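# Assumed context for this snippet: numpy as np, TensorFlow 1.x imported as tf
# (for tf.contrib.training.HParams), and FLAGS, datapath, dataset, logpath,
# and the learning module defined elsewhere in the original project.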
def main(unused_argv):
    def setparam():
        model_params = tf.contrib.training.HParams(
            archi=archi,
            epoch=epoch,
            s_trainable=s_trainable,
            total_loss=total_loss,
            h_trainable=h_trainable,
            modalities=modalities,
            num_layers=num_layers,
            connection=connection,
            combination=combination,
            # input params
            text_dim=text_dim,
            audio_dim=audio_dim,
            video_dim=video_dim,
            audiofeature=audiofeature,
            videofeature=videofeature,
            # output params
            out_dim=out_dim,
            MEAN=MEAN,
            STD=STD,
            # embedding params
            str2id_path=str2id_path,
            embed_path=embed_path,
            UNK_path=UNK_path,
            num_trainable_words=num_trainable_words,
            # fnn params
            denses=denses,
            # cnn params
            cksizes=cksizes,
            wksizes=wksizes,
            fsizes=fsizes,
            cstrides=cstrides,
            wstrides=wstrides,
            batch_norm=batch_norm,
            pool_type=pool_type,
            pool_size=pool_size,
            # rnn params
            rnns=rnns,
            bidirectional=bidirectional,
            cell_type=cell_type,
            # activation params
            act=act,
            rnnact=rnnact,
            gateact=gateact,
            # pool params
            globalpool_type=globalpool_type,
            # learning params
            L2=L2,
            batchsize=batchsize,
            dropout=dropout,
            lamda=lamda,
            learning_rate=learning_rate,
            gradient_clipping_norm=gradient_clipping_norm,
            losses=losses)
        return model_params

    MEAN, STD = np.load(
        '%s/firstimpression%s/tfrecord/big5/train/mean_std.npy' %
        (datapath, dataset))
    MEAN = MEAN[:-1].tolist()
    STD = STD[:-1].tolist()
    num_layers = 1
    connection = 'stack'  # stack, dense
    combination = 'normal'  # normal, parallel, residual
    # input params
    text_dim = [256]
    audio_dim = [512]
    video_dim = [1024]
    audiofeature = '512'
    videofeature = '1024'
    # output params
    out_dim = 6
    # embedding params
    str2id_path = '%s/firstimpressionV2/tfrecord/text/word2ID.txt' % (datapath)
    embed_path = '%s/firstimpressionV2/tfrecord/text/embedding_matrix.npy' % (
        datapath)
    UNK_path = '%s/firstimpressionV2/tfrecord/text/UNK.npy' % (datapath)
    num_trainable_words = 7
    # cnn params
    cksizes = 2
    wksizes = 6
    fsizes = 128
    cstrides = 1
    wstrides = 1
    batch_norm = 0
    # rnn params
    rnns = [128]
    bidirectional = 0
    cell_type = 'gru'
    # activation params
    act = 'relu'
    rnnact = 'tanh'
    gateact = 'tanh'
    # pool params
    globalpool_type = 'avg'
    pool_type = 'avg'
    pool_size = 2
    # learning params
    L2 = 0
    batchsize = 16
    learning_rate = 1e-4
    gradient_clipping_norm = 5.0
    lamda = 0.1
    modalities = ['text', 'audio', 'video']
    losses = ['att']
    # for s in archi.split('_'):
    #     total_loss = int(s[0])
    #     h_trainable = int(s[1])
    #     if len(s) == 3:
    #         s_trainable = int(s[2])
    dropout = 0.5

    denses = [128]
    batchsize = 16
    archi = '3782_3'
    total_loss = 1
    h_trainable = 1
    s_trainable = 1
    epoch = 3

    model_params = setparam()
    learning.learning(FLAGS, model_params, logpath)
    total_loss = 2
    h_trainable = 0
    s_trainable = 1
    epoch = 3

    model_params = setparam()
    learning.learning(FLAGS, model_params, logpath)
    total_loss = 3
    h_trainable = 0
    s_trainable = 0
    epoch = 1

    model_params = setparam()
    learning.learning(FLAGS, model_params, logpath)
Example #12
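# Assumed context for this snippet: socket and numpy imported, plus the
# project's serial_communication_checksum, auth_client, and learning modules.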
    def __init__(self, ip_addr, port_num):
        ready = False
        #init serial comms
        self.Scomms = serial_communication_checksum.serial_communication()
        #init wireless comms
        self.Wcomms = auth_client.client(ip_addr, port_num)
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        server_address = (ip_addr, port_num)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        sock.connect(server_address)
       
        file = 'takingReadings.csv'
        csv = open(file, "w")
        #while ready == False:
            #print('no handshake')
            #ready = self.Scomms.handshake()
        print("Entering training")
        self.Ml = learning.learning()
        model = self.Ml.machineTrain()
        data = numpy.zeros((30, 36))
        count = 0
        moveConcluded = [[],[]]
        consecutiveCount = 0
        print("Entering handshake")
        while not ready:
            ready = self.Scomms.handshake()

        print("Entering data receiving mode")
        while ready:
            try:
                receivedData = self.Scomms.receiveData()
                sensorData = receivedData.split('|')[0]
                csv.write(sensorData + '\n')
                current = receivedData.split('|')[1]
                voltage = receivedData.split('|')[2]
                power = receivedData.split('|')[3]
                cumpower = receivedData.split('|')[4]
                #print([int(x) for x in sensorData.split(',')])
                #data[count, 24:36] = [int(x) for x in sensorData.split(',')]
                if count >= 0:
                    data[count % 30, (count // 30) * 12:(count // 30) * 12 + 12] = [int(x) for x in sensorData.split(',')]
                count = count + 1
                if count == 90:
                    count = 0
                    move = self.Ml.processData(data, model)
                    moveConcluded[consecutiveCount] = move
                    consecutiveCount = (consecutiveCount + 1) % 2
                    #data[:,:12] = data[:,12:24]
                    #data[:,12:24] = data[:,24:36]
                    print(move)
                    print(sensorData)
                    if all((x != ["nomove"] and x == moveConcluded[0]) for x in moveConcluded):
                        if move == ["turnclap"] or move == ["squatturnclap"] or move == ["windowcleaner360"]:
                            count = -90
                        else:
                            count = -60
                        msg = self.Wcomms.packData(str(move), voltage, current, power, cumpower)
                        #print(msg)
                        sock.sendall(msg)
                        if (move == ["final"]):
                            break
                        moveConcluded = [[],[]]
                    	# print('message sent')

            except Exception as e:
                print(e)
Example #13
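# Assumed context for this snippet: torch imported; net, data_learn_mid,
# data_learn_prox, data_test_mid, and data_test_prox prepared earlier in the
# original script, with learning() used as in Example #8.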
data_learn = [torch.tensor(data_learn_mid, dtype=torch.float),
              torch.tensor(data_learn_prox, dtype=torch.float)]
# for a in data_learn:
#     a=torch.tensor(a,dtype=torch.float)

data_test = [data_test_mid, data_test_prox]
targs_learn = torch.tensor([[[1, 0]], [[0, 1]]], dtype=torch.float)
# targs_learn=[[torch.tensor([1,0])],[torch.tensor([0,1])]]

norm_val = 1. / 70
for pair in (data_test, data_learn):  # each list holds the mid/prox pair
    for t in pair:
        t *= norm_val  # scale in place

learning(net=net, lr=0.4, epoches_N=1000,
         data_learn=data_learn, targs_learn=targs_learn)

print("test time!")
#print(net(torch.tensor(data_test[0],dtype=torch.float)))
print('\n\n\n')
#print(net(torch.tensor(data_test[1],dtype=torch.float)))

o1 = net(torch.tensor(data_test[0], dtype=torch.float))
o2 = net(torch.tensor(data_test[1], dtype=torch.float))
sh_beg1 = 1 * o1.shape[0] // 3
sh_beg2 = 1 * o2.shape[0] // 3
sh_end1 = 2 * o1.shape[0] // 3
sh_end2 = 2 * o2.shape[0] // 3
#print(torch.sum(o1[:,0]>o1[:,1],dtype=torch.float)/o1.shape[0])