Example #1
0
def connectToWorkers(debug, workers, wport):
	"""Open one TCP connection to every worker host.

	Args:
		debug: debug flag forwarded to common.debugPrint.
		workers: iterable of worker hostnames/addresses.
		wport: worker port number to connect to.

	Returns:
		List of connected sockets, in the same order as `workers`.
	"""
	connections = []
	for host in workers:
		common.debugPrint(debug, 'Connecting to Worker at ' + host + ':' + str(wport))
		conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
		conn.connect((host, wport))
		connections.append(conn)
	return connections
Example #2
0
def predict_emotion(data: deque, cpu: bool = False) -> str:
    """Classify a window of EEG samples into one of four emotions.

    Args:
        data: deque of EEG samples; reshaped to (150, 1), so it is assumed
            to hold exactly 150 values — TODO confirm upstream recorder.
        cpu: when True, load the saved model.h5 pinned to the CPU device;
            otherwise build a fresh model via build_model(). Defaults to
            False (the original hard-coded behaviour).

    Returns:
        One of "happy", "melancholy", "surprised", "calm". If the predicted
        index is out of range, a random emotion is returned instead of
        raising.
    """
    debugPrint("Loading model....")
    model_path = f"{Path.cwd()}/model.h5"
    if cpu:
        # Force model loading (and its ops) onto the CPU device.
        with tf.device('/cpu:0'):
            model = keras.models.load_model(model_path)
    else:
        model = build_model()

    prediction = model.predict(np.array(list(data)).reshape(150, 1))
    emotions = ["happy", "melancholy", "surprised", "calm"]
    # NOTE(review): double argmax (indexing the prediction by its own flat
    # argmax) looks suspicious — preserved as-is; verify intended behaviour.
    choice = np.argmax(prediction[np.argmax(prediction)])

    try:
        return emotions[choice]
    except (IndexError, TypeError) as e:
        # Fall back to a random label rather than crashing the pipeline.
        debugPrint(
            f"An error occurred when trying to return the emotion: {e}, with choice index of: {choice}"
        )
        return random.choice(emotions)
Example #3
0
	sys.exit(1)

delay = '5' # Milliseconds in between LoadClient requests.


# Main.

# Announce the run configuration (serverName/hostname/versions/etc. are
# defined earlier in the file, outside this view).
common.printAndFlush('Configuration: server=' + serverName + ', worker=' + hostname)
common.printAndFlush('Global: versions=' + str(versions) + ', numClients=' + str(numClients) + ', messageSizes=' + str(messageSizes) + ', sessionLengths=' + str(sessionLengths))

# TCP listening socket for the controlling server script.
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# NOTE(review): wport appears to be a string here (int() needed for bind,
# concatenated directly into the log message below) — confirm where parsed.
serverSocket.bind((hostname, int(wport)))
serverSocket.listen(5) # 5 seems to be a kind of default.

common.debugPrint(debug, 'Listening on port: ' + wport)

# Block until the server script connects; s is the control connection.
(s, address) = serverSocket.accept()

common.debugPrint(debug, 'Server script connected, starting main loop...')

for v in versions:
	for clients in numClients:
		clients = str(int(clients) / len(workers))
			 
		for size in messageSizes:
			for length in sessionLengths:
				for i in range(0, repeats): 
					common.printAndFlush('Parameters: version=' + v + ', clients=' + clients + ', size=' + size + ', length=' + length + ', trial=' + str(i))
								 
					s.recv(1024);
Example #4
0
# Warmup/cooldown pauses between benchmark phases, in seconds.
serverWarmup = 3 
workerWarmup = 3
coolDown = 3
loadSpinStart = 10


# Main.

# Announce the run configuration (workers/client/versions/etc. are defined
# earlier in the file, outside this view).
common.printAndFlush('Configuration: server=' + socket.gethostname() + ', workers=' + str(workers) + ', client=' + client)
common.printAndFlush('Global: versions=' + str(versions) + ', numClients=' + str(numClients) + ', messageSizes=' + str(messageSizes) + ', sessionLengths=' + str(sessionLengths))

# One TCP connection per worker host (see connectToWorkers).
loadClients = connectToWorkers(debug, workers, wport)

common.debugPrint(debug, 'Connecting to Client at ' + client + ':' + str(cport))

# Control connection to the timing client.
timerClient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
timerClient.connect((client, cport))
	
common.debugPrint(debug, 'Starting main loop...')
	
for v in versions:
	for clients in numClients:
		for size in messageSizes:
			for length in sessionLengths:
				for i in range(0, repeats):
					common.printAndFlush('Parameters: version=' + v + ', size=' + size + ', length=' + length + ', trial=' + str(i))
									
					command = renv
					
Example #5
0
import random
import numpy as np
from common import debugPrint
from collections import deque
from pathlib import Path
from train import build_model

# TensorFlow is optional at import time: a missing install only logs a
# warning here. NOTE(review): any later use of `tf`/`keras` would then raise
# NameError — confirm callers only reach that code when the import succeeded.
try:
    import tensorflow as tf
    import tensorflow.keras as keras
except ImportError:
    debugPrint("Unable to import Tensorflow! Is it installed?")


def predict_emotion(data: deque) -> None:
    debugPrint("Loading model....")
    model_path = f"{Path.cwd()}/model.h5"
    cpu = False
    model_path
    if cpu:
        with tf.device('/cpu:0'):
            model = keras.models.load_model(model_path)
    else:
        model = build_model()

    prediction = model.predict(np.array(list(data)).reshape(150, 1))
    emotions = ["happy", "melancholy", "surprised", "calm"]
    choice = np.argmax(prediction[np.argmax(prediction)])

    try:
        emotion = emotions[choice]
Example #6
0
	window = 3
else:
	window = 40


# Main.

# Announce the run configuration (serverName/hostname/window/etc. are
# defined earlier in the file, outside this view).
common.printAndFlush('Configuration: server=' + serverName + ', client=' + hostname)
common.printAndFlush('Global: window=' + str(window) + ', versions=' + str(versions) + ', numClients=' + str(numClients) + ', messageSizes=' + str(messageSizes) + ', sessionLengths=' + str(sessionLengths))

# TCP listening socket for the controlling server script.
serverSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

# Here cport is used as an int directly (str() applied only for logging),
# unlike the worker script where wport is a string.
serverSocket.bind((hostname, cport))
serverSocket.listen(5) # 5 seems to be a kind of default.

common.debugPrint(debug, 'Listening on port: ' + str(cport))	

# Block until the server script connects; s is the control connection.
(s, address) = serverSocket.accept()

common.debugPrint(debug, 'Server script connected, starting main loop...')

for v in versions:
	for clients in numClients:
		for size in messageSizes:
			for length in sessionLengths:
				for i in range(0, outers):
					common.printAndFlush('Parameters: version=' + v + ', clients=' + clients + ', size=' + size + ', length=' + length + ', trial=' + str(i))
		
					s.recv(1024)
					
					prefix = renv + ' -cp tests/classes aplas.bmarks2.micro.SignalClient ' + serverName + ' ' + sport 
import time
import json
import random
from common import debugPrint
from LSL_helper import connectToEEG, recordEEG
from model_helper import predict_emotion


## Report the predicted emotion to the desktop as one JSON line on stdout.
## Per the original note, the desktop treats this message as the end of
## this process.
def sendPredictedEmotion(emotion: str) -> None:
  """Print {"emotion": <emotion>} as JSON for the desktop app to consume."""
  payload = { 'emotion' : emotion }
  print(json.dumps(payload))

## Tell the desktop that the EEG connection has been established.
def confirmConnection() -> None:
  """Print a JSON confirmation line ({"hasConfirmed": true}) on stdout."""
  message = { 'hasConfirmed' : True }
  print(json.dumps(message))

# TODO: Error handling for None
# TODO: Add remaining model code
# Main flow: connect to the EEG stream, confirm the connection to the
# desktop, record a window of data, classify it, and report the result.
stream_connection = connectToEEG()
confirmConnection()
data = recordEEG(stream_connection)
# predict_emotion returns an emotion label string (imported from model_helper).
emotion = predict_emotion(data)
debugPrint(f"the emotion was: {emotion}")
sendPredictedEmotion(emotion)