コード例 #1
0
 def __init__(self, master):
     """Initialize the frame, lay it out, build widgets, and prepare the recorder."""
     Frame.__init__(self, master)
     self.grid(column=4, row=10)
     self.create_widgets()

     # Recorder writes captured audio to wave_output_filename.
     self.recorder = Recording.Recording(
         wave_output_filename, CHANNELS, RATE, CHUNK)

     # True while a capture is in progress; starts idle.
     self.recordingtest = False
コード例 #2
0
    def __init__(self, master):
        """Build the GUI frame and set up the recording and emotion-processing helpers."""
        Frame.__init__(self, master)
        self.grid(column=1, row=5)
        self.create_widgets()

        # Audio recorder, and the emotion processor that consumes its output file.
        self.recorder = Recording.Recording(
            wave_output_filename, CHANNELS, RATE, CHUNK)
        self.processor = emotionProcessor.EmotionProcessor(wave_output_filename)

        # No capture in progress yet.
        self.recordingtest = False
コード例 #3
0
 def recordAudio(self):
     """Start audio capture unless a recording is already in progress.

     Creates a fresh Recording instance, starts it, and updates the status
     text; sets the recordingtest flag so a second call is a no-op until
     the recording is stopped elsewhere.

     Returns:
         self, so calls can be chained.
     """
     # Idiom fix: truthiness test instead of `== True` comparison.
     if self.recordingtest:
         print("Already Recording!")
     else:
         self.recorder = Recording.Recording(wave_output_filename, CHANNELS,
                                             RATE, CHUNK)
         self.recorder.startAudio()
         self.emotionalPrediction.set("Recording..")
         self.recordingtest = True
     return self
コード例 #4
0
from connections.postoffice import MasterConnection, ConnectionPostOfficeEnd
import Recording
import BCI
import PostOffice

def _main():
    """Wire up the connection endpoints, recorder, and BCI controller, then start the post office."""
    main_connection = ConnectionPostOfficeEnd.MainConnection()
    connections = MasterConnection.MasterConnection()
    recording = Recording.Recording()
    bci_controller = BCI.BCI(connections, main_connection, recording)
    PostOffice.PostOffice(main_connection, connections, bci_controller)


if __name__ == "__main__":
    _main()
コード例 #5
0
# USB stick and local fallback locations for recorded videos.
usbPath = "/media/pi/VIDEOS"
localPath = "/home/pi/Documents/localVids"

# Main loop state.
fileNameList = []
duration = 0
startTime = 0
stopTime = 0

# Device initialization: di.DeviceInitialization() returns -1 on failure,
# so retry until the hardware comes up.
x = di.DeviceInitialization()
while x == -1:
    x = di.DeviceInitialization()

print("SETUP COMPLETE \n")

# Record-then-process forever; di.finishInitialization() is called after
# each pass to prepare the device for the next recording.
while True:
    # Recording: yields the clip duration and the list of file names written.
    duration, fileNameList = rc.Recording()

    # Processing consumes the recorded files (return value was unused).
    pro.Processing(fileNameList, duration)
    print("Done Processing")  # BUG FIX: message was misspelled "Procesing"

    # Finish: reset device state for the next iteration.
    di.finishInitialization()
コード例 #6
0
# UNIT TEST : A1 (device-initialization result computed earlier in the file)
print("OUTPUT A1 : ", outputA1)
if (outputA1 == 0):
    print("UNIT TEST A1 DEVICE INITIALIZATION STATUS : PASS")
elif (outputA1 == -1):  # BUG FIX: was misspelled 'ouputA1' -> NameError on the failure path
    print("UNIT TEST A1 DEVICE INITIALIZATION STATUS : FAIL")
print("\n")

# UNIT TEST : A2, A3 — Recording should return (duration: float, fileNameList: list)
print("UNIT TEST A2, A3 : RECORDING")
print("INPUTS : None")
print(
    "EXPECTED OUTPUTS : 1. duration (float object) 2. fileNameList (list object)"
)
print("\n")
# duration, fnl
outputA23_1, outputA23_2 = Recording.Recording()
print("\n")
print("OUTPUTS : ", outputA23_1, outputA23_2)
if ((type(outputA23_1) == float) and (type(outputA23_2) == list)):
    print("UNIT TEST A2, A3 RECORDING STATUS : PASS")
else:
    print("UNIT TEST A2, A3 RECORDING STATUS : FAIL")

print("\n")

# UNIT TEST A4 — Processing takes the file list and duration from A2/A3
print("UNIT TEST A4 : PROCESSING")
print("INPUTS : 1. fileNameList (list object) 2. duration (float object)")
print("EXPECTED OUTPUT : True")
print("\n")
inputA4_1 = outputA23_2
コード例 #7
0
    def __init__(self,
                 pid,
                 eventfile,
                 datafile,
                 fixfile,
                 segfile,
                 log_time_offset=None,
                 aoifile=None,
                 prune_length=None,
                 require_valid_segs=True,
                 auto_partition_low_quality_segments=False,
                 rpsdata=None,
                 export_pupilinfo=False):
        """Inits BasicParticipant class.

        Reads the Tobii export files into a Recording, partitions the data
        into Segments/Scenes per the '.seg' file, and builds a whole-trial
        Scene prepended to the scene list.

        Args:
            pid: Participant id

            eventfile: a string containing the name of the "Event-Data.tsv"
                file for this participant

            datafile: a string containing the name of the "All-Data.tsv"
                file for this participant

            fixfile: a string containing the name of the "Fixation-Data.tsv"
                file for this participant

            segfile: a string containing the name of the '.seg' file for
                this participant

            log_time_offset: If not None, an integer indicating the time
                offset between the external log file and eye tracking logs

            aoifile: If not None, a string containing the name of the '.aoi'
                file with definitions of the "AOI"s.

            prune_length: If not None, an integer that specifies the time
                interval (in ms) from the beginning of each Segment in which
                samples are considered in calculations.  This can be used if,
                for example, you only wish to consider data in the first
                1000 ms of each Segment. In this case (prune_length = 1000),
                all data beyond the first 1000ms of the start of the
                "Segment"s will be disregarded.

            require_valid_segs: a boolean; forwarded to the Participant
                constructor and to process_rec to control whether only valid
                "Segment"s are used.

            auto_partition_low_quality_segments: a boolean indicating whether
                EMDAT should split the "Segment"s which have low sample
                quality, into two new sub "Segment"s discarding the largest
                gap of invalid samples.

            rpsdata: rest pupil sizes for all scenes if available

            export_pupilinfo: a boolean; forwarded to process_rec and the
                whole-trial Scene (presumably toggles raw pupil-data export —
                confirm in Recording/Scene).

        Yields:
            a BasicParticipant object
        """

        Participant.__init__(self, pid, eventfile, datafile, fixfile, segfile,
                             log_time_offset, aoifile, prune_length,
                             require_valid_segs,
                             auto_partition_low_quality_segments,
                             rpsdata)  # call the base Participant constructor

        print "reading the files"
        self.features = {}
        # Parse the raw Tobii exports (all-data, fixation and event files).
        rec = Recording.Recording(datafile,
                                  fixfile,
                                  event_file=eventfile,
                                  media_offset=params.MEDIA_OFFSET)
        print "Done!"

        # Scene/segment layout comes from the '.seg' file.
        scenelist, self.numofsegments = partition_Basic(segfile)
        print "partition done!"
        if aoifile != None:
            aois = Recording.read_aois_Tobii(aoifile)
        else:
            aois = None

        self.features['numofsegments'] = self.numofsegments

        # Split the recording into Segments/Scenes and compute their data.
        self.segments, self.scenes = rec.process_rec(
            scenelist=scenelist,
            aoilist=aois,
            prune_length=prune_length,
            require_valid_segs=require_valid_segs,
            auto_partition_low_quality_segments=
            auto_partition_low_quality_segments,
            rpsdata=rpsdata,
            export_pupilinfo=export_pupilinfo)
        Segments = self.segments  # NOTE(review): local alias is unused below
        # Whole-trial Scene spanning every Segment, inserted at index 0.
        self.whole_scene = Scene('P' + str(pid), [],
                                 rec.all_data,
                                 rec.fix_data,
                                 event_data=rec.event_data,
                                 Segments=self.segments,
                                 aoilist=aois,
                                 prune_length=prune_length,
                                 require_valid=require_valid_segs,
                                 export_pupilinfo=export_pupilinfo)
        self.scenes.insert(0, self.whole_scene)

        # Free per-scene intermediate data once processing is complete.
        for sc in self.scenes:
            sc.clean_memory()
コード例 #8
0
def main(morphFileFolder, outputFolder, embeddings_file, positive_words,
         negatve_words):
    """Aggregate per-question language metrics across all morph files.

    Loads word embeddings and positive/negative sentiment word lists, parses
    every file in morphFileFolder into a Recording, merges the i-th question
    of each recording into a combined per-question summary, then writes
    histograms and word clouds of POS/person/sentiment metrics to
    outputFolder.

    Args:
        morphFileFolder: directory whose files are each parsed as a Recording.
        outputFolder: directory that receives the generated plots.
        embeddings_file: path passed to the Embeddings loader.
        positive_words: CSV with a "word" column of positive sentiment words.
        negatve_words: CSV with a "word" column of negative sentiment words.
            NOTE(review): parameter name is misspelled ("negatve"), kept
            as-is since callers may pass it by keyword.
    """
    print "loading embedding vectors..."
    e = Embeddings(embeddings_file)
    print "done!"
    # Attach an embedding vector to every sentiment word.
    positive_df = pd.read_csv(positive_words)
    negative_df = pd.read_csv(negatve_words)
    positive_df["vector"] = positive_df["word"].apply(lambda x: e.get(x))
    negative_df["vector"] = negative_df["word"].apply(lambda x: e.get(x))

    # One Recording per regular file in the folder.
    files = [
        f for f in listdir(morphFileFolder) if isfile(join(morphFileFolder, f))
    ]
    recordings = []
    for f in files:
        recordings.append(Recording.Recording(join(morphFileFolder, f)))

    # Merge question i of every recording into questionSummaries[i],
    # growing the list as longer recordings are encountered.
    questionSummaries = []
    for r in recordings:
        for i in xrange(len(r.questions)):
            if len(questionSummaries) < (i + 1):
                questionSummaries.append(Question.Question([]))

            questionSummaries[i].mergeWith(r.questions[i])

    #specific metrics comparison across all questions
    # Keyed by 1-based question number.
    nouns = {}
    verbs = {}
    adjectives = {}
    adverbs = {}
    content = {}
    person_1 = {}
    person_2 = {}
    person_3 = {}

    for i, q in enumerate(questionSummaries):
        # Normalize POS/person counters by the question's word count.
        norm_pos = counter2normDictionary(q.pos, q.word_count)
        norm_per = counter2normDictionary(q.person, q.word_count)
        nouns[i + 1] = norm_pos["noun"]
        verbs[i + 1] = norm_pos["verb"]
        adjectives[i + 1] = norm_pos["adjective"]
        adverbs[i + 1] = norm_pos["adverb"]
        # "Content words" = nouns + verbs + adjectives + adverbs.
        content[i + 1] = norm_pos["noun"] + norm_pos["verb"] + norm_pos[
            "adjective"] + norm_pos["adverb"]
        person_1[i + 1] = norm_per["1"]
        person_2[i + 1] = norm_per["2"]
        person_3[i + 1] = norm_per["3"]
        # Python 2 backquotes == repr().
        print "Question " + ` (i + 1) ` + ", avg word count: " + ` (
            q.word_count / len(questionSummaries)) `

    counter2hist(nouns, 'Nouns', outputFolder)
    counter2hist(verbs, 'Verbs', outputFolder)
    counter2hist(adjectives, 'Adjectives', outputFolder)
    counter2hist(adverbs, 'Adverbs', outputFolder)
    counter2hist(content, 'Content words', outputFolder)
    counter2hist(person_1, '1st person', outputFolder)
    counter2hist(person_2, '2nd person', outputFolder)
    counter2hist(person_3, '3rd person', outputFolder)

    #raw metrics for each question
    sentiment_scores = {}
    for i, q in enumerate(questionSummaries):

        positive_score = calculate_sentiment_score(q.words, positive_df, e)
        negative_score = calculate_sentiment_score(q.words, negative_df, e)

        # NOTE(review): raises ZeroDivisionError when negative_score is 0 —
        # confirm calculate_sentiment_score can never return 0 here.
        print "Question " + ` (
            i + 1
        ) ` + ", Positive: " + ` positive_score ` + ", Negative: " + ` negative_score ` + ", Overall: " + ` (
            positive_score / negative_score) `
        sentiment_scores[i + 1] = (positive_score / negative_score)
        buildWordCloud(q.contentWords, True, 'Question ' + `
                       (i + 1) ` + ' Content Word Cloud', outputFolder)
        counter2hist(counter2normDictionary(q.pos, q.word_count),
                     'Question ' + ` (i + 1) ` + ' POS', outputFolder)
        counter2hist(counter2normDictionary(q.person, q.word_count),
                     'Question ' + ` (i + 1) ` + ' Person', outputFolder)

    counter2hist(sentiment_scores, 'Sentiment scores', outputFolder)