Example 1
0
File: tobii.py — Project: m-wu/EMDAT
    def read_all_data(self, all_file):
        """Read a Tobii "All-Data" file and return its rows as Datapoint objects.

        Args:
            all_file: name of the 'All-Data.tsv' file produced by the Tobii software.

        Returns:
            a list of "Datapoint"s, one per valid sample row.
        """
        datapoints = []
        with open(all_file, 'r') as infile:
            # Skip the fixed-size metadata header that precedes the column names.
            header_lines = params.ALLDATAHEADERLINES + params.NUMBEROFEXTRAHEADERLINES - 1
            for _ in xrange(header_lines):
                next(infile)
            for row in csv.DictReader(infile, delimiter="\t"):
                if not row["Number"]:  # rows without a sample number are invalid
                    continue
                left_pupil = utils.cast_float(row["PupilLeft"], -1)
                right_pupil = utils.cast_float(row["PupilRight"], -1)
                left_dist = utils.cast_float(row["DistanceLeft"], -1)
                right_dist = utils.cast_float(row["DistanceRight"], -1)
                # A sample is valid when at least one eye has validity code 0 or 1.
                valid = (utils.cast_int(row["ValidityRight"]) < 2
                         or utils.cast_int(row["ValidityLeft"]) < 2)
                datapoints.append(Datapoint({
                    "timestamp": utils.cast_int(row["Timestamp"]),
                    "pupilsize": Recording.get_pupil_size(left_pupil, right_pupil),
                    "distance": Recording.get_distance(left_dist, right_dist),
                    "is_valid": valid,
                    "stimuliname": row["StimuliName"],
                    "fixationindex": utils.cast_int(row["FixationIndex"]),
                    "gazepointxleft": utils.cast_float(row["GazePointXLeft"]),
                }))

        return datapoints
Example 2
0
    def read_all_data(self, all_file):
        """Return the valid screen-recording samples of an "All-Data" file.

        Args:
            all_file: name of the 'All-Data.tsv' file output by the Tobii software.

        Returns:
            a list of "Datapoint"s
        """
        result = []
        with open(all_file, 'r') as data_file:
            reader = csv.DictReader(data_file, delimiter="\t")
            for row in reader:
                if row["MediaName"] != 'ScreenRec':
                    # Not part of the screen recording -- skip.
                    continue
                if not row["ValidityLeft"] or not row["ValidityRight"]:
                    # No validity information recorded for this sample.
                    continue
                eye_left = utils.cast_float(row["PupilLeft"], -1)
                eye_right = utils.cast_float(row["PupilRight"], -1)
                dist_left = utils.cast_float(row["DistanceLeft"], -1)
                dist_right = utils.cast_float(row["DistanceRight"], -1)
                fields = {}
                fields["timestamp"] = utils.cast_int(row["RecordingTimestamp"])
                fields["pupilsize"] = Recording.get_pupil_size(eye_left, eye_right)
                fields["distance"] = Recording.get_distance(dist_left, dist_right)
                # Validity codes 0/1 mean the eye was tracked reliably.
                fields["is_valid"] = (utils.cast_int(row["ValidityRight"]) < 2 or
                                      utils.cast_int(row["ValidityLeft"]) < 2)
                fields["stimuliname"] = row["MediaName"]
                fields["fixationindex"] = utils.cast_int(row["FixationIndex"])
                fields["gazepointxleft"] = utils.cast_float(row["GazePointLeftX (ADCSpx)"])
                result.append(Datapoint(fields))

        return result
 def __generate_single_trial(self, label_nr, samples=1200):
   """Synthesize one trial as a Recording.

   Each electrode trace blends (via a logistic ramp centred at samples/2)
   from low-variance noise into label-dependent noise plus a small cosine
   component; labels switch from 0 to `label_nr` at the midpoint.
   """
   midpoint = 0.5*samples
   rec = Recording(self.number_of_electrodes, samples)
   for electrode in range(self.number_of_electrodes):
     for t in range(samples):
       # Logistic weight: ~0 before the midpoint, ~1 after it.
       weight = 1.0/(1 + math.exp(-(t - midpoint)))
       early = np.random.normal(0.0, 0.1)
       late = np.random.normal(0.0, 0.2*(label_nr + 1)) + 0.1*math.cos(t*label_nr)
       rec.set_data(electrode, t, (1.0 - weight)*early + weight*late)
       # NOTE(review): labels are rewritten once per electrode pass; harmless
       # only if set_label is idempotent -- confirm in Recording.
       rec.set_label(t, label_nr if t >= midpoint else 0)
   return rec
 def __init__(self, master):
     # Build the recorder GUI frame inside `master` and create its widgets.
     Frame.__init__(self, master)
     self.grid(column=4, row=10)
     self.create_widgets()
     # Recorder writes captured audio to `wave_output_filename` using the
     # module-level CHANNELS / RATE / CHUNK settings -- presumably PyAudio
     # parameters; confirm in the Recording module.
     self.recorder = Recording.Recording(wave_output_filename, CHANNELS,
                                         RATE, CHUNK)
     self.recordingtest = False  # True while a recording is in progress
    def __init__(self, scr):
        """Build the recording screen: background, buttons, icons, sentence
        list, font and the audio recorder."""
        self.scr = scr

        screen_w = Screen.screen.W
        screen_h = Screen.screen.H

        def _load_scaled(path, size):
            # Load an image from disk and scale it to (width, height).
            return pygame.transform.scale(pygame.image.load(path), size)

        self.backGround = _load_scaled(r"data\image\record.jpg",
                                       (screen_w, screen_h))
        self.generateButton = _load_scaled(r"data\image\generate.png",
                                           (int(screen_w/5), int(screen_h/10)))

        icon_side = int(screen_w/25)
        self.micIcon = _load_scaled(r"data\image\mic.png", (icon_side, icon_side))
        self.micIcon2 = _load_scaled(r"data\image\mic2.png", (icon_side, icon_side))

        small_side = int(screen_w/40)
        self.tickIcon = _load_scaled(r"data\image\tick.png", (small_side, small_side))
        self.crossIcon = _load_scaled(r"data\image\cross.png", (small_side, small_side))

        # One sentence per line; per-sentence recording state parallels it.
        self.sentences = open(r"data\text\record.txt").readlines()
        self.recorded = [0] * len(self.sentences)
        self.iconType = [False] * len(self.sentences)

        self.font = pygame.font.Font('freesansbold.ttf', screen_w // 52)
        self.button = Button.RButton(screen_w/8, screen_h/6.5, screen_w/25, scr)

        self.rec = Recording.recording()
        #screenSize(Screen.screen.W, Screen.screen.H, fullscreen= True)

        self.filename = ""
 def load_recordings(self):
   """Load labelled samples from self.filename into a Recording.

   The file is whitespace-separated with one row per sample; the first
   column is the label, the remaining columns are electrode values.

   Returns:
     a single-element list containing the populated Recording.

   Raises:
     IOError: if self.filename cannot be read.
   """
   try:
     data = np.loadtxt( self.filename )
   except IOError as e:
     print "I/O error({0}): {1}".format(e.errno, e.strerror)
     # Re-raise: the original code fell through and crashed with a
     # NameError on `data`; propagating the real error is clearer.
     raise

   # One sample per row. (The old `data.size/3` only worked for the
   # 2-electrode layout, where total elements / 3 columns == row count.)
   samples = data.shape[0]
   nr_electrodes = data[0].size - 1 # first column is labels

   rec = Recording(nr_electrodes, samples)
   sample_index = 0
   for t_vec in data[:samples]:
     rec.set_label(sample_index, round(t_vec[0]))
     # `electrode` instead of `e`, which shadowed the exception variable.
     for electrode in range(nr_electrodes):
       rec.set_data(electrode, sample_index, t_vec[1+electrode])
     sample_index += 1

   return [rec]
 def recordAudio(self):
     """Start capturing audio unless a recording is already running.

     Returns:
         self, so calls can be chained.
     """
     # Idiomatic truthiness test instead of `(self.recordingtest == True)`.
     if self.recordingtest:
         # Guard against starting two overlapping recordings.
         print("Already Recording!")
     else:
         self.recorder = Recording.Recording(wave_output_filename, CHANNELS,
                                             RATE, CHUNK)
         self.recorder.startAudio()
         self.emotionalPrediction.set("Recording..")
         self.recordingtest = True
     return self
    def __init__(self, master):
        # Set up the GUI frame, the audio recorder and the emotion processor.
        Frame.__init__(self, master)
        self.grid(column=1, row=5)

        self.create_widgets()
        # Recorder captures audio into `wave_output_filename` with the
        # module-level CHANNELS / RATE / CHUNK settings.
        self.recorder = Recording.Recording(wave_output_filename, CHANNELS,
                                            RATE, CHUNK)
        # Processor analyses the same output file for emotional content.
        self.processor = emotionProcessor.EmotionProcessor(
            wave_output_filename)
        self.recordingtest = False  # True while a recording is in progress
Example 9
0
    def read_all_data(self, all_file):
        """Parse a Tobii "All-Data" export into a list of Datapoint objects.

        Args:
            all_file: name of the 'All-Data.tsv' file output by the Tobii software.

        Returns:
            a list of "Datapoint"s for the valid screen-recording rows.
        """
        datapoints = []
        with open(all_file, 'r') as tsv_file:
            for row in csv.DictReader(tsv_file, delimiter="\t"):
                # Keep only screen-recording samples that carry validity codes.
                if row["MediaName"] != 'ScreenRec':
                    continue
                if not (row["ValidityLeft"] and row["ValidityRight"]):
                    continue
                left_pupil = utils.cast_float(row["PupilLeft"], -1)
                right_pupil = utils.cast_float(row["PupilRight"], -1)
                left_distance = utils.cast_float(row["DistanceLeft"], -1)
                right_distance = utils.cast_float(row["DistanceRight"], -1)
                # A sample is valid when at least one eye has validity 0 or 1.
                is_valid = (utils.cast_int(row["ValidityRight"]) < 2
                            or utils.cast_int(row["ValidityLeft"]) < 2)
                datapoints.append(Datapoint({
                    "timestamp": utils.cast_int(row["RecordingTimestamp"]),
                    "pupilsize": Recording.get_pupil_size(left_pupil, right_pupil),
                    "distance": Recording.get_distance(left_distance, right_distance),
                    "is_valid": is_valid,
                    "stimuliname": row["MediaName"],
                    "fixationindex": utils.cast_int(row["FixationIndex"]),
                    "gazepointxleft": utils.cast_float(row["GazePointLeftX (ADCSpx)"]),
                }))

        return datapoints
Example 10
0
    def read_all_data(self, all_file):
        """Return the samples of a Tobii "All-Data" file as Datapoint objects.

        Args:
            all_file: name of the 'All-Data.tsv' file output by the Tobii software.

        Returns:
            a list of "Datapoint"s
        """
        samples = []
        skip = params.ALLDATAHEADERLINES + params.NUMBEROFEXTRAHEADERLINES - 1
        with open(all_file, 'r') as tsv:
            # Consume the metadata header that precedes the column names.
            for _ in xrange(skip):
                next(tsv)
            for row in csv.DictReader(tsv, delimiter="\t"):
                if not row["Number"]:  # ignore invalid data point
                    continue
                pl = utils.cast_float(row["PupilLeft"], -1)
                pr = utils.cast_float(row["PupilRight"], -1)
                dl = utils.cast_float(row["DistanceLeft"], -1)
                dr = utils.cast_float(row["DistanceRight"], -1)
                record = {"timestamp": utils.cast_int(row["Timestamp"]),
                          "pupilsize": Recording.get_pupil_size(pl, pr),
                          "distance": Recording.get_distance(dl, dr),
                          "is_valid": utils.cast_int(row["ValidityRight"]) < 2 or
                                      utils.cast_int(row["ValidityLeft"]) < 2,
                          "stimuliname": row["StimuliName"],
                          "fixationindex": utils.cast_int(row["FixationIndex"]),
                          "gazepointxleft": utils.cast_float(row["GazePointXLeft"])}
                samples.append(Datapoint(record))

        return samples
def partition_Basic(segfile):
    """Generate the scene list from a '.seg' file.

    Args:
        segfile: a string containing the name of the '.seg' file

    Returns:
        a dict mapping scid to tuples of (segid, start, end) for the segments
            belonging to that scene
        an integer: the total number of segments across all scenes
    """
    scenelist = Recording.read_segs(segfile)
    total_segments = sum(len(segs) for segs in scenelist.itervalues())
    return scenelist, total_segments
def partition_Basic(segfile):
    """Build the scene list described by a '.seg' file.

    Args:
        segfile: a string containing the name of the '.seg' file

    Returns:
        a dict with scid as key and a list of (segid, start, end) tuples as
            value for the segments of that scene
        an integer: how many segments exist in total
    """
    scenelist = Recording.read_segs(segfile)
    # Count every segment over all scenes.
    per_scene_counts = [len(segments) for segments in scenelist.itervalues()]
    return scenelist, sum(per_scene_counts)
Example 13
0
File: smi.py — Project: m-wu/EMDAT
    def read_all_data(self, all_file):
        """Read an SMI export and return its fixation samples as Datapoints.

        Only rows whose left-eye event is a fixation are kept; distance,
        validity and stimulus name are placeholder values for now.
        """
        fixations = []
        with open(all_file, 'r') as csv_file:
            for row in csv.DictReader(csv_file):
                if row["L Event Info"] != "Fixation":
                    continue  # only fixation (gaze) samples are of interest
                diameter_left = utils.cast_float(row["L Pupil Diameter [mm]"])
                diameter_right = utils.cast_float(row["R Pupil Diameter [mm]"])
                timestamp = utils.cast_int(row["Time"])
                fixations.append(Datapoint({
                    "timestamp": timestamp,
                    "pupilsize": Recording.get_pupil_size(diameter_left,
                                                          diameter_right),
                    "distance": 0,  # temporarily set to 0
                    "is_valid": True,  # temporarily set to true for all
                    "stimuliname": "Screen",  # temporarily set to the same stimuli
                    "fixationindex": timestamp,
                    "gazepointxleft": utils.cast_float(row["L POR X [px]"])}))

        return fixations
Example 14
0
    def read_all_data(self, all_file):
        """Return the fixation rows of an SMI data file as Datapoint objects."""
        all_data = []
        with open(all_file, 'r') as f:
            reader = csv.DictReader(f)
            for row in reader:
                # Skip everything that is not a left-eye fixation sample.
                if row["L Event Info"] != "Fixation":
                    continue
                point = {}
                point["timestamp"] = utils.cast_int(row["Time"])
                point["pupilsize"] = Recording.get_pupil_size(
                    utils.cast_float(row["L Pupil Diameter [mm]"]),
                    utils.cast_float(row["R Pupil Diameter [mm]"]))
                point["distance"] = 0  # temporarily set to 0
                point["is_valid"] = True  # temporarily set to true for all
                point["stimuliname"] = "Screen"  # temporarily set to the same stimuli
                point["fixationindex"] = utils.cast_int(row["Time"])
                point["gazepointxleft"] = utils.cast_float(row["L POR X [px]"])
                all_data.append(Datapoint(point))

        return all_data
Example 15
0
from connections.postoffice import MasterConnection, ConnectionPostOfficeEnd
import Recording
import BCI
import PostOffice

if __name__ == "__main__":
    # Wire the application together: a main connection for the post office,
    # the master connection pool, a recording store, and the BCI controller
    # that the PostOffice dispatches to.
    main_conn = ConnectionPostOfficeEnd.MainConnection()
    master_conn = MasterConnection.MasterConnection()
    rec = Recording.Recording()
    controller = BCI.BCI(master_conn, main_conn, rec)
    PostOffice.PostOffice(main_conn, master_conn, controller)
usbPath = "/media/pi/VIDEOS"
localPath = "/home/pi/Documents/localVids"

# --- Main state ---
fileNameList = []
duration = 0
startTime = 0
stopTime = 0

# Retry device initialization until it succeeds (di returns -1 on failure).
x = di.DeviceInitialization()
while x == -1:
    x = di.DeviceInitialization()

print("SETUP COMPLETE \n")

# Main capture loop: record, process the captured files, then reset.
while True:
    duration, fileNameList = rc.Recording()
    vr = pro.Processing(fileNameList, duration)
    print("Done Procesing")
    di.finishInitialization()
Example 17
0
 def record(self):
     """Hide the parent dialog and launch a fresh recording dialog."""
     self.dlg.setHidden(True)
     print("new")  # debug marker
     recording_dialog = Recording.Ui_Dialog(
         self.file_location, self.file_name, self.audio)
Example 18
0
def main(morphFileFolder, outputFolder, embeddings_file, positive_words,
         negatve_words):
    print "loading embedding vectors..."
    e = Embeddings(embeddings_file)
    print "done!"
    positive_df = pd.read_csv(positive_words)
    negative_df = pd.read_csv(negatve_words)
    positive_df["vector"] = positive_df["word"].apply(lambda x: e.get(x))
    negative_df["vector"] = negative_df["word"].apply(lambda x: e.get(x))

    files = [
        f for f in listdir(morphFileFolder) if isfile(join(morphFileFolder, f))
    ]
    recordings = []
    for f in files:
        recordings.append(Recording.Recording(join(morphFileFolder, f)))

    questionSummaries = []
    for r in recordings:
        for i in xrange(len(r.questions)):
            if len(questionSummaries) < (i + 1):
                questionSummaries.append(Question.Question([]))

            questionSummaries[i].mergeWith(r.questions[i])

    #specific metrics comparison across all questions
    nouns = {}
    verbs = {}
    adjectives = {}
    adverbs = {}
    content = {}
    person_1 = {}
    person_2 = {}
    person_3 = {}

    for i, q in enumerate(questionSummaries):
        norm_pos = counter2normDictionary(q.pos, q.word_count)
        norm_per = counter2normDictionary(q.person, q.word_count)
        nouns[i + 1] = norm_pos["noun"]
        verbs[i + 1] = norm_pos["verb"]
        adjectives[i + 1] = norm_pos["adjective"]
        adverbs[i + 1] = norm_pos["adverb"]
        content[i + 1] = norm_pos["noun"] + norm_pos["verb"] + norm_pos[
            "adjective"] + norm_pos["adverb"]
        person_1[i + 1] = norm_per["1"]
        person_2[i + 1] = norm_per["2"]
        person_3[i + 1] = norm_per["3"]
        print "Question " + ` (i + 1) ` + ", avg word count: " + ` (
            q.word_count / len(questionSummaries)) `

    counter2hist(nouns, 'Nouns', outputFolder)
    counter2hist(verbs, 'Verbs', outputFolder)
    counter2hist(adjectives, 'Adjectives', outputFolder)
    counter2hist(adverbs, 'Adverbs', outputFolder)
    counter2hist(content, 'Content words', outputFolder)
    counter2hist(person_1, '1st person', outputFolder)
    counter2hist(person_2, '2nd person', outputFolder)
    counter2hist(person_3, '3rd person', outputFolder)

    #raw metrics for each question
    sentiment_scores = {}
    for i, q in enumerate(questionSummaries):

        positive_score = calculate_sentiment_score(q.words, positive_df, e)
        negative_score = calculate_sentiment_score(q.words, negative_df, e)

        print "Question " + ` (
            i + 1
        ) ` + ", Positive: " + ` positive_score ` + ", Negative: " + ` negative_score ` + ", Overall: " + ` (
            positive_score / negative_score) `
        sentiment_scores[i + 1] = (positive_score / negative_score)
        buildWordCloud(q.contentWords, True, 'Question ' + `
                       (i + 1) ` + ' Content Word Cloud', outputFolder)
        counter2hist(counter2normDictionary(q.pos, q.word_count),
                     'Question ' + ` (i + 1) ` + ' POS', outputFolder)
        counter2hist(counter2normDictionary(q.person, q.word_count),
                     'Question ' + ` (i + 1) ` + ' Person', outputFolder)

    counter2hist(sentiment_scores, 'Sentiment scores', outputFolder)
    def __init__(self,
                 pid,
                 eventfile,
                 datafile,
                 fixfile,
                 segfile,
                 log_time_offset=None,
                 aoifile=None,
                 prune_length=None,
                 require_valid_segs=True,
                 auto_partition_low_quality_segments=False,
                 rpsdata=None,
                 export_pupilinfo=False):
        """Inits BasicParticipant class
        Args:
            pid: Participant id
            
            eventfile: a string containing the name of the "Event-Data.tsv" file for this participant
            
            datafile: a string containing the name of the "All-Data.tsv" file for this participant
            
            fixfile: a string containing the name of the "Fixation-Data.tsv" file for this participant
            
            segfile: a string containing the name of the '.seg' file for this participant
            
            log_time_offset: If not None, an integer indicating the time offset between the 
                external log file and eye tracking logs
            
            aoifile: If not None, a string containing the name of the '.aoi' file 
                with definitions of the "AOI"s.
            
            prune_length: If not None, an integer that specifies the time 
                interval (in ms) from the beginning of each Segment in which
                samples are considered in calculations.  This can be used if, 
                for example, you only wish to consider data in the first 
                1000 ms of each Segment. In this case (prune_length = 1000),
                all data beyond the first 1000ms of the start of the "Segment"s
                will be disregarded.
                
            auto_partition_low_quality_segments: a boolean indicating whether EMDAT should 
                split the "Segment"s which have low sample quality, into two new 
                sub "Segment"s discarding the largest gap of invalid samples.
            
            rpsdata: rest pupil sizes for all scenes if available
            
        Yields:
            a BasicParticipant object
        """

        Participant.__init__(self, pid, eventfile, datafile, fixfile, segfile,
                             log_time_offset, aoifile, prune_length,
                             require_valid_segs,
                             auto_partition_low_quality_segments,
                             rpsdata)  #calling the Participan's constructor

        print "reading the files"
        self.features = {}
        rec = Recording.Recording(datafile,
                                  fixfile,
                                  event_file=eventfile,
                                  media_offset=params.MEDIA_OFFSET)
        print "Done!"

        scenelist, self.numofsegments = partition_Basic(segfile)
        print "partition done!"
        if aoifile != None:
            aois = Recording.read_aois_Tobii(aoifile)
        else:
            aois = None

        self.features['numofsegments'] = self.numofsegments

        self.segments, self.scenes = rec.process_rec(
            scenelist=scenelist,
            aoilist=aois,
            prune_length=prune_length,
            require_valid_segs=require_valid_segs,
            auto_partition_low_quality_segments=
            auto_partition_low_quality_segments,
            rpsdata=rpsdata,
            export_pupilinfo=export_pupilinfo)
        Segments = self.segments
        self.whole_scene = Scene('P' + str(pid), [],
                                 rec.all_data,
                                 rec.fix_data,
                                 event_data=rec.event_data,
                                 Segments=self.segments,
                                 aoilist=aois,
                                 prune_length=prune_length,
                                 require_valid=require_valid_segs,
                                 export_pupilinfo=export_pupilinfo)
        self.scenes.insert(0, self.whole_scene)

        for sc in self.scenes:
            sc.clean_memory()
    def __init__(self, pid, eventfile, datafile, fixfile, segfile, log_time_offset = None, aoifile = None, prune_length= None, 
                 require_valid_segs = True, auto_partition_low_quality_segments = False, rpsdata = None, export_pupilinfo = False):
        """Inits BasicParticipant class
        Args:
            pid: Participant id
            
            eventfile: a string containing the name of the "Event-Data.tsv" file for this participant
            
            datafile: a string containing the name of the "All-Data.tsv" file for this participant
            
            fixfile: a string containing the name of the "Fixation-Data.tsv" file for this participant
            
            segfile: a string containing the name of the '.seg' file for this participant
            
            log_time_offset: If not None, an integer indicating the time offset between the 
                external log file and eye tracking logs
            
            aoifile: If not None, a string containing the name of the '.aoi' file 
                with definitions of the "AOI"s.
            
            prune_length: If not None, an integer that specifies the time 
                interval (in ms) from the beginning of each Segment in which
                samples are considered in calculations.  This can be used if, 
                for example, you only wish to consider data in the first 
                1000 ms of each Segment. In this case (prune_length = 1000),
                all data beyond the first 1000ms of the start of the "Segment"s
                will be disregarded.
                
            auto_partition_low_quality_segments: a boolean indicating whether EMDAT should 
                split the "Segment"s which have low sample quality, into two new 
                sub "Segment"s discarding the largest gap of invalid samples.
            
            rpsdata: rest pupil sizes for all scenes if available
            
        Yields:
            a BasicParticipant object
        """
        

        Participant.__init__(self, pid, eventfile, datafile, fixfile, segfile, log_time_offset, aoifile, prune_length, 
                 require_valid_segs, auto_partition_low_quality_segments, rpsdata)   #calling the Participan's constructor
        
        print "reading the files"
        self.features={}
        rec = Recording.Recording(datafile, fixfile, event_file=eventfile, media_offset=params.MEDIA_OFFSET)
        print "Done!"
        
        scenelist,self.numofsegments = partition_Basic(segfile)
        print "partition done!"
        if aoifile != None:
            aois = Recording.read_aois_Tobii(aoifile)
        else:
            aois = None
        
        self.features['numofsegments']= self.numofsegments
        
        self.segments, self.scenes = rec.process_rec(scenelist = scenelist,aoilist = aois,prune_length = prune_length, require_valid_segs = require_valid_segs, 
                                                     auto_partition_low_quality_segments = auto_partition_low_quality_segments, rpsdata = rpsdata, export_pupilinfo=export_pupilinfo)
        Segments = self.segments
        self.whole_scene = Scene('P'+str(pid),[],rec.all_data,rec.fix_data, event_data = rec.event_data, Segments = self.segments, aoilist = aois,prune_length = prune_length, require_valid = require_valid_segs, export_pupilinfo=export_pupilinfo )
        self.scenes.insert(0,self.whole_scene)

        for sc in self.scenes:
            sc.clean_memory()
Example 21
0
# NOTE: this chunk continues a larger test script; outputA1 is defined above.
print("OUTPUT A1 : ", outputA1)
if outputA1 == 0:
    print("UNIT TEST A1 DEVICE INITIALIZATION STATUS : PASS")
elif outputA1 == -1:  # was misspelled `ouputA1`, which raised a NameError
    print("UNIT TEST A1 DEVICE INITIALIZATION STATUS : FAIL")
print("\n")

# UNIT TEST : A2, A3
print("UNIT TEST A2, A3 : RECORDING")
print("INPUTS : None")
print(
    "EXPECTED OUTPUTS : 1. duration (float object) 2. fileNameList (list object)"
)
print("\n")
# duration, fnl
outputA23_1, outputA23_2 = Recording.Recording()
print("\n")
print("OUTPUTS : ", outputA23_1, outputA23_2)
# isinstance instead of `type(x) == T` type comparison.
if isinstance(outputA23_1, float) and isinstance(outputA23_2, list):
    print("UNIT TEST A2, A3 RECORDING STATUS : PASS")
else:
    print("UNIT TEST A2, A3 RECORDING STATUS : FAIL")

print("\n")

# UNIT TEST A4
print("UNIT TEST A4 : PROCESSING")
print("INPUTS : 1. fileNameList (list object) 2. duration (float object)")
print("EXPECTED OUTPUT : True")
print("\n")
inputA4_1 = outputA23_2