Example #1
    def run(self):
        self.running = True

        self.session = db.Session()
        self.session.add(self.search)
        self.session.add(self.customer)
        lr = LogReader(self.log_path, self.customer.get_safe_short_name(), \
                self.search.start_dt, self.search.finish_dt)
        self.search.status = Search.Status.RUNNING
        self.search.count = 0
        self.update_search()

        while self.running:
            event = lr.next()
            if event is None:
                break

            row = self.sql_expression.evaluate(event)
            if row is not None:
                #TODO: do we restrict the rows here?
                self.fh.write(json.dumps(event))
                self.fh.write("\n")
                self.search.count += 1
                self.update_search()

        self.fh.close()
        self.search.status = Search.Status.DONE
        self.update_search()
        self.session.close()

        print "Done"
Example #2
def getLength(strPath):
    rdrLog = LogReader()
    log = rdrLog.loadLog(strPath)  # use the path argument instead of sys.argv

    owl = log.getOwlData()
    meta = owl["metadata"]
    tti = owl["task-tree-individuals"]

    root = tti[meta.subActions()[0]]

    return root.time()
Example #3
def getLength(strPath):
    rdrLog = LogReader()
    log = rdrLog.loadLog(strPath)  # use the path argument instead of sys.argv
    
    owl = log.getOwlData()
    meta = owl["metadata"]
    tti = owl["task-tree-individuals"]
    
    root = tti[meta.subActions()[0]]
    
    return root.time()
Example #4
class GameHandler:
    def __init__(self, log):
        self.game  = Game()
        self.reader = LogReader(log)
        self.begin  = -1
        self.line   = -1
        self._first_game = True

    # Checks whether a game has begun
    def _new_game(self):
        return (self.reader.game_beginning() != self.begin)

    # Creates a new game object
    def _create_new_game(self):
        self.game  = Game()
        self.begin = self.reader.game_beginning()
        self.line  = self.reader.game_beginning()
        self.turn  = 1
        self._first_game = False

    # Handles each new line in the log
    def _handle_news(self, line):
        if (self.reader.played_self(line) is not None):
            cur_play = self.reader.played_self(line)
            self.game.play(cur_play[0], cur_play[1], self.turn)
        elif (self.reader.played_oppo(line) is not None):
            cur_play = self.reader.played_oppo(line)
            self.game.play(cur_play[0], cur_play[1], self.turn)
        elif (self.reader.next_turn(line)):
            self.turn += 1

    # Updates game information
    def update(self):
        if (self._new_game()):
            if (not self._first_game):
                self.game.export()
            self._create_new_game()

        elif (self.line != self.reader.length()):
            for line in range(self.line, self.reader.length()):
                self._handle_news(line)
                self.line += 1

    # Returns all the cards played (used for UI)
    def info(self):
        if (self.line != -1):
            return self.game.cards_played()
        else:
            return ([],[])
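A minimal usage sketch for the class above; the log path and the polling loop are hypothetical, and Game/LogReader are assumed to be the same classes used in the constructor:

# Hypothetical driver: poll the log, let the handler ingest new lines, and
# read back the cards played so far. "game.log" is a placeholder path.
import time

handler = GameHandler("game.log")
for _ in range(10):
    handler.update()        # detect new games and process any new log lines
    print(handler.info())   # ([], []) until at least one game has started
    time.sleep(1.0)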
Example #5
 def __init__(self, logfile):
     """ Constructor
     
     Usage example:
         patternfilter = ADPatternFilter(logfile)
         
     Parameters
     ----------        
     logfile : string
         the name of a text file generated as a log of the AD tool
                 
     Returns
     ----------
     Instance of the class
     
     """    
     # Create an instance of LogReader
     self.log = LogReader(logfile)
     self.removedPatterns = []
     self.defPatternlist = []
Example #6
class UnitedWeStand(object):
    '''
    classdocs
    '''

    def __init__(self):
        '''
        Constructor
        '''
        self.log_reader = LogReader()

    def send_record(self, filename, xml_record_str):
        # print 'Sending message for file=', filename, 'message=',
        # xml_record_str
        pass

    def generate_messages(self, filename, database_count):
        index = database_count + 1
        try:
            while(True):
                record = self.log_reader.getRecord(filename, index)
                index += 1
                self.send_record(filename, record)
        except RuntimeError:
            return index - 1

    def update_file_entry(self, filename):
        with DatabaseHandler() as d:
            entry = d.get_filecount_entry(filename)
            if(entry is None):
                entry = 0
#             count = self.log_reader.getEventCount(file)
            count = self.generate_messages(filename, entry)
            d.add_filecount_entry(filename, count)

    def intialize_database(self):
        if(os.name == 'posix'):
            log_dir = log_dir_linux
        else:
            log_dir = log_dir_windows
        for file in os.listdir(log_dir):
            filename, filetype = os.path.splitext(file)
            if(filetype == '.evtx'):
                print 'Initialization: updating entry for', file
                try:
                    self.update_file_entry(file)
                except Exception:
                    traceback.print_exc()
        x = DatabaseHandler()
        x.get_all_records()
Example #7
    def test_exampleLog(self):
        reader = LogReader('tests/example/test.log')
        self.assertEqual(reader.injects, [(0.0, 123.0), (2.0, 10.0)])
        self.assertEqual(reader.errors, [1.0, 20.0])
        self.assertEqual(len(reader.algPackets), 1)

        algSendingEntry = reader.algPackets[0]
        self.assertEqual(algSendingEntry.packet, 123.0)
        self.assertEqual(algSendingEntry.start, 2.0)
        self.assertEqual(algSendingEntry.end, 20.0)
        self.assertEqual(algSendingEntry.successful, False)

        advSendingEntry = reader.advPackets[0]
        self.assertEqual(advSendingEntry.packet, 10.0)
        self.assertEqual(advSendingEntry.start, 2.0)
        self.assertEqual(advSendingEntry.end, 12.0)
        self.assertEqual(advSendingEntry.successful, True)
Example #8
    def toFile(self):
        eventsForAt = []
        eventsForAr = []
        eventsForAudioDevice = []

        for logFile in self.logFiles:
            self.reader = LogReader(logFile)
            for item in self.loggerEvent:
                if item == "AT":
                    eventsForAt += self.getAudioTrackEvent()
                elif item == "AR":
                    eventsForAr += self.getAudioRecordEvent()
                elif item == "Device":
                    eventsForAudioDevice += self.getAudioDevicePlugEvent()
                else:
                    continue

        events = ["="*50+"AudioTrack Event Begin"+"="*50] + \
                 eventsForAt + \
                 ["="*50+"AudioTrack Event end"+"="*50, "\n"] + \
                 ["=" * 50 + "AudioRecord Event Begin" + "=" * 50] + \
                 eventsForAr + \
                 ["=" * 50 + "AudioRecord Event end" + "=" * 50, "\n"] + \
                 ["=" * 50 + "AudioDevicePlug Event Begin" + "=" * 50] + \
                 eventsForAudioDevice + \
                 ["=" * 50 + "AudioDevicePlug Event end" + "=" * 50, "\n"]

        strings = ""
        for item in events:
            strings += str(item) + "\n"

        if strings == "":
            return

        with open("result", "w+") as fd:
            fd.write(strings)
Example #9
class TimeConfidence:
    def __init__(self):
        self.rdrLog = LogReader()
        
    def confidence(self, strPath):
        dataOwl = None
        
        log = self.rdrLog.loadLog(strPath)
        dataOwl = log.getOwlData()
        
        self.tti = dataOwl["task-tree-individuals"]
        owlMeta = dataOwl["metadata"]
        owlAnnot = dataOwl["annotation"]
        
        if owlMeta:
            toplevel_nodes = owlMeta.subActions()
        else:
            print "No meta data in file!"
            return
        
        self.timeSpans = {}
        self.findTimeSpansPerTask(toplevel_nodes)
        
        for ctx in self.timeSpans:
            print ctx, mean_confidence_interval(self.timeSpans[ctx])
    
    def findTimeSpansPerTask(self, nodes):
        for node in nodes:
            owlNode = self.tti[node]
            
            ctx = owlNode.taskContext()
            if not ctx in self.timeSpans:
                self.timeSpans[ctx] = []
            
            self.timeSpans[ctx].append(owlNode.time())
            
            self.findTimeSpansPerTask(owlNode.subActions())
Example #10
class TimeConfidence:
    def __init__(self):
        self.rdrLog = LogReader()

    def confidence(self, strPath):
        dataOwl = None

        log = self.rdrLog.loadLog(strPath)
        dataOwl = log.getOwlData()

        self.tti = dataOwl["task-tree-individuals"]
        owlMeta = dataOwl["metadata"]
        owlAnnot = dataOwl["annotation"]

        if owlMeta:
            toplevel_nodes = owlMeta.subActions()
        else:
            print "No meta data in file!"
            return

        self.timeSpans = {}
        self.findTimeSpansPerTask(toplevel_nodes)

        for ctx in self.timeSpans:
            print ctx, mean_confidence_interval(self.timeSpans[ctx])

    def findTimeSpansPerTask(self, nodes):
        for node in nodes:
            owlNode = self.tti[node]

            ctx = owlNode.taskContext()
            if not ctx in self.timeSpans:
                self.timeSpans[ctx] = []

            self.timeSpans[ctx].append(owlNode.time())

            self.findTimeSpansPerTask(owlNode.subActions())
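A minimal invocation sketch, assuming the log path is supplied on the command line as in the standalone scripts elsewhere on this page:

# Hypothetical driver; sys.argv[1] must be a path that LogReader.loadLog() accepts.
import sys

tc = TimeConfidence()
tc.confidence(sys.argv[1])   # prints a mean confidence interval per task context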
Example #11
    def __init__(self, logfile):
        log_reader = LogReader(logfile)
        log_reader.extract_string('"(0x)')
        log_reader.extract_value('"(0x)')

        self.value_list = log_reader.value_list
        self.lines = log_reader.lines

        self.functions_present = []
        self.wheel_revolution_list = []
        self.wheel_event_time_list = []
        self.crank_revolution_list = []
        self.crank_event_time_list = []
        self.crank_rpm = []
        self.power_list = []
        self.speed = []
        self.rpm = []
        self.wheel_circumference = 2.132
        self.value_converter = ValueConverter()
Example #12
 def loadLog(self, filename):
     self.log = LogReader(filename)
Example #13
from LogReader import LogReader
from flask import Flask, render_template, flash, request
from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField


app = Flask(__name__)
app.config.from_object(__name__)
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b743254'


class ReusableForm(Form):
    Exch = StringField('ExCh:', validators=[validators.required()])
    Feed = StringField('Feed:', validators=[validators.required()])

lr = LogReader()

@app.route("/", methods=['GET', 'POST'])
def hello_world():
    global lr

    form = ReusableForm(request.form)
    data={}

    if request.method == 'POST':

        exch = request.form['Exch']
        feed = request.form['Feed']

        good_data = lr.fake_data(start_date='1-Aug-2018',end_date='4-Aug-2018')
Example #14
 def __init__(self):
     self.rdrLog = LogReader()
     self.tti = {}
Example #15
#!/usr/bin/python

from LogReader import LogReader
import sys

rdrLog = LogReader()
log = rdrLog.loadLog(sys.argv[1])

owl = log.getOwlData()
tti = owl["task-tree-individuals"]

count_eff = 0
count_eff_fail = 0


def hasSubActionUIMAPerceiveFailed(owlIndiv):
    if owlIndiv.taskContext() == "UIMA-PERCEIVE":
        if len(
                owlIndiv.tagAttributeValues("knowrob:perceptionResult",
                                            "rdf:resource")) == 0:
            return True
    else:
        subactions = owlIndiv.subActions()

        for subaction in subactions:
            if hasSubActionUIMAPerceiveFailed(tti[subaction]):
                return True

    return False

 def __init__(self):
     '''
     Constructor
     '''
     self.log_reader = LogReader()
stepsPerRot1 = float(config[robotName]["StepsPerRot1"])
stepsPerRot2 = float(config[robotName]["StepsPerRot2"])
unitsPerRot1 = float(config[robotName]["UnitsPerRot1"])
unitsPerRot2 = float(config[robotName]["UnitsPerRot2"])
Gearing1 = int(config[robotName]["Gearing1"])
Gearing2 = int(config[robotName]["Gearing2"])
sizeX = int(config[robotName]["SizeX"])
sizeY = int(config[robotName]["SizeY"])
originX = int(config[robotName]["OriginX"])
originY = int(config[robotName]["OriginY"])
robotGeom = config[robotName]["RobotGeom"]

dataReader = None
serial1 = serial.Serial(serialPort1, baudRate)
if monitorFromLog:
    dataReader = LogReader(serial1, stepsPerRot1, stepsPerRot2)
else:
    serial2 = serial.Serial(serialPort2, baudRate)
    dataReader = EncoderReader(serial1, serial2)

print("Monitoring press esc to stop, c to clear, r to reset")


def calcSingleArmScara(degs1, degs2):
    ang1 = degs1 * math.pi / 180
    ang2 = (180 - degs2) * math.pi / 180
    l1 = sizeX / 4
    l2 = sizeX / 4
    elbowX = math.sin(ang1) * l1
    elbowY = math.cos(ang1) * l1
    curX = elbowX + math.sin(ang2) * l2
    return np.array(range(first_id, last_id))


def rand_perm(n):
    p = range(0, n)
    for i in range(0, n):
        r = random.randint(i, n - 1)
        x = p[i]
        p[i] = p[r]
        p[r] = x
    return p


if __name__ == "__main__":

    train_log_object = LogReader('../data/oakland_part3_an_rf.node_features')
    train_points = train_log_object.read()
    test_log_object = LogReader('../data/oakland_part3_am_rf.node_features')
    test_points = test_log_object.read()

    X = np.array([point._feature for point in train_points])
    Y = np.array([point._label for point in train_points])

    perm_idx = np.random.permutation(X.shape[0]) #range(X.shape[0])
    ds_train = Dataset(X[perm_idx,:],Y[perm_idx])
    cv_fold = 5
    param_num = 9
    cm = np.zeros([cv_fold,cv_fold,5,5])
    acc = np.zeros([cv_fold,cv_fold])
    params = [[5,0.01],[10,0.01],[15,0.01],[5,0.001],[10,0.001],[15,0.001],[5,0.1],[10,0.1],[15,0.1]]
    cum_cm = np.zeros([cv_fold,5,5])
Example #19
class GraspExtractor:
    def __init__(self):
        self.rdrLog = LogReader()
    
    def getDesignatorSuccessor(self, strDesignatorID):
        desig = self.di[strDesignatorID]
        if desig:
            successors = desig.tagAttributeValues("knowrob:successorDesignator", "rdf:resource")
            if successors and len(successors) > 0:
                return successors[0].split("#")[1]
    
    def getNamedDesignator(self, dataDesignators, strName):
        for designator in dataDesignators:
            if designator["designator"]["_id"] == strName:
                return designator["designator"]
    
    def processPerform(self, owlPerform):
        desigsGraspDetails = owlPerform.tagAttributeValues("knowrob:graspDetails", "rdf:resource")
        
        if len(desigsGraspDetails) > 0:
            desigGraspDetails = self.getNamedDesignator(self.log.getDesignatorData(), desigsGraspDetails[0].split("#")[1])["GRASP"]
            
            dicGraspPose = desigGraspDetails["GRASP-POSE"]
            dicPregraspPose = desigGraspDetails["PREGRASP-POSE"]
            dicObjectPose = desigGraspDetails["OBJECT-POSE"]
            strObjectName = desigGraspDetails["OBJECT-NAME"]
            strSide = desigGraspDetails["ARM"]
            strEffort = desigGraspDetails["EFFORT"]
            strGraspType = desigGraspDetails["GRASP-TYPE"]["QUOTE"]
            
            print " -- Grasp action --"
            
            timeSpan = owlPerform.timeSpan()
            print "Time elapsed  :", (float(timeSpan[1]) - float(timeSpan[0])), "seconds"
            
            if owlPerform.taskSuccess():
                print "Success       : True"
            else:
                print "Success       : False"
            
            print "Side          :", strSide
            print "Grasp Type    :", strGraspType
            print "Object Name   :", strObjectName
            print "Object Pose   :"
            self.printPose(dicObjectPose)
            print "Grasp Pose    :"
            self.printPose(dicGraspPose)
            print "Pregrasp Pose :"
            self.printPose(dicPregraspPose)
            print
    
    def extractGrasps(self, strPath):
        # Load Log
        self.log = self.rdrLog.loadLog(strPath)
        self.tti = self.log.getOwlData()["task-tree-individuals"]
        self.di = self.log.getOwlData()["designator-individuals"]
        annot = self.log.getOwlData()["annotation"]
        meta = self.log.getOwlData()["metadata"]
        
        for key in self.tti:
            owlIndiv = self.tti[key]
            
            if owlIndiv.type() == "AnnotationInformation":
                annot = owlIndiv
                if annot and meta: break
            elif owlIndiv.type() == "RobotExperiment":
                meta = owlIndiv
                if annot and meta: break
        
        if annot and meta:
            for indiv in self.tti:
                if self.tti[indiv].taskContext() == "GRASP":
                    self.processPerform(self.tti[indiv])

    def printPose(self, pose):
        print "   Frame       :", pose["header"]["frame_id"] + "\n" + \
              "   Position    : x =", str(pose["pose"]["position"]["x"]) + "\n" + \
              "                 y =", str(pose["pose"]["position"]["y"]) + "\n" + \
              "                 z =", str(pose["pose"]["position"]["z"]) + "\n" + \
              "   Orientation : x =", str(pose["pose"]["orientation"]["x"]) + "\n" + \
              "                 y =", str(pose["pose"]["orientation"]["y"]) + "\n" + \
              "                 z =", str(pose["pose"]["orientation"]["z"]) + "\n" + \
              "                 w =", str(pose["pose"]["orientation"]["w"])
Example #20
#!/usr/bin/python

import sys
from LogReader import LogReader

lr = LogReader()

log = lr.loadLog(sys.argv[1])
od = log.getOwlData()
tti = od["task-tree-individuals"]

printable_types = ["CRAMAchieve"]
colors = [
    "#ddffff", "#ffddff", "#ffffdd", "#ddddff", "#ffdddd", "#ddffdd",
    "#dddddd", "#ffffff"
]
color_assignments = {}
color_index = -1


def printIndividual(individual, parent_name):
    global color_index
    global color_assignments

    strdot = ""

    strlbl = individual.goalContext()
    if not strlbl:
        strlbl = individual.name()

    # if len(individual.failures()) > 0:
Example #21
class DataCondenser:
    def __init__(self):
        self.rdrLog = LogReader()

    def condenseData(self, strPath):
        dataOwl = None

        log = self.rdrLog.loadLog(strPath)
        dataOwl = log.getOwlData()

        self.tti = dataOwl["task-tree-individuals"]
        owlMeta = dataOwl["metadata"]
        owlAnnot = dataOwl["annotation"]

        if owlMeta:
            result = {
                "Toplevel": self.condenseNodes("", owlMeta.subActions())
            }

            with open("out.json", "wb") as f:
                json.dump(result, f)

            with open("generalized_model.pkl", "wb") as f:
                pickle.dump(
                    {
                        "model": result,
                        "parameters": owlAnnot.annotatedParameterTypes()
                    }, f, pickle.HIGHEST_PROTOCOL)
        else:
            print "No meta data in file!"

    def condenseNodes(self, strParentNode, arrNodes, nLevel=0):
        arrTypes = {}
        arrIndividuals = {}

        for strNode in arrNodes:
            owlNode = self.tti[strNode]
            ident = owlNode.taskContext()  #.type()

            failures = owlNode.failures()
            failure = ""
            if len(failures) > 0:
                failure = self.tti[failures[0]].type()

            result = self.condenseNodes(strNode, owlNode.subActions(),
                                        nLevel + 1)
            if not ident in arrTypes:
                arrTypes[ident] = result
            else:
                arrTypes[ident] = self.unifyResults(arrTypes[ident], result)

            arrTypes[ident]["individuals"][strNode] = {
                "parameters": owlNode.annotatedParameters(True),
                "parent": strParentNode,
                "failure": failure
            }

        return {"subTypes": arrTypes, "individuals": {}}

    def unifyResults(self, res1, res2):
        resparams = {}
        if len(res1["individuals"]) > 0:
            resparams = res1["individuals"]

        if len(res2["individuals"]) > 0:
            resparams = dict(resparams.items() + res2["individuals"].items())

        unified = {"subTypes": {}, "individuals": resparams}

        for ressub1 in res1["subTypes"]:
            if ressub1 in res2["subTypes"]:
                unified["subTypes"][ressub1] = self.unifyResults(
                    res1["subTypes"][ressub1], res2["subTypes"][ressub1])
            else:
                unified["subTypes"][ressub1] = res1["subTypes"][ressub1]

        for ressub2 in res2["subTypes"]:
            if not ressub2 in res1["subTypes"]:
                unified["subTypes"][ressub2] = res2["subTypes"][ressub2]

        return unified
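A minimal driver sketch (hypothetical); note that condenseData() writes out.json and generalized_model.pkl into the current working directory:

import sys

condenser = DataCondenser()
condenser.condenseData(sys.argv[1])   # condenses the task tree and dumps it to out.json / generalized_model.pkl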
Example #22
class DataExtractor:
    def __init__(self):
        self.rdrLog = LogReader()

    def extractData(self, strPath):
        log = self.rdrLog.loadLog(strPath)
        self.tti = log.getOwlData()["task-tree-individuals"]
        annot = None
        meta = None

        for key in self.tti:
            owlIndiv = self.tti[key]

            if owlIndiv.type() == "AnnotationInformation":
                annot = owlIndiv
                if annot and meta:
                    break
            elif owlIndiv.type() == "RobotExperiment":
                meta = owlIndiv
                if annot and meta:
                    break

        if annot and meta:
            params = annot.tagNodeValues("knowrob:annotatedParameterType")

            param_set = {}
            for param in params:
                param_set[param] = "?"

            strHeadline = "TASK-CONTEXT, RESULT"
            for param in params:
                strHeadline += ", " + param

            print strHeadline

            toplevelnodes = meta.tagAttributeValues("knowrob:subAction", "rdf:resource")

            for toplevelnode in toplevelnodes:
                node_key = toplevelnode.split("#")[1]
                self.printNode(node_key, param_set)

    def printNode(self, node_key, param_set):
        owlIndiv = self.tti[node_key]
        subactions = owlIndiv.tagAttributeValues("knowrob:subAction", "rdf:resource")

        for param in param_set:
            tnv = owlIndiv.tagNodeValues("knowrob:" + param)

            if len(tnv) > 0:
                param_set[param] = tnv[0]

        strLine = ""
        strLine += owlIndiv.tagNodeValues("knowrob:taskContext")[0]

        failures = owlIndiv.tagAttributeValues("knowrob:eventFailure", "rdf:resource")

        if len(failures) > 0:
            failure = failures[0].split("#")[1]
            owlFailure = self.tti[failure]

            strLine += ", " + owlFailure.type()
        else:
            strLine += ", Success"

        for param in param_set:
            strLine += ", " + param_set[param]

        for subaction in subactions:
            self.printNode(subaction.split("#")[1], param_set)

        print strLine
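A minimal driver sketch (hypothetical) for the extractor above:

import sys

extractor = DataExtractor()
extractor.extractData(sys.argv[1])   # prints a header line followed by one comma-separated line per task node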
Example #23
 def __init__(self):
     self.rdrLog = LogReader()
     self.arrColors = ["white", "red", "blue", "yellow", "black"]
    if "BehaviorStateComplete" in frame.messages:
      m, o = frame["BehaviorStateComplete"]
    else:
      m, o = frame["BehaviorStateSparse"]

    return [m["robot_pose.x"], m["robot_pose.y"], m["fall_down_state"]]
    
  except KeyError as k:
    raise StopIteration


if __name__ == "__main__":

  parser = BehaviorParser()
  fileName = "./game.log"
  log = LogReader(fileName, parser)#, filter=headYaw)
  
  # we want only the frames which contain BehaviorState
  b = [behavior(f) for f in log if "BehaviorStateComplete" in f.messages or "BehaviorStateSparse" in f.messages];
  
  upright = filter(lambda m: m[2] == 1, b)
  fall = filter(lambda m: m[2] != 1, b)
  
  print "step 2"
  du = zip(*upright)
  df = zip(*fall)
  
  pyplot.plot(du[0], du[1], '.')
  pyplot.plot(df[0], df[1], 'o')

  pyplot.ylabel('y')
Example #25
class DataCondenser:
    def __init__(self):
        self.rdrLog = LogReader()
        
    def condenseData(self, strPath):
        dataOwl = None
        
        log = self.rdrLog.loadLog(strPath)
        dataOwl = log.getOwlData()
        
        self.tti = dataOwl["task-tree-individuals"]
        owlMeta = dataOwl["metadata"]
        owlAnnot = dataOwl["annotation"]
        
        if owlMeta:
            result = {"Toplevel" : self.condenseNodes("", owlMeta.subActions())};
            
            with open("out.json", "wb") as f:
                json.dump(result, f)
            
            with open("generalized_model.pkl", "wb") as f:
                pickle.dump({"model" : result,
                             "parameters" : owlAnnot.annotatedParameterTypes()},
                            f, pickle.HIGHEST_PROTOCOL)
        else:
            print "No meta data in file!"
    
    def condenseNodes(self, strParentNode, arrNodes, nLevel = 0):
        arrTypes = {}
        arrIndividuals = {}
        
        for strNode in arrNodes:
            owlNode = self.tti[strNode]
            ident = owlNode.taskContext()#.type()
            
            failures = owlNode.failures()
            failure = ""
            if len(failures) > 0:
                failure = self.tti[failures[0]].type()
            
            result = self.condenseNodes(strNode, owlNode.subActions(), nLevel + 1)
            if not ident in arrTypes:
                arrTypes[ident] = result
            else:
                arrTypes[ident] = self.unifyResults(arrTypes[ident], result)
            
            arrTypes[ident]["individuals"][strNode] = {"parameters" : owlNode.annotatedParameters(True),
                                                       "parent" : strParentNode,
                                                       "failure" : failure}
        
        return {"subTypes" : arrTypes,
                "individuals" : {}}
    
    def unifyResults(self, res1, res2):
        resparams = {}
        if len(res1["individuals"]) > 0:
            resparams = res1["individuals"]
        
        if len(res2["individuals"]) > 0:
            resparams = dict(resparams.items() + res2["individuals"].items())
        
        unified = {"subTypes" : {},
                   "individuals" : resparams}
        
        for ressub1 in res1["subTypes"]:
            if ressub1 in res2["subTypes"]:
                unified["subTypes"][ressub1] = self.unifyResults(res1["subTypes"][ressub1],
                                                                 res2["subTypes"][ressub1])
            else:
                unified["subTypes"][ressub1] = res1["subTypes"][ressub1]
        
        for ressub2 in res2["subTypes"]:
            if not ressub2 in res1["subTypes"]:
                unified["subTypes"][ressub2] = res2["subTypes"][ressub2]
        
        return unified
Example #26
#!/usr/bin/python

import sys
from LogReader import LogReader

sys.setrecursionlimit(100000)

rdrLog = LogReader()
log = rdrLog.loadLog(sys.argv[1])
dataOwl = log.getOwlData()


def findDepth(indivkey, lvl = 0):
    indiv = dataOwl["task-tree-individuals"][indivkey]
    if len(indiv.subActions()) > 0:
        for sa in indiv.subActions():
            findDepth(sa, lvl + 1)
    else:
        print lvl

findDepth(dataOwl["metadata"].subActions()[0])
Example #27
 def __init__(self):
     self.rdrLog = LogReader()
Example #28
class ADPatternFilter:
    
    # Constructor
    def __init__(self, logfile):
        """ Constructor
        
        Usage example:
            patternfilter = ADPatternFilter(logfile)
            
        Parameters
        ----------        
        logfile : string
            the name of a text file generated as a log of the AD tool
                    
        Returns
        ----------
        Instance of the class
        
        """    
        # Create an instance of LogReader
        self.log = LogReader(logfile)
        self.removedPatterns = []
        self.defPatternlist = []
        
    def filter_patterns(self):
        """ Method to filter spurious patterns.
            
        Usage example:
            filter_patterns()
                
        Parameters
        ----------
        None
                
        Returns
        -------
        None
        
        """
        # Parse the logfile and obtain the list of patterns and clusters
        self.log.parse_log()
        self.remove_patterns1()
        #self.remove_patterns2()
        

    def remove_patterns1(self):
        """ This method implements the strategy 1 to remove patterns
        Take into account only the number of instances for removing purposes
            
        Usage example:
            remove_patterns1()
                
        Parameters
        ----------
        None
                
        Returns
        -------
        None
        
        """
        # Best value for Kasteren dataset
        perc = 0.0
        threshold = self.log.minInstances + (self.log.maxInstances - self.log.minInstances)*perc
        print 'Used threshold value for instances:', threshold
        for pattern in self.log.patternlist:
            if pattern.instances < threshold:
                self.removedPatterns.append(pattern)
            else:
                self.defPatternlist.append(pattern)
                
    
    def remove_patterns2(self):
        """ This method implements the strategy 2 to remove patterns
        Take into account only the pattern value for removing purposes
            
        Usage example:
            remove_patterns2()
                
        Parameters
        ----------
        None
                
        Returns
        -------
        None
        
        """
        perc = 0.10
        threshold = self.log.minPatternValue + (self.log.maxPatternValue - self.log.minPatternValue)*perc
        print 'Used threshold value for pattern values:', threshold
        for pattern in self.log.patternlist:
            if pattern.value < threshold:
                self.removedPatterns.append(pattern)
            else:
                self.defPatternlist.append(pattern)
                
    
    def remove_patterns3(self):
        """ This method implements the strategy 3 to remove patterns
        Take into account only the pattern instances for removing purposes
        but calculate the threshold using the IQR (Interquartile range)
            
        Usage example:
            remove_patterns3()
                
        Parameters
        ----------
        None
                
        Returns
        -------
        None
        
        """
        instances = []
        for pattern in self.log.patternlist:
            instances.append(pattern.instances)
            
        iqr = np.subtract(*np.percentile(instances, [75, 25]))
        threshold = self.log.minInstances + iqr
        print 'Used threshold value for instances:', threshold, 'iqr:', iqr
        for pattern in self.log.patternlist:
            if pattern.instances < threshold:
                self.removedPatterns.append(pattern)
            else:
                self.defPatternlist.append(pattern)
                
    
    def remove_patterns4(self):
        """ This method implements the strategy 4 to remove patterns
        Take into account only the pattern values for removing purposes
        but calculate the threshold using the IQR (Interquartile range)
            
        Usage example:
            remove_patterns4()
                
        Parameters
        ----------
        None
                
        Returns
        -------
        None
        
        """
        values = []
        for pattern in self.log.patternlist:
            values.append(pattern.value)
            
        iqr = np.subtract(*np.percentile(values, [75, 25]))
        threshold = self.log.minPatternValue + iqr
        print 'Used threshold value for pattern values:', threshold, 'iqr:', iqr
        for pattern in self.log.patternlist:
            if pattern.value < threshold:
                self.removedPatterns.append(pattern)
            else:
                self.defPatternlist.append(pattern)
    
    #Method to reset the internal lists    
    def reset(self):
        """ Method to reset the instance
                    
        Usage example:
            reset()
                
        Parameters
        ----------
        None
                
        Returns
        -------
        None
        
        """
        self.removedPatterns = []
        self.defPatternlist = []
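A minimal usage sketch for the filter above; "ad_tool.log" is a placeholder name for a log file produced by the AD tool:

# Hypothetical usage.
patternfilter = ADPatternFilter("ad_tool.log")
patternfilter.filter_patterns()            # parses the log and applies strategy 1
kept = patternfilter.defPatternlist        # patterns that passed the instance threshold
dropped = patternfilter.removedPatterns    # patterns considered spurious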
Example #29
import configparser
import serial
import logging
import os
from LogReader import LogReader
import math
import tkinter

logging.basicConfig(level=os.environ.get("LOGLEVEL", "INFO"))

config = configparser.ConfigParser()
config.read("config.ini")
serialPort1 = config["DEFAULT"]["SerialPort1"]
baudRate = config["DEFAULT"]["BaudRate"]

serial1 = serial.Serial(serialPort1, baudRate)
logReader = LogReader(serial1)

print("Monitoring log, press esc to stop")

stepsPerRotation = 9600

def calcXY(steps1, steps2):
    ang1 = steps1 * math.pi * 2 / stepsPerRotation
    ang2 = (stepsPerRotation/2 - steps2) * math.pi * 2 / stepsPerRotation
    # print(steps1, steps2, ang1 * 180 / math.pi, ang2 * 180 / math.pi)
    l1 = 92.5
    l2 = 92.5
    elbowX = math.sin(ang1) * l1
    elbowY = math.cos(ang1) * l1
    curX = elbowX + math.sin(ang2) * l2
    curY = elbowY + math.cos(ang2) * l2
Example #30
class DataExtractor:
    def __init__(self):
        self.rdrLog = LogReader()
        
    def extractData(self, strPath):
        log = self.rdrLog.loadLog(strPath)
        self.tti = log.getOwlData()["task-tree-individuals"]
        annot = None
        meta = None
        
        for key in self.tti:
            owlIndiv = self.tti[key]
            
            if owlIndiv.type() == "AnnotationInformation":
                annot = owlIndiv
                if annot and meta: break
            elif owlIndiv.type() == "RobotExperiment":
                meta = owlIndiv
                if annot and meta: break
        
        if annot and meta:
            params = annot.tagNodeValues("knowrob:annotatedParameterType")
            
            param_set = {}
            for param in params:
                param_set[param] = "?"
            
            strHeadline = "TASK-CONTEXT, RESULT"
            for param in params:
                strHeadline += ", " + param
            
            print strHeadline
            
            toplevelnodes = meta.tagAttributeValues("knowrob:subAction", "rdf:resource")
            
            for toplevelnode in toplevelnodes:
                node_key = toplevelnode.split("#")[1]
                self.printNode(node_key, param_set)
    
    def printNode(self, node_key, param_set):
        owlIndiv = self.tti[node_key]
        subactions = owlIndiv.tagAttributeValues("knowrob:subAction", "rdf:resource")
        
        for param in param_set:
            tnv = owlIndiv.tagNodeValues("knowrob:" + param)
            
            if len(tnv) > 0:
                param_set[param] = tnv[0]
        
        strLine = ""
        strLine += owlIndiv.tagNodeValues("knowrob:taskContext")[0]
        
        failures = owlIndiv.tagAttributeValues("knowrob:eventFailure", "rdf:resource")
        
        if len(failures) > 0:
            failure = failures[0].split("#")[1]
            owlFailure = self.tti[failure]
            
            strLine += ", " + owlFailure.type()
        else:
            strLine += ", Success"
        
        for param in param_set:
            strLine += ", " + param_set[param]
        
        for subaction in subactions:
            self.printNode(subaction.split("#")[1], param_set)
        
        print strLine
Example #31
class OwlToTrainingDataConverter:
    def __init__(self):
        self.tdTrainingData = TrainingData()
        self.rdrLog = LogReader()

        self.arrIgnoredTasks = []
        self.arrAnnotatedParameters = []

    def setTaskIgnored(self, strTask):
        if not strTask in self.arrIgnoredTasks:
            self.arrIgnoredTasks.append(strTask)

    def addTrackedParameter(self, strParameter):
        self.arrAnnotatedParameters.append(strParameter)

    def convertOwlToTrainingData(self, arrLogDirectories):
        self.addTrackedParameter("taskContext")

        self.setTaskIgnored(u"WITH-FAILURE-HANDLING")
        self.setTaskIgnored(u"WITH-DESIGNATORS")
        self.setTaskIgnored(u"TAG")
        self.setTaskIgnored(u"UIMA-PERCEIVE")
        self.setTaskIgnored(u"GOAL-MONITOR-ACTION")
        self.setTaskIgnored(u"GOAL-ACHIEVE")
        self.setTaskIgnored(u"GOAL-PERFORM")
        self.setTaskIgnored(u"GOAL-PERFORM-ON-PROCESS-MODULE")
        self.setTaskIgnored(u"PERFORM-ACTION-DESIGNATOR")
        self.setTaskIgnored(u"REPLACEABLE-FUNCTION-NAVIGATE")

        self.setTaskIgnored(u"AT-LOCATION")
        self.setTaskIgnored(u"VOLUNTARY-BODY-MOVEMENT-ARMS")
        self.setTaskIgnored(u"MOTION-PLANNING")
        self.setTaskIgnored(u"MOTION-EXECUTION")
        self.setTaskIgnored(u"PUTDOWN")
        self.setTaskIgnored(u"VOLUNTARY-BODY-MOVEMENT-HEAD")
        self.setTaskIgnored(u"OPEN-GRIPPER")
        self.setTaskIgnored(u"CLOSE-GRIPPER")

        self.tdTrainingData.registerAttribute(u"Result")
        self.tdTrainingData.selectFirstAttribute(u"Result")

        self.tdTrainingData.addIgnoredParameter("_time_created")
        self.tdTrainingData.setRelation("PlanExecution")

        for strLogDirectory in arrLogDirectories:
            self.logData = self.rdrLog.loadLog(strLogDirectory)
            self.owlData = self.logData.getOwlData()
            self.designatorData = self.logData.getDesignatorData()

            self.tti = self.owlData["task-tree-individuals"]
            self.di = self.owlData["designator-individuals"]
            self.meta = self.owlData["metadata"]
            self.annotation = self.owlData["annotation"]

            for strParameter in self.annotation.tagNodeValues(
                    "knowrob:annotatedParameterType"):
                self.addTrackedParameter(strParameter)

            self.walkTree(self.meta)

        self.tdTrainingData.writeTrainingDataToFile(sys.argv[1])

    def isTaskIgnored(self, strTask):
        return strTask in self.arrIgnoredTasks

    def walkTree(self, ndOriginNode, dsOrigin=None):
        # Use a fresh DataSet per top-level call; a DataSet() default argument would be shared between calls
        if dsOrigin is None:
            dsOrigin = DataSet()
        for strParameter in self.arrAnnotatedParameters:
            arrParameters = ndOriginNode.tagNodeValues("knowrob:" +
                                                       strParameter)

            if len(arrParameters) > 0:
                dsOrigin.setAttributeValue(strParameter, arrParameters[0])

        #dsOrigin.setAttributeValue(u"Duration", unicode(ndOriginNode.time()))

        arrSubActions = ndOriginNode.subActions()

        strSubResult = u"Success"
        for strSubAction in arrSubActions:
            strSubResultTemp = self.walkTree(self.tti[strSubAction],
                                             dsOrigin.copy())

            if strSubResultTemp != u"Success":
                strSubResult = strSubResultTemp

        arrFailures = ndOriginNode.failures()
        if len(arrFailures) == 0:
            dsOrigin.setAttributeValue(u"Result", strSubResult)
        else:
            desigFailure = self.tti[arrFailures[0]]
            dsOrigin.setAttributeValue(u"Result", desigFailure.type())

        if not self.isTaskIgnored(dsOrigin.getAttributeValue(u"taskContext")):
            self.tdTrainingData.addDataSet(dsOrigin)

        return dsOrigin.getAttributeValue(u"Result")
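A minimal driver sketch (hypothetical); the class itself writes its output to sys.argv[1], so here the remaining arguments are treated as log directories:

import sys

converter = OwlToTrainingDataConverter()
converter.convertOwlToTrainingData(sys.argv[2:])   # training data is written to the file named by sys.argv[1]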
Example #32
class DataCondenser:
    def __init__(self):
        self.rdrLog = LogReader()
        self.tti = {}

    def condenseData(self, strOutputFile, arrSourceDirectories):
        dicToplevelNodes = []

        for strSourceDirectory in arrSourceDirectories:
            logData = self.rdrLog.loadLog(strSourceDirectory)
            owlData = logData.getOwlData()

            self.tti = dict(self.tti.items() +
                            owlData["task-tree-individuals"].items())
            meta = owlData["metadata"]

            if meta:
                dicToplevelNodes += meta.subActions()

        dicResult = {"Toplevel": self.condenseNodes("", dicToplevelNodes)}

        with open(strOutputFile, "wb") as fOut:
            json.dump(dicResult, fOut)

    def condenseNodes(self, strParentNode, arrNodes, nLevel=0):
        arrTypes = {}
        arrIndividuals = {}

        for strNode in arrNodes:
            owlNode = self.tti[strNode]
            nodeclass = owlNode.taskContext()

            if nLevel < 0:
                ident = "*"
            else:
                ident = nodeclass

            failures = owlNode.failures()
            failure = ""
            if len(failures) > 0:
                failure = self.tti[failures[0]].type()

            result = self.condenseNodes(strNode, owlNode.subActions(),
                                        nLevel + 1)
            if not ident in arrTypes:
                arrTypes[ident] = result
            else:
                arrTypes[ident] = self.unifyResults(arrTypes[ident], result)

            arrTypes[ident]["individuals"][strNode] = {
                "parameters": owlNode.annotatedParameters(True),
                "parent": strParentNode,
                "failure": failure,
                "class": nodeclass
            }

        return {"subTypes": arrTypes, "individuals": {}}

    def unifyResults(self, res1, res2):
        resparams = {}
        if len(res1["individuals"]) > 0:
            resparams = res1["individuals"]

        if len(res2["individuals"]) > 0:
            resparams = dict(resparams.items() + res2["individuals"].items())

        unified = {"subTypes": {}, "individuals": resparams}

        for ressub1 in res1["subTypes"]:
            if ressub1 in res2["subTypes"]:
                unified["subTypes"][ressub1] = self.unifyResults(
                    res1["subTypes"][ressub1], res2["subTypes"][ressub1])
            else:
                unified["subTypes"][ressub1] = res1["subTypes"][ressub1]

        for ressub2 in res2["subTypes"]:
            if not ressub2 in res1["subTypes"]:
                unified["subTypes"][ressub2] = res2["subTypes"][ressub2]

        return unified
Example #33
'''
A script to plot a bunch of points
'''
from mayavi import mlab
from LogReader import LogReader
import numpy as np

def plot_points(points):
    print '[plot_points] Plotting points!'
    xs = np.array([int(point._x) for point in points])
    ys = np.array([int(point._y) for point in points])
    zs = np.array([int(point._z) for point in points])
    labels = np.array([int(point._label) for point in points])
    mlab.points3d(xs, ys, zs, labels, scale_factor = 0.4, mode='cube')
    mlab.show()
    
def plot_predicted_labels(points, labels):
    print '[plot_points] Plotting points!'
    xs = np.array([int(point._x) for point in points])
    ys = np.array([int(point._y) for point in points])
    zs = np.array([int(point._z) for point in points])
    mlab.points3d(xs, ys, zs, labels, scale_factor = .4, mode='cube')
    mlab.show()

if __name__ == "__main__":
    
    #Load a log
    log_object = LogReader('../data/oakland_part3_an_rf.node_features')
    points = log_object.read()
    
    plot_points(points)
Example #34
#!/usr/bin/python

import sys
from LogReader import LogReader

lr = LogReader()

log = lr.loadLog(sys.argv[1])
od = log.getOwlData()
tti = od["task-tree-individuals"]

printable_types = ["CRAMAchieve"]
colors = ["#ddffff", "#ffddff", "#ffffdd", "#ddddff", "#ffdddd", "#ddffdd", "#dddddd", "#ffffff"]
color_assignments = {}
color_index = -1

def printIndividual(individual, parent_name):
    global color_index
    global color_assignments
    
    strdot = ""
    
    strlbl = individual.goalContext()
    if not strlbl:
        strlbl = individual.name()
    
    # if len(individual.failures()) > 0:
    #     color = "#ffdddd"
    # else:
    #     color = "#ddffdd"
    distinguisher = individual.goalContext()
Example #35
 def __init__(self):
     self.tdTrainingData = TrainingData()
     self.rdrLog = LogReader()
     
     self.arrIgnoredTasks = []
     self.arrAnnotatedParameters = []
Example #36
class RespData(object):
    def __init__(self, data, verbose=True, isString=False, plotAll = False):
        self.verbose = verbose
        # Constant declarations
        self.epochLength = 30
        self.sampleRate = 5
        self.windowLength = self.epochLength * self.sampleRate
        
        # Create filter chain
        self.filterChain = [self.zeroWithLowPeaks,
                            self.normalize,
                            self.roundData]

        # Create storage for features
        self.features = {}
        self.featureTable = {'zeroCrossings': self.zeroCrossings,

                             'upperPeaks': self.peaks,
                             'lowerPeaks': self.peaks,
                             'upperPeakTimes': self.peaks,
                             'lowerPeakTimes': self.peaks,

                             'Ti': self.TiTe,
                             'Te': self.TiTe,
                             'Hi': self.TiTe,
                             'He': self.TiTe,
                             }

        self.stages = None
        self.log = None

        if isString:
            # Create IO object
            fh = StringIO.StringIO(data)
            self.rawData, self.descriptors = self.readFile(fh)
        else:
            # Read file
            with open(data, 'r') as fh:
                self.rawData, self.descriptors = self.readFile(fh)

        # Filter data
        self.data = self.rawData.copy()

        # if plotAll is True, plots raw data vs. zeroed data vs. normalized data.
        if plotAll:
            i = 1
            numPlots = len(self.filterChain)
            plt.figure(1)
            sp1 = plt.subplot(numPlots,1,i)
            sp1.plot(self.data[:,0])
            plt.figure(2)
            sp2 = plt.subplot(numPlots,1,i)
            sp2.plot(self.data[:,1])
        for filt in self.filterChain:
            self.data = filt(self.data)
            if plotAll and (not filt == self.roundData):
                i+=1
                plt.figure(1)
                sp = plt.subplot(numPlots,1,i,sharex=sp1)
                sp.plot(self.data[:,0])
                sp.plot([0 for x in self.data[:,0]])
                plt.figure(2)
                sp = plt.subplot(numPlots,1,i,sharex=sp2)
                sp.plot(self.data[:,1])
                sp.plot([0 for x in self.data[:,1]])
                
            # stores zeroed Data
            if filt == self.zeroWithLowPeaks:
                self.zeroData = self.data.copy()
        if plotAll:
            plt.show()
        # Combine channels into a single channel
        self.singleChannel = self.getSingleChannel()[0]

        
    """#########################################################################
    # Getters
    #########################################################################"""
    
    def getData(self, channel=None, raw=False):
        if channel is not None:
            if raw:
                return self.rawData[:,channel]
            else:
                return self.data[:,channel]
        else:
            return self.singleChannel


    """#########################################################################
    # File Read/Write
    #########################################################################"""
    
    # Read in a .resp file and return data and descriptors
    def readFile(self, fh):
        if self.verbose: print "Reading file"
        data = []
        descriptors = []

        
        # Check version
        if fh.readline().strip() != "RESP100":
            raise Exception("Incompatible filetype")
        
        # Get descriptors
        descriptors.append(fh.readline().strip().strip('#'))
        descriptors.append(fh.readline().strip().strip('#'))

        # Read each line
        for line in fh:
            # Remove whitespace
            line = line.strip()
            
            # Ignore comments and empty lines
            if line == "" or line[0] == '#':
                continue
            
            # Split columns, convert to integer, and add to data
            line = map(int, line.split(','))
            data.append(line)

        # Return data and descriptors
        return (np.array(data), descriptors)


    # Load staging file
    def loadStaging(self, filename):
        if self.verbose: print "Loading stages"
        stages = []

        # Open file and read in stages
        with open(filename, 'r') as fh:
            for line in fh:
                stages.append(int(line.strip()[-1]))

        # Check length of data is compatible with stages
        if len(stages) > self.data.shape[0] / self.windowLength:
            raise Exception('Too many stages for data file')

        # Pad stages to length of data
        padding = (self.data.shape[0] / self.windowLength) - len(stages)
        if padding > 0:
            if self.verbose: print "Padding stage list by {}".format(padding)
            stages += [0] * padding

        self.stages = np.array(stages)


    # Write file to ARFF format with correct features
    def writeARFF(self, filename, featureList=None, channel=0):
        # If no feature list specified, use all features
        if featureList is None:
            featureList = ['Ti', 'Te', 'Hi', 'He', 'varVt']

        # Check that staging is already loaded
        if self.stages is None:
            raise Exception("Staging must be loaded first")

        # Get all features from feature list
        features = []
        for feature in featureList:
            features.append(self.getFeature(channel, feature))

        # Build large matrix of all features
        data = np.vstack(features + [self.stages]).transpose()

        # Filter data to remove unstaged sections
        data = np.delete(data, np.nonzero(data[:,-1] == 0), 0)

        # Convert to list for wake/sleep
        c = {1:'s', 2:'s', 3:'s', 4:'s', 5:'w'}
        data = data.tolist()
        for i in range(len(data)):
            data[i][-1] = c[data[i][-1]]

        # Write ARFF file
        with open(filename, 'w') as fh:
            fh.write("@RELATION {}\n\n".format('name')) ## TODO: FIGURE OUT WHAT NAME DOES
            for feature in featureList:
                fh.write("@ATTRIBUTE {} NUMERIC\n".format(feature))
            fh.write("@ATTRIBUTE stage {s,w}\n")

            fh.write("@DATA\n")

            writer = csv.writer(fh)
            writer.writerows(data)
            
            
    """#########################################################################
    # Stage Extraction
    #########################################################################"""

    def getStageData(self, stageList, data=None):
        # Check that stages are already loaded
        if self.stages is None:
            raise Exception("Stages must be loaded first.")

        # If data is not specified, use self.data
        if data is None:
            data = self.data

        # Check that data is the correct length
        if data.shape[0] != self.data.shape[0]:
            raise Exception("Data is not comparable to loaded data, check size")

        # Create array that indicates which points should be included
        validPoints = np.zeros(data.shape[0], dtype="bool")

        # For each stage, if it is to be included, set validPoints
        for i in range(self.stages.shape[0]):
            if self.stages[i] in stageList:
                validPoints[i*self.windowLength:(i+1)*self.windowLength] = 1

        return data[validPoints]
    

    """#########################################################################
    # Filters
    #########################################################################"""

    # simple median filter. 
    def medfilt(self,data,width):
        w = width
        newData = np.zeros(len(data))
        for i in range(len(data)):
            if i-w/2<0:
                newData[i] = np.median(np.hstack((np.zeros(w/2-i),data[0:i+w/2+1])))
            elif i+w/2+1>len(data):
                newData[i] = np.median(np.hstack((data[i-w/2:len(data)],np.zeros(i+w/2+1-len(data)))))
            else:
                newData[i] = np.median(data[i-w/2:i+w/2+1])
        return newData

    # Returns lower peaks, upper peaks, average line.
    def peaks(self, data = None):
        if data is None:
            data = self.getData()
            
        lowerPeakLine = np.zeros(data.shape)
        upperPeakLine = np.zeros(data.shape)
        avgs = np.zeros(data.shape)
        # parameter. Window length for running mean used for zero crossings. Window is about a breath long. 
        runningMeanWidth = 25
        lowerPeaks =[]
        upperPeaks = []
        lowerPeakTimes = []
        upperPeakTimes = []
        d = data.copy()
        dead = np.zeros(d.shape)        # Array to indicate where signal stays constant
        currentBreath = []              # Values of the current breath (one period of signal)
        currentBreathi = []             # Indices corresponding to above
        previous = 0                    # previous data point
        stable = 0                      # count of how long a signal is constant
        lastLowerPeak = None            # (index of the last peak, value of last peak)
        lastUpperPeak = None
        # initialize positive, a boolean which keeps track of whether the signal is above or below the average.
        if d[0]>0:
            positive = True
        else:
            positive = False

        for j in range(len(d)):
            # find mean line
            avg = np.mean(d[max(0,j-runningMeanWidth/2):min(len(d),j+runningMeanWidth/2)])
            avgs[j] = avg

            # signal above mean line. Add values to current breath. 
            if d[j] > avg:
                positive = True
                currentBreath += [d[j]]
                currentBreathi += [j]

            # signal below mean line
            else:
                # full period completed. Process peaks.     
                if positive and currentBreath:
                    m = np.argmin(currentBreath-avg)        # index of the new peak. currentBreath[m] = currentPeak
                    M = np.argmax(currentBreath-avg)
                    
                    lowerPeakTimes += [currentBreathi[m]]
                    upperPeakTimes += [currentBreathi[M]]
                    
                    # detect first peak. peak line set constant at first peak, until first peak.
                    if lastLowerPeak is None:
                        for k in range(m):
                            lowerPeakLine[k] = currentBreath[m]
                        lastLowerPeak = (currentBreathi[m], currentBreath[m])
                    if lastUpperPeak is None:
                        for k in range(M):
                            upperPeakLine[k] = currentBreath[M]
                        lastUpperPeak = (currentBreathi[M], currentBreath[M])
                    else:
                        # line fit from the last peak (lastLowerPeak=(a,b)) to the current peak (m,currentBreath[m])
                        x1 = lastLowerPeak[0]
                        y1 = lastLowerPeak[1]
                        x2 = currentBreathi[m]
                        y2 = currentBreath[m]
                        if x2-x1:
                            for k in range(x1,x2):
                                # only store peaks if the signal is not dead
                                if not dead[k]:
                                    lowerPeakLine[ k ] = float(y2-y1)/float(x2-x1)*(k-x1)+y1
                            lastLowerPeak = (currentBreathi[m], currentBreath[m])
                        else:
                            # very rare case where last peak is the same as new peak (x2-x1 = 0)
                            lowerPeakLine[currentBreathi[m]] = lastLowerPeak[1]
                            lastLowerPeak = (currentBreathi[m], currentBreath[m])
                            
                        # line fit from lastUpperPeak=(a,b) to (M,currentBreath[M])
                        x1 = lastUpperPeak[0]
                        y1 = lastUpperPeak[1]
                        x2 = currentBreathi[M]
                        y2 = currentBreath[M]
                        if x2-x1:
                            for k in range(x1,x2):
                                # only store peaks if the signal is not dead
                                if not dead[k]:
                                    upperPeakLine[ k ] = float(y2-y1)/float(x2-x1)*(k-x1)+y1
                            lastUpperPeak = (currentBreathi[M], currentBreath[M])
                        else:
                            # very rare case where last peak is the same as new peak (x2-x1 = 0)
                            upperPeakLine[currentBreathi[M]] = lastUpperPeak[1]
                            lastUpperPeak = (currentBreathi[M], currentBreath[M])
                    # Reset breath
                    currentBreath = []
                    currentBreathi = []
                    
                # add data to current breath
                currentBreath += [d[j]]
                currentBreathi += [j]
                positive = False

            # if constant, increment stable. 
            if abs(d[j] - previous) < 5:
                stable += 1
            # if changing, reset stable. 
            else:
                stable = 0
            # if constant for long enough, process peaks. When constant, every point is counted as a peak. 
            if stable>5:
                
                # connect previous peak to new "peak"
                x1 = lastLowerPeak[0]
                y1 = lastLowerPeak[1]
                x2 = j
                y2 = d[j]
                if x2-x1:
                    for k in range(x1,x2):
                        if not dead[k]:
                            lowerPeakLine[ k ] = float(y2-y1)/float(x2-x1)*(k-x1)+y1
                x1 = lastUpperPeak[0]
                y1 = lastUpperPeak[1]
                if x2-x1:
                    for k in range(x1,x2):
                        if not dead[k]:
                            upperPeakLine[ k ] = float(y2-y1)/float(x2-x1)*(k-x1)+y1
                
                lowerPeakTimes += [j]
                upperPeakTimes += [j]
                
                # points where stable was incrementing are also dead
                for k in range(j-4,j+1):
                    dead[k] = 1
                    lowerPeakLine[k] = d[k]
                    upperPeakLine[k] = d[k]
                lastLowerPeak = (j,d[j])
                lastUpperPeak = (j,d[j])
                currentBreath = []
                currentBreathi = []
                
            previous = d[j]
        # final peak to the end is a flat line
        for j in range(lastLowerPeak[0],len(d)):
            lowerPeakLine[j] = lastLowerPeak[1]
        for j in range(lastUpperPeak[0],len(d)):
            upperPeakLine[j] = lastUpperPeak[1]
        
        upperPeaks = data[upperPeakTimes]
        lowerPeaks = data[lowerPeakTimes]
        upperPeakTimes = np.array(upperPeakTimes)
        lowerPeakTimes = np.array(lowerPeakTimes)
        
        return {'avgs':avgs,'upperPeaks': upperPeaks, 'upperPeakTimes': upperPeakTimes, 'lowerPeaks': lowerPeaks,
                'lowerPeakTimes': lowerPeakTimes,'upperPeakLine':upperPeakLine, 'lowerPeakLine':lowerPeakLine}

    def TiTe(self, peaks=None):
        if peaks is None:
            uPeaks = self.getFeature('upperPeaks')
            lPeaks = self.getFeature('lowerPeaks')
            uPeakTimes = self.getFeature('upperPeakTimes')
            lPeakTimes = self.getFeature('lowerPeakTimes')
        else:
            uPeaks = peaks[0]
            uPeakTimes = peaks[1]
            lPeaks = peaks[2]
            lPeakTimes = peaks[3]
        
        # Make arrays same length
        numBreaths = min(len(uPeaks), len(lPeaks))
        uPeaks = uPeaks[:numBreaths]
        lPeaks = lPeaks[:numBreaths]
        uPeakTimes = uPeakTimes[:numBreaths]
        lPeakTimes = lPeakTimes[:numBreaths]
        
        # Find Ti/Te values
        if uPeakTimes[0] > lPeakTimes[0]:
            # Inhale happened first
            tiValues = uPeakTimes - lPeakTimes
            teValues = (np.roll(lPeakTimes, -1) - uPeakTimes)[:-1]

            hiValues = uPeaks - lPeaks
            heValues = (np.roll(lPeaks, -1) - uPeaks)[:-1]
        else:
            # Exhale happened first
            teValues = lPeakTimes - uPeakTimes
            tiValues = (np.roll(uPeakTimes, -1) - lPeakTimes)[:-1]

            heValues = lPeaks - uPeaks
            hiValues = (np.roll(uPeaks, -1) - lPeaks)[:-1]


        # Fill in long array with most recent value
        ti = np.zeros(len(self.data))
        te = np.zeros(len(self.data))
        hi = np.zeros(len(self.data))
        he = np.zeros(len(self.data))

        for i in range(len(uPeakTimes)-1):
            ti[uPeakTimes[i]:uPeakTimes[i+1]] = tiValues[i]
            hi[uPeakTimes[i]:uPeakTimes[i+1]] = hiValues[i]
        ti[uPeakTimes[-1]:] = tiValues[-1]
        hi[uPeakTimes[-1]:] = hiValues[-1]

        for i in range(len(lPeakTimes)-1):
            te[lPeakTimes[i]:lPeakTimes[i+1]] = teValues[i]
            he[lPeakTimes[i]:lPeakTimes[i+1]] = heValues[i]
        te[lPeakTimes[-1]:] = teValues[-1]
        he[lPeakTimes[-1]:] = heValues[-1]

        return {'Ti': ti, 'Te': te, 'Hi': hi, 'He': he}
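
For reference, the forward-fill used above to expand per-breath values into a per-sample array can be exercised on its own; a minimal sketch with made-up peak times and durations (the names below are illustrative, not taken from the class):

import numpy as np

# Hypothetical sample indices of three upper peaks and the Ti value measured for each breath
peakTimes = np.array([3, 8, 14])
tiValues = np.array([2, 4, 3])

# Hold each breath's value from its peak up to the next peak, as the loops in TiTe() do
ti = np.zeros(20)
for i in range(len(peakTimes) - 1):
    ti[peakTimes[i]:peakTimes[i + 1]] = tiValues[i]
ti[peakTimes[-1]:] = tiValues[-1]
# ti is now a step function: zeros before sample 3, then 2, 4 and 3 held between peaks
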
    
    # Zero-center data    
    def zeroWithLowPeaks(self, data):
        if self.verbose: print "Zeroing data"
        numCols = data.shape[1]
        newData = np.zeros(data.shape)
        for i in range(numCols):
            d = self.peaks(data=data[:,i])
            lowerPeakLine = d['lowerPeakLine']
            avgs = d['avgs']
            newData[:,i]=data[:,i]-lowerPeakLine
            plt.figure()
            plt.plot(data[:,i])
            plt.plot(lowerPeakLine)
            plt.plot(avgs)
            plt.plot(newData[:,i])
            plt.plot([0 for x in newData[:,i]])
        return newData
        
    # Normalize data using median filter
    def normalize(self, data):
        if self.verbose: print 'Normalizing data'
        # Parameters
        medFilterWidth = 151
        scale = 30
        numCols = data.shape[1]
        maxValue = 400

        newData = np.zeros(data.shape)
        for i in range(numCols):
            d = self.peaks(data=data[:,i])
            upperPeakLine = d['upperPeakLine']
            avgs = d['avgs']
            plt.figure()
            plt.plot(data[:,i])
            plt.plot(upperPeakLine)
            plt.plot(avgs)
                
            # Decimate peak list
            dec = 10
            decUpperPeaks = upperPeakLine[np.arange(1,len(upperPeakLine), dec)]
            
            # Filter shortened peak list
            if self.verbose: print " - {}: Median filter".format(i)
            decUpperPeaks = self.medfilt(decUpperPeaks, medFilterWidth)
            
            
            # Un-decimate peak list
            for j in range(len(decUpperPeaks)):
                if not j:
                    for k in range(1):
                        upperPeakLine[k] = decUpperPeaks[j]
                else:
                    for k in range(dec):
                        upperPeakLine[dec*(j-1)+k+1] = float(decUpperPeaks[j] - decUpperPeaks[j-1] )/dec * k + decUpperPeaks[j-1]
            for k in range(len(decUpperPeaks)*dec+1, len(upperPeakLine)):
                upperPeakLine[k] = decUpperPeaks[len(decUpperPeaks)-1]
                
            plt.plot(upperPeakLine)
                
            # Normalize using width of envelope
            width = (upperPeakLine) / 2.0
            width[width == 0] = np.inf
            data[:,i] = (data[:,i] / width) * scale

            # Limit peaks
            data[:,i][data[:,i] > maxValue] = maxValue
            data[:,i][data[:,i] < -maxValue] = -maxValue

            newData[:,i] = data[:,i]
            
        return newData
            


    # Round data array to integer
    def roundData(self, data):
        if self.verbose: print 'Rounding Data'
        newData = data.round(0).astype(int)
        return newData

    """#########################################################################
    # Feature Extraction
    #########################################################################"""
    
    # Get requested feature from table, calculate if not available
    def getFeature(self, featureName):
        # Check if feature needs to be calculated
        if featureName not in self.features:
            
            if featureName in self.featureTable:
                data = self.featureTable[featureName]()
                self.features.update(data)

        # Return feature
        return self.features[featureName]
    

##    # Find peaks between zero crossings
##    def peaks(self):
##        # Need zero crossings
##        crossings = self.getFeature('zeroCrossings')
##
##        data = self.getData()
##
##        # Storage
##        uPeaks = []
##        uPeakTimes = []
##        lPeaks = []
##        lPeakTimes = []
##
##        for i in range(0, len(crossings)-2, 2):
##            bump = data[crossings[i]:crossings[i+2]]
##            uPeakTimes.append(crossings[i] + np.argmax(bump))
##            lPeakTimes.append(crossings[i] + np.argmin(bump))
##
##        uPeaks = data[uPeakTimes]
##        lPeaks = data[lPeakTimes]
##        uPeakTimes = np.array(uPeakTimes)
##        lPeakTimes = np.array(lPeakTimes)
##
##        return {'upperPeaks': uPeaks, 'upperPeakTimes': uPeakTimes, 'lowerPeaks': lPeaks, 'lowerPeakTimes': lPeakTimes}
        

    # Find locations of zero crossings
    def zeroCrossings(self):
        data = self.getData()
            
        dataRoll = np.roll(data, 1)
        
        # Effectively: if a point's sign differs from the previous point's sign, flag it as a crossing
        crossings = np.nonzero( (data>0) != (dataRoll>0) )[0]
        
        return {'zeroCrossings': crossings}
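
The sign-comparison trick in zeroCrossings can be checked on a toy signal; a minimal sketch (note that the wrap-around of np.roll flags index 0 whenever the first and last samples have different signs):

import numpy as np

x = np.array([1.0, 2.0, -1.0, -3.0, 0.5, 2.0, -0.5])

rolled = np.roll(x, 1)                               # rolled[i] is the previous sample
crossings = np.nonzero((x > 0) != (rolled > 0))[0]
# crossings -> array([0, 2, 4, 6]); 2, 4 and 6 are the real sign changes
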


##    def TiTe(self):
##        # Requires peaks
##        uPeaks = self.getFeature('upperPeaks')
##        lPeaks = self.getFeature('lowerPeaks')
##        uPeakTimes = self.getFeature('upperPeakTimes')
##        lPeakTimes = self.getFeature('lowerPeakTimes')
##
##        # Make arrays same length
##        numBreaths = min(len(uPeaks), len(lPeaks))
##        uPeaks = uPeaks[:numBreaths]
##        lPeaks = lPeaks[:numBreaths]
##        uPeakTimes = uPeakTimes[:numBreaths]
##        lPeakTimes = lPeakTimes[:numBreaths]
##        
##        # Find Ti/Te values
##        if uPeakTimes[0] > lPeakTimes[0]:
##            # Inhale happened first
##            tiValues = uPeakTimes - lPeakTimes
##            teValues = (np.roll(lPeakTimes, -1) - uPeakTimes)[:-1]
##
##            hiValues = uPeaks - lPeaks
##            heValues = (np.roll(lPeaks, -1) - uPeaks)[:-1]
##        else:
##            # Exhale happened first
##            teValues = lPeakTimes - uPeakTimes
##            tiValues = (np.roll(uPeakTimes, -1) - lPeakTimes)[:-1]
##
##            heValues = lPeaks - uPeaks
##            hiValues = (np.roll(uPeaks, -1) - lPeaks)[:-1]
##
##
##        # Fill in long array with most recent value
##        ti = np.zeros(len(self.data))
##        te = np.zeros(len(self.data))
##        hi = np.zeros(len(self.data))
##        he = np.zeros(len(self.data))
##
##        for i in range(len(uPeakTimes)-1):
##            ti[uPeakTimes[i]:uPeakTimes[i+1]] = tiValues[i]
##            hi[uPeakTimes[i]:uPeakTimes[i+1]] = hiValues[i]
##        ti[uPeakTimes[-1]:] = tiValues[-1]
##        hi[uPeakTimes[-1]:] = hiValues[-1]
##
##        for i in range(len(lPeakTimes)-1):
##            te[lPeakTimes[i]:lPeakTimes[i+1]] = teValues[i]
##            he[lPeakTimes[i]:lPeakTimes[i+1]] = heValues[i]
##        te[lPeakTimes[-1]:] = teValues[-1]
##        he[lPeakTimes[-1]:] = heValues[-1]
##
##        return {'Ti': ti, 'Te': te, 'Hi': hi, 'He': he}


    """#########################################################################
    # Channel Selection
    #########################################################################"""

    def getSingleChannel(self):
        # Parameters
        maxValue = 2**24
        extremeMargin = 0.10
        extremeTop = maxValue * (1-extremeMargin)
        extremeBottom = maxValue * extremeMargin

        # Get datasets in windows
        ch1 = self.rawData[:, 0]
        ch2 = self.rawData[:, 1]
        remainder = ch1.shape[0] % self.windowLength
        if remainder != 0:
            ch1 = np.hstack([ch1, np.zeros(self.windowLength - remainder)])
            ch2 = np.hstack([ch2, np.zeros(self.windowLength - remainder)])
        ch1 = ch1.reshape((-1, self.windowLength))
        ch2 = ch2.reshape((-1, self.windowLength))

        # Build list of comparator functions that will be applied in order, lower value will be chosen
        functionList = [lambda i,ch: np.count_nonzero((ch[i]==maxValue) + (ch[i]==0)),
                        lambda i,ch: np.count_nonzero((ch[i]<extremeBottom)+(ch[i]>extremeTop)),
                        lambda i,ch: -1 * np.std(ch[i]),
                        ]

        # For each window, decide which channel to use
        output = np.zeros(ch1.shape[0], dtype=int)
        for i in range(ch1.shape[0]):
            for func in functionList:
                # Get score
                v1 = func(i,ch1)
                v2 = func(i,ch2)

                # See if we can make a decision on these values
                if v1 < v2:
                    output[i] = 0
                    break
                elif v2 < v1:
                    output[i] = 1
                    break
                else:
                    continue


        # Create final dataset
        data = np.zeros(self.data.shape[0])
        for i in range(output.shape[0]):
            r1 = i*self.windowLength
            r2 = min((i+1)*self.windowLength, self.data.shape[0])
            data[r1:r2] = self.data[r1:r2, output[i]]
        
        return data, output
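
The channel-selection cascade above applies each comparator in turn and keeps the channel with the lower score; a minimal sketch of the first rule on two hand-made windows (synthetic values, not from a .resp file):

import numpy as np

maxValue = 2**24
win1 = np.array([0, 0, 100, maxValue, 50])     # two zeros and one railed sample
win2 = np.array([60, 80, 120, 90, 70])         # no saturated or zero samples

bad1 = np.count_nonzero((win1 == maxValue) + (win1 == 0))   # -> 3
bad2 = np.count_nonzero((win2 == maxValue) + (win2 == 0))   # -> 0
chosen = 0 if bad1 < bad2 else 1                            # -> 1: use the second channel here
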


    """#########################################################################
    # Sleep/Wake Based on Actigraphy
    #########################################################################"""

    def getWake(self, channel=None, moveWindow=25, sleepWindow=150, moveThresh=3.0, wakeThresh=0.40):
        # Get view of data
        data = self.getData(channel)

        # Create standard deviation
        stdChart = np.zeros(data.shape[0])
        for i in range(data.shape[0]):
            stdChart[i] = np.std(data[max(0, i-moveWindow/2) : min(data.shape[0], i+moveWindow/2)])

        # Create movement chart
        moveChart = stdChart > moveThresh * np.mean(stdChart)

        # Pad to fill each epoch
        remainder = moveChart.shape[0] % sleepWindow
        if remainder:
            moveChart = np.hstack([moveChart, [0]*(sleepWindow-remainder)])

        # For each epoch, if enough movement, mark wake
        wakeChart = moveChart.reshape(-1, sleepWindow)
        wakeChart = np.sum(wakeChart, 1) / float(sleepWindow)
        wakeChart = (wakeChart > wakeThresh) * 1

        return wakeChart, moveChart
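
The actigraphy-style logic in getWake (rolling standard deviation, movement threshold, then a per-epoch wake fraction) can be tried on a synthetic trace; a minimal sketch with arbitrary window and threshold values:

import numpy as np

# Flat trace with a burst of movement in the middle
trace = np.concatenate([np.zeros(50), 10.0 * np.random.randn(20), np.zeros(50)])

window = 25
stdChart = np.array([np.std(trace[max(0, i - window // 2):i + window // 2])
                     for i in range(len(trace))])

# Samples whose local variability exceeds 3x the mean variability count as movement
moveChart = stdChart > 3.0 * np.mean(stdChart)
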

    """#########################################################################
    # Utilities
    #########################################################################"""

    # Convert from a list of values per epoch to a list of time-values
    def fillEpochs(self, data):
        return np.ravel(np.tile(data, self.windowLength).reshape(-1, data.shape[0]).transpose())[:self.data.shape[0]]

    # Convert from time-series to a value per epoch array
    def getEpoch(self, data):
        return data.reshape(-1, self.windowLength)[:,0].copy()
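
fillEpochs and getEpoch are near-inverse views between a per-epoch array and a per-sample array; a minimal standalone sketch with a hypothetical windowLength of 3:

import numpy as np

windowLength = 3                      # hypothetical epoch length in samples
perEpoch = np.array([0, 1, 1])        # one value per epoch

# Expand to one value per sample, as fillEpochs does before trimming to the data length
perSample = np.ravel(np.tile(perEpoch, windowLength)
                     .reshape(-1, perEpoch.shape[0]).transpose())
# perSample -> [0 0 0 1 1 1 1 1 1]

# Collapse back to one value per epoch, as getEpoch does
backToEpochs = perSample.reshape(-1, windowLength)[:, 0].copy()
# backToEpochs -> [0 1 1]
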

    """#########################################################################
    # Sleep Lab Log Files
    #########################################################################"""

    def loadLog(self, filename):
        self.log = LogReader(filename)

    def getEvent(self, eventType, filt=None):
        if self.log is None:
            raise Exception('Error: Must load log first')
        
        return self.log.getTimeSeries(eventType, self.data.shape[0], filt=filt)

    def getEventTypes(self):
        if self.log is None:
            raise Exception('Error: Must load log first')
        
        return self.log.getEventTypes()
Example #37
0
class LogAnalyzer:
    def __init__(self):
        self.rdrLog = LogReader()
        self.arrColors = ["white", "red", "blue", "yellow", "black"]

    def analyzeLog(self, strPath):
        log = self.rdrLog.loadLog(strPath)
        #data = log.getOwlData()["task-tree"]
        tti = log.getOwlData()["task-tree-individuals"]

        #with open("data.pkl", "wb") as f:
        #    pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
        #    data = pickle.load(f)

        #data = self.correctTime(data)

        #imgTaskPie = Image(Geometry(700, 700), Color("white"))

        #imgTaskPie.strokeColor("#000000")
        #imgTaskPie.strokeWidth(2.5)
        #imgTaskPie.fillColor("transparent")

        #self.drawTaskPie(imgTaskPie, data, -1, -1, 5)
        #imgTaskPie.write("out.png")

        # toTasks = self.timelyOrderedTasks(data)

        # dicClassTimes = {}
        # for dicTask in toTasks:
        #     owlTask = tti[dicTask["name"]]
        #     strType = owlTask.type()

        #     if not strType in dicClassTimes:
        #         dicClassTimes[strType] = int(dicTask["time"])
        #     else:
        #         dicClassTimes[strType] += int(dicTask["time"])
        
        # nEarliestTS = -1
        # nLatestTS = -1
        
        # for dicTask in tti:#toTasks:
        #     owlTask = tti[dicTask]
        #     TS = owlTask.timeSpan()
            
        #     if TS:
        #         if nEarliestTS == -1 or float(TS[0]) < nEarliestTS:
        #             nEarliestTS = float(TS[0])
        #         if nLatestTS == -1 or float(TS[1]) > nLatestTS:
        #             nLatestTS = float(TS[1])
        
        # nOverallTime = nLatestTS - nEarliestTS
        
        fEarliest = -1
        fLatest = -1
        
        dicClassTimes = {}
        for strTask in tti:
            owlTask = tti[strTask]
            TS = owlTask.timeSpan()
            
            if TS:
                if owlTask.type() in dicClassTimes:
                    dicClassTimes[owlTask.type()] += (float(TS[1]) - float(TS[0]))
                else:
                    dicClassTimes[owlTask.type()] = (float(TS[1]) - float(TS[0]))
                
                if float(TS[0]) < fEarliest or fEarliest == -1:
                    fEarliest = float(TS[0])

                if float(TS[1]) > fLatest or fLatest == -1:
                    fLatest = float(TS[1])
        
        print "Time =", (fLatest - fEarliest)
        
        #with open("classtimes.pkl", "wb") as f:
        #    pickle.dump(dicClassTimes, f, pickle.HIGHEST_PROTOCOL)

        #print "Longest Task: ", toTasks[len(toTasks) - 1]

        #for strItem in dicClassTimes:
            #print strItem, dicClassTimes[strItem]
        
        for strClass in dicClassTimes:
            print strClass, " = ", dicClassTimes[strClass]
        
        #print ""
        # if not "MotionPlanning" in dicClassTimes:
        #     print "Picking Up Objects: " + str(dicClassTimes["PickingUpAnObject"])
        #     print "Placing Objects: " + str(dicClassTimes["PuttingDownAnObject"])
        #     print "Path Planning + Motion Execution: " + str(dicClassTimes["ArmMovement"])
        #     print "Navigation: " + str(dicClassTimes["BaseMovement"])
        #     print "Head Movement: " + str(dicClassTimes["HeadMovement"])
        #     print "Perception Queries: " + str(dicClassTimes["UIMAPerception"])
        #     print "Object Identity Resolution + Belief State Updates: " + str(dicClassTimes["PerceivingObjects"] - dicClassTimes["UIMAPerception"])
        # else:
        #     # print "--- General ---"
        #     # print "Overall                    : " + str(nOverallTime)
        #     print "--- High Level ---"
        #     print "Picking Up Objects         : " + str(dicClassTimes["PickingUpAnObject"] +
        #                                                 dicClassTimes["CarryingAnObject"] +
        #                                                 dicClassTimes["LiftingAnObject"])
        #     print "Placing Objects            : " + str(dicClassTimes["PuttingDownAnObject"])
        #     print "Finding Objects            : " + str(dicClassTimes["FindingObjects"])
        #     # print "Other Activities           : " + str(nOverallTime -
        #     #                                             (dicClassTimes["PickingUpAnObject"] +
        #     #                                              dicClassTimes["CarryingAnObject"] +
        #     #                                              dicClassTimes["LiftingAnObject"] +
        #     #                                              dicClassTimes["PuttingDownAnObject"] +
        #     #                                              dicClassTimes["FindingObjects"] -
        #     #                                              dicClassTimes["UIMAPerception"]))
        #     print "--- Low Level ---"
        #     print "Path Planning              : " + str(dicClassTimes["MotionPlanning"])
        #     print "Motion Execution           : " + str(dicClassTimes["MotionExecution"])
        #     print "Navigation                 : " + str(dicClassTimes["BaseMovement"])
        #     print "Head Movement              : " + str(dicClassTimes["HeadMovement"])
        #     print "Perception Queries         : " + str(dicClassTimes["UIMAPerception"])
        #     print "Object Identity Resolution : " + str(dicClassTimes["ObjectIdentityResolution"])
        #     print "Belief State Updates       : " + str(dicClassTimes["BeliefStateUpdate"])

    def timelyOrderedTasks(self, data):
        dicLinear = self.linearizeTaskTree(data)
        arrItems = []

        for strItem in dicLinear:
            arrItems.append({"name": strItem,
                             "time": dicLinear[strItem]})

        return sorted(arrItems, key=lambda item: item["time"])

    def linearizeTaskTree(self, tree):
        dicLinear = {}

        for strBranch in tree:
            dicLinear[strBranch] = tree[strBranch]["time"]
            dicSub = self.linearizeTaskTree(tree[strBranch]["children"])
            dicLinear = dict(dicLinear, **dicSub)

        return dicLinear
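
linearizeTaskTree recursively flattens the nested {"time", "children"} structure into a flat name-to-time dictionary; a standalone re-statement of the same recursion on a made-up tree (the function and task names here are illustrative):

def linearize(tree):
    flat = {}
    for name in tree:
        flat[name] = tree[name]["time"]
        flat.update(linearize(tree[name]["children"]))
    return flat

tree = {"Root": {"time": 10,
                 "children": {"Child-A": {"time": 4, "children": {}},
                              "Child-B": {"time": 6, "children": {}}}}}
# linearize(tree) -> {"Root": 10, "Child-A": 4, "Child-B": 6}
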

    def correctTime(self, data):
        for strBranchName in data:
            data[strBranchName]["children"] = self.correctTime(data[strBranchName]["children"])

            nTimeSum = 0
            for strChild in data[strBranchName]["children"]:
                nTimeSum += data[strBranchName]["children"][strChild]["time"]

            if data[strBranchName]["time"] < nTimeSum:
                data[strBranchName]["time"] = nTimeSum

        return data

    def drawTaskPie(self, imgPie, dicTaskTree, globalTimespan = -1, parentTimespan = -1, radiusDelta = 10, radiusInner = 0, angleStart = 0, angleEnd = 360):
        if globalTimespan == -1:
            globalTimespan = 0
            for strBranchName in dicTaskTree:
                globalTimespan += dicTaskTree[strBranchName]["time"]

        if parentTimespan == -1:
            parentTimespan = 0
            for strBranchName in dicTaskTree:
                parentTimespan += dicTaskTree[strBranchName]["time"]

        if parentTimespan > 0:
            nSegments = len(dicTaskTree)

            radiusOuter = radiusInner + radiusDelta

            nCenterX = imgPie.columns() / 2
            nCenterY = imgPie.rows() / 2

            nStartXOuter = nCenterX - radiusOuter
            nStartYOuter = nCenterY - radiusOuter
            nEndXOuter = nCenterX + radiusOuter
            nEndYOuter = nCenterY + radiusOuter

            nStartXInner = nCenterX - radiusInner
            nStartYInner = nCenterY - radiusInner
            nEndXInner = nCenterX + radiusInner
            nEndYInner = nCenterY + radiusInner

            dAngleOffset = 0

            for strBranchName in dicTaskTree:
                dAngleWidth = float(dicTaskTree[strBranchName]["time"]) / float(parentTimespan) * (angleEnd - angleStart)

                if dAngleWidth > 0:
                    dStartingAngle = angleStart + dAngleOffset
                    dEndingAngle = dStartingAngle + dAngleWidth
                    dAngleOffset += dAngleWidth

                    if "children" in dicTaskTree[strBranchName]:
                        if len(dicTaskTree[strBranchName]["children"]) > 0:
                            self.drawTaskPie(imgPie, dicTaskTree[strBranchName]["children"], globalTimespan, dicTaskTree[strBranchName]["time"], radiusDelta, radiusOuter, dStartingAngle, dEndingAngle)

                    dTimeSpanDegree = float(dicTaskTree[strBranchName]["time"]) / float(globalTimespan)
                    imgPie.strokeColor(Color(int(255 * dTimeSpanDegree), 0, int(255 * (1.0 - dTimeSpanDegree))))

                    lstDrawables = DrawableList()
                    lstDrawables.append(DrawableLine(nCenterX + radiusInner * math.cos(math.radians(dStartingAngle)),
                                                     nCenterY + radiusInner * math.sin(math.radians(dStartingAngle)),
                                                     nCenterX + radiusOuter * math.cos(math.radians(dStartingAngle)),
                                                     nCenterY + radiusOuter * math.sin(math.radians(dStartingAngle))))
                    lstDrawables.append(DrawableArc(nStartXOuter, nStartYOuter, nEndXOuter, nEndYOuter, dStartingAngle, dEndingAngle))
                    lstDrawables.append(DrawableLine(nCenterX + radiusInner * math.cos(math.radians(dEndingAngle)),
                                                     nCenterY + radiusInner * math.sin(math.radians(dEndingAngle)),
                                                     nCenterX + radiusOuter * math.cos(math.radians(dEndingAngle)),
                                                     nCenterY + radiusOuter * math.sin(math.radians(dEndingAngle))))
                    lstDrawables.append(DrawableArc(nStartXInner, nStartYInner, nEndXInner, nEndYInner, dStartingAngle, dEndingAngle))

                    imgPie.draw(lstDrawables)
Example #38
0
class RespData(object):
    def __init__(self, data, verbose=True, isString=False):
        self.verbose = verbose
        
        # Constant declarations
        self.epochLength = 30
        self.sampleRate = 5
        self.windowLength = self.epochLength * self.sampleRate
        
        # Create filter chain
        self.filterChain = [self.zeroWithFiltFilt,
                            self.normalize,
                            self.roundData]

        # Create storage for features
        self.features = {}
        self.featureTable = {'zeroCrossings': self.zeroCrossings,

                             'upperPeaks': self.peaks,
                             'lowerPeaks': self.peaks,
                             'upperPeakTimes': self.peaks,
                             'lowerPeakTimes': self.peaks,

                             'Ti': self.TiTe,
                             'Te': self.TiTe,
                             'Hi': self.TiTe,
                             'He': self.TiTe,
                             }

        self.stages = None
        self.log = None

        if isString:
            # Create IO object
            fh = StringIO.StringIO(data)
            self.rawData, self.descriptors = self.readFile(fh)
        else:
            # Read file
            with open(data, 'r') as fh:
                self.rawData, self.descriptors = self.readFile(fh)

        # Filter data
        self.data = self.rawData.copy()
        for filt in self.filterChain:
            self.data = filt(self.data)

        # Combine channels into a single channel
        self.singleChannel = self.getSingleChannel()[0]

        
    """#########################################################################
    # Getters
    #########################################################################"""
    
    def getData(self, channel=None, raw=False):
        if channel is not None:
            if raw:
                return self.rawData[:,channel]
            else:
                return self.data[:,channel]
        else:
            return self.singleChannel


    """#########################################################################
    # File Read/Write
    #########################################################################"""
    
    # Read in a .resp file and return data and descriptors
    def readFile(self, fh):
        if self.verbose: print "Reading file"
        data = []
        descriptors = []

        
        # Check version
        if fh.readline().strip() != "RESP100":
            raise Exception("Incompatible filetype")
        
        # Get descriptors
        descriptors.append(fh.readline().strip().strip('#'))
        descriptors.append(fh.readline().strip().strip('#'))

        # Read each line
        for line in fh:
            # Remove whitespace
            line = line.strip()
            
            # Ignore comments and empty lines
            if line == "" or line[0] == '#':
                continue
            
            # Split columns, convert to integer, and add to data
            line = map(int, line.split(','))
            data.append(line)

        # Return data and descriptors
        return (np.array(data), descriptors)
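
readFile expects a plain-text layout with a RESP100 header line, two '#'-prefixed descriptor lines, and comma-separated integer samples per row; a minimal sketch that builds such a file in memory and parses it the same way (the sample values are made up):

import numpy as np
import StringIO

sample = "RESP100\n#channel one\n#channel two\n12,15\n13,14\n# a comment\n11,16\n"
fh = StringIO.StringIO(sample)

assert fh.readline().strip() == "RESP100"      # version check
descriptors = [fh.readline().strip().strip('#') for _ in range(2)]
rows = [map(int, line.strip().split(','))
        for line in fh if line.strip() and not line.strip().startswith('#')]
data = np.array(rows)                           # -> 3x2 array of samples
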


    # Load staging file
    def loadStaging(self, filename):
        if self.verbose: print "Loading stages"
        stages = []

        # Open file and read in stages
        with open(filename, 'r') as fh:
            for line in fh:
                stages.append(int(line.strip()[-1]))

        # Check length of data is compatible with stages
        if len(stages) > self.data.shape[0] / self.windowLength:
            raise Exception('Too many stages for data file')

        # Pad stages to length of data
        padding = (self.data.shape[0] / self.windowLength) - len(stages)
        if padding > 0:
            if self.verbose: print "Padding stage list by {}".format(padding)
            stages += [0] * padding

        self.stages = np.array(stages)


    # Write file to ARFF format with correct features
    def writeARFF(self, filename, featureList=None, channel=0):
        # If no feature list specified, use all features
        if featureList is None:
            featureList = ['Ti', 'Te', 'Hi', 'He', 'varVt']

        # Check that staging is already loaded
        if self.stages is None:
            raise Exception("Staging must be loaded first")

        # Get all features from feature list
        features = []
        for feature in featureList:
            features.append(self.getFeature(feature))

        # Build large matrix of all features
        data = np.vstack(features + [self.stages]).transpose()

        # Filter data to remove unstaged sections
        data = np.delete(data, np.nonzero(data[:,-1] == 0), 0)

        # Convert to list for wake/sleep
        c = {1:'s', 2:'s', 3:'s', 4:'s', 5:'w'}
        data = data.tolist()
        for i in range(len(data)):
            data[i][-1] = c[data[i][-1]]

        # Write ARFF file
        with open(filename, 'w') as fh:
            fh.write("@RELATION {}\n\n".format('name')) ## TODO: FIGURE OUT WHAT NAME DOES
            for feature in featureList:
                fh.write("@ATTRIBUTE {} NUMERIC\n".format(feature))
            fh.write("@ATTRIBUTE stage {s,w}\n")

            fh.write("@DATA\n")

            writer = csv.writer(fh)
            writer.writerows(data)
            
            
    """#########################################################################
    # Stage Extraction
    #########################################################################"""

    def getStageData(self, stageList, data=None):
        # Check that stages are already loaded
        if self.stages is None:
            raise Exception("Stages must be loaded first.")

        # If data is not specified, use self.data
        if data is None:
            data = self.data

        # Check that data is the correct length
        if data.shape[0] != self.data.shape[0]:
            raise Exception("Data is not comparable to loaded data, check size")

        # Create array that indicates which points should be included
        validPoints = np.zeros(data.shape[0], dtype="bool")

        # For each stage, if it is to be included, set validPoints
        for i in range(self.stages.shape[0]):
            if self.stages[i] in stageList:
                validPoints[i*self.windowLength:(i+1)*self.windowLength] = 1

        return data[validPoints]
    

    """#########################################################################
    # Filters
    #########################################################################"""

    # Zero-center data
    def zeroWithFiltFilt(self, data):
        if self.verbose: print "Zeroing data"
        N = 3
        Wn = 0.05
        b, a = sig.butter(N, Wn)
        numCols = data.shape[1]
        newData = np.zeros(data.shape)
        for i in range(numCols):
            newData[:,i] = data[:,i] - sig.filtfilt(b, a, data[:,i])
        return newData
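
The zero-centring above works by low-pass filtering the trace to estimate its slow baseline and subtracting that baseline; a minimal sketch on a synthetic drifting signal, assuming sig refers to scipy.signal as the calls above suggest:

import numpy as np
import scipy.signal as sig

t = np.arange(2000)
trace = 50.0 * np.sin(2 * np.pi * t / 2000.0) + 5.0 * np.sin(2 * np.pi * t / 40.0)

b, a = sig.butter(3, 0.05)            # same order and cutoff as zeroWithFiltFilt
baseline = sig.filtfilt(b, a, trace)  # forward-backward filtering, no phase shift
zeroed = trace - baseline             # slow drift removed, fast oscillation kept
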


    # Normalize data using median filter
    def normalize(self, data):
        if self.verbose: print 'Normalizing data'
        # Parameters
        filterWidth = 151
        scale = 30
##        nearZero = np.std(data) * 0.05
        nearZero = 10
        numCols = data.shape[1]
        maxValue = 400

        newData = np.zeros(data.shape)
        for i in range(numCols):
            d = data[:,i].copy()
            
            # Find envelope
            if self.verbose: print " - {}: Finding envelope".format(i)
            upperPeaks = np.zeros(d.shape)
            lowerPeaks = np.zeros(d.shape)
            currentUpper = 0.0
            currentLower = 0.0
            for j in range(1, len(d)-1):
                if (d[j-1] < d[j] > d[j+1]) and d[j] > 0:
                    currentUpper = d[j]
                if (d[j-1] > d[j] < d[j+1]) and d[j] < 0:
                    currentLower = d[j]

                upperPeaks[j] = currentUpper
                lowerPeaks[j] = currentLower

            # Decimate peak list
            dec = 10
            decUpperPeaks = upperPeaks[np.arange(1,len(upperPeaks), dec)]
            decLowerPeaks = lowerPeaks[np.arange(1,len(lowerPeaks), dec)]
        
            # Filter shortened peak list
            if self.verbose: print " - {}: Median filter".format(i)
            decUpperPeaks = sig.medfilt(decUpperPeaks, filterWidth)
            decLowerPeaks = sig.medfilt(decLowerPeaks, filterWidth)

            # Un-decimate peak list
            for x in range(len(upperPeaks)):
                upperPeaks[x] = decUpperPeaks[min(x / dec, len(decUpperPeaks) - 1)]
                lowerPeaks[x] = decLowerPeaks[min(x / dec, len(decLowerPeaks) - 1)]

            # Normalize using width of envelope
            width = (upperPeaks - lowerPeaks) / 2.0
            width[width < nearZero] = np.inf
            d = (d / width) * scale

            # Limit peaks
            d[d > maxValue] = maxValue
            d[d < -maxValue] = -maxValue

            newData[:,i] = d
            
        return newData
            

    # Round data array to integer
    def roundData(self, data):
        if self.verbose: print 'Rounding Data'
        newData = data.round(0).astype(int)
        return newData

    """#########################################################################
    # Feature Extraction
    #########################################################################"""
    
    # Get requested feature from table, calculate if not available
    def getFeature(self, featureName):
        # Check if feature needs to be calculated
        if featureName not in self.features:
            
            if featureName in self.featureTable:
                data = self.featureTable[featureName]()
                self.features.update(data)

        # Return feature
        return self.features[featureName]
    

    # Find peaks between zero crossings
    def peaks(self):
        # Need zero crossings
        crossings = self.getFeature('zeroCrossings')

        data = self.getData()

        # Storage
        uPeaks = []
        uPeakTimes = []
        lPeaks = []
        lPeakTimes = []

        for i in range(0, len(crossings)-2, 2):
            bump = data[crossings[i]:crossings[i+2]]
            uPeakTimes.append(crossings[i] + np.argmax(bump))
            lPeakTimes.append(crossings[i] + np.argmin(bump))

        uPeaks = data[uPeakTimes]
        lPeaks = data[lPeakTimes]
        uPeakTimes = np.array(uPeakTimes)
        lPeakTimes = np.array(lPeakTimes)

        return {'upperPeaks': uPeaks, 'upperPeakTimes': uPeakTimes, 'lowerPeaks': lPeaks, 'lowerPeakTimes': lPeakTimes}
        

    # Find locations of zero crossings
    def zeroCrossings(self):
        data = self.getData()
            
        dataRoll = np.roll(data, 1)
        
        # Effectively: if a point's sign differs from the previous point's sign, flag it as a crossing
        crossings = np.nonzero( (data>0) != (dataRoll>0) )[0]
        
        return {'zeroCrossings': crossings}


    def TiTe(self):
        # Requires peaks
        uPeaks = self.getFeature('upperPeaks')
        lPeaks = self.getFeature('lowerPeaks')
        uPeakTimes = self.getFeature('upperPeakTimes')
        lPeakTimes = self.getFeature('lowerPeakTimes')

        # Make arrays same length
        numBreaths = min(len(uPeaks), len(lPeaks))
        uPeaks = uPeaks[:numBreaths]
        lPeaks = lPeaks[:numBreaths]
        uPeakTimes = uPeakTimes[:numBreaths]
        lPeakTimes = lPeakTimes[:numBreaths]
        
        # Find Ti/Te values
        if uPeakTimes[0] > lPeakTimes[0]:
            # Inhale happened first
            tiValues = uPeakTimes - lPeakTimes
            teValues = (np.roll(lPeakTimes, -1) - uPeakTimes)[:-1]

            hiValues = uPeaks - lPeaks
            heValues = (np.roll(lPeaks, -1) - uPeaks)[:-1]
        else:
            # Exhale happened first
            teValues = lPeakTimes - uPeakTimes
            tiValues = (np.roll(uPeakTimes, -1) - lPeakTimes)[:-1]

            heValues = lPeaks - uPeaks
            hiValues = (np.roll(uPeaks, -1) - lPeaks)[:-1]


        # Fill in long array with most recent value
        ti = np.zeros(len(self.data))
        te = np.zeros(len(self.data))
        hi = np.zeros(len(self.data))
        he = np.zeros(len(self.data))

        for i in range(len(uPeakTimes)-1):
            ti[uPeakTimes[i]:uPeakTimes[i+1]] = tiValues[i]
            hi[uPeakTimes[i]:uPeakTimes[i+1]] = hiValues[i]
        ti[uPeakTimes[-1]:] = tiValues[-1]
        hi[uPeakTimes[-1]:] = hiValues[-1]

        for i in range(len(lPeakTimes)-1):
            te[lPeakTimes[i]:lPeakTimes[i+1]] = teValues[i]
            he[lPeakTimes[i]:lPeakTimes[i+1]] = heValues[i]
        te[lPeakTimes[-1]:] = teValues[-1]
        he[lPeakTimes[-1]:] = heValues[-1]

        return {'Ti': ti, 'Te': te, 'Hi': hi, 'He': he}


    """#########################################################################
    # Channel Selection
    #########################################################################"""

    def getSingleChannel(self):
        # Parameters
        maxValue = 2**24
        extremeMargin = 0.10
        extremeTop = maxValue * (1-extremeMargin)
        extremeBottom = maxValue * extremeMargin

        # Get datasets in windows
        ch1 = self.rawData[:, 0]
        ch2 = self.rawData[:, 1]
        remainder = ch1.shape[0] % self.windowLength
        if remainder != 0:
            ch1 = np.hstack([ch1, np.zeros(self.windowLength - remainder)])
            ch2 = np.hstack([ch2, np.zeros(self.windowLength - remainder)])
        ch1 = ch1.reshape((-1, self.windowLength))
        ch2 = ch2.reshape((-1, self.windowLength))

        # Build list of comparator functions that will be applied in order, lower value will be chosen
        functionList = [lambda i,ch: np.count_nonzero((ch[i]==maxValue) + (ch[i]==0)),
                        lambda i,ch: np.count_nonzero((ch[i]<extremeBottom)+(ch[i]>extremeTop)),
                        lambda i,ch: -1 * np.std(ch[i]),
                        ]

        # For each window, decide which channel to use
        output = np.zeros(ch1.shape[0], dtype=int)
        for i in range(ch1.shape[0]):
            for func in functionList:
                # Get score
                v1 = func(i,ch1)
                v2 = func(i,ch2)

                # See if we can make a decision on these values
                if v1 < v2:
                    output[i] = 0
                    break
                elif v2 < v1:
                    output[i] = 1
                    break
                else:
                    continue


        # Create final dataset
        data = np.zeros(self.data.shape[0])
        for i in range(output.shape[0]):
            r1 = i*self.windowLength
            r2 = min((i+1)*self.windowLength, self.data.shape[0])
            data[r1:r2] = self.data[r1:r2, output[i]]
        
        return data, output


    """#########################################################################
    # Sleep/Wake Based on Actigraphy
    #########################################################################"""

    def getWake(self, channel=None, moveWindow=25, sleepWindow=150, moveThresh=3.0, wakeThresh=0.40):
        # Get view of data
        data = self.getData(channel)

        # Create standard deviation
        stdChart = np.zeros(data.shape[0])
        for i in range(data.shape[0]):
            stdChart[i] = np.std(data[max(0, i-moveWindow/2) : min(data.shape[0], i+moveWindow/2)])

        # Create movement chart
        moveChart = stdChart > moveThresh * np.mean(stdChart)

        # Pad to fill each epoch
        remainder = moveChart.shape[0] % sleepWindow
        if remainder:
            moveChart = np.hstack([moveChart, [0]*(sleepWindow-remainder)])

        # For each epoch, if enough movement, mark wake
        wakeChart = moveChart.reshape(-1, sleepWindow)
        wakeChart = np.sum(wakeChart, 1) / float(sleepWindow)
        wakeChart = (wakeChart > wakeThresh) * 1

        return wakeChart, moveChart

    """#########################################################################
    # Utilities
    #########################################################################"""

    # Convert from a list of values per epoch to a list of time-values
    def fillEpochs(self, data):
        return np.ravel(np.tile(data, self.windowLength).reshape(-1, data.shape[0]).transpose())[:self.data.shape[0]]

    # Convert from time-series to a value per epoch array
    def getEpoch(self, data):
        return data.reshape(-1, self.windowLength)[:,0].copy()

    """#########################################################################
    # Sleep Lab Log Files
    #########################################################################"""

    def loadLog(self, filename):
        self.log = LogReader(filename)

    def getEvent(self, eventType, filt=None):
        if self.log is None:
            raise Exception('Error: Must load log first')
        
        return self.log.getTimeSeries(eventType, self.data.shape[0], filt=filt)

    def getEventTypes(self):
        if self.log is None:
            raise Exception('Error: Must load log first')
        
        return self.log.getEventTypes()
Example #39
0
 def __init__(self, log):
     self.game  = Game()
     self.reader = LogReader(log)
     self.begin  = -1
     self.line   = -1
     self._first_game = True
Example #40
0
args = argparser.parse_args()

logFilenames = args.logfile
outFilename = args.outfile

treefile = open(outFilename+".tree.tl", 'wb') # clear file
treefile.close()
treefile = open(outFilename+".tree.tl", 'r+b')

jsfile = open(outFilename+".js", "w")
jsfile.write('var data = {"tree": "'+outFilename+'.tree.tl", "dict": "'+outFilename+'.dict.tl", "treeFormat":"64,64,31,1,32"}')
jsfile.close()

readers = []
for logFilename in logFilenames:
    readers.append(LogReader(logFilename))

#################################

old_textmap = ["bad"]
textmap = {}
textmap2 = []
def transform(info):
  if len(info) < 2:
    pass
  elif info[1].isdigit():
    info[1] = old_textmap[int(info[1])]
  else:
    old_textmap.append(info[1])
    len(old_textmap)
Example #41
0
class GraspExtractor:
    def __init__(self):
        self.rdrLog = LogReader()

    def getDesignatorSuccessor(self, strDesignatorID):
        desig = self.di[strDesignatorID]
        if desig:
            successors = desig.tagAttributeValues(
                "knowrob:successorDesignator", "rdf:resource")
            if successors and len(successors) > 0:
                return successors[0].split("#")[1]

    def getNamedDesignator(self, dataDesignators, strName):
        for designator in dataDesignators:
            if designator["designator"]["_id"] == strName:
                return designator["designator"]

    def processPerform(self, owlPerform):
        desigsGraspDetails = owlPerform.tagAttributeValues(
            "knowrob:graspDetails", "rdf:resource")

        if len(desigsGraspDetails) > 0:
            desigGraspDetails = self.getNamedDesignator(
                self.log.getDesignatorData(),
                desigsGraspDetails[0].split("#")[1])["GRASP"]

            dicGraspPose = desigGraspDetails["GRASP-POSE"]
            dicPregraspPose = desigGraspDetails["PREGRASP-POSE"]
            dicObjectPose = desigGraspDetails["OBJECT-POSE"]
            strObjectName = desigGraspDetails["OBJECT-NAME"]
            strSide = desigGraspDetails["ARM"]
            strEffort = desigGraspDetails["EFFORT"]
            strGraspType = desigGraspDetails["GRASP-TYPE"]["QUOTE"]

            print " -- Grasp action --"

            timeSpan = owlPerform.timeSpan()
            print "Time elapsed  :", (float(timeSpan[1]) -
                                      float(timeSpan[0])), "seconds"

            if owlPerform.taskSuccess():
                print "Success       : True"
            else:
                print "Success       : False"

            print "Side          :", strSide
            print "Grasp Type    :", strGraspType
            print "Object Name   :", strObjectName
            print "Object Pose   :"
            self.printPose(dicObjectPose)
            print "Grasp Pose    :"
            self.printPose(dicGraspPose)
            print "Pregrasp Pose :"
            self.printPose(dicPregraspPose)
            print

    def extractGrasps(self, strPath):
        # Load Log
        self.log = self.rdrLog.loadLog(strPath)
        self.tti = self.log.getOwlData()["task-tree-individuals"]
        self.di = self.log.getOwlData()["designator-individuals"]
        annot = self.log.getOwlData()["annotation"]
        meta = self.log.getOwlData()["metadata"]

        for key in self.tti:
            owlIndiv = self.tti[key]

            if owlIndiv.type() == "AnnotationInformation":
                annot = owlIndiv
                if annot and meta: break
            elif owlIndiv.type() == "RobotExperiment":
                meta = owlIndiv
                if annot and meta: break

        if annot and meta:
            for indiv in self.tti:
                if self.tti[indiv].taskContext() == "GRASP":
                    self.processPerform(self.tti[indiv])

    def printPose(self, pose):
        print "   Frame       :", pose["header"]["frame_id"] + "\n" + \
              "   Position    : x =", str(pose["pose"]["position"]["x"]) + "\n" + \
              "                 y =", str(pose["pose"]["position"]["y"]) + "\n" + \
              "                 z =", str(pose["pose"]["position"]["z"]) + "\n" + \
              "   Orientation : x =", str(pose["pose"]["orientation"]["x"]) + "\n" + \
              "                 y =", str(pose["pose"]["orientation"]["y"]) + "\n" + \
              "                 z =", str(pose["pose"]["orientation"]["z"]) + "\n" + \
              "                 w =", str(pose["pose"]["orientation"]["w"])
Example #42
0
    def __init__(self):
        self.tdTrainingData = TrainingData()
        self.rdrLog = LogReader()

        self.arrIgnoredTasks = []
        self.arrAnnotatedParameters = []
Example #43
0
class LogAnalyzer:
    def __init__(self):
        self.rdrLog = LogReader()
        self.arrColors = ["white", "red", "blue", "yellow", "black"]

    def analyzeLog(self, strPath):
        log = self.rdrLog.loadLog(strPath)
        #data = log.getOwlData()["task-tree"]
        tti = log.getOwlData()["task-tree-individuals"]

        #with open("data.pkl", "wb") as f:
        #    pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
        #    data = pickle.load(f)

        #data = self.correctTime(data)

        #imgTaskPie = Image(Geometry(700, 700), Color("white"))

        #imgTaskPie.strokeColor("#000000")
        #imgTaskPie.strokeWidth(2.5)
        #imgTaskPie.fillColor("transparent")

        #self.drawTaskPie(imgTaskPie, data, -1, -1, 5)
        #imgTaskPie.write("out.png")

        # toTasks = self.timelyOrderedTasks(data)

        # dicClassTimes = {}
        # for dicTask in toTasks:
        #     owlTask = tti[dicTask["name"]]
        #     strType = owlTask.type()

        #     if not strType in dicClassTimes:
        #         dicClassTimes[strType] = int(dicTask["time"])
        #     else:
        #         dicClassTimes[strType] += int(dicTask["time"])

        # nEarliestTS = -1
        # nLatestTS = -1

        # for dicTask in tti:#toTasks:
        #     owlTask = tti[dicTask]
        #     TS = owlTask.timeSpan()

        #     if TS:
        #         if nEarliestTS == -1 or float(TS[0]) < nEarliestTS:
        #             nEarliestTS = float(TS[0])
        #         if nLatestTS == -1 or float(TS[1]) > nLatestTS:
        #             nLatestTS = float(TS[1])

        # nOverallTime = nLatestTS - nEarliestTS

        fEarliest = -1
        fLatest = -1

        dicClassTimes = {}
        for strTask in tti:
            owlTask = tti[strTask]
            TS = owlTask.timeSpan()

            if TS:
                if owlTask.type() in dicClassTimes:
                    dicClassTimes[owlTask.type()] += (float(TS[1]) -
                                                      float(TS[0]))
                else:
                    dicClassTimes[owlTask.type()] = (float(TS[1]) -
                                                     float(TS[0]))

                if float(TS[0]) < fEarliest or fEarliest == -1:
                    fEarliest = float(TS[0])

                if float(TS[1]) > fLatest or fLatest == -1:
                    fLatest = float(TS[1])

        print "Time =", (fLatest - fEarliest)

        #with open("classtimes.pkl", "wb") as f:
        #    pickle.dump(dicClassTimes, f, pickle.HIGHEST_PROTOCOL)

        #print "Longest Task: ", toTasks[len(toTasks) - 1]

        #for strItem in dicClassTimes:
        #print strItem, dicClassTimes[strItem]

        for strClass in dicClassTimes:
            print strClass, " = ", dicClassTimes[strClass]

        #print ""
        # if not "MotionPlanning" in dicClassTimes:
        #     print "Picking Up Objects: " + str(dicClassTimes["PickingUpAnObject"])
        #     print "Placing Objects: " + str(dicClassTimes["PuttingDownAnObject"])
        #     print "Path Planning + Motion Execution: " + str(dicClassTimes["ArmMovement"])
        #     print "Navigation: " + str(dicClassTimes["BaseMovement"])
        #     print "Head Movement: " + str(dicClassTimes["HeadMovement"])
        #     print "Perception Queries: " + str(dicClassTimes["UIMAPerception"])
        #     print "Object Identity Resolution + Belief State Updates: " + str(dicClassTimes["PerceivingObjects"] - dicClassTimes["UIMAPerception"])
        # else:
        #     # print "--- General ---"
        #     # print "Overall                    : " + str(nOverallTime)
        #     print "--- High Level ---"
        #     print "Picking Up Objects         : " + str(dicClassTimes["PickingUpAnObject"] +
        #                                                 dicClassTimes["CarryingAnObject"] +
        #                                                 dicClassTimes["LiftingAnObject"])
        #     print "Placing Objects            : " + str(dicClassTimes["PuttingDownAnObject"])
        #     print "Finding Objects            : " + str(dicClassTimes["FindingObjects"])
        #     # print "Other Activities           : " + str(nOverallTime -
        #     #                                             (dicClassTimes["PickingUpAnObject"] +
        #     #                                              dicClassTimes["CarryingAnObject"] +
        #     #                                              dicClassTimes["LiftingAnObject"] +
        #     #                                              dicClassTimes["PuttingDownAnObject"] +
        #     #                                              dicClassTimes["FindingObjects"] -
        #     #                                              dicClassTimes["UIMAPerception"]))
        #     print "--- Low Level ---"
        #     print "Path Planning              : " + str(dicClassTimes["MotionPlanning"])
        #     print "Motion Execution           : " + str(dicClassTimes["MotionExecution"])
        #     print "Navigation                 : " + str(dicClassTimes["BaseMovement"])
        #     print "Head Movement              : " + str(dicClassTimes["HeadMovement"])
        #     print "Perception Queries         : " + str(dicClassTimes["UIMAPerception"])
        #     print "Object Identity Resolution : " + str(dicClassTimes["ObjectIdentityResolution"])
        #     print "Belief State Updates       : " + str(dicClassTimes["BeliefStateUpdate"])

    def timelyOrderedTasks(self, data):
        dicLinear = self.linearizeTaskTree(data)
        arrItems = []

        for strItem in dicLinear:
            arrItems.append({"name": strItem, "time": dicLinear[strItem]})

        return sorted(arrItems, key=lambda item: item["time"])

    def linearizeTaskTree(self, tree):
        dicLinear = {}

        for strBranch in tree:
            dicLinear[strBranch] = tree[strBranch]["time"]
            dicSub = self.linearizeTaskTree(tree[strBranch]["children"])
            dicLinear = dict(dicLinear, **dicSub)

        return dicLinear

    def correctTime(self, data):
        for strBranchName in data:
            data[strBranchName]["children"] = self.correctTime(
                data[strBranchName]["children"])

            nTimeSum = 0
            for strChild in data[strBranchName]["children"]:
                nTimeSum += data[strBranchName]["children"][strChild]["time"]

            if data[strBranchName]["time"] < nTimeSum:
                data[strBranchName]["time"] = nTimeSum

        return data

    def drawTaskPie(self,
                    imgPie,
                    dicTaskTree,
                    globalTimespan=-1,
                    parentTimespan=-1,
                    radiusDelta=10,
                    radiusInner=0,
                    angleStart=0,
                    angleEnd=360):
        if globalTimespan == -1:
            globalTimespan = 0
            for strBranchName in dicTaskTree:
                globalTimespan += dicTaskTree[strBranchName]["time"]

        if parentTimespan == -1:
            parentTimespan = 0
            for strBranchName in dicTaskTree:
                parentTimespan += dicTaskTree[strBranchName]["time"]

        if parentTimespan > 0:
            nSegments = len(dicTaskTree)

            radiusOuter = radiusInner + radiusDelta

            nCenterX = imgPie.columns() / 2
            nCenterY = imgPie.rows() / 2

            nStartXOuter = nCenterX - radiusOuter
            nStartYOuter = nCenterY - radiusOuter
            nEndXOuter = nCenterX + radiusOuter
            nEndYOuter = nCenterY + radiusOuter

            nStartXInner = nCenterX - radiusInner
            nStartYInner = nCenterY - radiusInner
            nEndXInner = nCenterX + radiusInner
            nEndYInner = nCenterY + radiusInner

            dAngleOffset = 0

            for strBranchName in dicTaskTree:
                dAngleWidth = float(
                    dicTaskTree[strBranchName]["time"]) / float(
                        parentTimespan) * (angleEnd - angleStart)

                if dAngleWidth > 0:
                    dStartingAngle = angleStart + dAngleOffset
                    dEndingAngle = dStartingAngle + dAngleWidth
                    dAngleOffset += dAngleWidth

                    if "children" in dicTaskTree[strBranchName]:
                        if len(dicTaskTree[strBranchName]["children"]) > 0:
                            self.drawTaskPie(
                                imgPie, dicTaskTree[strBranchName]["children"],
                                globalTimespan,
                                dicTaskTree[strBranchName]["time"],
                                radiusDelta, radiusOuter, dStartingAngle,
                                dEndingAngle)

                    dTimeSpanDegree = float(dicTaskTree[strBranchName]
                                            ["time"]) / float(globalTimespan)
                    imgPie.strokeColor(
                        Color(int(255 * dTimeSpanDegree), 0,
                              int(255 * (1.0 - dTimeSpanDegree))))

                    lstDrawables = DrawableList()
                    lstDrawables.append(
                        DrawableLine(
                            nCenterX + radiusInner *
                            math.cos(math.radians(dStartingAngle)),
                            nCenterY + radiusInner *
                            math.sin(math.radians(dStartingAngle)),
                            nCenterX + radiusOuter *
                            math.cos(math.radians(dStartingAngle)),
                            nCenterY + radiusOuter *
                            math.sin(math.radians(dStartingAngle))))
                    lstDrawables.append(
                        DrawableArc(nStartXOuter, nStartYOuter, nEndXOuter,
                                    nEndYOuter, dStartingAngle, dEndingAngle))
                    lstDrawables.append(
                        DrawableLine(
                            nCenterX +
                            radiusInner * math.cos(math.radians(dEndingAngle)),
                            nCenterY +
                            radiusInner * math.sin(math.radians(dEndingAngle)),
                            nCenterX +
                            radiusOuter * math.cos(math.radians(dEndingAngle)),
                            nCenterY + radiusOuter *
                            math.sin(math.radians(dEndingAngle))))
                    lstDrawables.append(
                        DrawableArc(nStartXInner, nStartYInner, nEndXInner,
                                    nEndYInner, dStartingAngle, dEndingAngle))

                    imgPie.draw(lstDrawables)
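
# A minimal usage sketch for drawTaskPie (all names here are assumptions: `analyzer`
# stands for an instance of the surrounding class, dicTaskTree for a task tree of the
# nested {"time": ..., "children": {...}} form used above, and the image is created
# with PythonMagick, which also provides the Drawable* classes used in the method):
#
#   from PythonMagick import Image, Color, Geometry
#   imgPie = Image(Geometry("800x800"), Color("white"))
#   analyzer.drawTaskPie(imgPie, analyzer.correctTime(dicTaskTree), radiusDelta=40)
#   imgPie.write("task_pie.png")
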
class OwlToTrainingDataConverter:
    def __init__(self):
        self.tdTrainingData = TrainingData()
        self.rdrLog = LogReader()
        
        self.arrIgnoredTasks = []
        self.arrAnnotatedParameters = []
    
    def setTaskIgnored(self, strTask):
        if not strTask in self.arrIgnoredTasks:
            self.arrIgnoredTasks.append(strTask)
    
    def addTrackedParameter(self, strParameter):
        self.arrAnnotatedParameters.append(strParameter)
    
    def convertOwlToTrainingData(self, arrLogDirectories):
        self.addTrackedParameter("taskContext")
        
        self.setTaskIgnored(u"WITH-FAILURE-HANDLING")
        self.setTaskIgnored(u"WITH-DESIGNATORS")
        self.setTaskIgnored(u"TAG")
        self.setTaskIgnored(u"UIMA-PERCEIVE")
        self.setTaskIgnored(u"GOAL-MONITOR-ACTION")
        self.setTaskIgnored(u"GOAL-ACHIEVE")
        self.setTaskIgnored(u"GOAL-PERFORM")
        self.setTaskIgnored(u"GOAL-PERFORM-ON-PROCESS-MODULE")
        self.setTaskIgnored(u"PERFORM-ACTION-DESIGNATOR")
        self.setTaskIgnored(u"REPLACEABLE-FUNCTION-NAVIGATE")
        
        self.setTaskIgnored(u"AT-LOCATION")
        self.setTaskIgnored(u"VOLUNTARY-BODY-MOVEMENT-ARMS")
        self.setTaskIgnored(u"MOTION-PLANNING")
        self.setTaskIgnored(u"MOTION-EXECUTION")
        self.setTaskIgnored(u"PUTDOWN")
        self.setTaskIgnored(u"VOLUNTARY-BODY-MOVEMENT-HEAD")
        self.setTaskIgnored(u"OPEN-GRIPPER")
        self.setTaskIgnored(u"CLOSE-GRIPPER")
        
        self.tdTrainingData.registerAttribute(u"Result")
        self.tdTrainingData.selectFirstAttribute(u"Result")
        
        self.tdTrainingData.addIgnoredParameter("_time_created")
        self.tdTrainingData.setRelation("PlanExecution")
        
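        # For every log directory: load the OWL data, pick up any per-log annotated
        # parameter types, and walk the task tree from its metadata root, emitting
        # one data set per non-ignored task.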
        for strLogDirectory in arrLogDirectories:
            self.logData = self.rdrLog.loadLog(strLogDirectory)
            self.owlData = self.logData.getOwlData()
            self.designatorData = self.logData.getDesignatorData()
            
            self.tti = self.owlData["task-tree-individuals"]
            self.di = self.owlData["designator-individuals"]
            self.meta = self.owlData["metadata"]
            self.annotation = self.owlData["annotation"]
            
            for strParameter in self.annotation.tagNodeValues("knowrob:annotatedParameterType"):
                self.addTrackedParameter(strParameter)
        
            self.walkTree(self.meta)
        
        self.tdTrainingData.writeTrainingDataToFile(sys.argv[1])
    
    def isTaskIgnored(self, strTask):
        return strTask in self.arrIgnoredTasks
    
    def walkTree(self, ndOriginNode, dsOrigin=None):
        # A fresh DataSet per top-level call; a mutable default argument would be
        # shared across calls and accumulate attribute values between logs.
        if dsOrigin is None:
            dsOrigin = DataSet()
        for strParameter in self.arrAnnotatedParameters:
            arrParameters = ndOriginNode.tagNodeValues("knowrob:" + strParameter)
            
            if len(arrParameters) > 0:
                dsOrigin.setAttributeValue(strParameter, arrParameters[0])
        
        #dsOrigin.setAttributeValue(u"Duration", unicode(ndOriginNode.time()))
        
        arrSubActions = ndOriginNode.subActions()
        
        strSubResult = u"Success"
        for strSubAction in arrSubActions:
            strSubResultTemp = self.walkTree(self.tti[strSubAction], dsOrigin.copy())
            
            if strSubResultTemp != u"Success":
                strSubResult = strSubResultTemp
        
        arrFailures = ndOriginNode.failures()
        if len(arrFailures) == 0:
            dsOrigin.setAttributeValue(u"Result", strSubResult)
        else:
            desigFailure = self.tti[arrFailures[0]]
            dsOrigin.setAttributeValue(u"Result", desigFailure.type())
        
        if not self.isTaskIgnored(dsOrigin.getAttributeValue(u"taskContext")):
            self.tdTrainingData.addDataSet(dsOrigin)
        
        return dsOrigin.getAttributeValue(u"Result")
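
# A minimal usage sketch (the log directory paths are hypothetical; note that
# convertOwlToTrainingData writes the resulting training data to the file named
# in sys.argv[1]):
#
#   converter = OwlToTrainingDataConverter()
#   converter.convertOwlToTrainingData(["/path/to/log-dir-1", "/path/to/log-dir-2"])
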
Example #45
0
    def __init__(self):
        self.rdrLog = LogReader()
        self.arrColors = ["white", "red", "blue", "yellow", "black"]
Example #46
0
#!/usr/bin/env python3
import sys
from matplotlib import pyplot
from itertools import accumulate

from LogReader import LogReader

if len(sys.argv) != 2:
	print("Usage: {} <simulation.log>".format(sys.argv[0]))
	sys.exit(1)

pyplot.rcParams["figure.figsize"] = [5.75, 4.0]

reader = LogReader(sys.argv[1])

def calculateQueue(packets):
	timeQueue = {}
	sentPackets = filter(lambda pe: pe.successful, packets)
	injects = list(reader.injects)
	queue = {}

	def enqueue(packet):
		if packet not in queue: queue[packet] = 0
		queue[packet] += 1

	def recordInTimeQueue(time):
		for packet in queue:
			if packet not in timeQueue: timeQueue[packet] = []
			timeQueue[packet].append((time, queue[packet]))

	for pe in sentPackets:
Example #47
0
# Imports assumed for this snippet: json is required by condenseData below, and the
# LogReader module path follows the convention of the other examples in this collection.
import json
from LogReader import LogReader


class DataCondenser:
    def __init__(self):
        self.rdrLog = LogReader()
        self.tti = {}
        
    def condenseData(self, strOutputFile, arrSourceDirectories):
        dicToplevelNodes = []
        
        for strSourceDirectory in arrSourceDirectories:
            logData = self.rdrLog.loadLog(strSourceDirectory)
            owlData = logData.getOwlData()
            
            self.tti = dict(self.tti.items() + owlData["task-tree-individuals"].items())
            meta = owlData["metadata"]
            
            if meta:
                dicToplevelNodes += meta.subActions()
        
        dicResult = {"Toplevel" : self.condenseNodes("", dicToplevelNodes)}
        
        with open(strOutputFile, "wb") as fOut:
            json.dump(dicResult, fOut)
    
    def condenseNodes(self, strParentNode, arrNodes, nLevel = 0):
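        # Group sibling nodes by their task context; each group's sub-trees are
        # condensed recursively and merged via unifyResults, while the per-node
        # details (parameters, parent, failure, class) are stored under the group's
        # "individuals" entry.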
        arrTypes = {}
        arrIndividuals = {}
        
        for strNode in arrNodes:
            owlNode = self.tti[strNode]
            nodeclass = owlNode.taskContext()
            
            if nLevel < 0:
                ident = "*"
            else:
                ident = nodeclass
            
            failures = owlNode.failures()
            failure = ""
            if len(failures) > 0:
                failure = self.tti[failures[0]].type()
            
            result = self.condenseNodes(strNode, owlNode.subActions(), nLevel + 1)
            if not ident in arrTypes:
                arrTypes[ident] = result
            else:
                arrTypes[ident] = self.unifyResults(arrTypes[ident], result)
            
            arrTypes[ident]["individuals"][strNode] = {"parameters" : owlNode.annotatedParameters(True),
                                                       "parent" : strParentNode,
                                                       "failure" : failure,
                                                       "class" : nodeclass}
        
        return {"subTypes" : arrTypes,
                "individuals" : {}}
    
    def unifyResults(self, res1, res2):
        resparams = {}
        if len(res1["individuals"]) > 0:
            resparams = res1["individuals"]
        
        if len(res2["individuals"]) > 0:
            resparams = dict(resparams.items() + res2["individuals"].items())
        
        unified = {"subTypes" : {},
                   "individuals" : resparams}
        
        for ressub1 in res1["subTypes"]:
            if ressub1 in res2["subTypes"]:
                unified["subTypes"][ressub1] = self.unifyResults(res1["subTypes"][ressub1],
                                                                 res2["subTypes"][ressub1])
            else:
                unified["subTypes"][ressub1] = res1["subTypes"][ressub1]
        
        for ressub2 in res2["subTypes"]:
            if not ressub2 in res1["subTypes"]:
                unified["subTypes"][ressub2] = res2["subTypes"][ressub2]
        
        return unified
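
# A minimal usage sketch (directory and output file names are hypothetical):
#
#   condenser = DataCondenser()
#   condenser.condenseData("condensed.json",
#                          ["/path/to/log-dir-1", "/path/to/log-dir-2"])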