Exemple #1
0
def parser(data):
    """Parse a response body from the server.

    The body may contain multiple newline-separated JSON documents, each
    a list of sMAP objects; merge them together keyed on uuid.

    :return: merged objects (``rv.values()``) on success.  Legacy error
        signalling is preserved: the raw ``data`` is returned when a line
        is not a JSON list, and the offending list is returned when an
        entry lacks a ``uuid``.
    """
    rv = {}
    for line in data.split('\n'):
        if not line.strip():
            # skip blank lines between documents
            continue
        line_obj = json.loads(line)

        if not isinstance(line_obj, list):
            # legacy behaviour: echo the raw body back to the caller
            return data
        if not line_obj:
            continue

        for v in line_obj:
            if 'uuid' not in v:
                # legacy behaviour: return the malformed object list
                return line_obj
            # raise SmapException("Invalid sMAP object: " + str(v))
            uid = v['uuid']  # renamed from `id` to avoid shadowing the builtin
            if uid not in rv:
                rv[uid] = v
            else:
                # same uuid seen on an earlier line: append its readings
                # first, then merge the remaining metadata
                if 'Readings' in v and 'Readings' in rv[uid]:
                    rv[uid]['Readings'].extend(v['Readings'])
                    del v['Readings']
                rv[uid] = util.dict_merge(rv[uid], v)
    return rv.values()
Exemple #2
0
def parser(data):
    """Parse a response body from the server.

    The body may span multiple lines, each a JSON list of sMAP objects;
    the lists are merged together by uuid.

    :return: ``rv.values()`` of the merged objects.  Legacy error paths
        are kept: non-list JSON yields the raw ``data`` back, and a list
        entry without ``uuid`` yields that list back.
    """
    rv = {}
    for line in data.split('\n'):
        if not line.strip():
            continue  # blank separator line
        line_obj = json.loads(line)

        if not isinstance(line_obj, list):
            # legacy behaviour: return the unparsed body as-is
            return data
        if not line_obj:
            continue

        for v in line_obj:
            if 'uuid' not in v:
                # legacy behaviour: surface the bad object list
                return line_obj
            # raise SmapException("Invalid sMAP object: " + str(v))
            uid = v['uuid']  # `id` would shadow the builtin
            if uid not in rv:
                rv[uid] = v
            else:
                # duplicate uuid: concatenate readings, then merge the rest
                if 'Readings' in v and 'Readings' in rv[uid]:
                    rv[uid]['Readings'].extend(v['Readings'])
                    del v['Readings']
                rv[uid] = util.dict_merge(rv[uid], v)
    return rv.values()
Exemple #3
0
 def read(self):
     """Fetch the last 15 minutes of data for ``self.paths``, apply
     ``self.expr`` server-side, and push the result out through the
     DROMS csv/zip upload pipeline."""
     joined = "' or Path='".join(self.paths)
     restrict = "Path='%s'" % joined
     query = "apply %s to data in (now -15m, now) where (%s)" % (self.expr, restrict)
     response = requests.post(self.url, data=query, params=self.params)
     self._value = zip(self.paths, json.loads(response.text))
     self.writeDROMScsv(self._value)
     self.writeDROMSzip()
     self.postDROMSzip()
Exemple #4
0
 def render_DELETE(self, request):
     """Cancel the jobs whose uuids are listed in the JSON request body.

     :param request: HTTP request; body is a JSON list of job uuids
     :return: JSON list of the uuids of the jobs still scheduled
     """
     request.setHeader('Content-type', 'application/json')
     content = request.content.read()
     if content:
         del_uuids = json.loads(content)
         # list comprehension instead of filter(): keeps .jobs a real
         # list on both python 2 and 3 (filter() is lazy on py3)
         self.inst.jobs.jobs = [j for j in self.inst.jobs.jobs
                                if j.uuid not in del_uuids]
         self.inst.jobs.cancel_job(del_uuids)
     # comprehension instead of map() so json.dumps gets a list, not an
     # unserializable map object on py3; identical output on py2
     return json.dumps([j.uuid for j in self.inst.jobs.jobs])
Exemple #5
0
 def render_PUT(self, request):
     """Register the job objects posted in the JSON body.

     Replies with a JSON list of the new job uuids, or None when the
     body is empty.
     """
     request.setHeader('Content-type', 'application/json')
     body = request.content.read()
     if not body:
         return None
     submitted = json.loads(body)
     new_uuids = self.add_jobs(submitted)
     return json.dumps(new_uuids)
Exemple #6
0
 def render_PUT(self, request):
     """Handle a PUT: add the posted jobs and return their uuids as JSON.

     An empty request body yields None.
     """
     request.setHeader('Content-type', 'application/json')
     raw = request.content.read()
     if raw:
         return json.dumps(self.add_jobs(json.loads(raw)))
     else:
         return None
Exemple #7
0
 def render_DELETE(self, request):
     """Remove and cancel the jobs named by the JSON body (list of uuids).

     :return: JSON list of uuids of the remaining scheduled jobs
     """
     request.setHeader('Content-type', 'application/json')
     content = request.content.read()
     if content:
         del_uuids = json.loads(content)
         # comprehension rather than filter(): .jobs stays a concrete
         # list on python 3 as well (filter() returns an iterator there)
         self.inst.jobs.jobs = [j for j in self.inst.jobs.jobs
                                if j.uuid not in del_uuids]
         self.inst.jobs.cancel_job(del_uuids)
     # comprehension rather than map() so json.dumps always receives a list
     return json.dumps([j.uuid for j in self.inst.jobs.jobs])
Exemple #8
0
 def cancel_jobs(self, job_ids):
     """Cancel jobs by sending a DELETE to the server's /jobs endpoint.

     :param job_ids: list of job uuids to cancel
     :return: decoded JSON response from the server
     """
     url = self.base + '/jobs'
     payload = json.dumps(job_ids)
     opener = urllib2.build_opener(urllib2.HTTPHandler)
     request = urllib2.Request(url, data=payload)
     # NOTE(review): placeholder content type left as-is; the server
     # appears not to inspect it -- consider 'application/json'
     request.add_header('Content-Type', 'your/contenttype')
     # urllib2 has no native DELETE support; override the method by hand
     request.get_method = lambda: 'DELETE'
     fp = opener.open(request)
     try:
         rv = json.loads(fp.read())
     finally:
         fp.close()  # was leaked before; always release the connection
     return rv
Exemple #9
0
 def cancel_jobs(self, job_ids):
     """DELETE /jobs with a JSON list of job uuids; return the decoded reply.

     :param job_ids: list of job uuids to cancel
     :return: JSON-decoded server response
     """
     url = self.base + '/jobs'
     payload = json.dumps(job_ids)
     opener = urllib2.build_opener(urllib2.HTTPHandler)
     request = urllib2.Request(url, data=payload)
     # NOTE(review): dummy content type preserved for compatibility
     request.add_header('Content-Type', 'your/contenttype')
     # force the HTTP verb, since urllib2 only knows GET/POST
     request.get_method = lambda: 'DELETE'
     fp = opener.open(request)
     try:
         rv = json.loads(fp.read())
     finally:
         fp.close()  # close the response handle (previously leaked)
     return rv
Exemple #10
0
 def get_state(self, path):
     """Return the most recent reading of the timeseries at ``path``.

     :raises util.SmapException: if the path is not a timeseries or the
         timeseries has no readings
     """
     fp = urllib2.urlopen(self.base + "/data" + path)
     try:
         res = json.loads(fp.read())
     finally:
         fp.close()  # release the connection (previously leaked)
     if 'Readings' not in res:
         raise util.SmapException("Readings not found. \
           Make sure the path corresponds to a timeseries (not a collection)")
     else:
         try:
             rv = res['Readings'][0]
         except IndexError:
             raise util.SmapException("The timeseries doesn't have any readings")
     return rv
Exemple #11
0
 def get_state(self, path):
     """Fetch the latest reading of the timeseries located by ``path``.

     :raises util.SmapException: when the path is not a timeseries, or
         the timeseries holds no readings
     """
     fp = urllib2.urlopen(self.base + "/data" + path)
     try:
         res = json.loads(fp.read())
     finally:
         fp.close()  # close the response even on a JSON error (was leaked)
     if 'Readings' not in res:
         raise util.SmapException("Readings not found. \
           Make sure the path corresponds to a timeseries (not a collection)"
                                  )
     else:
         try:
             rv = res['Readings'][0]
         except IndexError:
             raise util.SmapException(
                 "The timeseries doesn't have any readings")
     return rv
def read(request):
    """Decode a request body according to its Content-Encoding and
    Content-Type headers.

    A missing Content-Type is treated as JSON.  Raises
    SmapException(..., 400) for an unsupported Content-Type.
    """
    body = request.content.read()
    if request.getHeader('Content-Encoding') in ['gzip']:
        body = zlib.decompress(body)

    ctype = request.getHeader("Content-Type")
    if ctype in ["application/json", None]:
        return json.loads(body)
    elif ctype in ["text/csv"]:
        return load_csv(body)
    elif ctype in ["avro/binary"]:
        return load_report(body)
    raise SmapException("Invalid Content-Type\n", 400)
Exemple #13
0
def read(request):
    """Read and decode a request body.

    A gzip Content-Encoding is inflated first; the Content-Type header
    then selects the parser (JSON when the header is absent).  An
    unsupported Content-Type raises SmapException(..., 400).
    """
    raw = request.content.read()
    encoding = request.getHeader('Content-Encoding')
    if encoding in ['gzip']:
        raw = zlib.decompress(raw)

    content_type = request.getHeader("Content-Type")
    if content_type == "application/json" or content_type is None:
        obj = json.loads(raw)
    elif content_type == "text/csv":
        obj = load_csv(raw)
    elif content_type == "avro/binary":
        obj = load_report(raw)
    else:
        raise SmapException("Invalid Content-Type\n", 400)
    return obj
Exemple #14
0
 def tags(self, path):
     """
     Get all metadata associated with path, including metadata
     inherited from collections it belongs to.
     """
     nodes = path.split('/')
     p = ""
     rv = {}
     for node in nodes:
         # walk the hierarchy one level at a time, merging the metadata
         # of each enclosing collection into the result
         p += '/' + node
         fp = urllib2.urlopen(self.base + "/data" + p)
         try:
             res = json.loads(fp.read())
         finally:
             fp.close()  # was leaked once per path component
         rv = util.dict_merge(rv, res)
     if 'Contents' in rv:
         del rv['Contents']  # the child listing is not metadata
     return rv
Exemple #15
0
 def set_state(self, path, state):
     """Actuate the point at ``path`` to ``state`` and verify the echo.

     :raises util.SmapException: on a bad path, a non-actuator path, or
         when the echoed reading does not match the requested state
     :return: 0 on success (legacy convention, unchanged)
     """
     url = self.base + "/data" + path + "?state=" + str(state)
     opener = urllib2.build_opener(urllib2.HTTPHandler)
     request = urllib2.Request(url, data='')
     # urllib2 only speaks GET/POST natively; force a PUT
     request.get_method = lambda: 'PUT'
     try:
         fp = opener.open(request)
     except urllib2.HTTPError:
         raise util.SmapException("Invalid path.")
     try:
         res = json.loads(fp.read())
     finally:
         fp.close()  # release the connection (previously leaked)
     if 'Actuator' not in res:
         raise util.SmapException("Path does not locate an actuator.")
     elif res['Readings'][0][1] != state:
         raise util.SmapException("Actuaton failed")
     else:
         return 0
Exemple #16
0
 def submit_jobs(self, jobs):
     """
     Submit jobs to the server with a PUT to /jobs.

     jobs is an array of job objects: a job is an object formed
     according to ../schema/job.av with properties Name (str),
     StartTime (longint), and Actions (array), where each action has
     properties State (longint or double) and Path (str).

     :return: decoded JSON response from the server
     """
     url = self.base + '/jobs'
     payload = json.dumps(jobs)
     opener = urllib2.build_opener(urllib2.HTTPHandler)
     request = urllib2.Request(url, data=payload)
     # urllib2 has no PUT support; override the method by hand
     request.get_method = lambda: 'PUT'
     fp = opener.open(request)
     try:
         rv = json.loads(fp.read())
     finally:
         fp.close()  # was leaked before
     return rv
Exemple #17
0
 def set_state(self, path, state):
     """Drive the actuator at ``path`` to ``state`` and check the result.

     :raises util.SmapException: for an invalid path, a path that is not
         an actuator, or when the returned reading differs from ``state``
     :return: 0 on success (legacy convention, unchanged)
     """
     url = self.base + "/data" + path + "?state=" + str(state)
     opener = urllib2.build_opener(urllib2.HTTPHandler)
     request = urllib2.Request(url, data='')
     # force a PUT; urllib2 only supports GET/POST out of the box
     request.get_method = lambda: 'PUT'
     try:
         fp = opener.open(request)
     except urllib2.HTTPError:
         raise util.SmapException("Invalid path.")
     try:
         res = json.loads(fp.read())
     finally:
         fp.close()  # close the response handle (previously leaked)
     if 'Actuator' not in res:
         raise util.SmapException("Path does not locate an actuator.")
     elif res['Readings'][0][1] != state:
         raise util.SmapException("Actuaton failed")
     else:
         return 0
Exemple #18
0
 def tags(self, path):
     """
     Get all metadata associated with path, including metadata
     inherited from collections it belongs to.
     """
     nodes = path.split('/')
     p = ""
     rv = {}
     for node in nodes:
         # descend level by level so inherited collection metadata is
         # merged in before the leaf's own metadata
         p += '/' + node
         fp = urllib2.urlopen(self.base + "/data" + p)
         try:
             res = json.loads(fp.read())
         finally:
             fp.close()  # close each response (previously leaked per level)
         rv = util.dict_merge(rv, res)
     if 'Contents' in rv:
         del rv['Contents']  # strip the child listing from the metadata
     return rv
Exemple #19
0
 def submit_jobs(self, jobs):
     """
     PUT an array of job objects to /jobs.

     Each job follows ../schema/job.av: Name (str), StartTime (longint),
     Actions (array of objects with State (longint or double) and
     Path (str)).

     :return: JSON-decoded server reply
     """
     url = self.base + '/jobs'
     payload = json.dumps(jobs)
     opener = urllib2.build_opener(urllib2.HTTPHandler)
     request = urllib2.Request(url, data=payload)
     # override the verb; urllib2 cannot issue PUT natively
     request.get_method = lambda: 'PUT'
     fp = opener.open(request)
     try:
         rv = json.loads(fp.read())
     finally:
         fp.close()  # release the connection (previously leaked)
     return rv
Exemple #20
0
 def lineReceived(self, line):
     """Handle one line of a streaming response.

     Each non-blank line is a JSON document.  In "raw" mode it is handed
     to the client callback unchanged; otherwise it is unpacked into
     parallel lists of uuids and numpy reading arrays.
     """
     # a successfully received line resets the failure counter
     # (presumably used for reconnect backoff -- confirm against caller)
     self.failcount = 0
     if not len(line.strip()): return
     try:
         obj = json.loads(line)
         if self.format == "raw":
             self.client.datacb(obj)
         else:
             uuids, data = [], []
             for v in obj.itervalues():
                 # only entries carrying a uuid are timeseries reports
                 if 'uuid' in v:
                     uuids.append(v['uuid'])
                     data.append(np.array(v['Readings']))
             self.client.datacb(uuids, data)
     except:
         # NOTE(review): bare except deliberately keeps the stream alive
         # on malformed input; the error is logged and the line echoed
         log.err()
         print line
Exemple #21
0
 def lineReceived(self, line):
     """Process one line from the streaming connection.

     The line is JSON; "raw" format passes the decoded object straight
     to the client callback, the default format splits it into uuid and
     readings lists first.
     """
     # receiving any line clears the failure count -- TODO confirm how
     # failcount is consumed elsewhere
     self.failcount = 0
     if not len(line.strip()): return
     try:
         obj = json.loads(line)
         if self.format == "raw":
             self.client.datacb(obj)
         else:
             uuids, data = [], []
             for v in obj.itervalues():
                 # skip entries without a uuid (not timeseries data)
                 if 'uuid' in v:
                     uuids.append(v['uuid'])
                     data.append(np.array(v['Readings']))
             self.client.datacb(uuids, data)
     except:
         # intentionally broad: a bad line must not kill the stream;
         # log the failure and dump the offending line for debugging
         log.err()
         print line
Exemple #22
0
 def contents(self, path='/'):
     """
     Get a list of the paths of timeseries in a collection.
     Will search the root collection by default.

     Returns a dict for a single timeseries, a flat list for a
     collection, and (implicitly) None when the response carries
     neither "Readings" nor "Contents" -- unchanged legacy behaviour.
     """
     fp = urllib2.urlopen(self.base + "/data" + path)
     try:
         res = json.loads(fp.read())
     finally:
         fp.close()  # close the connection promptly (was leaked)
     if "Readings" in res:
         # Found a timeseries
         actuator = "Actuator" in res
         return {'Path': path, 'uuid': res['uuid'], 'Actuator': actuator}
     elif "Contents" in res:
         # Found a collection: recurse into each child and flatten
         acc = []
         for item in res["Contents"]:
             p = path + item + "/"
             c = self.contents(p)
             try:
                 acc = acc + c
             except TypeError:
                 # child was a single timeseries dict, not a list
                 acc.append(c)
         return acc
Exemple #23
0
 def contents(self, path='/'):
     """
     Get a list of the paths of timeseries in a collection.
     Will search the root collection by default.

     A timeseries yields a single dict; a collection yields a flattened
     list of such dicts.  When the response has neither key the method
     falls through and returns None (legacy behaviour, unchanged).
     """
     fp = urllib2.urlopen(self.base + "/data" + path)
     try:
         res = json.loads(fp.read())
     finally:
         fp.close()  # was leaked on every (recursive) call before
     if "Readings" in res:
         # Found a timeseries
         actuator = "Actuator" in res
         return {'Path': path, 'uuid': res['uuid'], 'Actuator': actuator}
     elif "Contents" in res:
         # Found a collection
         acc = []
         for item in res["Contents"]:
             p = path + item + "/"
             c = self.contents(p)
             try:
                 # child returned a list: concatenate
                 acc = acc + c
             except TypeError:
                 # child returned a bare timeseries dict: append
                 acc.append(c)
         return acc
Exemple #24
0
 def write(self, data):
     """Pretty-print ``data`` if it parses as JSON; otherwise dump it raw."""
     try:
         pprint.pprint(json.loads(data))
     except:
         # not decodable as JSON -- fall back to printing verbatim
         print data
Exemple #25
0
 def write(self, data):
     """Display ``data``: pretty-printed when it is JSON, raw otherwise."""
     try:
         pprint.pprint(json.loads(data))
     except:
         # broad on purpose: any decode/print failure degrades to raw output
         print data
Exemple #26
0
 def _parser(data):
     """Decode a one-element JSON report list into (uuid, readings array)."""
     decoded = json.loads(data)
     first = decoded[0]
     return first['uuid'], np.array(first['Readings'])
Exemple #27
0
 def _parser(data):
     """Parse a JSON-encoded report list; return the first entry's uuid
     together with its readings as a numpy array."""
     entries = json.loads(data)
     head = entries[0]
     return head['uuid'], np.array(head['Readings'])