Ejemplo n.º 1
0
 def d0_basic(self, weekly=False):
     """Render D0 OSG production (merged events per day/week) as a PNG bar chart.

     Downloads a CSV of daily production counts, sums the event totals per
     day (or per ISO week when ``weekly`` is True) keyed by timestamp, and
     renders the aggregate with GratiaBar.

     :param weekly: group and span the data by week instead of by day.
     :return: the PNG image as a byte string.
     """
     cherrypy.response.headers['Content-Type'] = 'image/png'
     url = self.metadata.get('d0_csv', 'http://physics.lunet.edu/~snow/'
         'd0osgprod.csv')
     url_fp = urllib2.urlopen(url)
     metadata = {'croptime': False, 'span': 86400, 'pivot_name': 'Date',
         'column_names': 'Merged Events',
         'title': 'D0 OSG Production'}
     if weekly:
         metadata['span'] *= 7
     fp = cStringIO.StringIO()
     data = {}
     for line in url_fp.readlines():
         info = line.strip().split(',')
         grouping_name = time.strptime(info[0], '%y%m%d')
         group = datetime.datetime(*grouping_name[:6])
         # Shift the date back to the first day of its ISO week so entries
         # aggregate by week instead of the default by day.
         if weekly:
             weekday = group.isoweekday()
             group -= datetime.timedelta(weekday, 0)
         group = to_timestamp(group)
         results = int(info[3])
         data[group] = data.get(group, 0) + results
     # BUG FIX: start/end times are timestamps, i.e. the dict *keys*; the
     # original took min/max over data.values() (the event counts), which
     # produced nonsense time bounds.
     metadata['starttime'] = min(data.keys())
     time_max = max(data.keys())
     if weekly:
         metadata['endtime'] = time_max + 7 * 86400
     else:
         metadata['endtime'] = time_max + 1 * 86400
     GB = GratiaBar()
     GB(data, fp, metadata)
     return fp.getvalue()
Ejemplo n.º 2
0
 def parse_type(self, string, my_type):
     """Coerce *string* into the Python type named by *my_type*.

     Recognised names: 'int', 'offset_now' (positive ints pass through,
     zero/negative are offsets from time.time()), 'float', 'eval'
     (always rejected), 'datetime', 'timestamp', and 'bool'/'boolean'
     (parses 'true'/'false'; non-strings go through bool()). Any other
     name falls back to str().
     """
     if my_type in ('bool', 'boolean'):
         if not isinstance(string, str):
             return bool(string)
         token = string.lower().strip()
         if token == 'true':
             return True
         if token == 'false':
             return False
         raise TypeError("Cannot convert string %s to boolean; valid "
                         "inputs are 'true' or 'false'." % string)
     if my_type == 'offset_now':
         # Non-positive values are relative offsets from "now".
         value = int(string)
         return value if value > 0 else time.time() + value
     simple = {'int': int, 'float': float}
     if my_type in simple:
         return simple[my_type](string)
     if my_type == 'eval':
         raise ValueError("sorry eval type not supported")
     if my_type == 'datetime':
         return convert_to_datetime(string)
     if my_type == 'timestamp':
         return to_timestamp(string)
     return str(string)
Ejemplo n.º 3
0
 def groupingAttrs( self, grouping_name, grouping ):
   """Build the attribute dict for a <group> XML element.

   When the grouping column is named 'time', the grouping value is
   interpreted as a timestamp and rendered as a UTC datetime string;
   otherwise it is stringified as-is.
   """
   is_time = bool(grouping_name) and str(grouping_name).lower() == 'time'
   if is_time:
     rendered = str(datetime.datetime.utcfromtimestamp(to_timestamp(grouping)))
   else:
     rendered = str(grouping)
   return {'value': rendered}
Ejemplo n.º 4
0
 def parse_type( self, string, my_type ):
   """Coerce *string* to the type named by *my_type*.

   Supports 'int', 'float', 'offset_now' (non-positive values become an
   offset from time.time()), 'eval' (always rejected), 'datetime',
   'timestamp', 'bool'/'boolean' ('true'/'false' strings, bool() for
   non-strings); any other name falls back to str().
   """
   if my_type == 'int':
     return int(string)
   if my_type == 'float':
     return float(string)
   if my_type == 'offset_now':
     # Non-positive offsets are relative to the current time.
     value = int(string)
     if value > 0:
       return value
     return time.time() + value
   if my_type == 'eval':
     raise ValueError("sorry eval type not supported")
   if my_type == 'datetime':
     return convert_to_datetime(string)
   if my_type == 'timestamp':
     return to_timestamp(string)
   if my_type in ('bool', 'boolean'):
     if not isinstance(string, str):
       return bool(string)
     lowered = string.lower().strip()
     if lowered == 'true':
       return True
     if lowered == 'false':
       return False
     raise TypeError("Cannot convert string %s to boolean; valid "
                     "inputs are 'true' or 'false'." % string)
   return str(string)
Ejemplo n.º 5
0
 def d0_basic(self, weekly=False):
     """Render D0 OSG production (merged events per day/week) as a PNG bar chart.

     Downloads a CSV of daily production counts, sums the event totals per
     day (or per ISO week when ``weekly`` is True) keyed by timestamp, and
     renders the aggregate with GratiaBar.

     :param weekly: group and span the data by week instead of by day.
     :return: the PNG image as a byte string.
     """
     cherrypy.response.headers['Content-Type'] = 'image/png'
     url = self.metadata.get('d0_csv', 'http://physics.lunet.edu/~snow/'
         'd0osgprod.csv')
     url_fp = urllib2.urlopen(url)
     metadata = {'croptime': False, 'span': 86400, 'pivot_name': 'Date',
         'column_names': 'Merged Events',
         'title': 'D0 OSG Production'}
     if weekly:
         metadata['span'] *= 7
     fp = cStringIO.StringIO()
     data = {}
     for line in url_fp.readlines():
         info = line.strip().split(',')
         grouping_name = time.strptime(info[0], '%y%m%d')
         group = datetime.datetime(*grouping_name[:6])
         # Shift the date back to the first day of its ISO week so entries
         # aggregate by week instead of the default by day.
         if weekly:
             weekday = group.isoweekday()
             group -= datetime.timedelta(weekday, 0)
         group = to_timestamp(group)
         results = int(info[3])
         data[group] = data.get(group, 0) + results
     # BUG FIX: start/end times are timestamps, i.e. the dict *keys*; the
     # original took min/max over data.values() (the event counts), which
     # produced nonsense time bounds.
     metadata['starttime'] = min(data.keys())
     time_max = max(data.keys())
     if weekly:
         metadata['endtime'] = time_max + 7 * 86400
     else:
         metadata['endtime'] = time_max + 1 * 86400
     GB = GratiaBar()
     GB(data, fp, metadata)
     return fp.getvalue()
Ejemplo n.º 6
0
 def groupingAttrs(self, grouping_name, grouping):
     """Return the attrs dict for a <group> XML element.

     A grouping column named 'time' is treated as a timestamp and
     rendered as a UTC datetime string; anything else is stringified.
     """
     if grouping_name and str(grouping_name).lower() == 'time':
         value = str(
             datetime.datetime.utcfromtimestamp(to_timestamp(grouping)))
     else:
         value = str(grouping)
     return {'value': value}
Ejemplo n.º 7
0
 def make_hash_str( self, query, **kw ):
   """Build a deterministic cache-key string from a query and its kwargs.

   Start/end times are normalised to integer timestamps and snapped down
   to 5-minute (long windows) or 10-second (short windows) boundaries so
   near-identical requests hash to the same key. The key is the query
   string followed by ',name,value' for each kwarg in sorted-name order.
   """
   if 'starttime' in kw:
     kw['starttime'] = int(to_timestamp(kw['starttime']))
   if 'endtime' in kw:
     kw['endtime'] = int(to_timestamp(kw['endtime']))
   if 'starttime' in kw and 'endtime' in kw:
     window = kw['endtime'] - kw['starttime']
     if window > 300*5:
       # Long windows: snap both ends to 5-minute boundaries.
       kw['endtime'] -= kw['endtime'] % 300
       kw['starttime'] -= kw['starttime'] % 300
     elif window > 300:
       # Short windows: snap to 10-second boundaries.
       kw['endtime'] -= kw['endtime'] % 10
       kw['starttime'] -= kw['starttime'] % 10
   hash_str = str(query)
   # FIX: use sorted() instead of list.sort() -- on Python 3 dict.keys()
   # returns a view with no .sort() method; sorted() works on both.
   for key in sorted(kw):
     hash_str += ',' + str(key) + ',' + str(kw[key])
   return hash_str
Ejemplo n.º 8
0
def adjust_time( mytime, **kw ):
  """Return *mytime* shifted by kw['adjust_hours'] hours (default 0),
  expressed as a naive UTC datetime."""
  shift_seconds = float(kw.get('adjust_hours', 0)) * 3600
  stamp = to_timestamp(mytime) + shift_seconds
  return datetime.datetime.utcfromtimestamp(stamp)
Ejemplo n.º 9
0
def adjust_time(mytime, **kw):
    """Shift *mytime* by an optional 'adjust_hours' keyword (in hours)
    and return the result as a naive UTC datetime."""
    if 'adjust_hours' in kw:
        delta = float(kw['adjust_hours']) * 3600
    else:
        delta = 0
    return datetime.datetime.utcfromtimestamp(to_timestamp(mytime) + delta)
Ejemplo n.º 10
0
 def groupingAttrs( self, grouping_name, grouping, metadata):
   """Build the attribute dict for a <group> XML element.

   With metadata['translate_mp_2_gc'] set, the grouping is serialised as
   compact JSON; a grouping column named 'time' is rendered as a UTC
   datetime string; everything else is stringified as-is.
   """
   if metadata.get('translate_mp_2_gc', False):
     rendered = json.dumps(grouping, separators=(',', ':'),
                           cls=CustomDecimalDateObjectJSONEncoder)
   elif grouping_name and str(grouping_name).lower() == 'time':
     rendered = str(datetime.datetime.utcfromtimestamp(to_timestamp(grouping)))
   else:
     rendered = str(grouping)
   return {'value': rendered}
Ejemplo n.º 11
0
 def make_hash_str(self, query, **kw):
     """Build a deterministic cache-key string from a query and its kwargs.

     Start/end times are normalised to integer timestamps and snapped
     down to 5-minute (long windows) or 10-second (short windows)
     boundaries so near-identical requests hash to the same key. The
     key is str(query) followed by ',name,value' pairs in sorted order.
     """
     if 'starttime' in kw:
         kw['starttime'] = int(to_timestamp(kw['starttime']))
     if 'endtime' in kw:
         kw['endtime'] = int(to_timestamp(kw['endtime']))
     if 'starttime' in kw and 'endtime' in kw:
         window = kw['endtime'] - kw['starttime']
         if window > 300 * 5:
             # Long windows: snap both ends to 5-minute boundaries.
             kw['endtime'] -= kw['endtime'] % 300
             kw['starttime'] -= kw['starttime'] % 300
         elif window > 300:
             # Short windows: snap to 10-second boundaries.
             kw['endtime'] -= kw['endtime'] % 10
             kw['starttime'] -= kw['starttime'] % 10
     hash_str = str(query)
     # FIX: sorted() instead of list.sort() -- Python 3 dict.keys()
     # returns a view without a .sort() method; sorted() works on both.
     for key in sorted(kw):
         hash_str += ',' + str(key) + ',' + str(kw[key])
     return hash_str
Ejemplo n.º 12
0
    def addResults_pg(self, data, metadata, gen, **kw):
        """Emit the 'pivot-group' <data> section of the result XML.

        Writes one element per pivot, containing one <group> element per
        grouping (newest group first), with per-point coordinates attached
        via kw['coords'] when the grapher can supply them.

        :param data: mapping pivot -> {grouping -> row data}.
        :param metadata: query metadata (pivot/grouping names, grapher...).
        :param gen: SAX-style XML generator to write into.
        """
        try:
            if 'grapher' in metadata:
                coords = metadata['grapher'].get_coords(
                    metadata['query'], metadata, **metadata['given_kw'])
            else:
                coords = None
        except Exception as e:
            print(e)
            traceback.print_exc(sys.stdout)
            coords = None

        attrs = {'kind': 'pivot-group'}
        pivot_name = str(metadata['pivot_name'])
        if pivot_name and len(pivot_name) > 0:
            attrs['pivot'] = pivot_name
        grouping_name = str(metadata.get('grouping_name', ''))
        if grouping_name and len(grouping_name) > 0:
            attrs['group'] = grouping_name
        if coords:
            attrs['coords'] = 'True'
        else:
            attrs['coords'] = 'False'
        self.write_columns(metadata, gen)
        gen.startElement('data', attrs)

        for pivot in data.keys():
            gen.characters("\n\t\t\t")
            gen.startElement(*self.pivotName(pivot, attrs))
            # FIX: dict.keys() is a view on Python 3 and has no
            # .sort()/.reverse(); sorted(..., reverse=True) is the
            # behavior-identical, version-portable equivalent.
            my_groups = sorted(data[pivot].keys(), reverse=True)
            for grouping in my_groups:
                gen.characters("\n\t\t\t\t")
                gen.startElement('group',
                                 self.groupingAttrs(grouping_name, grouping))
                if coords:
                    try:
                        groups = coords[pivot]
                        # datetime groupings absent from the coords map are
                        # looked up by their timestamp instead.
                        if isinstance(grouping, datetime.datetime) and (
                                not (grouping in groups.keys())):
                            kw['coords'] = groups[to_timestamp(grouping)]
                        else:
                            kw['coords'] = groups[grouping]
                    except Exception:
                        # Best-effort: missing coords are simply skipped.
                        pass
                self.addData(data[pivot][grouping], gen, **kw)
                gen.endElement('group')
            gen.endElement(self.pivotName(pivot, attrs)[0])
        gen.characters("\n\t\t")
        gen.endElement('data')
        gen.characters("\n\t")
Ejemplo n.º 13
0
 def parse_type( self, string, my_type ):
   """Convert *string* to the Python type named by *my_type*.

   Recognised names: 'int', 'float', 'datetime', 'timestamp'; 'eval' is
   always rejected; any other name falls back to str().
   """
   if my_type == 'eval':
     raise ValueError("sorry eval type not supported")
   if my_type == 'int':
     return int(string)
   if my_type == 'float':
     return float(string)
   if my_type == 'datetime':
     return convert_to_datetime(string)
   if my_type == 'timestamp':
     return to_timestamp(string)
   return str(string)
Ejemplo n.º 14
0
 def parse_type(self, string, my_type):
     """Convert *string* to the Python type named by *my_type*.

     Handles 'int', 'float', 'datetime' and 'timestamp'; 'eval' is
     always rejected; unrecognised names fall back to str().
     """
     plain = {'int': int, 'float': float}
     if my_type in plain:
         return plain[my_type](string)
     if my_type == 'eval':
         raise ValueError("sorry eval type not supported")
     if my_type == 'datetime':
         return convert_to_datetime(string)
     if my_type == 'timestamp':
         return to_timestamp(string)
     return str(string)
Ejemplo n.º 15
0
 def parse_type( self, string, my_type ):
   """Convert *string* to the type named by *my_type*.

   'eval' evaluates the string with builtins disabled and only the time
   module exposed; unknown type names fall back to str().
   """
   if my_type == 'int':
     return int(string)
   if my_type == 'float':
     return float(string)
   if my_type == 'eval':
     # SECURITY: evaluates caller-supplied input; builtins are disabled
     # but eval is still not safe for fully untrusted data.
     return eval(str(string), {'__builtins__': None, 'time': time}, {})
   if my_type == 'datetime':
     return convert_to_datetime(string)
   if my_type == 'timestamp':
     return to_timestamp(string)
   return str(string)
Ejemplo n.º 16
0
 def groupingAttrs(self, grouping_name, grouping, metadata):
     """Return the attrs dict for a <group> XML element.

     With metadata['translate_mp_2_gc'] set, the grouping is serialised
     as compact JSON; a grouping column named 'time' is rendered as a
     UTC datetime string; everything else is stringified as-is.
     """
     if metadata.get('translate_mp_2_gc', False):
         return {'value': json.dumps(grouping, separators=(',', ':'),
                                     cls=CustomDecimalDateObjectJSONEncoder)}
     if grouping_name and str(grouping_name).lower() == 'time':
         stamp = datetime.datetime.utcfromtimestamp(to_timestamp(grouping))
         return {'value': str(stamp)}
     return {'value': str(grouping)}
Ejemplo n.º 17
0
 def parse_type( self, string, my_type ):
   """Coerce *string* to the named type.

   'int'/'float' convert numerically; 'eval' evaluates the string in a
   restricted namespace (no builtins, only the time module); 'datetime'
   and 'timestamp' use the project converters; anything else -> str().
   """
   numeric = {'int': int, 'float': float}
   if my_type in numeric:
     return numeric[my_type](string)
   if my_type == 'eval':
     # SECURITY: eval of caller-supplied input; restricted but not safe
     # for fully untrusted data.
     return eval(str(string), {'__builtins__': None, 'time': time}, {})
   if my_type == 'datetime':
     return convert_to_datetime(string)
   if my_type == 'timestamp':
     return to_timestamp(string)
   return str(string)
Ejemplo n.º 18
0
  def addResults_pg( self, data, metadata, gen, **kw ):
    """Emit the 'pivot-group' <data> section of the result XML.

    Writes one element per pivot, each containing one <group> element per
    grouping (newest first), attaching per-point coordinates through
    kw['coords'] when the grapher can supply them.

    :param data: mapping pivot -> {grouping -> row data}.
    :param metadata: query metadata (pivot/grouping names, grapher...).
    :param gen: SAX-style XML generator to write into.
    """
    try:
      if 'grapher' in metadata:
        coords = metadata['grapher'].get_coords( metadata['query'], metadata, **metadata['given_kw'] )
      else:
        coords = None
    except Exception as e:
      print(e)
      traceback.print_exc( sys.stdout )
      coords = None

    attrs = {'kind':'pivot-group'}
    pivot_name = str(metadata['pivot_name'])
    if pivot_name and len(pivot_name) > 0:
      attrs['pivot'] = pivot_name
    grouping_name = str(metadata.get('grouping_name',''))
    if grouping_name and len(grouping_name) > 0:
      attrs['group'] = grouping_name
    if coords:
      attrs['coords'] = 'True'
    else:
      attrs['coords'] = 'False'
    self.write_columns( metadata, gen )
    gen.startElement('data',attrs)

    for pivot in data.keys():
      gen.characters("\n\t\t\t")
      gen.startElement( *self.pivotName( pivot, attrs ) )
      # FIX: dict.keys() is a view on Python 3 with no .sort()/.reverse();
      # sorted(..., reverse=True) is the portable, behavior-identical form.
      my_groups = sorted(data[pivot].keys(), reverse=True)
      for grouping in my_groups:
        gen.characters("\n\t\t\t\t")
        gen.startElement('group', self.groupingAttrs( grouping_name, grouping ) )
        if coords:
          try:
            groups = coords[pivot]
            # datetime groupings absent from the coords map are looked up
            # by their timestamp instead.
            if isinstance(grouping, datetime.datetime) and (not (grouping in groups.keys()) ):
              kw['coords'] = groups[to_timestamp(grouping)]
            else:
              kw['coords'] = groups[grouping]
          except Exception:
            # Best-effort: missing coords are simply skipped.
            pass
        self.addData( data[pivot][grouping], gen, **kw )
        gen.endElement('group')
      gen.endElement( self.pivotName( pivot, attrs )[0] )
    gen.characters("\n\t\t")
    gen.endElement('data')
    gen.characters("\n\t")
Ejemplo n.º 19
0
 def d0_basic(self, weekly=False):
     """Render D0 OSG production (merged events per day/week) as a PNG bar chart.

     Resolves the CSV location through GratiaURLS, downloads the daily
     production counts, sums the event totals per day (or per ISO week
     when ``weekly`` is True) keyed by timestamp, and renders the
     aggregate with GratiaBar.

     :param weekly: group and span the data by week instead of by day.
     :return: the PNG image as a byte string.
     """
     cherrypy.response.headers['Content-Type'] = 'image/png'
     srchUrl = 'InitD0Url'
     modName = 'd0'
     print("%s: srchUrl: %s" % (modName, srchUrl))
     try:
         D0Url = GratiaURLS().GetUrl(srchUrl)
         print("%s: SUCCESS: GratiaURLS().GetUrl(url = %s)" % (modName,
                                                               srchUrl))
         print("%s: retUrl: %s" % (modName, D0Url))
     except Exception:
         # NOTE(review): on failure D0Url is left unbound and the
         # urlopen() below raises NameError -- confirm intended behavior.
         print("%s: FAILED: GratiaURLS().GetUrl(url = %s)" % (modName,
                                                              srchUrl))
     url_fp = urllib2.urlopen(D0Url)
     metadata = {'croptime': False, 'span': 86400, 'pivot_name': 'Date',
         'column_names': 'Merged Events',
         'title': 'D0 OSG Production'}
     if weekly:
         metadata['span'] *= 7
     fp = cStringIO.StringIO()
     data = {}
     for line in url_fp.readlines():
         info = line.strip().split(',')
         grouping_name = time.strptime(info[0], '%y%m%d')
         group = datetime.datetime(*grouping_name[:6])
         # Shift the date back to the first day of its ISO week so entries
         # aggregate by week instead of the default by day.
         if weekly:
             weekday = group.isoweekday()
             group -= datetime.timedelta(weekday, 0)
         group = to_timestamp(group)
         results = int(info[3])
         data[group] = data.get(group, 0) + results
     # BUG FIX: start/end times are timestamps, i.e. the dict *keys*; the
     # original took min/max over data.values() (the event counts).
     metadata['starttime'] = min(data.keys())
     time_max = max(data.keys())
     if weekly:
         metadata['endtime'] = time_max + 7 * 86400
     else:
         metadata['endtime'] = time_max + 1 * 86400
     GB = GratiaBar()
     GB(data, fp, metadata)
     return fp.getvalue()
Ejemplo n.º 20
0
 def d0_basic(self, weekly=False):
     """Render D0 OSG production (merged events per day/week) as a PNG bar chart.

     Resolves the CSV location through GratiaURLS, downloads the daily
     production counts, sums the event totals per day (or per ISO week
     when ``weekly`` is True) keyed by timestamp, and renders the
     aggregate with GratiaBar.

     :param weekly: group and span the data by week instead of by day.
     :return: the PNG image as a byte string.
     """
     cherrypy.response.headers['Content-Type'] = 'image/png'
     srchUrl = 'InitD0Url'
     modName = 'd0'
     print("%s: srchUrl: %s" % (modName, srchUrl))
     try:
         D0Url = GratiaURLS().GetUrl(srchUrl)
         print("%s: SUCCESS: GratiaURLS().GetUrl(url = %s)" % (modName, srchUrl))
         print("%s: retUrl: %s" % (modName, D0Url))
     except Exception:
         # NOTE(review): on failure D0Url is left unbound and the
         # urlopen() below raises NameError -- confirm intended behavior.
         print("%s: FAILED: GratiaURLS().GetUrl(url = %s)" % (modName, srchUrl))
     url_fp = urllib2.urlopen(D0Url)
     metadata = {'croptime': False, 'span': 86400, 'pivot_name': 'Date',
         'column_names': 'Merged Events',
         'title': 'D0 OSG Production'}
     if weekly:
         metadata['span'] *= 7
     fp = cStringIO.StringIO()
     data = {}
     for line in url_fp.readlines():
         info = line.strip().split(',')
         grouping_name = time.strptime(info[0], '%y%m%d')
         group = datetime.datetime(*grouping_name[:6])
         # Shift the date back to the first day of its ISO week so entries
         # aggregate by week instead of the default by day.
         if weekly:
             weekday = group.isoweekday()
             group -= datetime.timedelta(weekday, 0)
         group = to_timestamp(group)
         results = int(info[3])
         data[group] = data.get(group, 0) + results
     # BUG FIX: start/end times are timestamps, i.e. the dict *keys*; the
     # original took min/max over data.values() (the event counts).
     metadata['starttime'] = min(data.keys())
     time_max = max(data.keys())
     if weekly:
         metadata['endtime'] = time_max + 7*86400
     else:
         metadata['endtime'] = time_max + 1*86400
     GB = GratiaBar()
     GB(data, fp, metadata)
     return fp.getvalue()
Ejemplo n.º 21
0
 def parse_type( self, string, my_type ):
   """Convert *string* to the type named by *my_type*.

   Supports 'int', 'float', 'eval' (restricted evaluation), 'datetime',
   'timestamp' and 'bool'/'boolean' ('true'/'false' strings; bool() for
   non-strings). Any other name falls back to str().
   """
   if my_type == 'int':
     return int( string )
   elif my_type == 'float':
     return float( string )
   elif my_type == 'eval':
     # SECURITY: evaluates caller-supplied input; builtins are disabled
     # but eval is still not safe for fully untrusted data.
     return eval(str(string),{'__builtins__':None,'time':time},{})
   elif my_type == 'datetime':
     return convert_to_datetime( string )
   elif my_type == 'timestamp':
     return to_timestamp( string )
   elif my_type == 'bool' or my_type == 'boolean':
     # FIX: isinstance(..., str) replaces the Python-2-only
     # types.StringType comparison, matching the other parse_type
     # variants in this file and working on Python 3.
     if not isinstance(string, str):
       return bool(string)
     token = string.lower().strip()
     if token == 'false':
       return False
     elif token == 'true':
       return True
     else:
       raise TypeError("Cannot convert string %s to boolean; valid "
                       "inputs are 'true' or 'false'." % string)
   else:
     return str( string )
Ejemplo n.º 22
0
 def parse_type( self, string, my_type ):
   """Convert *string* to the type named by *my_type*.

   Supports 'int', 'float', 'eval' (restricted evaluation), 'datetime',
   'timestamp' and 'bool'/'boolean' ('true'/'false' strings; bool() for
   non-strings). Any other name falls back to str().
   """
   if my_type == 'int':
     return int( string )
   elif my_type == 'float':
     return float( string )
   elif my_type == 'eval':
     # SECURITY: evaluates caller-supplied input; builtins are disabled
     # but eval is still not safe for fully untrusted data.
     return eval(str(string),{'__builtins__':None,'time':time},{})
   elif my_type == 'datetime':
     return convert_to_datetime( string )
   elif my_type == 'timestamp':
     return to_timestamp( string )
   elif my_type == 'bool' or my_type == 'boolean':
     # FIX: isinstance(..., str) replaces the Python-2-only
     # types.StringType comparison, matching the other parse_type
     # variants in this file and working on Python 3.
     if not isinstance(string, str):
       return bool(string)
     token = string.lower().strip()
     if token == 'false':
       return False
     elif token == 'true':
       return True
     else:
       raise TypeError("Cannot convert string %s to boolean; valid "
                       "inputs are 'true' or 'false'." % string)
   else:
     return str( string )
Ejemplo n.º 23
0
class XmlGenerator(QueryHandler):
    """Serialize query results into an XML document for GraphTool.

    The emitted document references an XSLT stylesheet (XSLT_NAME) served
    from the static content area and contains the query name, title, SQL
    text, SQL variables, base URLs, a graph URL and the result data.
    Rendering runs in a child process when multiprocessing is available.
    Note: this class is Python-2 code (print statements, ``except E, e``).
    """

    # Stylesheet referenced by the <?xml-stylesheet?> processing instruction.
    XSLT_NAME = 'xml_results.xsl'

    def _do_multiprocess_child(self, q, results, metadata, kw):
        """Child-process entry point: render the XML and push it onto *q*.

        On failure, puts None on the queue (so the parent's q.get() never
        blocks forever) and re-raises so the error surfaces in the child.
        """
        try:
            xml = self.handle_results_internal(results, metadata, **kw)
            q.put(xml)
        except:
            q.put(None)
            raise

    def do_multiprocess(self, results, metadata, **kw):
        """Render the result XML in a daemonized child process and return it."""
        q = multiprocessing.Queue()
        p = multiprocessing.Process(target=self._do_multiprocess_child,
                                    args=(q, results, metadata, kw),
                                    name="GraphTool XmlGenerator")
        p.daemon = True
        p.start()
        xml = q.get()
        p.join()
        return xml

    def handle_results_internal(self, results, metadata, **kw):
        """Build the complete XML document for *results* and return it.

        Dispatches on metadata['kind'] to the matching addResults_* writer;
        raises for unknown kinds.
        """
        output = cStringIO.StringIO()
        gen = self.startPlot(output, results, metadata)
        kind = metadata.get('kind', 'Type not specified!')
        if kind == 'pivot-group':
            self.addResults_pg(results, metadata, gen)
        elif kind == 'pivot':
            self.addResults_p(results, metadata, gen)
        elif kind == 'complex-pivot':
            self.addResults_c_p(results, metadata, gen)
        else:
            raise Exception("Unknown data type! (%s)" % kind)
        self.endPlot(gen)
        return output.getvalue()

    def handle_results(self, results, metadata, **kw):
        """Render *results*, in a subprocess when multiprocessing is available."""
        if has_multiprocessing:
            return self.do_multiprocess(results, metadata, **kw)
        else:
            return self.handle_results_internal(results, metadata, **kw)

    def handle_list(self, *args, **kw):
        """Emit an XML index of all known query pages, one <pagelist> per
        query object, each page linked relative to the configured base URL."""
        output = cStringIO.StringIO()
        gen = self.startDocument(output)
        base_url = ''
        if 'base_url' in self.metadata:
            base_url = self.metadata['base_url']
            # Ensure a trailing slash so page names append cleanly.
            if base_url[-1] != '/':
                base_url += '/'
        i = 0
        for query_obj in self.objs:
            i += 1
            if 'display_name' in query_obj.__dict__:
                name = query_obj.display_name
            else:
                name = "Query"
            gen.startElement("pagelist", {'name': name, 'id': str(i)})
            for page in query_obj.commands.keys():
                gen.characters("\t\t\n")
                attrs = {}
                my_page = self.known_commands[page]
                if 'title' in my_page.__dict__.keys():
                    attrs['title'] = my_page.title
                else:
                    attrs['title'] = page
                gen.startElement('page', attrs)
                gen.characters(base_url + page)
                gen.endElement('page')
            gen.characters("\t\n")
            gen.endElement("pagelist")
            gen.characters("\t\n")
        self.endDocument(gen)
        return output.getvalue()

    def startDocument(self, output, encoding='UTF-8'):
        """Start the XML document: declaration, stylesheet PI, DOCTYPE and
        the root <graphtool> element. Returns the SAX generator."""
        gen = XMLGenerator(output, encoding)
        gen.startDocument()
        try:
            # Best-effort lookup of the static content base; falls back to
            # '/static/content' when the static object is unavailable.
            static_location = '/static/content'
            static_object = self.globals['static']
            static_location = static_object.metadata.get('base_url', '/static')
            static_location += '/content'
        except:
            pass
        output.write('<?xml-stylesheet type="text/xsl" href="%s/%s"?>\n' % \
            (static_location, self.XSLT_NAME) )
        output.write('<!DOCTYPE graphtool-data>\n')
        gen.startElement('graphtool', {})
        gen.characters("\n\t")
        return gen

    def startPlot(self, output, results, metadata, encoding='UTF-8'):
        """Open a <query> element and write its header children: title,
        graph type, SQL, SQL variables, base-URL attrs and the graph URL.
        Returns the SAX generator positioned inside <query>."""
        gen = self.startDocument(output, encoding)
        query_attrs = {}
        name = metadata.get('name', '')
        if name and len(name) > 0:
            query_attrs['name'] = name
        gen.startElement('query', query_attrs)
        gen.characters("\n\t\t")
        title = expand_string(metadata.get('title', ''),
                              metadata.get('sql_vars', ''))
        if title and len(title) > 0:
            gen.startElement('title', {})
            gen.characters(title)
            gen.endElement('title')
            gen.characters("\n\t\t")
        graph_type = metadata.get('graph_type', False)
        if graph_type and len(graph_type) > 0:
            gen.startElement('graph', {})
            gen.characters(graph_type)
            gen.endElement('graph')
            gen.characters("\n\t\t")
        sql_string = str(metadata.get('sql', ''))
        gen.startElement('sql', {})
        gen.characters(sql_string)
        gen.characters("\n\t\t")
        gen.endElement('sql')
        gen.characters("\n\t\t")
        self.write_sql_vars(results, metadata, gen)
        gen.characters("\n\t\t")
        base_url = None
        graphs = metadata.get('grapher', None)
        if graphs and 'base_url' in graphs.metadata:
            base_url = graphs.metadata['base_url']
        else:
            # Diagnostic only; rendering continues with base_url = None.
            print "Base URL not specified!"
            print metadata
            if graphs:
                print graphs.metadata
            else:
                print "Graphs not specified"
            pass
        my_base_url = self.metadata.get('base_url', '')
        gen.startElement('attr', {'name': 'base_url'})
        gen.characters(my_base_url)
        gen.endElement('attr')
        gen.characters('\n\t\t')
        try:
            # Same best-effort static-location lookup as startDocument().
            static_location = '/static/content'
            static_object = self.globals['static']
            static_location = static_object.metadata.get('base_url', '/static')
            static_location += '/content'
        except:
            pass
        gen.startElement('attr', {'name': 'static_base_url'})
        gen.characters(static_location)
        gen.endElement('attr')
        gen.characters('\n\t\t')
        self.write_graph_url(results, metadata, gen, base_url=base_url)
        gen.characters('\n\t\t')
        return gen

    def write_graph_url(self, results, metadata, gen, base_url=None):
        """Write a <url> element reproducing the query URL with its kwargs.

        Skipped entirely when no base_url is known.
        """
        if base_url != None:
            base = base_url + '/' + metadata.get('name', '') + '?'
            kw = metadata.get('given_kw', {})
            for key, item in kw.items():
                base += str(key) + '=' + str(item) + '&'
            gen.startElement("url", {})
            gen.characters(base)
            gen.endElement("url")

    def write_sql_vars(self, data, metadata, gen):
        """Write a <sqlvars> element listing metadata['sql_vars'] overlaid
        with the caller-given keyword arguments."""
        sql_vars = metadata['sql_vars']
        # NOTE(review): this mutates metadata['sql_vars'] in place with the
        # given kwargs -- confirm callers do not reuse the original dict.
        for key, item in metadata['given_kw'].items():
            sql_vars[key] = item
        gen.startElement('sqlvars', {})
        for var in sql_vars:
            gen.characters("\n\t\t\t")
            gen.startElement('var', {'name': var})
            gen.characters(str(sql_vars[var]))
            gen.endElement('var')
        gen.characters("\n\t\t")
        gen.endElement('sqlvars')
        gen.characters("\n\t\t")

    def endPlot(self, gen):
        """Close the <query> element and finish the document."""
        gen.endElement('query')
        gen.characters("\n")
        self.endDocument(gen)

    def endDocument(self, gen):
        """Close the root <graphtool> element and end the SAX document."""
        gen.endElement('graphtool')
        gen.characters("\n")
        gen.endDocument()

    def write_columns(self, metadata, gen):
        """Write a <columns> element pairing each column name (from
        metadata['column_names']) with its unit (metadata['column_units']),
        both comma-separated strings. Emits nothing when either is empty."""
        column_names = str(metadata.get('column_names', ''))
        column_units = str(metadata.get('column_units', ''))
        names = [i.strip() for i in column_names.split(',')]
        units = [i.strip() for i in column_units.split(',')]
        columns = {}
        # Pair names with units positionally; extras on either side dropped.
        num_columns = min(len(names), len(units))
        for idx in range(num_columns):
            columns[names[idx]] = units[idx]
        if len(columns.items()) > 0:
            gen.startElement('columns', {})
            i = 1
            for header in names:
                gen.characters("\n\t\t\t")
                gen.startElement('column', {
                    'unit': columns[header],
                    'index': str(i)
                })
                i += 1
                gen.characters(header)
                gen.endElement('column')
            gen.characters("\n\t\t")
            gen.endElement('columns')
            gen.characters("\n\t\t")

    def addResults_pg(self, data, metadata, gen, **kw):
        """Emit the 'pivot-group' <data> section: one element per pivot,
        one <group> per grouping (newest first), attaching per-point
        coordinates via kw['coords'] when the grapher supplies them."""
        try:
            if 'grapher' in metadata:
                coords = metadata['grapher'].get_coords(
                    metadata['query'], metadata, **metadata['given_kw'])
            else:
                coords = None
        except Exception, e:
            print e
            traceback.print_exc(sys.stdout)
            coords = None

        attrs = {'kind': 'pivot-group'}
        pivot_name = str(metadata['pivot_name'])
        if pivot_name and len(pivot_name) > 0:
            attrs['pivot'] = pivot_name
        grouping_name = str(metadata.get('grouping_name', ''))
        if grouping_name and len(grouping_name) > 0:
            attrs['group'] = grouping_name
        if coords:
            attrs['coords'] = 'True'
        else:
            attrs['coords'] = 'False'
        self.write_columns(metadata, gen)
        gen.startElement('data', attrs)

        for pivot in data.keys():
            gen.characters("\n\t\t\t")
            gen.startElement(*self.pivotName(pivot, attrs))
            # Sort groupings descending so the newest group comes first.
            my_groups = data[pivot].keys()
            my_groups.sort()
            my_groups.reverse()
            for grouping in my_groups:
                gen.characters("\n\t\t\t\t")
                grouping_attrs = {}
                gen.startElement('group',
                                 self.groupingAttrs(grouping_name, grouping))
                if coords:
                    try:
                        # datetime groupings missing from the coords map are
                        # looked up by their timestamp instead.
                        groups = coords[pivot]
                        if type(grouping) == datetime.datetime and (
                                not (grouping in groups.keys())):
                            kw['coords'] = groups[to_timestamp(grouping)]
                        else:
                            kw['coords'] = groups[grouping]
                    except Exception, e:
                        #print "Missing coords", pivot, grouping
                        #print e
                        pass
                self.addData(data[pivot][grouping], gen, **kw)
                gen.endElement('group')
            gen.endElement(self.pivotName(pivot, attrs)[0])
            # NOTE(review): unlike the other addResults_pg variants in this
            # file, there is no closing endElement('data') here -- confirm
            # the <data> element is closed elsewhere.
Ejemplo n.º 24
0
def pivot_group_parser_plus(sql_results,
                            pivots="0,1",
                            grouping="2",
                            results="3",
                            pivot_transform="echo",
                            grouping_transform="echo",
                            data_transform="echo",
                            globals=globals(),
                            suppress_zeros=True,
                            **kw):
    """Pivot/group SQL rows and expand them into a gap-filled cumulative series.

    Parameters:
        sql_results -- sequence of indexable rows from a SQL query.
        pivots, grouping, results -- comma-separated column indexes used for
            the pivot key, the group (time) key, and the data values.
        pivot_transform, grouping_transform, data_transform -- a callable,
            the string 'echo' (identity), or the name of a callable looked up
            in *globals*.  (Generalized: the original accepted a callable
            only for pivot_transform.)
        globals -- namespace used to resolve transform names.  NOTE(review):
            shadows the builtin; kept for backward compatibility.
        suppress_zeros -- accepted for interface compatibility; unused here.
        kw -- extra keyword arguments forwarded to the transforms.

    Returns:
        (filtered_results, metadata): filtered_results maps
        pivot -> {timestamp: cumulative value}; metadata describes the kind.
    """
    def _resolve(transform):
        # Accept a callable directly, 'echo' for identity, or a name to look
        # up in the supplied namespace.
        if callable(transform):
            return transform
        if transform == 'echo':
            return echo
        return globals[transform.strip()]

    pivot_cols = [int(i.strip()) for i in pivots.split(',')]
    grouping_cols = [int(i.strip()) for i in grouping.split(',')]
    results_cols = [int(i.strip()) for i in results.split(',')]
    len_results_cols = len(results_cols)
    # Guard against empty input: the original left row_size unbound here.
    row_size = len(sql_results[0]) if len(sql_results) > 0 else 0

    pivot_transform_func = _resolve(pivot_transform)
    grouping_transform_func = _resolve(grouping_transform)
    data_transform_func = _resolve(data_transform)

    parsed_results = {}
    group_set = set()

    # First pass: collect the distinct group timestamps so the minimum
    # spacing between consecutive groups can be computed.
    for row in sql_results:
        my_pivot = make_entry(row, pivot_cols, pivot_transform_func,
                              row_size, **kw)
        if my_pivot is None:
            continue
        my_group = make_entry(row, grouping_cols, grouping_transform_func,
                              row_size, **kw)
        group_set.add(to_timestamp(my_group))

    groups = sorted(group_set)

    # Smallest gap between consecutive timestamps; used below to synthesize
    # points so the cumulative curve has no holes.
    if groups:
        min_span = groups[-1]
        for i in range(len(groups) - 1):
            min_span = min(groups[i + 1] - groups[i], min_span)

    # Second pass: accumulate the data values per (pivot, group).
    for row in sql_results:
        my_pivot = make_entry(row, pivot_cols, pivot_transform_func,
                              row_size, **kw)
        if my_pivot is None:
            continue
        my_group = make_entry(row, grouping_cols, grouping_transform_func,
                              row_size, **kw)
        my_group = to_timestamp(my_group)
        # Direct dict membership instead of scanning .keys() per row.
        pivot_data = parsed_results.setdefault(my_pivot, {})
        if my_group in pivot_data:
            pivot_data[my_group] = add_data(pivot_data[my_group], row,
                                            results_cols)
        else:
            pivot_data[my_group] = new_data(row, results_cols,
                                            len_results_cols)

    metadata = {'kind': 'pivot-group'}

    # Normalize each aggregated entry (check_tuple pads/converts as needed).
    filtered_results = {}
    for pivot, data in parsed_results.items():
        filtered_results[pivot] = dict(
            (group_key, check_tuple(info, len_results_cols))
            for group_key, info in data.items())

    if not groups:
        return filtered_results, metadata

    results_by_pivot = filtered_results
    filtered_results = {}
    csum = {}
    for pivot in results_by_pivot:
        csum[pivot] = 0
        filtered_results[pivot] = {}

    def add_cumulative_data(current_group):
        # Record the running value for every pivot at this timestamp; the
        # running value only changes when the pivot has data at this group.
        for pivot in results_by_pivot:
            if current_group in results_by_pivot[pivot]:
                csum[pivot] = float(results_by_pivot[pivot][current_group])
            filtered_results[pivot][current_group] = csum[pivot]

    # Walk the timeline, inserting synthetic points every min_span so the
    # cumulative series has no gaps between observed timestamps.
    current_group = groups.pop(0)
    while groups:
        next_group = groups[0]
        add_cumulative_data(current_group)
        while current_group + min_span < next_group:
            current_group += min_span
            add_cumulative_data(current_group)
        current_group = groups.pop(0)
    add_cumulative_data(current_group)

    # Final pass: apply the data transform to every emitted value.
    for pivot, series in filtered_results.items():
        for group_key in series:
            series[group_key] = data_transform_func(series[group_key], **kw)

    return filtered_results, metadata
# Ejemplo n.º 25 (score: 0)
def pivot_group_parser_plus(sql_results, pivots="0,1", grouping="2",
                            results="3", pivot_transform="echo",
                            grouping_transform="echo", data_transform="echo",
                            globals=globals(), suppress_zeros=True, **kw):
    """Pivot/group SQL rows and emit a gap-filled cumulative time series.

    Returns (results, metadata): results maps each pivot to a
    {timestamp: cumulative value} dict; metadata carries the result kind.
    Transform arguments are resolved as 'echo' (identity) or a name looked
    up in *globals*; pivot_transform may also be passed as a callable.
    """
    # Column index lists for the pivot key, the group key, and the values.
    pivot_cols = [int(c.strip()) for c in pivots.split(',')]
    grouping_cols = [int(c.strip()) for c in grouping.split(',')]
    results_cols = [int(c.strip()) for c in results.split(',')]
    num_result_cols = len(results_cols)

    if len(sql_results) > 0:
        row_size = len(sql_results[0])

    # Resolve the transform callables.
    if callable(pivot_transform):
        pivot_fn = pivot_transform
    elif pivot_transform == 'echo':
        pivot_fn = echo
    else:
        pivot_fn = globals[pivot_transform.strip()]
    grouping_fn = echo if grouping_transform == 'echo' \
        else globals[grouping_transform.strip()]
    data_fn = echo if data_transform == 'echo' \
        else globals[data_transform.strip()]

    parsed = {}
    seen_groups = set()
    seen_pivots = set()

    # Pass 1: discover every pivot and every group timestamp.
    for row in sql_results:
        pivot_key = make_entry(row, pivot_cols, pivot_fn, row_size, **kw)
        if pivot_key == None:
            continue
        group_key = to_timestamp(
            make_entry(row, grouping_cols, grouping_fn, row_size, **kw))
        seen_groups.add(group_key)
        seen_pivots.add(pivot_key)

    timeline = sorted(seen_groups)

    # Smallest spacing between consecutive timestamps (used for gap fill).
    if len(timeline) > 0:
        min_span = timeline[-1]
        for earlier, later in zip(timeline, timeline[1:]):
            min_span = min(later - earlier, min_span)

    # Pass 2: aggregate values per (pivot, group).
    for row in sql_results:
        pivot_key = make_entry(row, pivot_cols, pivot_fn, row_size, **kw)
        if pivot_key == None:
            continue
        group_key = to_timestamp(
            make_entry(row, grouping_cols, grouping_fn, row_size, **kw))
        bucket = parsed.setdefault(pivot_key, {})
        if group_key in bucket:
            bucket[group_key] = add_data(bucket[group_key], row, results_cols)
        else:
            bucket[group_key] = new_data(row, results_cols, num_result_cols)

    metadata = {'kind': 'pivot-group'}

    # Normalize every aggregated entry via check_tuple.
    normalized = {}
    for pivot_key, bucket in parsed.items():
        normalized[pivot_key] = dict(
            (g, check_tuple(v, num_result_cols)) for g, v in bucket.items())

    if len(timeline) == 0:
        return normalized, metadata

    # Build the cumulative, gap-filled series.
    cumulative = {}
    running = {}
    for pivot_key in normalized:
        running[pivot_key] = 0
        cumulative[pivot_key] = {}

    def record(ts):
        # Carry each pivot's running value forward at timestamp ts.
        for pivot_key in normalized:
            if ts in normalized[pivot_key]:
                running[pivot_key] = float(normalized[pivot_key][ts])
            cumulative[pivot_key][ts] = running[pivot_key]

    current = timeline.pop(0)
    while len(timeline) > 0:
        upcoming = timeline[0]
        record(current)
        while current + min_span < upcoming:
            current += min_span
            record(current)
        current = timeline.pop(0)
    record(current)

    # Apply the data transform to every emitted value.
    for pivot_key, series in cumulative.items():
        for ts in series:
            series[ts] = data_fn(series[ts], **kw)

    return cumulative, metadata