Example #1
    def query(request, basequery, sortsql='1 asc'):
        basesql, baseparams = basequery.query.get_compiler(
            basequery.db).as_sql(with_col_aliases=False)
        # Run the query
        cursor = connections[request.database].cursor()
        query = '''
        select x.row1, x.row2, x.col1, x.col2, x.col3,
          min(x.frozen_start), min(x.total_start),
          coalesce(sum(case o2.locked when %s then o2.quantity else 0 end),0),
          coalesce(sum(o2.quantity),0)
        from (
          select oper.name as row1,  oper.location_id as row2,
               d.bucket as col1, d.startdate as col2, d.enddate as col3,
               coalesce(sum(case o1.locked when %s then o1.quantity else 0 end),0) as frozen_start,
               coalesce(sum(o1.quantity),0) as total_start
          from (%s) oper
          -- Multiply with buckets
          cross join (
             select name as bucket, startdate, enddate
             from common_bucketdetail
             where bucket_id = '%s' and enddate > '%s' and startdate < '%s'
             ) d
          -- Planned and frozen quantity, based on start date
          left join out_operationplan o1
          on oper.name = o1.operation
          and d.startdate <= o1.startdate
          and d.enddate > o1.startdate
          -- Grouping
          group by oper.name, oper.location_id, d.bucket, d.startdate, d.enddate
        ) x
        -- Planned and frozen quantity, based on end date
        left join out_operationplan o2
        on x.row1 = o2.operation
        and x.col2 <= o2.enddate
        and x.col3 > o2.enddate
        -- Grouping and ordering
        group by x.row1, x.row2, x.col1, x.col2, x.col3
        order by %s, x.col2
      ''' % (sql_true(), sql_true(), basesql, request.report_bucket,
             request.report_startdate, request.report_enddate, sortsql)
        cursor.execute(query, baseparams)

        # Convert the SQL results to Python
        for row in cursor.fetchall():
            yield {
                'operation': row[0],
                'location': row[1],
                'bucket': row[2],
                'startdate': python_date(row[3]),
                'enddate': python_date(row[4]),
                'locked_start': row[5],
                'total_start': row[6],
                'locked_end': row[7],
                'total_end': row[8],
            }
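
All of these examples lean on a few helper functions defined elsewhere in frePPLe (sql_true, sql_max, sql_min, python_date). For orientation, here is a minimal sketch of what such helpers could look like; the bodies below are illustrative assumptions for a PostgreSQL backend, not the project's actual implementations, which dispatch on the configured database.

    # Hedged sketch of the helpers the examples assume; the real versions
    # live elsewhere in frePPLe and vary per database backend.
    from datetime import datetime

    def sql_true():
        # Literal for boolean true; 'true' works on PostgreSQL ('1' elsewhere).
        return 'true'

    def sql_max(expr1, expr2):
        # Row-wise maximum of two SQL expressions.
        return 'greatest(%s,%s)' % (expr1, expr2)

    def sql_min(expr1, expr2):
        # Row-wise minimum of two SQL expressions.
        return 'least(%s,%s)' % (expr1, expr2)

    def python_date(value):
        # Normalize a database datetime value to a plain Python date.
        return value.date() if isinstance(value, datetime) else value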
Example #2
  def query(request, basequery, sortsql='1 asc'):
    basesql, baseparams = basequery.query.get_compiler(basequery.db).as_sql(with_col_aliases=False)
    # Run the query
    cursor = connections[request.database].cursor()
    query = '''
        select x.row1, x.row2, x.col1, x.col2, x.col3,
          min(x.frozen_start), min(x.total_start),
          coalesce(sum(case o2.locked when %s then o2.quantity else 0 end),0),
          coalesce(sum(o2.quantity),0)
        from (
          select oper.name as row1,  oper.location_id as row2,
               d.bucket as col1, d.startdate as col2, d.enddate as col3,
               coalesce(sum(case o1.locked when %s then o1.quantity else 0 end),0) as frozen_start,
               coalesce(sum(o1.quantity),0) as total_start
          from (%s) oper
          -- Multiply with buckets
          cross join (
             select name as bucket, startdate, enddate
             from common_bucketdetail
             where bucket_id = '%s' and enddate > '%s' and startdate < '%s'
             ) d
          -- Planned and frozen quantity, based on start date
          left join out_operationplan o1
          on oper.name = o1.operation
          and d.startdate <= o1.startdate
          and d.enddate > o1.startdate
          -- Grouping
          group by oper.name, oper.location_id, d.bucket, d.startdate, d.enddate
        ) x
        -- Planned and frozen quantity, based on end date
        left join out_operationplan o2
        on x.row1 = o2.operation
        and x.col2 <= o2.enddate
        and x.col3 > o2.enddate
        -- Grouping and ordering
        group by x.row1, x.row2, x.col1, x.col2, x.col3
        order by %s, x.col2
      ''' % (sql_true(), sql_true(), basesql, request.report_bucket,
             request.report_startdate, request.report_enddate, sortsql)
    cursor.execute(query, baseparams)

    # Convert the SQL results to Python
    for row in cursor.fetchall():
      yield {
        'operation': row[0],
        'location': row[1],
        'bucket': row[2],
        'startdate': python_date(row[3]),
        'enddate': python_date(row[4]),
        'locked_start': row[5],
        'total_start': row[6],
        'locked_end': row[7],
        'total_end': row[8],
        }
Example #3
File: buffer.py Project: taghubnet/frePPLe
  def query(request, basequery, sortsql='1 asc'):
    cursor = connections[request.database].cursor()
    basesql, baseparams = basequery.query.get_compiler(basequery.db).as_sql(with_col_aliases=False)

    # Ensure the buffer hierarchy is up to date
    Buffer.rebuildHierarchy(database=basequery.db)

    # Execute a query to get the on-hand value at the start of our horizon
    startohdict = {}
    query = '''
      select buffers.name, sum(oh.onhand)
      from (%s) buffers
      inner join buffer
      on buffer.lft between buffers.lft and buffers.rght
      inner join (
      select out_flowplan.thebuffer as thebuffer, out_flowplan.onhand as onhand
      from out_flowplan,
        (select thebuffer, max(id) as id
         from out_flowplan
         where flowdate < '%s'
         group by thebuffer
        ) maxid
      where maxid.thebuffer = out_flowplan.thebuffer
      and maxid.id = out_flowplan.id
      ) oh
      on oh.thebuffer = buffer.name
      group by buffers.name
      ''' % (basesql, request.report_startdate)
    cursor.execute(query, baseparams)
    for row in cursor.fetchall():
      startohdict[row[0]] = float(row[1])

    # Execute the actual query
    query = '''
      select buf.name as row1, buf.item_id as row2, buf.location_id as row3,
             d.bucket as col1, d.startdate as col2, d.enddate as col3,
             coalesce(sum(%s),0.0) as produced,
             coalesce(-sum(%s),0.0) as consumed
        from (%s) buf
        -- Multiply with buckets
        cross join (
             select name as bucket, startdate, enddate
             from common_bucketdetail
             where bucket_id = '%s' and enddate > '%s' and startdate < '%s'
             ) d
        -- Include child buffers
        inner join buffer
        on buffer.lft between buf.lft and buf.rght
        -- Consumed and produced quantities
        left join out_flowplan
        on buffer.name = out_flowplan.thebuffer
        and d.startdate <= out_flowplan.flowdate
        and d.enddate > out_flowplan.flowdate
        and out_flowplan.flowdate >= '%s'
        and out_flowplan.flowdate < '%s'
        -- Grouping and sorting
        group by buf.name, buf.item_id, buf.location_id, buf.onhand, d.bucket, d.startdate, d.enddate
        order by %s, d.startdate
      ''' % (
        sql_max('out_flowplan.quantity', '0.0'), sql_min('out_flowplan.quantity', '0.0'),
        basesql, request.report_bucket, request.report_startdate, request.report_enddate,
        request.report_startdate, request.report_enddate, sortsql
      )
    cursor.execute(query, baseparams)

    # Build the python result
    prevbuf = None
    for row in cursor.fetchall():
      if row[0] != prevbuf:
        prevbuf = row[0]
        startoh = startohdict.get(prevbuf, 0)
        endoh = startoh + float(row[6] - row[7])
      else:
        startoh = endoh
        endoh += float(row[6] - row[7])
      yield {
        'buffer': row[0],
        'item': row[1],
        'location': row[2],
        'bucket': row[3],
        'startdate': python_date(row[4]),
        'enddate': python_date(row[5]),
        'startoh': round(startoh, 1),
        'produced': round(row[6], 1),
        'consumed': round(row[7], 1),
        'endoh': round(endoh, 1),
        }
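
The loop at the end rolls an opening on-hand balance forward bucket by bucket: within one buffer, each bucket's start equals the previous bucket's end, and when the rows move on to a new buffer the balance is reseeded from startohdict. A self-contained illustration of that logic, using made-up buffer names and quantities rather than frePPLe data:

    # Standalone illustration of the running-balance loop, with fake rows
    # of (buffer, produced, consumed).
    rows = [
        ('BUF-A', 10.0, 4.0),  # first bucket of BUF-A
        ('BUF-A', 0.0, 3.0),   # second bucket of BUF-A
        ('BUF-B', 5.0, 0.0),   # first bucket of BUF-B resets the balance
    ]
    startohdict = {'BUF-A': 2.0}  # opening on-hand; missing buffers start at 0

    prevbuf = None
    for buf, produced, consumed in rows:
        if buf != prevbuf:
            prevbuf = buf
            startoh = startohdict.get(buf, 0)
            endoh = startoh + produced - consumed
        else:
            startoh = endoh
            endoh += produced - consumed
        print(buf, startoh, endoh)
    # Prints: BUF-A 2.0 8.0 / BUF-A 8.0 5.0 / BUF-B 0 5.0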
Example #4
File: buffer.py Project: gaohaian/frePPLe
    def query(request, basequery, sortsql='1 asc'):
        cursor = connections[request.database].cursor()
        basesql, baseparams = basequery.query.get_compiler(
            basequery.db).as_sql(with_col_aliases=False)

        # Ensure the buffer hierarchy is up to date
        Buffer.rebuildHierarchy(database=basequery.db)

        # Execute a query to get the on-hand value at the start of our horizon
        startohdict = {}
        query = '''
      select buffers.name, sum(oh.onhand)
      from (%s) buffers
      inner join buffer
      on buffer.lft between buffers.lft and buffers.rght
      inner join (
      select out_flowplan.thebuffer as thebuffer, out_flowplan.onhand as onhand
      from out_flowplan,
        (select thebuffer, max(id) as id
         from out_flowplan
         where flowdate < '%s'
         group by thebuffer
        ) maxid
      where maxid.thebuffer = out_flowplan.thebuffer
      and maxid.id = out_flowplan.id
      ) oh
      on oh.thebuffer = buffer.name
      group by buffers.name
      ''' % (basesql, request.report_startdate)
        cursor.execute(query, baseparams)
        for row in cursor.fetchall():
            startohdict[row[0]] = float(row[1])

        # Execute the actual query
        query = '''
      select buf.name as row1, buf.item_id as row2, buf.location_id as row3,
             d.bucket as col1, d.startdate as col2, d.enddate as col3,
             coalesce(sum(%s),0.0) as produced,
             coalesce(-sum(%s),0.0) as consumed
        from (%s) buf
        -- Multiply with buckets
        cross join (
             select name as bucket, startdate, enddate
             from common_bucketdetail
             where bucket_id = '%s' and enddate > '%s' and startdate < '%s'
             ) d
        -- Include child buffers
        inner join buffer
        on buffer.lft between buf.lft and buf.rght
        -- Consumed and produced quantities
        left join out_flowplan
        on buffer.name = out_flowplan.thebuffer
        and d.startdate <= out_flowplan.flowdate
        and d.enddate > out_flowplan.flowdate
        and out_flowplan.flowdate >= '%s'
        and out_flowplan.flowdate < '%s'
        -- Grouping and sorting
        group by buf.name, buf.item_id, buf.location_id, buf.onhand, d.bucket, d.startdate, d.enddate
        order by %s, d.startdate
      ''' % (sql_max('out_flowplan.quantity',
                     '0.0'), sql_min('out_flowplan.quantity',
                                     '0.0'), basesql, request.report_bucket,
             request.report_startdate, request.report_enddate,
             request.report_startdate, request.report_enddate, sortsql)
        cursor.execute(query, baseparams)

        # Build the python result
        prevbuf = None
        for row in cursor.fetchall():
            if row[0] != prevbuf:
                prevbuf = row[0]
                startoh = startohdict.get(prevbuf, 0)
                endoh = startoh + float(row[6] - row[7])
            else:
                startoh = endoh
                endoh += float(row[6] - row[7])
            yield {
                'buffer': row[0],
                'item': row[1],
                'location': row[2],
                'bucket': row[3],
                'startdate': python_date(row[4]),
                'enddate': python_date(row[5]),
                'startoh': round(startoh, 1),
                'produced': round(row[6], 1),
                'consumed': round(row[7], 1),
                'endoh': round(endoh, 1),
            }
Example #5
    def query(request, basequery, sortsql='1 asc'):
        basesql, baseparams = basequery.query.get_compiler(
            basequery.db).as_sql(with_col_aliases=True)
        cursor = connections[request.database].cursor()

        # Ensure the item hierarchy is up to date
        Item.rebuildHierarchy(database=basequery.db)

        # Execute a query to get the backlog at the start of the horizon
        startbacklogdict = {}
        query = '''
      select items.name, sum(quantity)
      from (%s) items
      inner join item
      on item.lft between items.lft and items.rght
      inner join out_demand
      on item.name = out_demand.item
        and (plandate is null or plandate >= '%s')
        and due < '%s'
      group by items.name
      ''' % (basesql, request.report_startdate, request.report_startdate)
        cursor.execute(query, baseparams)
        for row in cursor.fetchall():
            if row[0]:
                startbacklogdict[row[0]] = float(row[1])

        # Execute the query
        query = '''
        select y.name as row1,
               y.bucket as col1, y.startdate as col2, y.enddate as col3,
               min(y.orders),
               min(y.planned)
        from (
          select x.name as name, x.lft as lft, x.rght as rght,
               x.bucket as bucket, x.startdate as startdate, x.enddate as enddate,
               coalesce(sum(demand.quantity),0) as orders,
               min(x.planned) as planned
          from (
          select items.name as name, items.lft as lft, items.rght as rght,
                 d.bucket as bucket, d.startdate as startdate, d.enddate as enddate,
                 coalesce(sum(out_demand.quantity),0) as planned
          from (%s) items
          -- Multiply with buckets
          cross join (
             select name as bucket, startdate, enddate
             from common_bucketdetail
             where bucket_id = '%s' and enddate > '%s' and startdate < '%s'
             ) d
          -- Include hierarchical children
          inner join item
          on item.lft between items.lft and items.rght
          -- Planned quantity
          left join out_demand
          on item.name = out_demand.item
          and d.startdate <= out_demand.plandate
          and d.enddate > out_demand.plandate
          and out_demand.plandate >= '%s'
          and out_demand.plandate < '%s'
          -- Grouping
          group by items.name, items.lft, items.rght, d.bucket, d.startdate, d.enddate
        ) x
        -- Requested quantity
        inner join item
        on item.lft between x.lft and x.rght
        left join demand
        on item.name = demand.item_id
        and x.startdate <= demand.due
        and x.enddate > demand.due
        and demand.due >= '%s'
        and demand.due < '%s'
        -- Grouping
        group by x.name, x.lft, x.rght, x.bucket, x.startdate, x.enddate
        ) y
        -- Ordering and grouping
        group by y.name, y.lft, y.rght, y.bucket, y.startdate, y.enddate
        order by %s, y.startdate
       ''' % (basesql, request.report_bucket, request.report_startdate,
              request.report_enddate, request.report_startdate,
              request.report_enddate, request.report_startdate,
              request.report_enddate, sortsql)
        cursor.execute(query, baseparams)

        # Build the python result
        previtem = None
        for row in cursor.fetchall():
            if row[0] != previtem:
                backlog = startbacklogdict.get(row[0], 0)
                previtem = row[0]
            backlog += float(row[4]) - float(row[5])
            yield {
                'item': row[0],
                'bucket': row[1],
                'startdate': python_date(row[2]),
                'enddate': python_date(row[3]),
                'demand': round(row[4], 1),
                'supply': round(row[5], 1),
                'backlog': round(backlog, 1)
            }
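
Note that only the placeholders inside basesql are bound through cursor.execute(query, baseparams); the bucket name and horizon dates are %-formatted straight into the SQL text. In frePPLe these values are server-generated rather than user input, but for reference, a parameterized variant of the small backlog query could look like the sketch below. It assumes the driver accepts datetime objects for timestamp comparisons (as psycopg2 does); the %%s survives the string formatting as a %s bind placeholder.

    # Hedged sketch: bind the horizon date instead of formatting it in.
    query = '''
      select items.name, sum(quantity)
      from (%s) items
      inner join item on item.lft between items.lft and items.rght
      inner join out_demand
      on item.name = out_demand.item
        and (plandate is null or plandate >= %%s)
        and due < %%s
      group by items.name
      ''' % basesql
    cursor.execute(query, list(baseparams) + [request.report_startdate,
                                              request.report_startdate])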
Example #6
File: resource.py Project: dhl/frePPLe
  def query(request, basequery, sortsql='1 asc'):
    basesql, baseparams = basequery.query.get_compiler(basequery.db).as_sql(with_col_aliases=True)

    # Get the time units
    units = OverviewReport.getUnits(request)

    # Ensure the resource hierarchy is up to date
    Resource.rebuildHierarchy(database=basequery.db)

    # Execute the query
    cursor = connections[request.database].cursor()
    query = '''
      select res.name as row1, res.location_id as row2,
             coalesce(max(plan_summary.avg_util),0) as avgutil,
             d.bucket as col1, d.startdate as col2,
             coalesce(sum(out_resourceplan.available),0) * (case when res.type = 'buckets' then 1 else %f end) as available,
             coalesce(sum(out_resourceplan.unavailable),0) * (case when res.type = 'buckets' then 1 else %f end) as unavailable,
             coalesce(sum(out_resourceplan.load),0) * (case when res.type = 'buckets' then 1 else %f end) as loading,
             coalesce(sum(out_resourceplan.setup),0) * (case when res.type = 'buckets' then 1 else %f end) as setup
      from (%s) res
      -- Multiply with buckets
      cross join (
                   select name as bucket, startdate, enddate
                   from common_bucketdetail
                   where bucket_id = '%s' and enddate > '%s' and startdate < '%s'
                   ) d
      -- Include child resources
      inner join %s res2
      on res2.lft between res.lft and res.rght
      -- Utilization info
      left join out_resourceplan
      on res2.name = out_resourceplan.theresource
      and d.startdate <= out_resourceplan.startdate
      and d.enddate > out_resourceplan.startdate
      and out_resourceplan.startdate >= '%s'
      and out_resourceplan.startdate < '%s'
      -- Average utilization info
      left join (
                select
                  theresource,
                  ( coalesce(sum(out_resourceplan.load),0) + coalesce(sum(out_resourceplan.setup),0) )
                   * 100.0 / coalesce(%s,1) as avg_util
                from out_resourceplan
                where out_resourceplan.startdate >= '%s'
                and out_resourceplan.startdate < '%s'
                group by theresource
                ) plan_summary
      on res2.name = plan_summary.theresource
      -- Grouping and sorting
      group by res.name, res.location_id, res.type, d.bucket, d.startdate
      order by %s, d.startdate
      ''' % (
        units[0], units[0], units[0], units[0],
        basesql, request.report_bucket, request.report_startdate,
        request.report_enddate,
        connections[basequery.db].ops.quote_name('resource'),
        request.report_startdate, request.report_enddate,
        sql_max('sum(out_resourceplan.available)', '0.0001'),
        request.report_startdate, request.report_enddate, sortsql
      )
    cursor.execute(query, baseparams)

    # Build the python result
    for row in cursor.fetchall():
      if row[5] != 0:
        util = row[7] * 100 / row[5]
      else:
        util = 0
      yield {
        'resource': row[0],
        'location': row[1],
        'avgutil': round(row[2], 2),
        'bucket': row[3],
        'startdate': python_date(row[4]),
        'available': round(row[5], 1),
        'unavailable': round(row[6], 1),
        'load': round(row[7], 1),
        'setup': round(row[8], 1),
        'utilization': round(util, 2)
        }
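
The per-bucket utilization computed in the final loop is simply load as a percentage of available capacity, with a guard for buckets that have no availability; the SQL-side average guards against the same division by zero via sql_max('sum(out_resourceplan.available)', '0.0001'). A worked example with hypothetical numbers:

    # Worked example of the utilization formula used above.
    available, load = 40.0, 30.0  # hypothetical hours in one bucket
    util = load * 100 / available if available else 0
    assert util == 75.0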
Example #7
File: demand.py Project: Rona111/frePPLe
  def query(request, basequery, sortsql='1 asc'):
    basesql, baseparams = basequery.query.get_compiler(basequery.db).as_sql(with_col_aliases=False)
    cursor = connections[request.database].cursor()

    # Ensure the item hierarchy is up to date
    Item.rebuildHierarchy(database=basequery.db)

    # Execute a query to get the backlog at the start of the horizon
    startbacklogdict = {}
    query = '''
      select items.name, sum(quantity)
      from (%s) items
      inner join item
      on item.lft between items.lft and items.rght
      inner join out_demand
      on item.name = out_demand.item
        and (plandate is null or plandate >= '%s')
        and due < '%s'
      group by items.name
      ''' % (basesql, request.report_startdate, request.report_startdate)
    cursor.execute(query, baseparams)
    for row in cursor.fetchall():
      if row[0]:
        startbacklogdict[row[0]] = float(row[1])

    # Execute the query
    query = '''
        select y.name as row1,
               y.bucket as col1, y.startdate as col2, y.enddate as col3,
               min(y.orders),
               min(y.planned)
        from (
          select x.name as name, x.lft as lft, x.rght as rght,
               x.bucket as bucket, x.startdate as startdate, x.enddate as enddate,
               coalesce(sum(demand.quantity),0) as orders,
               min(x.planned) as planned
          from (
          select items.name as name, items.lft as lft, items.rght as rght,
                 d.bucket as bucket, d.startdate as startdate, d.enddate as enddate,
                 coalesce(sum(out_demand.quantity),0) as planned
          from (%s) items
          -- Multiply with buckets
          cross join (
             select name as bucket, startdate, enddate
             from common_bucketdetail
             where bucket_id = '%s' and enddate > '%s' and startdate < '%s'
             ) d
          -- Include hierarchical children
          inner join item
          on item.lft between items.lft and items.rght
          -- Planned quantity
          left join out_demand
          on item.name = out_demand.item
          and d.startdate <= out_demand.plandate
          and d.enddate > out_demand.plandate
          and out_demand.plandate >= '%s'
          and out_demand.plandate < '%s'
          -- Grouping
          group by items.name, items.lft, items.rght, d.bucket, d.startdate, d.enddate
        ) x
        -- Requested quantity
        inner join item
        on item.lft between x.lft and x.rght
        left join demand
        on item.name = demand.item_id
        and x.startdate <= demand.due
        and x.enddate > demand.due
        and demand.due >= '%s'
        and demand.due < '%s'
        and demand.status = 'open'
        -- Grouping
        group by x.name, x.lft, x.rght, x.bucket, x.startdate, x.enddate
        ) y
        -- Ordering and grouping
        group by y.name, y.lft, y.rght, y.bucket, y.startdate, y.enddate
        order by %s, y.startdate
       ''' % (basesql, request.report_bucket, request.report_startdate,
              request.report_enddate, request.report_startdate,
              request.report_enddate, request.report_startdate,
              request.report_enddate, sortsql)
    cursor.execute(query, baseparams)

    # Build the python result
    previtem = None
    for row in cursor.fetchall():
      if row[0] != previtem:
        backlog = startbacklogdict.get(row[0], 0)
        previtem = row[0]
      backlog += float(row[4]) - float(row[5])
      yield {
        'item': row[0],
        'bucket': row[1],
        'startdate': python_date(row[2]),
        'enddate': python_date(row[3]),
        'demand': round(row[4], 1),
        'supply': round(row[5], 1),
        'backlog': round(backlog, 1)
        }
Example #8
    def query(request, basequery, sortsql='1 asc'):
        basesql, baseparams = basequery.query.get_compiler(
            basequery.db).as_sql(with_col_aliases=True)

        # Get the time units
        units = OverviewReport.getUnits(request)

        # Ensure the resource hierarchy is up to date
        Resource.rebuildHierarchy(database=basequery.db)

        # Execute the query
        cursor = connections[request.database].cursor()
        query = '''
      select res.name as row1, res.location_id as row2,
             coalesce(max(plan_summary.avg_util),0) as avgutil,
             d.bucket as col1, d.startdate as col2,
             coalesce(sum(out_resourceplan.available),0) * (case when res.type = 'buckets' then 1 else %f end) as available,
             coalesce(sum(out_resourceplan.unavailable),0) * (case when res.type = 'buckets' then 1 else %f end) as unavailable,
             coalesce(sum(out_resourceplan.load),0) * (case when res.type = 'buckets' then 1 else %f end) as loading,
             coalesce(sum(out_resourceplan.setup),0) * (case when res.type = 'buckets' then 1 else %f end) as setup
      from (%s) res
      -- Multiply with buckets
      cross join (
                   select name as bucket, startdate, enddate
                   from common_bucketdetail
                   where bucket_id = '%s' and enddate > '%s' and startdate < '%s'
                   ) d
      -- Include child resources
      inner join %s res2
      on res2.lft between res.lft and res.rght
      -- Utilization info
      left join out_resourceplan
      on res2.name = out_resourceplan.theresource
      and d.startdate <= out_resourceplan.startdate
      and d.enddate > out_resourceplan.startdate
      and out_resourceplan.startdate >= '%s'
      and out_resourceplan.startdate < '%s'
      -- Average utilization info
      left join (
                select
                  theresource,
                  ( coalesce(sum(out_resourceplan.load),0) + coalesce(sum(out_resourceplan.setup),0) )
                   * 100.0 / coalesce(%s,1) as avg_util
                from out_resourceplan
                where out_resourceplan.startdate >= '%s'
                and out_resourceplan.startdate < '%s'
                group by theresource
                ) plan_summary
      on res2.name = plan_summary.theresource
      -- Grouping and sorting
      group by res.name, res.location_id, res.type, d.bucket, d.startdate
      order by %s, d.startdate
      ''' % (units[0], units[0], units[0], units[0], basesql,
             request.report_bucket, request.report_startdate,
             request.report_enddate,
             connections[basequery.db].ops.quote_name('resource'),
             request.report_startdate, request.report_enddate,
             sql_max('sum(out_resourceplan.available)', '0.0001'),
             request.report_startdate, request.report_enddate, sortsql)
        cursor.execute(query, baseparams)

        # Build the python result
        for row in cursor.fetchall():
            if row[5] != 0:
                util = row[7] * 100 / row[5]
            else:
                util = 0
            yield {
                'resource': row[0],
                'location': row[1],
                'avgutil': round(row[2], 2),
                'bucket': row[3],
                'startdate': python_date(row[4]),
                'available': round(row[5], 1),
                'unavailable': round(row[6], 1),
                'load': round(row[7], 1),
                'setup': round(row[8], 1),
                'utilization': round(util, 2)
            }