Example #1
 def initialize(reportclass, request):
     if reportclass._attributes_added != 2:
         reportclass._attributes_added = 2
         reportclass.attr_sql = ""
         # Adding custom operation attributes
         for f in getAttributeFields(
             Operation, related_name_prefix="operation", initially_hidden=True
         ):
             f.editable = False
             reportclass.rows += (f,)
             reportclass.attr_sql += "operation.%s, " % f.name.split("__")[-1]
         # Adding custom item attributes
         for f in getAttributeFields(
             Item, related_name_prefix="item", initially_hidden=True
         ):
             f.editable = False
             reportclass.rows += (f,)
             reportclass.attr_sql += "item.%s, " % f.name.split("__")[-1]
         # Adding custom location attributes
         for f in getAttributeFields(
             Location, related_name_prefix="location", initially_hidden=True
         ):
             f.editable = False
             reportclass.rows += (f,)
             reportclass.attr_sql += "location.%s, " % f.name.split("__")[-1]
Example #2
 def initialize(reportclass, request):
     if reportclass._attributes_added != 2:
         reportclass._attributes_added = 2
         reportclass.attr_sql = ""
         # Adding custom item attributes
         for f in getAttributeFields(Item,
                                     related_name_prefix="item",
                                     initially_hidden=True):
             reportclass.rows += (f, )
             reportclass.attr_sql += "item.%s, " % f.name.split("__")[-1]
         # Adding custom location attributes
         for f in getAttributeFields(Location,
                                     related_name_prefix="location",
                                     initially_hidden=True):
             reportclass.rows += (f, )
             reportclass.attr_sql += "location.%s, " % f.name.split(
                 "__")[-1]
         # Adding custom customer attributes
         for f in getAttributeFields(Customer,
                                     related_name_prefix="customer",
                                     initially_hidden=True):
             reportclass.rows += (f, )
             reportclass.attr_sql += "customer.%s, " % f.name.split(
                 "__")[-1]
         # Adding custom demand attributes
         for f in getAttributeFields(Demand, initially_hidden=True):
             reportclass.rows += (f, )
             reportclass.attr_sql += "demand.%s, " % f.name.split("__")[-1]
Example #3
 def initialize(reportclass, request):
     if reportclass._attributes_added != 2:
         reportclass._attributes_added = 2
         reportclass.attr_sql = ""
         # Adding custom item attributes
         for f in getAttributeFields(
             Item, related_name_prefix="item", initially_hidden=True
         ):
             f.editable = False
             reportclass.rows += (f,)
             t = f.name.split("__")[-1]
             reportclass.attr_sql += "item.%s as item__%s, " % (t, t)
         # Adding custom location attributes
         for f in getAttributeFields(
             Location, related_name_prefix="location", initially_hidden=True
         ):
             f.editable = False
             reportclass.rows += (f,)
             t = f.name.split("__")[-1]
             reportclass.attr_sql += "location.%s as location__%s, " % (t, t)
         # Adding custom supplier attributes
         for f in getAttributeFields(
             Supplier, related_name_prefix="supplier", initially_hidden=True
         ):
             f.editable = False
             reportclass.rows += (f,)
             t = f.name.split("__")[-1]
             reportclass.attr_sql += "supplier.%s as supplier__%s, " % (t, t)
Example #4
File: capacity.py Project: zhengr/frepple
 def initialize(reportclass, request):
     if reportclass._attributes_added != 2:
         reportclass._attributes_added = 2
         # Adding custom operation attributes
         for f in getAttributeFields(
                 Operation, related_name_prefix="operationplan__operation"):
             f.editable = False
             reportclass.rows += (f, )
         # Adding custom resource attributes
         for f in getAttributeFields(Resource,
                                     related_name_prefix="resource"):
             f.editable = False
             reportclass.rows += (f, )
         # Adding custom item attributes
         for f in getAttributeFields(
                 Item,
                 related_name_prefix="operationplan__operation__item"):
             f.editable = False
             reportclass.rows += (f, )
         # Adding custom operationplan attributes
         for f in getAttributeFields(
                 OperationPlan,
                 related_name_prefix="operationplan",
                 initially_hidden=True,
         ):
             f.editable = False
             reportclass.rows += (f, )
Example #5
File: resource.py Project: suribes/frepple
 def initialize(reportclass, request):
   if reportclass._attributes_added != 2:
     reportclass._attributes_added = 2
     # Adding custom operation attributes
     for f in getAttributeFields(Operation, related_name_prefix="operation"):
       f.editable = False
       reportclass.rows += (f,)
     # Adding custom resource attributes
     for f in getAttributeFields(Resource, related_name_prefix="resource"):
       f.editable = False
       reportclass.rows += (f,)
Example #6
 def initialize(reportclass, request):
   if reportclass._attributes_added != 2:
     reportclass._attributes_added = 2
     reportclass.attr_sql = ''
     # Adding custom item attributes
     for f in getAttributeFields(Item, initially_hidden=True):
       reportclass.rows += (f,)
       reportclass.attr_sql += 'item.%s, ' % f.name.split('__')[-1]
     # Adding custom location attributes
     for f in getAttributeFields(Location, related_name_prefix="location", initially_hidden=True):
       reportclass.rows += (f,)
       reportclass.attr_sql += 'location.%s, ' % f.name.split('__')[-1]
Example #7
 def initialize(reportclass, request):
     if reportclass._attributes_added != 2:
         reportclass._attributes_added = 2
         for f in getAttributeFields(PurchaseOrder):
             reportclass.rows += (f,)
         for f in getAttributeFields(Item, related_name_prefix="item"):
             f.editable = False
             reportclass.rows += (f,)
         for f in getAttributeFields(Location, related_name_prefix="location"):
             f.editable = False
             reportclass.rows += (f,)
         for f in getAttributeFields(Supplier, related_name_prefix="supplier"):
             f.editable = False
             reportclass.rows += (f,)
Example #8
File: resource.py Project: frePPLe/frePPLe
 def initialize(reportclass, request):
   if reportclass._attributes_added != 2:
     reportclass._attributes_added = 2
     reportclass.attr_sql = ''
     # Adding custom resource attributes
     for f in getAttributeFields(Resource, initially_hidden=True):
       f.editable = False
       reportclass.rows += (f,)
       reportclass.attr_sql += 'res.%s, ' % f.name.split('__')[-1]
     # Adding custom location attributes
     for f in getAttributeFields(Location, related_name_prefix="location", initially_hidden=True):
       f.editable = False
       reportclass.rows += (f,)
       reportclass.attr_sql += 'location.%s, ' % f.name.split('__')[-1]
Example #9
File: resource.py Project: suribes/frepple
 def initialize(reportclass, request):
   if reportclass._attributes_added != 2:
     reportclass._attributes_added = 2
     reportclass.attr_sql = ''
     # Adding custom resource attributes
     for f in getAttributeFields(Resource, initially_hidden=True):
       f.editable = False
       reportclass.rows += (f,)
       reportclass.attr_sql += 'res.%s, ' % f.name.split('__')[-1]
     # Adding custom location attributes
     for f in getAttributeFields(Location, related_name_prefix="location", initially_hidden=True):
       f.editable = False
       reportclass.rows += (f,)
       reportclass.attr_sql += 'location.%s, ' % f.name.split('__')[-1]
Example #10
File: inventory.py Project: xfyecn/frepple
 def initialize(reportclass, request):
     if reportclass._attributes_added != 2:
         reportclass._attributes_added = 2
         for f in getAttributeFields(DistributionOrder):
             reportclass.rows += (f,)
         for f in getAttributeFields(Item, related_name_prefix="item"):
             f.editable = False
             reportclass.rows += (f,)
         for f in getAttributeFields(Location, related_name_prefix="origin"):
             f.editable = False
             reportclass.rows += (f,)
         for f in getAttributeFields(Location, related_name_prefix="destination"):
             f.editable = False
             reportclass.rows += (f,)
Example #11
 def initialize(reportclass, request):
     if reportclass._attributes_added != 2:
         reportclass._attributes_added = 2
         reportclass.attr_sql = ''
         # Adding custom item attributes
         for f in getAttributeFields(Item, initially_hidden=True):
             reportclass.attr_sql += 'item.%s, ' % f.name.split('__')[-1]
Example #12
File: demand.py Project: zhengr/frepple
 def initialize(reportclass, request):
     if reportclass._attributes_added != 2:
         reportclass._attributes_added = 2
         reportclass.attr_sql = ""
         # Adding custom item attributes
         for f in getAttributeFields(Item, initially_hidden=False):
             f.editable = False
             reportclass.rows += (f,)
             reportclass.attr_sql += "parent.%s, " % f.name.split("__")[-1]
Example #13
File: demand.py Project: frePPLe/frePPLe
 def initialize(reportclass, request):
   if reportclass._attributes_added != 2:
     reportclass._attributes_added = 2
     reportclass.attr_sql = ''
     # Adding custom item attributes
     for f in getAttributeFields(Item, initially_hidden=True):
       f.editable = False
       reportclass.rows += (f,)
       reportclass.attr_sql += 'parent.%s, ' % f.name.split('__')[-1]
Example #14
 def initialize(reportclass, request):
     if reportclass._attributes_added != 2:
         reportclass._attributes_added = 2
         for f in getAttributeFields(DeliveryOrder):
             reportclass.rows += (f, )
         for f in getAttributeFields(Item, related_name_prefix="item"):
             f.editable = False
             f.initially_hidden = True
             reportclass.rows += (f, )
         for f in getAttributeFields(Location,
                                     related_name_prefix="location"):
             f.editable = False
             f.initially_hidden = True
             reportclass.rows += (f, )
         for f in getAttributeFields(
                 Customer, related_name_prefix="demand__customer"):
             f.editable = False
             f.initially_hidden = True
             reportclass.rows += (f, )
Example #15
    def query(reportclass, request, basequery, sortsql='1 asc'):
        basesql, baseparams = basequery.query.get_compiler(
            basequery.db).as_sql(with_col_aliases=False)

        # Get the time units
        units = OverviewReport.getUnits(request)

        # Assure the item hierarchy is up to date
        Resource.rebuildHierarchy(database=basequery.db)

        # Execute the query
        cursor = connections[request.database].cursor()
        query = '''
      select res.name, res.description, res.category, res.subcategory,
        res.type, res.maximum, res.maximum_calendar_id, res.cost, res.maxearly,
        res.setupmatrix_id, res.setup, location.name, location.description,
        location.category, location.subcategory, location.available_id,
        coalesce(max(plan_summary.avg_util),0) as avgutil, res.available_id available_calendar, 
        %s
        d.bucket as col1, d.startdate as col2,
        coalesce(sum(out_resourceplan.available),0) * (case when res.type = 'buckets' then 1 else %f end) as available,
        coalesce(sum(out_resourceplan.unavailable),0) * (case when res.type = 'buckets' then 1 else %f end) as unavailable,
        coalesce(sum(out_resourceplan.load),0) * (case when res.type = 'buckets' then 1 else %f end) as loading,
        coalesce(sum(out_resourceplan.setup),0) * (case when res.type = 'buckets' then 1 else %f end) as setup
      from (%s) res
      left outer join location
        on res.location_id = location.name
      -- Multiply with buckets
      cross join (
                   select name as bucket, startdate, enddate
                   from common_bucketdetail
                   where bucket_id = '%s' and enddate > '%s' and startdate < '%s'
                   ) d
      -- Utilization info
      left join out_resourceplan
      on res.name = out_resourceplan.resource
      and d.startdate <= out_resourceplan.startdate
      and d.enddate > out_resourceplan.startdate
      and out_resourceplan.startdate >= '%s'
      and out_resourceplan.startdate < '%s'
      -- Average utilization info
      left join (
          select
            resource,
            ( coalesce(sum(out_resourceplan.load),0) + coalesce(sum(out_resourceplan.setup),0) )
             * 100.0 / coalesce(greatest(sum(out_resourceplan.available), 0.0001),1) as avg_util
          from out_resourceplan
          where out_resourceplan.startdate >= '%s'
          and out_resourceplan.startdate < '%s'
          group by resource
          ) plan_summary
      on res.name = plan_summary.resource
      -- Grouping and sorting
      group by res.name, res.description, res.category, res.subcategory,
        res.type, res.maximum, res.maximum_calendar_id, res.available_id, res.cost, res.maxearly,
        res.setupmatrix_id, res.setup, location.name, location.description,
        location.category, location.subcategory, location.available_id,
        %s d.bucket, d.startdate
      order by %s, d.startdate
      ''' % (reportclass.attr_sql, units[0], units[0], units[0], units[0],
             basesql, request.report_bucket, request.report_startdate,
             request.report_enddate, request.report_startdate,
             request.report_enddate, request.report_startdate,
             request.report_enddate, reportclass.attr_sql, sortsql)
        cursor.execute(query, baseparams)

        # Build the python result
        for row in cursor.fetchall():
            numfields = len(row)
            if row[numfields - 4] != 0:
                util = row[numfields - 2] * 100 / row[numfields - 4]
            else:
                util = 0
            result = {
                'resource': row[0],
                'description': row[1],
                'category': row[2],
                'subcategory': row[3],
                'type': row[4],
                'maximum': row[5],
                'maximum_calendar': row[6],
                'cost': row[7],
                'maxearly': row[8],
                'setupmatrix': row[9],
                'setup': row[10],  # note: this key is overwritten by the bucket-level 'setup' entry below
                'location__name': row[11],
                'location__description': row[12],
                'location__category': row[13],
                'location__subcategory': row[14],
                'location__available': row[15],
                'avgutil': round(row[16], 2),
                'available_calendar': row[17],
                'bucket': row[numfields - 6],
                'startdate': row[numfields - 5].date(),
                'available': round(row[numfields - 4], 1),
                'unavailable': round(row[numfields - 3], 1),
                'load': round(row[numfields - 2], 1),
                'setup': round(row[numfields - 1], 1),
                'utilization': round(util, 2)
            }
            idx = 18  # attribute values start after the 18 fixed leading columns (row[17] is available_calendar)
            for f in getAttributeFields(Resource):
                result[f.field_name] = row[idx]
                idx += 1
            for f in getAttributeFields(Location):
                result[f.field_name] = row[idx]
                idx += 1
            yield result
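Because attr_sql injects a variable number of columns into the middle of the select list, the query() methods above read the fixed trailing columns relative to the end of the row (row[numfields - k]) and the attribute values forward from a fixed offset. A small sketch of that indexing convention, with a hypothetical column layout:

 # Fixed leading columns, a variable block of attribute columns,
 # then fixed trailing columns -- mirroring the select list above.
 fixed_head = ("R1", "assembly line")   # e.g. res.name, res.description
 attrs = (3.5, "gold")                  # variable: depends on registered attributes
 fixed_tail = ("W01", 120.0)            # e.g. d.bucket, available
 row = fixed_head + attrs + fixed_tail

 numfields = len(row)
 bucket = row[numfields - 2]            # trailing columns counted from the end
 available = row[numfields - 1]
 attr_values = row[len(fixed_head):numfields - 2]  # whatever sits in between
 print(bucket, available, attr_values)  # W01 120.0 (3.5, 'gold')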
Example #16
File: demand.py Project: frePPLe/frePPLe
  def query(reportclass, request, basequery, sortsql='1 asc'):
    basesql, baseparams = basequery.query.get_compiler(basequery.db).as_sql(with_col_aliases=False)

    # Assure the item hierarchy is up to date
    Item.rebuildHierarchy(database=basequery.db)

    # Execute a query to get the backlog at the start of the horizon
    startbacklogdict = {}
    query = '''
      select name, sum(qty) from
        (
        select parent.name, sum(demand.quantity) qty from (%s) item
        inner join item parent on item.lft between parent.lft and parent.rght
        inner join demand on demand.item_id = item.name and demand.status in ('open','quote') and due < %%s
        group by parent.name
        union all
        select parent.name, sum(operationplanmaterial.quantity) qty
        from operationplanmaterial
        inner join operationplan on operationplan.reference = operationplanmaterial.operationplan_id
          and operationplan.type = 'DLVR'
          and operationplan.enddate < %%s
        inner join (%s) item on operationplanmaterial.item_id = item.name
        inner join item parent on item.lft between parent.lft and parent.rght
        group by parent.name
        ) t
        group by name
      ''' % (basesql, basesql)
    with connections[request.database].chunked_cursor() as cursor_chunked:
      cursor_chunked.execute(query, baseparams + (request.report_startdate, request.report_startdate) + baseparams)
      for row in cursor_chunked:
        if row[0]:
          startbacklogdict[row[0]] = float(row[1])

    # Execute the query
    query = '''
      select
      parent.name, parent.description, parent.category, parent.subcategory,
      parent.owner_id, parent.cost, parent.source, parent.lastmodified,
      %s
      d.bucket,
      d.startdate,
      d.enddate,
      sum(coalesce((select sum(quantity) from demand
       where demand.item_id = child.name and status in ('open','quote') and due >= greatest(%%s,d.startdate) and due < d.enddate),0)) orders,
      sum(coalesce((select sum(-operationplanmaterial.quantity) from operationplanmaterial
      inner join operationplan on operationplan.reference = operationplanmaterial.operationplan_id and operationplan.type = 'DLVR'
      where operationplanmaterial.item_id = child.name
      and operationplanmaterial.flowdate >= greatest(%%s,d.startdate)
      and operationplanmaterial.flowdate < d.enddate),0)) planned
      from (%s) parent
      inner join item child on child.lft between parent.lft and parent.rght
      cross join (
                   select name as bucket, startdate, enddate
                   from common_bucketdetail
                   where bucket_id = %%s and enddate > %%s and startdate < %%s
                   ) d
      group by
        parent.name, parent.description, parent.category, parent.subcategory,
        parent.owner_id, parent.cost, parent.source, parent.lastmodified,
        %s
        d.bucket, d.startdate, d.enddate
      order by %s, d.startdate
    ''' % (reportclass.attr_sql, basesql, reportclass.attr_sql, sortsql)

    # Build the python result
    with connections[request.database].chunked_cursor() as cursor_chunked:
      cursor_chunked.execute(
        query,
        (request.report_startdate, request.report_startdate) +  # orders planned
        baseparams +
        (request.report_bucket, request.report_startdate, request.report_enddate)  # buckets
        )
      previtem = None
      for row in cursor_chunked:
        numfields = len(row)
        if row[0] != previtem:
          backlog = startbacklogdict.get(row[0], 0)
          previtem = row[0]
        backlog += float(row[numfields - 2]) - float(row[numfields - 1])
        res = {
          'item': row[0],
          'description': row[1],
          'category': row[2],
          'subcategory': row[3],
          'owner': row[4],
          'cost': row[5],
          'source': row[6],
          'lastmodified': row[7],
          'bucket': row[numfields - 5],
          'startdate': row[numfields - 4].date(),
          'enddate': row[numfields - 3].date(),
          'demand': row[numfields - 2],
          'supply': row[numfields - 1],
          'backlog': backlog,
          }
        idx = 8
        for f in getAttributeFields(Item):
          res[f.field_name] = row[idx]
          idx += 1
        yield res
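The backlog value above is a running balance: it starts from the pre-horizon backlog of each item and adds demand minus supply bucket by bucket, resetting whenever the item changes. A minimal sketch of the accumulator with made-up data:

 rows = [  # (item, demand, supply) per bucket, ordered by item then bucket
     ("A", 10.0, 4.0), ("A", 5.0, 5.0), ("B", 2.0, 0.0),
 ]
 startbacklog = {"A": 3.0}        # backlog accumulated before the horizon

 previtem, backlog = None, 0.0
 for item, demand, supply in rows:
     if item != previtem:         # new item: restart from its opening backlog
         backlog = startbacklog.get(item, 0.0)
         previtem = item
     backlog += demand - supply   # carry the balance into the next bucket
     print(item, backlog)         # A 9.0 / A 9.0 / B 2.0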
Example #17
  def query(reportclass, request, basequery, sortsql='1 asc'):
    cursor = connections[request.database].cursor()
    basesql, baseparams = basequery.query.get_compiler(basequery.db).as_sql(with_col_aliases=False)

    # Execute the actual query
    query = '''
      with opplanmat as (
          %s
        ),
        ss_buckets as (
          -- Buffer min cal entries
          select
            buffer.item_id, buffer.location_id, 2 priority_1, calendarbucket.priority priority_2,
            calendarbucket.value ssvalue, coalesce(calendarbucket.startdate, '1971-01-01'::timestamp) startdate,
            coalesce(calendarbucket.enddate, '2030-12-31'::timestamp) enddate
          from buffer
          inner join opplanmat
            on buffer.item_id = opplanmat.item_id and buffer.location_id = opplanmat.location_id
          inner join calendarbucket
            on calendarbucket.calendar_id = buffer.minimum_calendar_id
          union all
          -- Buffer min cal default value + Buffer min
          select
            opplanmat.item_id, opplanmat.location_id, 2 priority_1, 99999999,
            coalesce(calendar.defaultvalue, buffer.minimum), '1971-01-01'::timestamp, '2030-12-31'::timestamp
          from buffer
          inner join opplanmat
            on buffer.item_id = opplanmat.item_id and buffer.location_id = opplanmat.location_id
          left outer join calendar
            on calendar.name = buffer.minimum_calendar_id
        )
      select
        invplan.item_id || ' @ ' || invplan.location_id,
        invplan.item_id, invplan.location_id,
        item.description, item.category, item.subcategory, item.owner_id,
        item.source, item.lastmodified, location.description, location.category,
        location.subcategory, location.available_id, location.owner_id,
        location.source, location.lastmodified, %s
        invplan.startoh,
        invplan.startoh + invplan.produced - invplan.consumed as endoh,
        coalesce((
        select
        extract (epoch from case when initial_onhand <= 0 then interval '0 day' else min(flowdate) -
                 greatest(invplan.startdate, %%s) end)/(3600*24) days_of_cover
        from
        (
        select
        item_id,
        location_id,
        flowdate,
        onhand - quantity onhand_before,
        onhand onhand_after,
        first_value(onhand - quantity) over(partition by item_id, location_id order by item_id, location_id, flowdate,id) initial_onhand,
        sum(case when quantity < 0 then -quantity else 0 end) over(partition by item_id, location_id order by item_id, location_id, flowdate,id) total_consumed
        from operationplanmaterial
        where flowdate >= greatest(invplan.startdate, %%s) and item_id = invplan.item_id and location_id = invplan.location_id
        ) t
        where total_consumed >= initial_onhand
        group by item_id, location_id, initial_onhand
        ), case when invplan.startoh <= 0 then 0 else 999 end)
        startohdoc,
        invplan.bucket,
        invplan.startdate,
        invplan.enddate,
        (
          select ssvalue
          from ss_buckets
          where ss_buckets.item_id = invplan.item_id
            and ss_buckets.startdate <= greatest(invplan.startdate, %%s)
            and ss_buckets.enddate > greatest(invplan.startdate, %%s)
            and ssvalue is not null
          order by ss_buckets.priority_1, ss_buckets.priority_2 limit 1
        ) safetystock,
        invplan.consumed,
        invplan.consumedMO,
        invplan.consumedDO,
        invplan.consumedSO,
        invplan.produced,
        invplan.producedMO,
        invplan.producedDO,
        invplan.producedPO
      from (
        select
          opplanmat.item_id, opplanmat.location_id,
          d.bucket as bucket, d.startdate as startdate, d.enddate as enddate,
          coalesce(sum(greatest(operationplanmaterial.quantity, 0)),0) as produced,
          coalesce(sum(greatest(case when operationplan.type = 'MO' then operationplanmaterial.quantity else 0 end, 0)),0) as producedMO,
          coalesce(sum(greatest(case when operationplan.type = 'DO' then operationplanmaterial.quantity else 0 end, 0)),0) as producedDO,
          coalesce(sum(greatest(case when operationplan.type = 'PO' then operationplanmaterial.quantity else 0 end, 0)),0) as producedPO,
          coalesce(-sum(least(operationplanmaterial.quantity, 0)),0) as consumed,
          coalesce(-sum(least(case when operationplan.type = 'MO' then operationplanmaterial.quantity else 0 end, 0)),0) as consumedMO,
          coalesce(-sum(least(case when operationplan.type = 'DO' then operationplanmaterial.quantity else 0 end, 0)),0) as consumedDO,
          coalesce(-sum(least(case when operationplan.type = 'DLVR' then operationplanmaterial.quantity else 0 end, 0)),0) as consumedSO,
          coalesce(initial_on_hand.onhand,0) startoh
        from opplanmat
        -- Multiply with buckets
        cross join (
             select name as bucket, startdate, enddate
             from common_bucketdetail
             where bucket_id = %%s and enddate > %%s and startdate < %%s
             ) d
        -- Initial on hand
        left join operationplanmaterial initial_on_hand
            on initial_on_hand.item_id = opplanmat.item_id
            and initial_on_hand.location_id = opplanmat.location_id
            and initial_on_hand.flowdate < greatest(d.startdate,%%s)
            and not exists (select 1 from operationplanmaterial opm where opm.item_id = initial_on_hand.item_id
            and opm.location_id = initial_on_hand.location_id and opm.flowdate < greatest(d.startdate,%%s)
            and opm.id > initial_on_hand.id)
         -- Consumed and produced quantities
        left join operationplanmaterial
        on opplanmat.item_id = operationplanmaterial.item_id
        and opplanmat.location_id = operationplanmaterial.location_id
        and d.startdate <= operationplanmaterial.flowdate
        and d.enddate > operationplanmaterial.flowdate
        and operationplanmaterial.flowdate >= greatest(d.startdate,%%s)
        and operationplanmaterial.flowdate < %%s
        left outer join operationplan on operationplan.id = operationplanmaterial.operationplan_id
        -- Grouping and sorting
        group by opplanmat.item_id,
        opplanmat.location_id,
        d.bucket,
        d.startdate,
        d.enddate,
        coalesce(initial_on_hand.onhand,0)
        ) invplan
      left outer join item on
        invplan.item_id = item.name
      left outer join location on
        invplan.location_id = location.name
      order by %s, invplan.startdate
      ''' % (
        basesql, reportclass.attr_sql, sortsql
      )
    cursor.execute(
      query, baseparams + (
        request.report_startdate, request.report_startdate,
        request.report_startdate, request.report_startdate,
        request.report_bucket, request.report_startdate, request.report_enddate,
        request.report_startdate, request.report_startdate, request.report_startdate, request.report_enddate
        )
      )

    # Build the python result
    for row in cursor.fetchall():
      numfields = len(row)
      
      res = {
        'buffer': row[0],
        'item': row[1],
        'location': row[2],
        'item__description': row[3],
        'item__category': row[4],
        'item__subcategory': row[5],
        'item__owner': row[6],
        'item__source': row[7],
        'item__lastmodified': row[8],
        'location__description': row[9],
        'location__category': row[10],
        'location__subcategory': row[11],
        'location__available_id': row[12],
        'location__owner_id': row[13],
        'location__source': row[14],
        'location__lastmodified': row[15],
        'startoh': round(row[numfields - 15], 1),
        'endoh': round(row[numfields - 14], 1),
        'startohdoc': int(row[numfields - 13]),
        'bucket': row[numfields - 12],
        'startdate': row[numfields - 11].date(),
        'enddate': row[numfields - 10].date(),
        'safetystock': round(row[numfields - 9] or 0, 1),
        'consumed': round(row[numfields - 8], 1),
        'consumedMO': round(row[numfields - 7], 1),
        'consumedDO': round(row[numfields - 6], 1),
        'consumedSO': round(row[numfields - 5], 1),
        'produced': round(row[numfields - 4], 1),
        'producedMO': round(row[numfields - 3], 1),
        'producedDO': round(row[numfields - 2], 1),
        'producedPO': round(row[numfields - 1], 1),
        }
      # Add attribute fields
      idx = 16
      for f in getAttributeFields(Item, related_name_prefix="item"):
        res[f.field_name] = row[idx]
        idx += 1
      for f in getAttributeFields(Location, related_name_prefix="location"):
        res[f.field_name] = row[idx]
        idx += 1
      yield res
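The days-of-cover calculations above turn an interval into days by extracting epoch seconds and dividing by 86400 (3600*24). The same arithmetic in Python, as a hedged illustration with hypothetical dates:

 from datetime import datetime

 bucket_start = datetime(2024, 1, 1)
 first_shortage = datetime(2024, 1, 15, 12)  # hypothetical flowdate of the shortage
 seconds = (first_shortage - bucket_start).total_seconds()
 days_of_cover = seconds / 86400             # same as extract(epoch ...)/(3600*24)
 print(days_of_cover)                        # 14.5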
Example #18
  def query(reportclass, request, basequery, sortsql='1 asc'):
    cursor = connections[request.database].cursor()
    basesql, baseparams = basequery.query.get_compiler(basequery.db).as_sql(with_col_aliases=False)

    # Execute the actual query
    query = '''
       select item.name||' @ '||location.name,
       item.name item_id,
       location.name location_id,
       item.description,
       item.category,
       item.subcategory,
       item.owner_id,
       item.source,
       item.lastmodified,
       location.description,
       location.category,
       location.subcategory,
       location.available_id,
       location.owner_id,
       location.source,
       location.lastmodified,
       %s
       coalesce((select onhand from operationplanmaterial where item_id = item.name and
       location_id = location.name and flowdate < greatest(d.startdate,%%s)
       order by flowdate desc, id desc limit 1),0) startoh,
       coalesce((select onhand from operationplanmaterial where item_id = item.name and location_id = location.name
       and flowdate < greatest(d.startdate,%%s)
       order by flowdate desc, id desc limit 1),0) - coalesce(-sum(least(operationplanmaterial.quantity, 0)),0)
       + coalesce(sum(greatest(operationplanmaterial.quantity, 0)),0) endoh,
       case when coalesce((select onhand from operationplanmaterial where item_id = item.name and
       location_id = location.name and flowdate < greatest(d.startdate,%%s)
       order by flowdate desc, id desc limit 1),0) = 0 then 0 
       when (select to_char(flowdate,'YYYY-MM-DD')||' '||round(periodofcover/86400) from operationplanmaterial where item_id = item.name and
       location_id = location.name and flowdate < greatest(d.startdate,%%s)
       order by flowdate desc, id desc limit 1) = '1971-01-01 999' then 999 else
       extract( epoch from (select flowdate from operationplanmaterial where item_id = item.name and
       location_id = location.name and flowdate < greatest(d.startdate,%%s)
       order by flowdate desc, id desc limit 1)
       + coalesce((select periodofcover from operationplanmaterial where item_id = item.name and
       location_id = location.name and flowdate < greatest(d.startdate,%%s)
       order by flowdate desc, id desc limit 1),0) * interval '1 second'
       - greatest(d.startdate,%%s))/86400 end startohdoc,
       d.bucket,
       d.startdate,
       d.enddate,
       coalesce((select minimum from operationplanmaterial where item_id = item.name and
       location_id = location.name and flowdate < greatest(d.startdate,%%s)
       order by flowdate desc, id desc limit 1),0) safetystock,
       coalesce(-sum(least(operationplanmaterial.quantity, 0)),0) as consumed,
       coalesce(-sum(least(case when operationplan.type = 'MO' then operationplanmaterial.quantity else 0 end, 0)),0) as consumedMO,
       coalesce(-sum(least(case when operationplan.type = 'DO' then operationplanmaterial.quantity else 0 end, 0)),0) as consumedDO,
       coalesce(-sum(least(case when operationplan.type = 'DLVR' then operationplanmaterial.quantity else 0 end, 0)),0) as consumedSO,
       coalesce(sum(greatest(operationplanmaterial.quantity, 0)),0) as produced,
       coalesce(sum(greatest(case when operationplan.type = 'MO' then operationplanmaterial.quantity else 0 end, 0)),0) as producedMO,
       coalesce(sum(greatest(case when operationplan.type = 'DO' then operationplanmaterial.quantity else 0 end, 0)),0) as producedDO,
       coalesce(sum(greatest(case when operationplan.type = 'PO' then operationplanmaterial.quantity else 0 end, 0)),0) as producedPO
       from
       (%s) opplanmat
       inner join item on item.name = opplanmat.item_id
       inner join location on location.name = opplanmat.location_id
       -- Multiply with buckets
      cross join (
         select name as bucket, startdate, enddate
         from common_bucketdetail
         where bucket_id = %%s and enddate > %%s and startdate < %%s
         ) d
      -- Consumed and produced quantities
      left join operationplanmaterial
        on opplanmat.item_id = operationplanmaterial.item_id
        and opplanmat.location_id = operationplanmaterial.location_id
        and d.startdate <= operationplanmaterial.flowdate
        and d.enddate > operationplanmaterial.flowdate
        and operationplanmaterial.flowdate >= greatest(d.startdate,%%s)
        and operationplanmaterial.flowdate < d.enddate
      left outer join operationplan on operationplan.id = operationplanmaterial.operationplan_id
      group by
       item.name,
       location.name,
       item.description, 
       item.category, 
       item.subcategory, 
       item.owner_id,
       item.source, 
       item.lastmodified, 
       location.description, 
       location.category,
       location.subcategory, 
       location.available_id, 
       location.owner_id,
       location.source, 
       location.lastmodified,
       d.bucket,
       d.startdate,
       d.enddate
       order by %s, d.startdate
    ''' % (
        reportclass.attr_sql, basesql, sortsql
      )

    cursor.execute(
      query, (
        request.report_startdate,  # startoh
        request.report_startdate,  # endoh
        request.report_startdate,  # startohdoc
        request.report_startdate,  # startohdoc
        request.report_startdate,  # startohdoc
        request.report_startdate,  # startohdoc
        request.report_startdate,  # startohdoc
        request.report_startdate,)  # safetystock
        + baseparams +   # opplanmat
        (request.report_bucket, request.report_startdate, request.report_enddate,  # bucket d
        request.report_startdate,  # operationplanmaterial
        )
      )

    # Build the python result
    for row in cursor.fetchall():
      numfields = len(row)
      res = {
        'buffer': row[0],
        'item': row[1],
        'location': row[2],
        'item__description': row[3],
        'item__category': row[4],
        'item__subcategory': row[5],
        'item__owner': row[6],
        'item__source': row[7],
        'item__lastmodified': row[8],
        'location__description': row[9],
        'location__category': row[10],
        'location__subcategory': row[11],
        'location__available_id': row[12],
        'location__owner_id': row[13],
        'location__source': row[14],
        'location__lastmodified': row[15],
        'startoh': round(row[numfields - 15], 1),
        'endoh': round(row[numfields - 14], 1),
        'startohdoc': int(row[numfields - 13]),
        'bucket': row[numfields - 12],
        'startdate': row[numfields - 11].date(),
        'enddate': row[numfields - 10].date(),
        'safetystock': round(row[numfields - 9] or 0, 1),
        'consumed': round(row[numfields - 8], 1),
        'consumedMO': round(row[numfields - 7], 1),
        'consumedDO': round(row[numfields - 6], 1),
        'consumedSO': round(row[numfields - 5], 1),
        'produced': round(row[numfields - 4], 1),
        'producedMO': round(row[numfields - 3], 1),
        'producedDO': round(row[numfields - 2], 1),
        'producedPO': round(row[numfields - 1], 1),
        }
      # Add attribute fields
      idx = 16
      for f in getAttributeFields(Item, related_name_prefix="item"):
        res[f.field_name] = row[idx]
        idx += 1
      for f in getAttributeFields(Location, related_name_prefix="location"):
        res[f.field_name] = row[idx]
        idx += 1
      yield res
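All of these queries rely on a two-stage placeholder scheme: %s is consumed by Python string formatting to splice in SQL fragments (basesql, attr_sql, sortsql), while %%s survives that formatting as %s and is bound as a parameter by the database driver. A tiny sketch of the mechanics with a hypothetical template:

 template = "select * from (%s) t where flowdate < %%s order by %s"
 basesql = "select item_id, location_id from opplanmat"  # made-up fragment
 sortsql = "1 asc"

 query = template % (basesql, sortsql)
 print(query)
 # -> select * from (select item_id, location_id from opplanmat) t
 #    where flowdate < %s order by 1 asc
 # cursor.execute(query, (report_startdate,)) would then bind the date safely.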
Example #19
  def query(reportclass, request, basequery, sortsql='1 asc'):
    cursor = connections[request.database].cursor()
    basesql, baseparams = basequery.query.get_compiler(basequery.db).as_sql(with_col_aliases=False)

    # Execute a query to get the onhand value at the start of our horizon
    startohdict = {}
    query = '''
      select opplanmat.item_id, opplanmat.location_id, sum(oh.onhand)
      from (%s) opplanmat
      inner join (
        select operationplanmaterial.item_id,
          operationplanmaterial.location_id,
          operationplanmaterial.onhand as onhand
        from operationplanmaterial,
          (select item_id, location_id, max(id) as id
           from operationplanmaterial
           where flowdate < '%s'
           group by item_id, location_id
          ) maxid
        where maxid.item_id = operationplanmaterial.item_id
          and maxid.location_id = operationplanmaterial.location_id
        and maxid.id = operationplanmaterial.id
      ) oh
      on oh.item_id = opplanmat.item_id
      and oh.location_id = opplanmat.location_id
      group by opplanmat.item_id, opplanmat.location_id
      ''' % (basesql, request.report_startdate)
    cursor.execute(query, baseparams)
    for row in cursor.fetchall():
      startohdict[ "%s @ %s" % (row[0], row[1]) ] = float(row[2])

    # Execute the actual query
    query = '''
      select
        invplan.item_id || ' @ ' || invplan.location_id,
        invplan.item_id, invplan.location_id, 
        item.description, item.category, item.subcategory, item.owner_id,
        item.source, item.lastmodified, location.description, location.category,
        location.subcategory, location.available_id, location.owner_id, 
        location.source, location.lastmodified, %s
        invplan.bucket, invplan.startdate, invplan.enddate,
        invplan.produced, invplan.consumed
      from (
        select
          opplanmat.item_id, opplanmat.location_id,
          d.bucket as bucket, d.startdate as startdate, d.enddate as enddate,
          coalesce(sum(greatest(operationplanmaterial.quantity, 0)),0) as produced,
          coalesce(-sum(least(operationplanmaterial.quantity, 0)),0) as consumed
        from (%s) opplanmat
        -- Multiply with buckets
        cross join (
             select name as bucket, startdate, enddate
             from common_bucketdetail
             where bucket_id = %%s and enddate > %%s and startdate < %%s
             ) d
        -- Consumed and produced quantities
        left join operationplanmaterial
        on opplanmat.item_id = operationplanmaterial.item_id
        and opplanmat.location_id = operationplanmaterial.location_id
        and d.startdate <= operationplanmaterial.flowdate
        and d.enddate > operationplanmaterial.flowdate
        and operationplanmaterial.flowdate >= %%s
        and operationplanmaterial.flowdate < %%s
        -- Grouping and sorting
        group by opplanmat.item_id, opplanmat.location_id, d.bucket, d.startdate, d.enddate
        ) invplan
      left outer join item on
        invplan.item_id = item.name
      left outer join location on
        invplan.location_id = location.name
      order by %s, invplan.startdate
      ''' % (
        reportclass.attr_sql, basesql, sortsql
      )
    cursor.execute(
      query, baseparams + (
        request.report_bucket, request.report_startdate,
        request.report_enddate, request.report_startdate, request.report_enddate
        )
      )

    # Build the python result
    prevbuf = None
    for row in cursor.fetchall():
      numfields = len(row)
      if row[0] != prevbuf:
        prevbuf = row[0]
        startoh = startohdict.get(prevbuf, 0)
        endoh = startoh + float(row[numfields - 2] - row[numfields - 1])
      else:
        startoh = endoh
        endoh += float(row[numfields - 2] - row[numfields - 1])
      res = {
        'buffer': row[0],
        'item': row[1],
        'location': row[2],
        'item__description': row[3],
        'item__category': row[4],
        'item__subcategory': row[5],
        'item__owner': row[6],
        'item__source': row[7],
        'item__lastmodified': row[8],
        'location__description': row[9],
        'location__category': row[10],
        'location__subcategory': row[11],
        'location__available_id': row[12],
        'location__owner_id': row[13],
        'location__source': row[14],
        'location__lastmodified': row[15],
        'bucket': row[numfields - 5],
        'startdate': row[numfields - 4].date(),
        'enddate': row[numfields - 3].date(),
        'startoh': round(startoh, 1),
        'produced': round(row[numfields - 2], 1),
        'consumed': round(row[numfields - 1], 1),
        'endoh': round(endoh, 1),
        }
      # Add attribute fields
      idx = 16
      for f in getAttributeFields(Item, related_name_prefix="item"):
        res[f.field_name] = row[idx]
        idx += 1
      for f in getAttributeFields(Location, related_name_prefix="location"):
        res[f.field_name] = row[idx]
        idx += 1
      yield res
Example #20
  def query(reportclass, request, basequery, sortsql='1 asc'):
    cursor = connections[request.database].cursor()
    basesql, baseparams = basequery.query.get_compiler(basequery.db).as_sql(with_col_aliases=False)

    # Execute the actual query
    query = '''
       select item.name||' @ '||location.name,
       item.name item_id,
       location.name location_id,
       item.description,
       item.category,
       item.subcategory,
       item.cost,
       item.owner_id,
       item.source,
       item.lastmodified,
       location.description,
       location.category,
       location.subcategory,
       location.available_id,
       location.owner_id,
       location.source,
       location.lastmodified,
       %s
       (select jsonb_build_object('onhand', onhand, 'flowdate', to_char(flowdate,'YYYY-MM-DD HH24:MI:SS'), 'periodofcover', periodofcover) 
       from operationplanmaterial where item_id = item.name and
       location_id = location.name and flowdate < greatest(d.startdate,%%s)
       order by flowdate desc, id desc limit 1) startoh,
       d.bucket,
       d.startdate,
       d.enddate,
       (select safetystock from
        (
        select 1 as priority, coalesce((select value from calendarbucket 
        where calendar_id = 'SS for '||item.name||' @ '||location.name
        and greatest(d.startdate,%%s) >= startdate and greatest(d.startdate,%%s) < enddate
        order by priority limit 1), (select defaultvalue from calendar where name = 'SS for '||item.name||' @ '||location.name)) as safetystock
        union all
        select 2 as priority, coalesce((select value from calendarbucket 
        where calendar_id = (select minimum_calendar_id from buffer where name = item.name||' @ '||location.name)
        and greatest(d.startdate,%%s) >= startdate and greatest(d.startdate,%%s) < enddate
        order by priority limit 1), (select defaultvalue from calendar where name = (select minimum_calendar_id from buffer where name = item.name||' @ '||location.name))) as safetystock
        union all
        select 3 as priority, minimum as safetystock from buffer where name = item.name||' @ '||location.name
        ) t
        where t.safetystock is not null
        order by priority
        limit 1) safetystock,
       (select jsonb_build_object(
      'work_in_progress_mo', sum(case when (startdate < d.enddate and enddate >= d.enddate) and opm.quantity > 0 and operationplan.type = 'MO' then opm.quantity else 0 end),
      'on_order_po', sum(case when (startdate < d.enddate and enddate >= d.enddate) and opm.quantity > 0 and operationplan.type = 'PO' then opm.quantity else 0 end),
      'in_transit_do', sum(case when (startdate < d.enddate and enddate >= d.enddate) and opm.quantity > 0 and operationplan.type = 'DO' then opm.quantity else 0 end),
      'total_in_progress', sum(case when (startdate < d.enddate and enddate >= d.enddate) and opm.quantity > 0 then opm.quantity else 0 end),
      'consumed', sum(case when (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity < 0 then -opm.quantity else 0 end),
      'consumedMO', sum(case when operationplan.type = 'MO' and (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity < 0 then -opm.quantity else 0 end),
      'consumedDO', sum(case when operationplan.type = 'DO' and (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity < 0 then -opm.quantity else 0 end),
      'consumedSO', sum(case when operationplan.type = 'DLVR' and (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity < 0 then -opm.quantity else 0 end),
      'produced', sum(case when (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity > 0 then opm.quantity else 0 end),
      'producedMO', sum(case when operationplan.type = 'MO' and (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity > 0 then opm.quantity else 0 end),
      'producedDO', sum(case when operationplan.type = 'DO' and (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity > 0 then opm.quantity else 0 end),
      'producedPO', sum(case when operationplan.type = 'PO' and (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity > 0 then opm.quantity else 0 end)
      )
      from operationplanmaterial opm
      inner join operationplan on operationplan.id = opm.operationplan_id 
      and ((startdate < d.enddate and enddate >= d.enddate) 
            or (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate))
      where opm.item_id = item.name and opm.location_id = location.name) ongoing
       from
       (%s) opplanmat
       inner join item on item.name = opplanmat.item_id
       inner join location on location.name = opplanmat.location_id
       -- Multiply with buckets
      cross join (
         select name as bucket, startdate, enddate
         from common_bucketdetail
         where bucket_id = %%s and enddate > %%s and startdate < %%s
         ) d
      group by
       item.name,
       location.name,
       item.description, 
       item.category, 
       item.subcategory,
       item.cost,
       item.owner_id,
       item.source, 
       item.lastmodified, 
       location.description, 
       location.category,
       location.subcategory, 
       location.available_id, 
       location.owner_id,
       location.source, 
       location.lastmodified,
       d.bucket,
       d.startdate,
       d.enddate
       order by %s, d.startdate
    ''' % (
        reportclass.attr_sql, basesql, sortsql
      )

    # Build the python result
    with connections[request.database].chunked_cursor() as cursor_chunked:
      cursor_chunked.execute(
        query,
        (
          request.report_startdate,  # startoh
          request.report_startdate, request.report_startdate, request.report_startdate, request.report_startdate,  # safetystock
        ) +
        (request.report_startdate, ) * 9 +  # ongoing
        baseparams +  # opplanmat
        (request.report_bucket, request.report_startdate, request.report_enddate),  # bucket d
        )
      for row in cursor_chunked:
        numfields = len(row)
        res = {
          'buffer': row[0],
          'item': row[1],
          'location': row[2],
          'item__description': row[3],
          'item__category': row[4],
          'item__subcategory': row[5],
          'item__cost': row[6],
          'item__owner': row[7],
          'item__source': row[8],
          'item__lastmodified': row[9],
          'location__description': row[10],
          'location__category': row[11],
          'location__subcategory': row[12],
          'location__available_id': row[13],
          'location__owner_id': row[14],
          'location__source': row[15],
          'location__lastmodified': row[16],
          'startoh': round(row[numfields - 6]['onhand'] if row[numfields - 6] else 0, 1),
          'startohdoc': 0 if (row[numfields - 6]['onhand']  if row[numfields - 6] else 0) <= 0\
                          else (999 if row[numfields - 6]['periodofcover'] == 86313600\
                                    else (datetime.strptime(row[numfields - 6]['flowdate'],'%Y-%m-%d %H:%M:%S') +\
                                          timedelta(seconds=row[numfields - 6]['periodofcover']) - row[numfields - 4]).days if row[numfields - 6]['periodofcover'] else 999),
          'bucket': row[numfields - 5],
          'startdate': row[numfields - 4].date(),
          'enddate': row[numfields - 3].date(),
          'safetystock': round(row[numfields - 2] or 0, 1),
          'consumed': round(row[numfields - 1]['consumed'] or 0, 1),
          'consumedMO': round(row[numfields - 1]['consumedMO'] or 0, 1),
          'consumedDO': round(row[numfields - 1]['consumedDO'] or 0, 1),
          'consumedSO': round(row[numfields - 1]['consumedSO'] or 0, 1),
          'produced': round(row[numfields - 1]['produced'] or 0, 1),
          'producedMO': round(row[numfields - 1]['producedMO'] or 0, 1),
          'producedDO': round(row[numfields - 1]['producedDO'] or 0, 1),
          'producedPO': round(row[numfields - 1]['producedPO'] or 0, 1),
          'total_in_progress': round(row[numfields - 1]['total_in_progress'] or 0, 1),
          'work_in_progress_mo': round(row[numfields - 1]['work_in_progress_mo'] or 0, 1),
          'on_order_po': round(row[numfields - 1]['on_order_po'] or 0, 1),
          'in_transit_do': round(row[numfields - 1]['in_transit_do'] or 0, 1),
          'endoh': round(float(round(row[numfields - 6]['onhand'] if row[numfields - 6] else 0, 1)) + float(round(row[numfields - 1]['produced'] or 0, 1)) - float(round(row[numfields - 1]['consumed'] or 0, 1)), 1),
          }
        # Add attribute fields
        idx = 17  # attributes start after the 17 fixed leading columns (item.cost included here)
        for f in getAttributeFields(Item, related_name_prefix="item"):
          res[f.field_name] = row[idx]
          idx += 1
        for f in getAttributeFields(Location, related_name_prefix="location"):
          res[f.field_name] = row[idx]
          idx += 1
        yield res
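With psycopg2, the jsonb_build_object(...) columns above arrive as Python dicts (or None when nothing matched), which is why every key is read defensively with "or 0" before rounding. A short sketch of that defensive access with made-up values:

 ongoing = {"consumed": 7.31, "produced": None}  # jsonb column decoded to a dict
 startoh = None                                  # the whole object can be SQL NULL too

 consumed = round(ongoing["consumed"] or 0, 1)   # 7.3
 produced = round(ongoing["produced"] or 0, 1)   # 0 when the value is NULL
 onhand = round(startoh["onhand"] if startoh else 0, 1)
 print(consumed, produced, onhand)               # 7.3 0 0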
Example #21
    def query(reportclass, request, basequery, sortsql='1 asc'):
        basesql, baseparams = basequery.query.get_compiler(
            basequery.db).as_sql(with_col_aliases=False)

        # Assure the item hierarchy is up to date
        Item.rebuildHierarchy(database=basequery.db)

        # Execute a query to get the backlog at the start of the horizon
        startbacklogdict = {}
        query = '''
      select items.name, coalesce(req.qty, 0) - coalesce(pln.qty, 0)
      from (%s) items
      left outer join (
        select parent.name, sum(quantity) qty
        from demand
        inner join item on demand.item_id = item.name
        inner join item parent on item.lft between parent.lft and parent.rght
        where status in ('open', 'quote')
        and due < %%s
        group by parent.name
        ) req
      on req.name = items.name
      left outer join (
        select parent.name, sum(operationplan.quantity) qty
        from operationplan
        inner join demand on operationplan.demand_id = demand.name
          and operationplan.owner_id is null
          and operationplan.enddate < %%s
        inner join item on demand.item_id = item.name
        inner join item parent on item.lft between parent.lft and parent.rght
        group by parent.name
        ) pln
      on pln.name = items.name
      ''' % basesql
        with connections[request.database].chunked_cursor() as cursor_chunked:
            cursor_chunked.execute(
                query, baseparams +
                (request.report_startdate, request.report_startdate))
            for row in cursor_chunked:
                if row[0]:
                    startbacklogdict[row[0]] = float(row[1])

        # Execute the query
        query = '''
      select 
      parent.name, %s
      d.bucket,
      d.startdate,
      d.enddate,
      sum(coalesce((select sum(quantity) from demand
       where demand.item_id = child.name and status in ('open','quote') and due >= greatest(%%s,d.startdate) and due < d.enddate),0)) orders,
      sum(coalesce((select sum(-operationplanmaterial.quantity) from operationplanmaterial
      inner join operationplan on operationplan.id = operationplanmaterial.operationplan_id and operationplan.type = 'DLVR'
      where operationplanmaterial.item_id = child.name 
      and operationplanmaterial.flowdate >= greatest(%%s,d.startdate) 
      and operationplanmaterial.flowdate < d.enddate),0)) planned    
      from (%s) parent
      inner join item child on child.lft between parent.lft and parent.rght
      cross join (
                   select name as bucket, startdate, enddate
                   from common_bucketdetail
                   where bucket_id = %%s and enddate > %%s and startdate < %%s
                   ) d
      group by 
      parent.name, %s
      d.bucket,
      d.startdate,
      d.enddate
      order by %s, d.startdate
    ''' % (reportclass.attr_sql, basesql, reportclass.attr_sql, sortsql)

        # Build the python result
        with connections[request.database].chunked_cursor() as cursor_chunked:
            cursor_chunked.execute(
                query,
                baseparams + (
                    request.report_startdate,  #orders
                    request.report_startdate,  #planned
                    request.report_bucket,
                    request.report_startdate,
                    request.report_enddate  #buckets
                ))
            previtem = None
            for row in cursor_chunked:
                numfields = len(row)
                if row[0] != previtem:
                    backlog = startbacklogdict.get(row[0], 0)
                    previtem = row[0]
                backlog += float(row[numfields - 2]) - float(
                    row[numfields - 1])
                res = {
                    'item': row[0],
                    'bucket': row[numfields - 5],
                    'startdate': row[numfields - 4].date(),
                    'enddate': row[numfields - 3].date(),
                    'demand': round(row[numfields - 2], 1),
                    'supply': round(row[numfields - 1], 1),
                    'backlog': round(backlog, 1),
                }
                idx = 1
                for f in getAttributeFields(Item):
                    res[f.field_name] = row[idx]
                    idx += 1
                yield res
Example #22
    def query(reportclass, request, basequery, sortsql='1 asc'):
        cursor = connections[request.database].cursor()
        basesql, baseparams = basequery.query.get_compiler(
            basequery.db).as_sql(with_col_aliases=False)

        # Assure the item hierarchy is up to date
        Buffer.rebuildHierarchy(database=basequery.db)

        # Execute a query to get the onhand value at the start of our horizon
        startohdict = {}
        query = '''
      select buffers.name, sum(oh.onhand)
      from (%s) buffers
      inner join buffer
      on buffer.lft between buffers.lft and buffers.rght
      inner join (
      select operationplanmaterial.buffer as buffer, operationplanmaterial.onhand as onhand
      from operationplanmaterial,
        (select buffer, max(id) as id
         from operationplanmaterial
         where flowdate < '%s'
         group by buffer
        ) maxid
      where maxid.buffer = operationplanmaterial.buffer
      and maxid.id = operationplanmaterial.id
      ) oh
      on oh.buffer = buffer.name
      group by buffers.name
      ''' % (basesql, request.report_startdate)
        cursor.execute(query, baseparams)
        for row in cursor.fetchall():
            startohdict[row[0]] = float(row[1])

        # Execute the actual query
        query = '''
      select
        invplan.buffer_id, item.name, location.name, %s
        invplan.bucket, invplan.startdate, invplan.enddate,
        invplan.produced, invplan.consumed
      from (
        select
          buf.name as buffer_id,
          d.bucket as bucket, d.startdate as startdate, d.enddate as enddate,
          coalesce(sum(greatest(operationplanmaterial.quantity, 0)),0) as produced,
          coalesce(-sum(least(operationplanmaterial.quantity, 0)),0) as consumed
        from (%s) buf
        -- Multiply with buckets
        cross join (
             select name as bucket, startdate, enddate
             from common_bucketdetail
             where bucket_id = %%s and enddate > %%s and startdate < %%s
             ) d
        -- Include child buffers
        inner join buffer
        on buffer.lft between buf.lft and buf.rght
        -- Consumed and produced quantities
        left join operationplanmaterial
        on buffer.name = operationplanmaterial.buffer
        and d.startdate <= operationplanmaterial.flowdate
        and d.enddate > operationplanmaterial.flowdate
        and operationplanmaterial.flowdate >= %%s
        and operationplanmaterial.flowdate < %%s
        -- Grouping and sorting
        group by buf.name, buf.item_id, buf.location_id, buf.onhand, d.bucket, d.startdate, d.enddate
        ) invplan
      left outer join buffer on
        invplan.buffer_id = buffer.name
      left outer join item on
        buffer.item_id = item.name
      left outer join location on
        buffer.location_id = location.name
      order by %s, invplan.startdate
      ''' % (reportclass.attr_sql, basesql, sortsql)
        cursor.execute(
            query,
            baseparams + (request.report_bucket, request.report_startdate,
                          request.report_enddate, request.report_startdate,
                          request.report_enddate))

        # Build the python result
        prevbuf = None
        for row in cursor.fetchall():
            numfields = len(row)
            if row[0] != prevbuf:
                prevbuf = row[0]
                startoh = startohdict.get(prevbuf, 0)
                endoh = startoh + float(row[numfields - 2] -
                                        row[numfields - 1])
            else:
                startoh = endoh
                endoh += float(row[numfields - 2] - row[numfields - 1])
            res = {
                'buffer': row[0],
                'item': row[1],
                'location': row[2],
                'bucket': row[numfields - 5],
                'startdate': row[numfields - 4].date(),
                'enddate': row[numfields - 3].date(),
                'startoh': round(startoh, 1),
                'produced': round(row[numfields - 2], 1),
                'consumed': round(row[numfields - 1], 1),
                'endoh': round(endoh, 1),
            }
            # Add attribute fields
            idx = 3
            for f in getAttributeFields(Item, related_name_prefix="item"):
                res[f.field_name] = row[idx]
                idx += 1
            for f in getAttributeFields(Location,
                                        related_name_prefix="location"):
                res[f.field_name] = row[idx]
                idx += 1
            yield res
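The loop above keeps a running on-hand balance across buckets and reseeds it from startohdict whenever a new buffer starts, which only works because the rows arrive sorted by buffer. The idiom in isolation, with hypothetical rows of (buffer, produced, consumed):

rows = [("BUF-A", 5.0, 2.0), ("BUF-A", 1.0, 4.0), ("BUF-B", 0.0, 3.0)]
startohdict = {"BUF-A": 10.0}   # on-hand before the reporting horizon

prevbuf, endoh = None, 0.0
for buf, produced, consumed in rows:
    # reseed at each buffer boundary, otherwise carry the previous end stock
    startoh = startohdict.get(buf, 0.0) if buf != prevbuf else endoh
    endoh = startoh + produced - consumed
    prevbuf = buf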
Example #23
    def query(reportclass, request, basequery, sortsql="1 asc"):
        basesql, baseparams = basequery.query.get_compiler(
            basequery.db).as_sql(with_col_aliases=False)

        # Get the time units
        units = OverviewReport.getUnits(request)

        # Assure the resource hierarchy is up to date
        Resource.rebuildHierarchy(database=basequery.db)

        # Execute the query
        query = """
      select res.name, res.description, res.category, res.subcategory,
        res.type, res.constrained, res.maximum, res.maximum_calendar_id,
        res.cost, res.maxearly, res.setupmatrix_id, res.setup, location.name,
        location.description, location.category, location.subcategory,
        location.available_id, res.avgutil, res.available_id available_calendar,
        res.owner_id,
        %s
        d.bucket as col1, d.startdate as col2,
        coalesce(sum(out_resourceplan.available),0) / (case when res.type = 'buckets' then 1 else %f end) as available,
        coalesce(sum(out_resourceplan.unavailable),0) / (case when res.type = 'buckets' then 1 else %f end) as unavailable,
        coalesce(sum(out_resourceplan.load),0) / (case when res.type = 'buckets' then 1 else %f end) as loading,
        coalesce(sum(out_resourceplan.setup),0) / (case when res.type = 'buckets' then 1 else %f end) as setup
      from (%s) res
      left outer join location
        on res.location_id = location.name
      -- Multiply with buckets
      cross join (
                   select name as bucket, startdate, enddate
                   from common_bucketdetail
                   where bucket_id = '%s' and enddate > '%s' and startdate < '%s'
                   ) d
      -- Utilization info
      left join out_resourceplan
      on res.name = out_resourceplan.resource
      and d.startdate <= out_resourceplan.startdate
      and d.enddate > out_resourceplan.startdate
      and out_resourceplan.startdate >= '%s'
      and out_resourceplan.startdate < '%s'
      -- Grouping and sorting
      group by res.name, res.description, res.category, res.subcategory,
        res.type, res.maximum, res.maximum_calendar_id, res.available_id, res.cost, res.maxearly,
        res.setupmatrix_id, res.setup, location.name, location.description,
        location.category, location.subcategory, location.available_id, res.avgutil, res.owner_id,
        res.constrained,
        %s d.bucket, d.startdate
      order by %s, d.startdate
      """ % (
            reportclass.attr_sql,
            units[0],
            units[0],
            units[0],
            units[0],
            basesql,
            request.report_bucket,
            request.report_startdate,
            request.report_enddate,
            request.report_startdate,
            request.report_enddate,
            reportclass.attr_sql,
            sortsql,
        )

        # Build the python result
        with connections[request.database].chunked_cursor() as cursor_chunked:
            cursor_chunked.execute(query, baseparams)
            for row in cursor_chunked:
                numfields = len(row)
                if row[numfields - 4] != 0:
                    util = round(row[numfields - 2] * 100 / row[numfields - 4],
                                 2)
                else:
                    util = 0
                result = {
                    "resource": row[0],
                    "description": row[1],
                    "category": row[2],
                    "subcategory": row[3],
                    "type": row[4],
                    "constrained": row[5],
                    "maximum": row[6],
                    "maximum_calendar": row[7],
                    "cost": row[8],
                    "maxearly": row[9],
                    "setupmatrix": row[10],
                    "setup": row[11],
                    "location__name": row[12],
                    "location__description": row[13],
                    "location__category": row[14],
                    "location__subcategory": row[15],
                    "location__available": row[16],
                    "avgutil": round(row[17], 2),
                    "available_calendar": row[18],
                    "owner": row[19],
                    "bucket": row[numfields - 6],
                    "startdate": row[numfields - 5],
                    "available": row[numfields - 4],
                    "unavailable": row[numfields - 3],
                    "load": row[numfields - 2],
                    "setuptime": row[numfields - 1],
                    "utilization": util,
                }
                idx = 20
                for f in getAttributeFields(Resource):
                    result[f.field_name] = row[idx]
                    idx += 1
                for f in getAttributeFields(Location):
                    result[f.field_name] = row[idx]
                    idx += 1
                yield result
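The utilization figure above divides load by available time and guards against empty buckets. The same calculation as a standalone sketch (names hypothetical):

def utilization_pct(load, available):
    # percentage of available capacity that is loaded; 0 when nothing is
    # available in the bucket, which avoids a division by zero
    return round(load * 100 / available, 2) if available else 0

utilization_pct(6.0, 8.0)   # -> 75.0
utilization_pct(2.0, 0.0)   # -> 0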
Example #24
File: demand.py  Project: zhengr/frepple
    def query(reportclass, request, basequery, sortsql="1 asc"):
        basesql, baseparams = basequery.query.get_compiler(basequery.db).as_sql(
            with_col_aliases=False
        )

        # Assure the item hierarchy is up to date
        Item.rebuildHierarchy(database=basequery.db)

        # Execute a query to get the backlog at the start of the horizon
        startbacklogdict = {}
        query = """
          select name, sum(qty) from
            (
            select item.name, sum(demand.quantity) qty from (%s) item
            inner join item child on child.lft between item.lft and item.rght
            inner join demand on demand.item_id = child.name
            and demand.status in ('open','quote')
            and due < %%s
            group by item.name
            union all
            select item.name, sum(operationplanmaterial.quantity) qty
            from (%s) item
            inner join item child on child.lft between item.lft and item.rght
            inner join operationplanmaterial on operationplanmaterial.item_id = child.name
            inner join operationplan on operationplan.reference = operationplanmaterial.operationplan_id
              and operationplan.demand_id is not null
              and operationplan.enddate < %%s
            group by item.name
            ) t
            group by name
        """ % (
            basesql,
            basesql,
        )
        with transaction.atomic(using=request.database):
            with connections[request.database].chunked_cursor() as cursor_chunked:
                cursor_chunked.execute(
                    query,
                    baseparams
                    + (request.report_startdate,)
                    + baseparams
                    + (request.report_startdate,),
                )
                for row in cursor_chunked:
                    if row[0]:
                        startbacklogdict[row[0]] = max(float(row[1]), 0)

        # Execute the query
        query = """
          select
          parent.name, parent.description, parent.category, parent.subcategory,
          parent.owner_id, parent.cost, parent.volume, parent.weight, parent.uom, parent.periodofcover, parent.source, parent.lastmodified,
          %s
          d.bucket,
          d.startdate,
          d.enddate,
          sum(coalesce((
            select sum(quantity)
            from demand
            inner join item child on child.lft between parent.lft and parent.rght
            where demand.item_id = child.name
            and status in ('open','quote')
            and due >= greatest(%%s,d.startdate)
            and due < d.enddate
            ),0)) orders,
          sum(coalesce((
            select sum(operationplan.quantity)
            from operationplan
            inner join item child on child.lft between parent.lft and parent.rght
            where operationplan.item_id = child.name
            and operationplan.demand_id is not null
            and operationplan.enddate >= greatest(%%s,d.startdate)
            and operationplan.enddate < d.enddate
            ),0)) planned,
          (select json_agg(json_build_array(f1,f2)) from
            (select distinct out_constraint.name as f1, out_constraint.owner as f2
            from out_constraint
            inner join item child
              on child.lft between parent.lft and parent.rght
            inner join operationplan
              on operationplan.demand_id = out_constraint.demand
              and operationplan.due is not null
            and out_constraint.item = child.name
            and operationplan.enddate >= greatest(%%s,d.startdate)
            and (
              out_constraint.name not in ('before current', 'before fence')
              or out_constraint.enddate > d.enddate
              )
            and operationplan.due < d.enddate
            limit 20
            ) cte_reasons
          ) reasons
          from (%s) parent
          cross join (
                       select name as bucket, startdate, enddate
                       from common_bucketdetail
                       where bucket_id = %%s and enddate > %%s and startdate < %%s
                       ) d
          group by
            parent.name, parent.description, parent.category, parent.subcategory,
            parent.owner_id, parent.cost, parent.volume, parent.weight, parent.uom, parent.periodofcover,
            parent.source, parent.lastmodified, parent.lft, parent.rght,
            %s
            d.bucket, d.startdate, d.enddate
          order by %s, d.startdate
        """ % (
            reportclass.attr_sql,
            basesql,
            reportclass.attr_sql,
            sortsql,
        )

        # Build the python result
        with transaction.atomic(using=request.database):
            with connections[request.database].chunked_cursor() as cursor_chunked:
                cursor_chunked.execute(
                    query,
                    (request.report_startdate,) * 3  # orders + planned + constraints
                    + baseparams  # base item query ("parent")
                    + (
                        request.report_bucket,
                        request.report_startdate,
                        request.report_enddate,
                    ),  # buckets
                )
                previtem = None
                itemattributefields = getAttributeFields(Item)
                for row in cursor_chunked:
                    numfields = len(row)
                    if row[0] != previtem:
                        backlog = startbacklogdict.get(row[0], 0)
                        previtem = row[0]
                    backlog += float(row[numfields - 3]) - float(row[numfields - 2])
                    res = {
                        "item": row[0],
                        "description": row[1],
                        "category": row[2],
                        "subcategory": row[3],
                        "owner": row[4],
                        "cost": row[5],
                        "volume": row[6],
                        "weight": row[7],
                        "uom": row[8],
                        "periodofcover": row[9],
                        "source": row[10],
                        "lastmodified": row[11],
                        "bucket": row[numfields - 6],
                        "startdate": row[numfields - 5].date(),
                        "enddate": row[numfields - 4].date(),
                        "demand": row[numfields - 3],
                        "supply": row[numfields - 2],
                        "reasons": json.dumps(row[numfields - 1]),
                        "backlog": max(backlog or 0, 0),
                    }
                    idx = 12
                    for f in itemattributefields:
                        res[f.field_name] = row[idx]
                        idx += 1
                    yield res
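This example wraps its chunked cursor in transaction.atomic. On PostgreSQL, chunked_cursor() streams rows through a named server-side cursor instead of fetching everything at once, and such a cursor generally has to live inside a transaction, which is presumably why the two context managers are nested here. A sketch of the shape, assuming a Django connections dict:

from django.db import connections, transaction

def stream_rows(database, query, params):
    # keep the named (server-side) cursor inside one transaction while
    # iterating, so rows are fetched in chunks rather than all at once
    with transaction.atomic(using=database):
        with connections[database].chunked_cursor() as cursor:
            cursor.execute(query, params)
            for row in cursor:
                yield row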
Example #25
    def query(reportclass, request, basequery, sortsql='1 asc'):
        basesql, baseparams = basequery.query.get_compiler(
            basequery.db).as_sql(with_col_aliases=False)

        # Assure the item hierarchy is up to date
        Item.rebuildHierarchy(database=basequery.db)

        # Execute a query to get the backlog at the start of the horizon
        startbacklogdict = {}
        query = '''
      select name, sum(qty) from
        (
        select parent.name, sum(demand.quantity) qty from (%s) item
        inner join item parent on item.lft between parent.lft and parent.rght
        inner join demand on demand.item_id = item.name and demand.status in ('open','quote') and due < %%s
        group by parent.name
        union all
        select parent.name, sum(operationplanmaterial.quantity) qty
        from operationplanmaterial
        inner join operationplan on operationplan.reference = operationplanmaterial.operationplan_id
          and operationplan.type = 'DLVR'
          and operationplan.enddate < %%s
        inner join (%s) item on operationplanmaterial.item_id = item.name
        inner join item parent on item.lft between parent.lft and parent.rght
        group by parent.name
        ) t
        group by name
      ''' % (basesql, basesql)
        with connections[request.database].chunked_cursor() as cursor_chunked:
            cursor_chunked.execute(
                query, baseparams +
                (request.report_startdate, request.report_startdate) +
                baseparams)
            for row in cursor_chunked:
                if row[0]:
                    startbacklogdict[row[0]] = float(row[1])

        # Execute the query
        query = '''
      select
      parent.name, parent.description, parent.category, parent.subcategory,
      parent.owner_id, parent.cost, parent.source, parent.lastmodified,
      %s
      d.bucket,
      d.startdate,
      d.enddate,
      sum(coalesce((select sum(quantity) from demand
       where demand.item_id = child.name and status in ('open','quote') and due >= greatest(%%s,d.startdate) and due < d.enddate),0)) orders,
      sum(coalesce((select sum(-operationplanmaterial.quantity) from operationplanmaterial
      inner join operationplan on operationplan.reference = operationplanmaterial.operationplan_id and operationplan.type = 'DLVR'
      where operationplanmaterial.item_id = child.name
      and operationplanmaterial.flowdate >= greatest(%%s,d.startdate)
      and operationplanmaterial.flowdate < d.enddate),0)) planned
      from (%s) parent
      inner join item child on child.lft between parent.lft and parent.rght
      cross join (
                   select name as bucket, startdate, enddate
                   from common_bucketdetail
                   where bucket_id = %%s and enddate > %%s and startdate < %%s
                   ) d
      group by
        parent.name, parent.description, parent.category, parent.subcategory,
        parent.owner_id, parent.cost, parent.source, parent.lastmodified,
        %s
        d.bucket, d.startdate, d.enddate
      order by %s, d.startdate
    ''' % (reportclass.attr_sql, basesql, reportclass.attr_sql, sortsql)

        # Build the python result
        with connections[request.database].chunked_cursor() as cursor_chunked:
            cursor_chunked.execute(
                query,
                (request.report_startdate, request.report_startdate)  # orders, planned
                + baseparams  # base item query
                + (request.report_bucket, request.report_startdate,
                   request.report_enddate)  # buckets
            )
            previtem = None
            for row in cursor_chunked:
                numfields = len(row)
                if row[0] != previtem:
                    backlog = startbacklogdict.get(row[0], 0)
                    previtem = row[0]
                backlog += float(row[numfields - 2]) - float(
                    row[numfields - 1])
                res = {
                    'item': row[0],
                    'description': row[1],
                    'category': row[2],
                    'subcategory': row[3],
                    'owner': row[4],
                    'cost': row[5],
                    'source': row[6],
                    'lastmodified': row[7],
                    'bucket': row[numfields - 5],
                    'startdate': row[numfields - 4].date(),
                    'enddate': row[numfields - 3].date(),
                    'demand': row[numfields - 2],
                    'supply': row[numfields - 1],
                    'backlog': backlog,
                }
                idx = 8
                for f in getAttributeFields(Item):
                    res[f.field_name] = row[idx]
                    idx += 1
                yield res
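All of these reports multiply the filtered entities with the reporting buckets via a cross join on common_bucketdetail. In plain Python terms, that join is just a cartesian product (hypothetical data):

items = ["ITEM-A", "ITEM-B"]
buckets = [("2024-W01", "2024-01-01", "2024-01-08"),
           ("2024-W02", "2024-01-08", "2024-01-15")]

# one output row per item and bucket, even when nothing happens in the bucket;
# the outer joins then attach demand/supply figures where they exist
grid = [(item, *bucket) for item in items for bucket in buckets]
assert len(grid) == len(items) * len(buckets)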
Example #26
File: resource.py  Project: frePPLe/frePPLe
  def query(reportclass, request, basequery, sortsql='1 asc'):
    basesql, baseparams = basequery.query.get_compiler(basequery.db).as_sql(with_col_aliases=False)

    # Get the time units
    units = OverviewReport.getUnits(request)

    # Assure the resource hierarchy is up to date
    Resource.rebuildHierarchy(database=basequery.db)

    # Execute the query
    query = '''
      select res.name, res.description, res.category, res.subcategory,
        res.type, res.maximum, res.maximum_calendar_id, res.cost, res.maxearly,
        res.setupmatrix_id, res.setup, location.name, location.description,
        location.category, location.subcategory, location.available_id,
        res.avgutil, res.available_id available_calendar, res.owner_id,
        %s
        d.bucket as col1, d.startdate as col2,
        coalesce(sum(out_resourceplan.available),0) / (case when res.type = 'buckets' then 1 else %f end) as available,
        coalesce(sum(out_resourceplan.unavailable),0) / (case when res.type = 'buckets' then 1 else %f end) as unavailable,
        coalesce(sum(out_resourceplan.load),0) / (case when res.type = 'buckets' then 1 else %f end) as loading,
        coalesce(sum(out_resourceplan.setup),0) / (case when res.type = 'buckets' then 1 else %f end) as setup
      from (%s) res
      left outer join location
        on res.location_id = location.name
      -- Multiply with buckets
      cross join (
                   select name as bucket, startdate, enddate
                   from common_bucketdetail
                   where bucket_id = '%s' and enddate > '%s' and startdate < '%s'
                   ) d
      -- Utilization info
      left join out_resourceplan
      on res.name = out_resourceplan.resource
      and d.startdate <= out_resourceplan.startdate
      and d.enddate > out_resourceplan.startdate
      and out_resourceplan.startdate >= '%s'
      and out_resourceplan.startdate < '%s'
      -- Grouping and sorting
      group by res.name, res.description, res.category, res.subcategory,
        res.type, res.maximum, res.maximum_calendar_id, res.available_id, res.cost, res.maxearly,
        res.setupmatrix_id, res.setup, location.name, location.description,
        location.category, location.subcategory, location.available_id, res.avgutil, res.owner_id,
        %s d.bucket, d.startdate
      order by %s, d.startdate
      ''' % (
        reportclass.attr_sql, units[0], units[0], units[0], units[0],
        basesql, request.report_bucket, request.report_startdate,
        request.report_enddate,
        request.report_startdate, request.report_enddate,
        reportclass.attr_sql, sortsql
      )

    # Build the python result
    with connections[request.database].chunked_cursor() as cursor_chunked:
      cursor_chunked.execute(query, baseparams)
      for row in cursor_chunked:
        numfields = len(row)
        if row[numfields - 4] != 0:
          util = round(row[numfields - 2] * 100 / row[numfields - 4], 2)
        else:
          util = 0
        result = {
          'resource': row[0], 'description': row[1], 'category': row[2],
          'subcategory': row[3], 'type': row[4], 'maximum': row[5],
          'maximum_calendar': row[6], 'cost': row[7], 'maxearly': row[8],
          'setupmatrix': row[9], 'setup': row[10],
          'location__name': row[11], 'location__description': row[12],
          'location__category': row[13], 'location__subcategory': row[14],
          'location__available': row[15],
          'avgutil': round(row[16], 2),
          'available_calendar': row[17],
          'owner': row[18],
          'bucket': row[numfields - 6],
          'startdate': row[numfields - 5].date(),
          'available': row[numfields - 4],
          'unavailable': row[numfields - 3],
          'load': row[numfields - 2],
          'setuptime': row[numfields - 1],  # distinct from the 'setup' rule above
          'utilization': util
          }
        idx = 19
        for f in getAttributeFields(Resource):
          result[f.field_name] = row[idx]
          idx += 1
        for f in getAttributeFields(Location):
          result[f.field_name] = row[idx]
          idx += 1
        yield result
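The repeated "/ %f" divisions in the SQL above convert the plan's second-based figures into the report's display unit, except for bucketized resources whose numbers are already counted per bucket. A sketch of the assumed convention, where units[0] is taken to be the number of seconds per display unit:

def scale(seconds, restype, factor):
    # factor plays the role of units[0], e.g. 3600.0 when reporting in hours;
    # 'buckets' resources are counted per bucket, not in seconds
    return seconds if restype == "buckets" else seconds / factor

scale(28800.0, "default", 3600.0)   # -> 8.0 hours
scale(3.0, "buckets", 3600.0)       # -> 3.0, unchanged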
Example #27
    def query(reportclass, request, basequery, sortsql='1 asc'):
        basesql, baseparams = basequery.query.get_compiler(
            basequery.db).as_sql(with_col_aliases=False)

        # Assure the item hierarchy is up to date
        Item.rebuildHierarchy(database=basequery.db)

        # Execute a query to get the backlog at the start of the horizon
        startbacklogdict = {}
        query = '''
      select items.name, coalesce(req.qty, 0) - coalesce(pln.qty, 0)
      from (%s) items
      left outer join (
        select parent.name, sum(quantity) qty
        from demand
        inner join item on demand.item_id = item.name
        inner join item parent on item.lft between parent.lft and parent.rght
        where status in ('open', 'quote')
        and due < %%s
        group by parent.name
        ) req
      on req.name = items.name
      left outer join (
        select parent.name, sum(operationplan.quantity) qty
        from operationplan
        inner join demand on operationplan.demand_id = demand.name
          and operationplan.owner_id is null
          and operationplan.enddate < %%s
        inner join item on demand.item_id = item.name
        inner join item parent on item.lft between parent.lft and parent.rght
        group by parent.name
        ) pln
      on pln.name = items.name
      ''' % basesql
        with connections[request.database].chunked_cursor() as cursor_chunked:
            cursor_chunked.execute(
                query, baseparams +
                (request.report_startdate, request.report_startdate))
            for row in cursor_chunked:
                if row[0]:
                    startbacklogdict[row[0]] = float(row[1])

        # Execute the query
        query = '''
        select y.name, %s
               y.bucket, y.startdate, y.enddate,
               min(y.orders),
               min(y.planned)
        from (
          select x.name as name, x.lft as lft, x.rght as rght,
               x.bucket as bucket, x.startdate as startdate, x.enddate as enddate,
               coalesce(sum(demand.quantity),0) as orders,
               min(x.planned) as planned
          from (
          select items.name as name, items.lft as lft, items.rght as rght,
                 d.bucket as bucket, d.startdate as startdate, d.enddate as enddate,
                 coalesce(sum(operationplan.quantity),0) as planned
          from (%s) items
          -- Multiply with buckets
          cross join (
             select name as bucket, startdate, enddate
             from common_bucketdetail
             where bucket_id = %%s and enddate > %%s and startdate < %%s
             ) d
          -- Include hierarchical children
          inner join item
          on item.lft between items.lft and items.rght
          -- Planned quantity
          left outer join operationplan
          on operationplan.type = 'DLVR'
          and operationplan.item_id = item.name
          and d.startdate <= operationplan.enddate
          and d.enddate > operationplan.enddate
          and operationplan.enddate >= %%s
          and operationplan.enddate < %%s
          -- Grouping
          group by items.name, items.lft, items.rght, d.bucket, d.startdate, d.enddate
        ) x
        -- Requested quantity
        inner join item
        on item.lft between x.lft and x.rght
        left join demand
        on item.name = demand.item_id
        and x.startdate <= demand.due
        and x.enddate > demand.due
        and demand.due >= %%s
        and demand.due < %%s
        and demand.status in ('open', 'quote')
        -- Grouping
        group by x.name, x.lft, x.rght, x.bucket, x.startdate, x.enddate
        ) y
        -- Ordering and grouping
        group by %s y.name, y.lft, y.rght, y.bucket, y.startdate, y.enddate
        order by %s, y.startdate
       ''' % (reportclass.attr_sql, basesql, reportclass.attr_sql, sortsql)

        # Build the python result
        with connections[request.database].chunked_cursor() as cursor_chunked:
            cursor_chunked.execute(
                query,
                baseparams + (request.report_bucket, request.report_startdate,
                              request.report_enddate, request.report_startdate,
                              request.report_enddate, request.report_startdate,
                              request.report_enddate))
            previtem = None
            for row in cursor_chunked:
                numfields = len(row)
                if row[0] != previtem:
                    backlog = startbacklogdict.get(row[0], 0)
                    previtem = row[0]
                backlog += float(row[numfields - 2]) - float(
                    row[numfields - 1])
                res = {
                    'item': row[0],
                    'bucket': row[numfields - 5],
                    'startdate': row[numfields - 4].date(),
                    'enddate': row[numfields - 3].date(),
                    'demand': round(row[numfields - 2], 1),
                    'supply': round(row[numfields - 1], 1),
                    'backlog': round(backlog, 1),
                }
                idx = 1
                for f in getAttributeFields(Item):
                    res[f.field_name] = row[idx]
                    idx += 1
                yield res
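The "child.lft between parent.lft and parent.rght" joins used throughout rely on the nested-set encoding that rebuildHierarchy maintains: every descendant's (lft, rght) interval is contained in its ancestor's. A minimal illustration with hypothetical values:

# (lft, rght) intervals for a small tree: ROOT contains A, which contains A1
nodes = {"ROOT": (1, 8), "A": (2, 5), "A1": (3, 4), "B": (6, 7)}

def is_descendant_or_self(child, parent):
    cl, _ = nodes[child]
    pl, pr = nodes[parent]
    return pl <= cl <= pr   # the BETWEEN test from the SQL

assert is_descendant_or_self("A1", "ROOT")
assert not is_descendant_or_self("B", "A")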
Example #28
    def query(reportclass, request, basequery, sortsql="1 asc"):
        basesql, baseparams = basequery.query.get_compiler(
            basequery.db).as_sql(with_col_aliases=False)

        # Execute the actual query
        query = """
           select
           opplanmat.buffer,
           item.name item_id,
           location.name location_id,
           item.description,
           item.type,
           item.category,
           item.subcategory,
           item.cost,
           item.owner_id,
           item.source,
           item.lastmodified,
           location.description,
           location.category,
           location.subcategory,
           location.available_id,
           location.owner_id,
           location.source,
           location.lastmodified,
           opplanmat.opplan_batch,
           %s
           (select jsonb_build_object(
               'onhand', onhand,
               'flowdate', to_char(flowdate,'YYYY-MM-DD HH24:MI:SS'),
               'periodofcover', periodofcover
               )
           from operationplanmaterial
           inner join operationplan
             on operationplanmaterial.operationplan_id = operationplan.reference
           where operationplanmaterial.item_id = item.name
             and operationplanmaterial.location_id = location.name
             and (item.type is distinct from 'make to order' or operationplan.batch is not distinct from opplanmat.opplan_batch)
             and flowdate < greatest(d.startdate,%%s)
           order by flowdate desc, id desc limit 1) startoh,
           d.bucket,
           d.startdate,
           d.enddate,
           (select safetystock from
            (
            select 1 as priority, coalesce(
              (select value from calendarbucket
               where calendar_id = 'SS for ' || opplanmat.buffer
               and greatest(d.startdate,%%s) >= startdate and greatest(d.startdate,%%s) < enddate
               order by priority limit 1),
              (select defaultvalue from calendar where name = 'SS for ' || opplanmat.buffer)
              ) as safetystock
            union all
            select 2 as priority, coalesce(
              (select value
               from calendarbucket
               where calendar_id = (
                 select minimum_calendar_id
                 from buffer
                 where item_id = item.name
                 and location_id = location.name
                 and (item.type is distinct from 'make to order' or buffer.batch is not distinct from opplanmat.opplan_batch)
                 )
               and greatest(d.startdate,%%s) >= startdate
               and greatest(d.startdate,%%s) < enddate
               order by priority limit 1),
              (select defaultvalue
               from calendar
               where name = (
                 select minimum_calendar_id
                 from buffer
                 where item_id = item.name
                 and location_id = location.name
                 and (item.type is distinct from 'make to order' or buffer.batch is not distinct from opplanmat.opplan_batch)
                 )
              )
            ) as safetystock
            union all
            select 3 as priority, minimum as safetystock
            from buffer
            where item_id = item.name
            and location_id = location.name
            and (item.type is distinct from 'make to order' or buffer.batch is not distinct from opplanmat.opplan_batch)
            ) t
            where t.safetystock is not null
            order by priority
            limit 1) safetystock,
            (select jsonb_build_object(
               'work_in_progress_mo', sum(case when (startdate < d.enddate and enddate >= d.enddate) and opm.quantity > 0 and operationplan.type = 'MO' then opm.quantity else 0 end),
               'on_order_po', sum(case when (startdate < d.enddate and enddate >= d.enddate) and opm.quantity > 0 and operationplan.type = 'PO' then opm.quantity else 0 end),
               'in_transit_do', sum(case when (startdate < d.enddate and enddate >= d.enddate) and opm.quantity > 0 and operationplan.type = 'DO' then opm.quantity else 0 end),
               'total_in_progress', sum(case when (startdate < d.enddate and enddate >= d.enddate) and opm.quantity > 0 then opm.quantity else 0 end),
               'consumed', sum(case when (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity < 0 then -opm.quantity else 0 end),
               'consumedMO', sum(case when operationplan.type = 'MO' and (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity < 0 then -opm.quantity else 0 end),
               'consumedDO', sum(case when operationplan.type = 'DO' and (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity < 0 then -opm.quantity else 0 end),
               'consumedSO', sum(case when operationplan.type = 'DLVR' and (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity < 0 then -opm.quantity else 0 end),
               'produced', sum(case when (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity > 0 then opm.quantity else 0 end),
               'producedMO', sum(case when operationplan.type = 'MO' and (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity > 0 then opm.quantity else 0 end),
               'producedDO', sum(case when operationplan.type = 'DO' and (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity > 0 then opm.quantity else 0 end),
               'producedPO', sum(case when operationplan.type = 'PO' and (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate) and opm.quantity > 0 then opm.quantity else 0 end)
               )
             from operationplanmaterial opm
             inner join operationplan
             on operationplan.reference = opm.operationplan_id
               and ((startdate < d.enddate and enddate >= d.enddate)
               or (opm.flowdate >= greatest(d.startdate,%%s) and opm.flowdate < d.enddate))
             where opm.item_id = item.name
               and opm.location_id = location.name
               and (item.type is distinct from 'make to order' or operationplan.batch is not distinct from opplanmat.opplan_batch)
           ) ongoing
           from
           (%s) opplanmat
           inner join item on item.name = opplanmat.item_id
           inner join location on location.name = opplanmat.location_id
           -- Multiply with buckets
          cross join (
             select name as bucket, startdate, enddate
             from common_bucketdetail
             where bucket_id = %%s and enddate > %%s and startdate < %%s
             ) d
          group by
           opplanmat.buffer,
           item.name,
           location.name,
           item.description,
           item.type,
           item.category,
           item.subcategory,
           item.cost,
           item.owner_id,
           item.source,
           item.lastmodified,
           location.description,
           location.category,
           location.subcategory,
           location.available_id,
           location.owner_id,
           location.source,
           location.lastmodified,
           opplanmat.opplan_batch,
           d.bucket,
           d.startdate,
           d.enddate
           order by %s, d.startdate
        """ % (
            reportclass.attr_sql,
            basesql,
            sortsql,
        )

        # Build the python result
        with connections[request.database].chunked_cursor() as cursor_chunked:
            cursor_chunked.execute(
                query,
                (
                    request.report_startdate,  # startoh
                    request.report_startdate,
                    request.report_startdate,
                    request.report_startdate,
                    request.report_startdate,  # safetystock
                ) + (request.report_startdate, ) * 9  # ongoing
                + baseparams  # opplanmat
                + (
                    request.report_bucket,
                    request.report_startdate,
                    request.report_enddate,
                ),  # bucket d
            )
            for row in cursor_chunked:
                numfields = len(row)
                # jsonb columns computed in the SQL above
                startoh_info = row[numfields - 6]
                ongoing = row[numfields - 1]
                startoh = startoh_info["onhand"] if startoh_info else 0
                periodofcover = (
                    startoh_info["periodofcover"] if startoh_info else None
                )
                # Days of cover at the start of the bucket
                if startoh <= 0:
                    startohdoc = 0
                elif not periodofcover or periodofcover == 86313600:
                    # 86313600 seconds = 999 days: "covered forever" sentinel
                    startohdoc = 999
                else:
                    startohdoc = max(
                        0,
                        (
                            datetime.strptime(
                                startoh_info["flowdate"], "%Y-%m-%d %H:%M:%S"
                            )
                            + timedelta(seconds=periodofcover)
                            - row[numfields - 4]
                        ).days,
                    )
                res = {
                    "buffer": row[0],
                    "item": row[1],
                    "location": row[2],
                    "item__description": row[3],
                    "item__type": row[4],
                    "item__category": row[5],
                    "item__subcategory": row[6],
                    "item__cost": row[7],
                    "item__owner": row[8],
                    "item__source": row[9],
                    "item__lastmodified": row[10],
                    "location__description": row[11],
                    "location__category": row[12],
                    "location__subcategory": row[13],
                    "location__available_id": row[14],
                    "location__owner_id": row[15],
                    "location__source": row[16],
                    "location__lastmodified": row[17],
                    "batch": row[18],
                    "startoh": startoh,
                    "startohdoc": startohdoc,
                    "bucket": row[numfields - 5],
                    "startdate": row[numfields - 4],
                    "enddate": row[numfields - 3],
                    "safetystock": row[numfields - 2] or 0,
                    "consumed": ongoing["consumed"] or 0,
                    "consumedMO": ongoing["consumedMO"] or 0,
                    "consumedDO": ongoing["consumedDO"] or 0,
                    "consumedSO": ongoing["consumedSO"] or 0,
                    "produced": ongoing["produced"] or 0,
                    "producedMO": ongoing["producedMO"] or 0,
                    "producedDO": ongoing["producedDO"] or 0,
                    "producedPO": ongoing["producedPO"] or 0,
                    "total_in_progress": ongoing["total_in_progress"] or 0,
                    "work_in_progress_mo": ongoing["work_in_progress_mo"] or 0,
                    "on_order_po": ongoing["on_order_po"] or 0,
                    "in_transit_do": ongoing["in_transit_do"] or 0,
                    "endoh": float(startoh)
                    + float(ongoing["produced"] or 0)
                    - float(ongoing["consumed"] or 0),
                }
                # Add attribute fields: 19 fixed columns precede attr_sql
                idx = 19
                for f in getAttributeFields(Item, related_name_prefix="item"):
                    res[f.field_name] = row[idx]
                    idx += 1
                for f in getAttributeFields(
                    Location, related_name_prefix="location"
                ):
                    res[f.field_name] = row[idx]
                    idx += 1
                yield res
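The 86313600 tested in the days-of-cover logic is a sentinel: it is exactly 999 days expressed in seconds, the value the planner uses when stock effectively never runs out. The days-of-cover arithmetic in isolation, with hypothetical inputs:

from datetime import datetime, timedelta

assert 999 * 24 * 3600 == 86313600   # the "covered forever" sentinel

flowdate = datetime(2024, 1, 1)       # last stock movement before the bucket
periodofcover = 5 * 24 * 3600         # seconds of demand the stock covers
bucket_start = datetime(2024, 1, 3)

# days of cover remaining at the start of the bucket
days = (flowdate + timedelta(seconds=periodofcover) - bucket_start).days  # -> 3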
Example #29
    def query(reportclass, request, basequery, sortsql="1 asc"):
        basesql, baseparams = basequery.query.get_compiler(basequery.db).as_sql(
            with_col_aliases=False
        )
        # Build the query
        query = """
      select
        operation.name, location.name, operation.item_id, operation.description,
        operation.category, operation.subcategory, operation.type, operation.duration,
        operation.duration_per, operation.fence, operation.posttime, operation.sizeminimum,
        operation.sizemultiple, operation.sizemaximum, operation.priority, operation.effective_start,
        operation.effective_end, operation.cost, operation.search, operation.source, operation.lastmodified,
        location.description, location.category, location.subcategory, location.available_id,
        location.lastmodified, item.description, item.category, item.subcategory, item.cost,
        item.volume, item.weight, item.periodofcover, item.owner_id, item.source, item.lastmodified,
        %s
        res.bucket, res.startdate, res.enddate,
        res.proposed_start, res.total_start, res.proposed_end, res.total_end, res.proposed_production, res.total_production
      from operation
      left outer join item
      on operation.item_id = item.name
      left outer join location
      on operation.location_id = location.name
      inner join (
        select oper.name as operation_id, d.bucket, d.startdate, d.enddate,
         coalesce(sum(
           case when operationplan.status = 'proposed'
             and d.startdate <= operationplan.startdate and d.enddate > operationplan.startdate
           then operationplan.quantity
           else 0 end
           ), 0) proposed_start,
         coalesce(sum(
           case when d.startdate <= operationplan.startdate and d.enddate > operationplan.startdate
           then operationplan.quantity else 0 end
           ), 0) total_start,
         coalesce(sum(
           case when operationplan.status = 'proposed'
             and d.startdate < operationplan.enddate and d.enddate >= operationplan.enddate
           then operationplan.quantity else 0 end
           ), 0) proposed_end,
         coalesce(sum(
           case when d.startdate < operationplan.enddate and d.enddate >= operationplan.enddate
           then operationplan.quantity else 0 end
           ), 0) total_end,
         coalesce(sum(
           case when operationplan.status = 'proposed' then
             (
             -- Total overlap
             extract (epoch from least(operationplan.enddate, d.enddate) - greatest(operationplan.startdate, d.startdate))
             -- Minus the interruptions
             - coalesce((
                select sum(greatest(0, extract (epoch from
                  least(to_timestamp(value->>1, 'YYYY-MM-DD HH24:MI:SS'), d.enddate)
                  - greatest(to_timestamp(value->>0, 'YYYY-MM-DD HH24:MI:SS'), d.startdate)
                  )))
                from ( select * from jsonb_array_elements(plan->'interruptions')) breaks
                ), 0)
             )
             / greatest(1, extract(epoch from operationplan.enddate - operationplan.startdate) - coalesce((plan#>>'{unavailable}')::numeric, 0))
             * operationplan.quantity
           else 0 end
           ), 0) proposed_production,
         coalesce(sum(
             (
             -- Total overlap
             extract (epoch from least(operationplan.enddate, d.enddate) - greatest(operationplan.startdate, d.startdate))
             -- Minus the interruptions
             - coalesce((
                select sum(greatest(0, extract (epoch from
                  least(to_timestamp(value->>1, 'YYYY-MM-DD HH24:MI:SS'), d.enddate)
                  - greatest(to_timestamp(value->>0, 'YYYY-MM-DD HH24:MI:SS'), d.startdate)
                  )))
                from ( select * from jsonb_array_elements(plan->'interruptions')) breaks
                ), 0)
             )
           / greatest(1, extract(epoch from operationplan.enddate - operationplan.startdate) - coalesce((plan#>>'{unavailable}')::numeric, 0))
           * operationplan.quantity
           ), 0) total_production
        from (%s) oper
        -- Multiply with buckets
        cross join (
          select name as bucket, startdate, enddate
          from common_bucketdetail
          where bucket_id = '%s' and enddate > '%s' and startdate < '%s'
          ) d
        -- Match overlapping operationplans
        left outer join operationplan
          on operationplan.operation_id = oper.name
          and (operationplan.startdate, operationplan.enddate) overlaps (d.startdate, d.enddate)
        group by oper.name, d.bucket, d.startdate, d.enddate
      ) res
      on res.operation_id = operation.name
      order by %s, res.startdate
      """ % (
            reportclass.attr_sql,
            basesql,
            request.report_bucket,
            request.report_startdate,
            request.report_enddate,
            sortsql,
        )

        # Convert the SQL results to Python
        with transaction.atomic(using=request.database):
            with connections[request.database].chunked_cursor() as cursor_chunked:
                cursor_chunked.execute(query, baseparams)
                for row in cursor_chunked:
                    numfields = len(row)
                    result = {
                        "operation": row[0],
                        "location": row[1],
                        "item": row[2],
                        "description": row[3],
                        "category": row[4],
                        "subcategory": row[5],
                        "type": row[6],
                        "duration": row[7],
                        "duration_per": row[8],
                        "fence": row[9],
                        "posttime": row[10],
                        "sizeminimum": row[11],
                        "sizemultiple": row[12],
                        "sizemaximum": row[13],
                        "priority": row[14],
                        "effective_start": row[15],
                        "effective_end": row[16],
                        "cost": row[17],
                        "search": row[18],
                        "source": row[19],
                        "lastmodified": row[20],
                        "location__description": row[21],
                        "location__category": row[22],
                        "location__subcategory": row[23],
                        "location__available": row[24],
                        "location__lastmodified": row[25],
                        "item__description": row[26],
                        "item__category": row[27],
                        "item__subcategory": row[28],
                        "item__cost": row[29],
                        "item__volume": row[30],
                        "item__weight": row[31],
                        "item__periodofcover": row[32],
                        "item__owner": row[33],
                        "item__source": row[34],
                        "item__lastmodified": row[35],
                        "bucket": row[numfields - 9],
                        "startdate": row[numfields - 8].date(),
                        "enddate": row[numfields - 7].date(),
                        "proposed_start": row[numfields - 6],
                        "total_start": row[numfields - 5],
                        "proposed_end": row[numfields - 4],
                        "total_end": row[numfields - 3],
                        "production_proposed": row[numfields - 2],
                        "production_total": row[numfields - 1],
                    }
                    idx = 36
                    for f in getAttributeFields(Operation):
                        result["operation__%s" % f.field_name] = row[idx]
                        idx += 1
                    for f in getAttributeFields(Item):
                        result["item__%s" % f.field_name] = row[idx]
                        idx += 1
                    for f in getAttributeFields(Location):
                        result["location__%s" % f.field_name] = row[idx]
                        idx += 1
                    yield result
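The proposed_production and total_production expressions prorate an operationplan's quantity over each bucket by its share of working time: the overlap with the bucket, minus interruptions, divided by the plan's duration minus its unavailable time. Reduced to Python with hypothetical numbers:

overlap_seconds = 3600.0   # overlap of the plan with the bucket, breaks removed
working_seconds = 7200.0   # plan duration minus its unavailable time
quantity = 10.0

# the greatest(1, ...) in the SQL prevents division by zero for zero-length plans
booked = overlap_seconds / max(1.0, working_seconds) * quantity   # -> 5.0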