Пример #1
0
    def basequeryset(reportclass, request, *args, **kwargs):
        """
        Build the base queryset of demands for the report.

        The queryset is optionally narrowed by the "item", "location",
        "customer" and "status_in" parameters of the request URL.  The
        item/location/customer filters are hierarchical: they match the
        selected node and all its children, using the lft/rght nested-set
        columns (rebuilt first so they are up to date).

        Returns the queryset annotated with ``plannedshort``, the unplanned
        part of each demand's quantity.
        """
        q = Demand.objects.all()

        if "item" in request.GET:
            # Refresh lft/rght so the hierarchical range filter is correct
            Item.rebuildHierarchy(request.database)
            item = Item.objects.using(
                request.database).get(name__exact=unquote(request.GET["item"]))
            q = q.filter(item__lft__gte=item.lft, item__lft__lt=item.rght)
        if "location" in request.GET:
            Location.rebuildHierarchy(request.database)
            location = Location.objects.using(request.database).get(
                name__exact=unquote(request.GET["location"]))
            q = q.filter(location__lft__gte=location.lft,
                         location__lft__lt=location.rght)
        if "customer" in request.GET:
            Customer.rebuildHierarchy(request.database)
            customer = Customer.objects.using(request.database).get(
                name__exact=unquote(request.GET["customer"]))
            # Bug fix: use the relation lookup customer__lft (double
            # underscore), consistent with the item and location filters
            # above; "customer_lft" referenced a non-existent field.
            q = q.filter(customer__lft__gte=customer.lft,
                         customer__lft__lt=customer.rght)
        if "status_in" in request.GET:
            # Comma-separated list of demand statuses to include
            status = unquote(request.GET["status_in"])
            q = q.filter(status__in=status.split(","))

        return q.annotate(
            plannedshort=RawSQL("quantity - plannedquantity", []))
Пример #2
0
  def query(reportclass, request, basequery):
    '''
    A generator that recurses upstream or downstream in the supply chain,
    starting from the entity selected by the base query.

    The direction is taken from reportclass.downstream.  A stack ("root")
    of pending nodes is processed iteratively, and every visited node is
    yielded as a dict describing one step of the supply path: operation
    name and type, location, lead times, quantity, the buffers and
    resources it touches, plus parent/depth bookkeeping for the tree view.
    '''
    # The bind parameters of the base query hold the name of the selected
    # entity; getRoot() turns it into the initial traversal stack.
    entity = basequery.query.get_compiler(basequery.db).as_sql(with_col_aliases=False)[1]
    entity = entity[0]
    root = reportclass.getRoot(request, entity)

    # Update item and location hierarchies
    Item.rebuildHierarchy(database=request.database)
    Location.rebuildHierarchy(database=request.database)

    # Recurse over all operations
    # TODO the current logic isn't generic enough. A lot of buffers may not be explicitly
    # defined, and are created on the fly by deliveries, itemsuppliers or itemdistributions.
    # Currently we don't account for such situations.
    # TODO usage search doesn't find item distributions from that location
    counter = 1
    #operations = set()
    while len(root) > 0:
      # Pop the current node from the stack.
      # Each entry is a tuple: (level, parent node id, operation, quantity,
      # suboperation index, parent operation, real depth, flag to expand
      # superoperations, location name).
      level, parent, curoperation, curqty, issuboperation, parentoper, realdepth, pushsuper, location = root.pop()
      curnode = counter
      counter += 1

      # If an operation has parent operations we forget about the current operation
      # and use only the parent
      if pushsuper and not isinstance(curoperation, (ItemSupplier, ItemDistribution)):
        hasParents = False
        for x in curoperation.superoperations.using(request.database).only('operation').order_by("-priority"):
          root.append( (level, parent, x.operation, curqty, issuboperation, parentoper, realdepth, False, location) )
          hasParents = True
        if hasParents:
          continue

      # Avoid showing the same operation twice.
      # This feature is disabled by default a) because it is not intuitive to understand
      # where operations are skipped, and b) because the quantity of each occurrence might
      # be different.
      # In some models the duplication is confusing and you can enable this feature.
      #if curoperation in operations: continue
      #operations.add(curoperation)

      # Find the next level
      hasChildren = False
      subcount = 0
      if reportclass.downstream:
        # Downstream recursion
        if isinstance(curoperation, ItemSupplier):
          name = 'Purchase %s @ %s from %s' % (curoperation.item.name, location, curoperation.supplier.name)
          optype = "purchase"
          duration = curoperation.leadtime
          duration_per = None
          buffers = [ ("%s @ %s" % (curoperation.item.name, curoperation.location.name), 1), ]
          resources = None
          # Continue the recursion with the consumers of the receiving
          # buffer, when that buffer is explicitly defined.
          try:
            downstr = Buffer.objects.using(request.database).get(name="%s @ %s" % (curoperation.item.name, curoperation.location.name))
            root.extend( reportclass.findUsage(downstr, request.database, level, curqty, realdepth + 1, False, location) )
          except Buffer.DoesNotExist:
            pass
        elif isinstance(curoperation, ItemDistribution):
          name = 'Ship %s from %s to %s' % (curoperation.item.name, curoperation.origin.name, curoperation.location.name)
          optype = "distribution"
          duration = curoperation.leadtime
          duration_per = None
          buffers = [
            ("%s @ %s" % (curoperation.item.name, curoperation.origin.name), -1),
            ("%s @ %s" % (curoperation.item.name, curoperation.location.name), 1)
            ]
          resources = None
        else:
          name = curoperation.name
          optype = curoperation.type
          duration = curoperation.duration
          duration_per = curoperation.duration_per
          buffers = [ (x.thebuffer.name, float(x.quantity)) for x in curoperation.flows.only('thebuffer', 'quantity').using(request.database) ]
          resources = [ (x.resource.name, float(x.quantity)) for x in curoperation.loads.only('resource', 'quantity').using(request.database) ]
          # For every buffer this operation produces into (quantity > 0),
          # push the operations consuming from it (quantity < 0).
          for x in curoperation.flows.filter(quantity__gt=0).only('thebuffer').using(request.database):
            curflows = x.thebuffer.flows.filter(quantity__lt=0).only('operation', 'quantity').using(request.database)
            for y in curflows:
              hasChildren = True
              root.append( (level - 1, curnode, y.operation, - curqty * y.quantity, subcount, None, realdepth - 1, pushsuper, x.thebuffer.location.name if x.thebuffer.location else None) )
          # Expand suboperations; routing children get positive sequence
          # numbers, other child types negative ones.
          for x in curoperation.suboperations.using(request.database).only('suboperation').order_by("-priority"):
            subcount += curoperation.type == "routing" and 1 or -1
            root.append( (level - 1, curnode, x.suboperation, curqty, subcount, curoperation, realdepth, False, location) )
            hasChildren = True
      else:
        # Upstream recursion
        if isinstance(curoperation, ItemSupplier):
          name = 'Purchase %s @ %s from %s' % (curoperation.item.name, location, curoperation.supplier.name)
          optype = "purchase"
          duration = curoperation.leadtime
          duration_per = None
          buffers = [ ("%s @ %s" % (curoperation.item.name, location), 1), ]
          resources = None
        elif isinstance(curoperation, ItemDistribution):
          name = 'Ship %s from %s to %s' % (curoperation.item.name, curoperation.origin.name, location)
          optype = "distribution"
          duration = curoperation.leadtime
          duration_per = None
          buffers = [
            ("%s @ %s" % (curoperation.item.name, curoperation.origin.name), -1),
            ("%s @ %s" % (curoperation.item.name, curoperation.location.name), 1)
            ]
          resources = None
          # Keep recursing upstream from the shipping origin buffer,
          # when that buffer is explicitly defined.
          try:
            upstr = Buffer.objects.using(request.database).get(name="%s @ %s" % (curoperation.item.name, curoperation.origin.name))
            root.extend( reportclass.findReplenishment(upstr, request.database, level + 2, curqty, realdepth + 1, False) )
          except Buffer.DoesNotExist:
            pass
        else:
          curprodflow = None
          name = curoperation.name
          optype = curoperation.type
          duration = curoperation.duration
          duration_per = curoperation.duration_per
          buffers = [ (x.thebuffer.name, float(x.quantity)) for x in curoperation.flows.only('thebuffer', 'quantity').using(request.database) ]
          resources = [ (x.resource.name, float(x.quantity)) for x in curoperation.loads.only('resource', 'quantity').using(request.database) ]
          # Remember a producing flow: it is used below to scale the
          # quantities of the upstream (consuming) operations.
          for x in curoperation.flows.filter(quantity__gt=0).only('quantity').using(request.database):
            curprodflow = x
          curflows = curoperation.flows.filter(quantity__lt=0).only('thebuffer', 'quantity').using(request.database)
          for y in curflows:
            if y.thebuffer.producing:
              hasChildren = True
              root.append( (
                level + 1, curnode, y.thebuffer.producing,
                curprodflow and (-curqty * y.quantity) / curprodflow.quantity or (-curqty * y.quantity),
                subcount, None, realdepth + 1, True, y.thebuffer.location
                ) )
            else:
              # Buffer has no producing operation: delegate to
              # findReplenishment to locate other replenishment sources.
              root.extend( reportclass.findReplenishment(y.thebuffer, request.database, level + 2, curqty, realdepth + 1, False) )
          # Expand suboperations; routing children get positive sequence
          # numbers, other child types negative ones.
          for x in curoperation.suboperations.using(request.database).only('suboperation').order_by("-priority"):
            subcount += curoperation.type == "routing" and 1 or -1
            root.append( (level + 1, curnode, x.suboperation, curqty, subcount, curoperation, realdepth, False, location) )
            hasChildren = True

      # Process the current node
      yield {
        'depth': abs(level),
        'id': curnode,
        'operation': name,
        'type': optype,
        'location': curoperation.location and curoperation.location.name or '',
        'duration': duration,
        'duration_per': duration_per,
        'quantity': curqty,
        'suboperation': issuboperation,
        'buffers': buffers,
        'resources': resources,
        'parentoper': parentoper and parentoper.name,
        'parent': parent,
        'leaf': hasChildren and 'false' or 'true',
        'expanded': 'true',
        'numsuboperations': subcount,
        'realdepth': realdepth
        }
Пример #3
0
    def query(reportclass, request, basequery, sortsql="1 asc"):
        """
        Generate one record per item and reporting bucket.

        Each record carries the item master data, the configured item
        attributes, the demanded quantity ("orders"), the planned quantity,
        the constraint reasons (as JSON) and a running backlog, all
        aggregated over the item hierarchy (lft/rght nested-set columns).
        """
        basesql, baseparams = basequery.query.get_compiler(basequery.db).as_sql(
            with_col_aliases=False
        )

        # Assure the item hierarchy is up to date
        Item.rebuildHierarchy(database=basequery.db)

        # Execute a query to get the backlog at the start of the horizon.
        # NOTE: the %%s markers survive the string interpolation below as %s
        # bind-parameter slots for cursor.execute().
        startbacklogdict = {}
        query = """
          select name, sum(qty) from
            (
            select item.name, sum(demand.quantity) qty from (%s) item
            inner join item child on child.lft between item.lft and item.rght
            inner join demand on demand.item_id = child.name
            and demand.status in ('open','quote')
            and due < %%s
            group by item.name
            union all
            select item.name, sum(operationplanmaterial.quantity) qty
            from (%s) item
            inner join item child on child.lft between item.lft and item.rght
            inner join operationplanmaterial on operationplanmaterial.item_id = child.name
            inner join operationplan on operationplan.reference = operationplanmaterial.operationplan_id
              and operationplan.demand_id is not null
              and operationplan.enddate < %%s
            group by item.name
            ) t
            group by name
        """ % (
            basesql,
            basesql,
        )
        with transaction.atomic(using=request.database):
            with connections[request.database].chunked_cursor() as cursor_chunked:
                # Parameters follow the textual order of the placeholders:
                # basesql params + date, then basesql params + date again.
                cursor_chunked.execute(
                    query,
                    baseparams
                    + (request.report_startdate,)
                    + baseparams
                    + (request.report_startdate,),
                )
                for row in cursor_chunked:
                    if row[0]:
                        startbacklogdict[row[0]] = max(float(row[1]), 0)

        # Execute the query
        query = """
          select
          parent.name, parent.description, parent.category, parent.subcategory,
          parent.owner_id, parent.cost, parent.volume, parent.weight, parent.uom, parent.periodofcover, parent.source, parent.lastmodified,
          %s
          d.bucket,
          d.startdate,
          d.enddate,
          sum(coalesce((
            select sum(quantity)
            from demand
            inner join item child on child.lft between parent.lft and parent.rght
            where demand.item_id = child.name
            and status in ('open','quote')
            and due >= greatest(%%s,d.startdate)
            and due < d.enddate
            ),0)) orders,
          sum(coalesce((
            select sum(operationplan.quantity)
            from operationplan
            inner join item child on child.lft between parent.lft and parent.rght
            where operationplan.item_id = child.name
            and operationplan.demand_id is not null
            and operationplan.enddate >= greatest(%%s,d.startdate)
            and operationplan.enddate < d.enddate
            ),0)) planned,
          (select json_agg(json_build_array(f1,f2)) from
            (select distinct out_constraint.name as f1, out_constraint.owner as f2
            from out_constraint
            inner join item child
              on child.lft between parent.lft and parent.rght
            inner join operationplan
              on operationplan.demand_id = out_constraint.demand
              and operationplan.due is not null
            and out_constraint.item = child.name
            and operationplan.enddate >= greatest(%%s,d.startdate)
            and (
              out_constraint.name not in ('before current', 'before fence')
              or out_constraint.enddate > d.enddate
              )
            and operationplan.due < d.enddate
            limit 20
            ) cte_reasons
          ) reasons
          from (%s) parent
          cross join (
                       select name as bucket, startdate, enddate
                       from common_bucketdetail
                       where bucket_id = %%s and enddate > %%s and startdate < %%s
                       ) d
          group by
            parent.name, parent.description, parent.category, parent.subcategory,
            parent.owner_id, parent.cost, parent.volume, parent.weight, parent.uom, parent.periodofcover,
            parent.source, parent.lastmodified, parent.lft, parent.rght,
            %s
            d.bucket, d.startdate, d.enddate
          order by %s, d.startdate
        """ % (
            reportclass.attr_sql,
            basesql,
            reportclass.attr_sql,
            sortsql,
        )

        # Build the python result
        with transaction.atomic(using=request.database):
            with connections[request.database].chunked_cursor() as cursor_chunked:
                cursor_chunked.execute(
                    query,
                    (request.report_startdate,) * 3  # orders + planned + constraints
                    + baseparams  # orders planned
                    + (
                        request.report_bucket,
                        request.report_startdate,
                        request.report_enddate,
                    ),  # buckets
                )
                previtem = None
                itemattributefields = getAttributeFields(Item)
                for row in cursor_chunked:
                    numfields = len(row)
                    if row[0] != previtem:
                        # New item: restart from the backlog at horizon start
                        backlog = startbacklogdict.get(row[0], 0)
                        previtem = row[0]
                    # Demand minus supply of this bucket rolls into the backlog
                    backlog += float(row[numfields - 3]) - float(row[numfields - 2])
                    res = {
                        "item": row[0],
                        "description": row[1],
                        "category": row[2],
                        "subcategory": row[3],
                        "owner": row[4],
                        "cost": row[5],
                        "volume": row[6],
                        "weight": row[7],
                        "uom": row[8],
                        "periodofcover": row[9],
                        "source": row[10],
                        "lastmodified": row[11],
                        "bucket": row[numfields - 6],
                        "startdate": row[numfields - 5].date(),
                        "enddate": row[numfields - 4].date(),
                        "demand": row[numfields - 3],
                        "supply": row[numfields - 2],
                        "reasons": json.dumps(row[numfields - 1]),
                        "backlog": max(backlog or 0, 0),
                    }
                    # Custom item attributes follow the 12 fixed columns
                    idx = 12
                    for f in itemattributefields:
                        res[f.field_name] = row[idx]
                        idx += 1
                    yield res
Пример #4
0
    def query(request, basequery, sortsql='1 asc'):
        """
        Yield one record per item and reporting bucket with the demanded
        quantity, the planned (delivered) quantity and the running backlog,
        aggregated over the item hierarchy (lft/rght nested-set columns).
        """
        basesql, baseparams = basequery.query.get_compiler(
            basequery.db).as_sql(with_col_aliases=True)
        cursor = connections[request.database].cursor()

        # Assure the item hierarchy is up to date
        Item.rebuildHierarchy(database=basequery.db)

        # Execute a query to get the backlog at the start of the horizon.
        # Fix: dates and the bucket name are passed as bind parameters (the
        # %%s markers below become %s slots after interpolation) instead of
        # being quoted into the SQL text - safer and avoids quoting issues.
        startbacklogdict = {}
        query = '''
      select items.name, sum(quantity)
      from (%s) items
      inner join item
      on item.lft between items.lft and items.rght
      inner join out_demand
      on item.name = out_demand.item
        and (plandate is null or plandate >= %%s)
        and due < %%s
      group by items.name
      ''' % basesql
        cursor.execute(
            query,
            baseparams + (request.report_startdate, request.report_startdate))
        for row in cursor.fetchall():
            if row[0]:
                startbacklogdict[row[0]] = float(row[1])

        # Execute the query
        query = '''
        select y.name as row1,
               y.bucket as col1, y.startdate as col2, y.enddate as col3,
               min(y.orders),
               min(y.planned)
        from (
          select x.name as name, x.lft as lft, x.rght as rght,
               x.bucket as bucket, x.startdate as startdate, x.enddate as enddate,
               coalesce(sum(demand.quantity),0) as orders,
               min(x.planned) as planned
          from (
          select items.name as name, items.lft as lft, items.rght as rght,
                 d.bucket as bucket, d.startdate as startdate, d.enddate as enddate,
                 coalesce(sum(out_demand.quantity),0) as planned
          from (%s) items
          -- Multiply with buckets
          cross join (
             select name as bucket, startdate, enddate
             from common_bucketdetail
             where bucket_id = %%s and enddate > %%s and startdate < %%s
             ) d
          -- Include hierarchical children
          inner join item
          on item.lft between items.lft and items.rght
          -- Planned quantity
          left join out_demand
          on item.name = out_demand.item
          and d.startdate <= out_demand.plandate
          and d.enddate > out_demand.plandate
          and out_demand.plandate >= %%s
          and out_demand.plandate < %%s
          -- Grouping
          group by items.name, items.lft, items.rght, d.bucket, d.startdate, d.enddate
        ) x
        -- Requested quantity
        inner join item
        on item.lft between x.lft and x.rght
        left join demand
        on item.name = demand.item_id
        and x.startdate <= demand.due
        and x.enddate > demand.due
        and demand.due >= %%s
        and demand.due < %%s
        -- Grouping
        group by x.name, x.lft, x.rght, x.bucket, x.startdate, x.enddate
        ) y
        -- Ordering and grouping
        group by y.name, y.lft, y.rght, y.bucket, y.startdate, y.enddate
        order by %s, y.startdate
       ''' % (basesql, sortsql)
        # Bind parameters in the textual order of the placeholders:
        # basesql params, bucket/horizon window, plandate window, due window.
        cursor.execute(
            query,
            baseparams + (
                request.report_bucket,
                request.report_startdate, request.report_enddate,
                request.report_startdate, request.report_enddate,
                request.report_startdate, request.report_enddate))

        # Build the python result
        previtem = None
        for row in cursor.fetchall():
            if row[0] != previtem:
                # New item: restart from the backlog at the horizon start
                backlog = startbacklogdict.get(row[0], 0)
                previtem = row[0]
            # Demand minus supply of this bucket rolls into the backlog
            backlog += float(row[4]) - float(row[5])
            yield {
                'item': row[0],
                'bucket': row[1],
                'startdate': python_date(row[2]),
                'enddate': python_date(row[3]),
                'demand': round(row[4], 1),
                'supply': round(row[5], 1),
                'backlog': round(backlog, 1)
            }
Пример #5
0
  def query(request, basequery, sortsql='1 asc'):
    """
    Yield one record per item and reporting bucket with the open demand
    quantity, the planned (delivered) quantity and the running backlog,
    aggregated over the item hierarchy (lft/rght nested-set columns).
    """
    basesql, baseparams = basequery.query.get_compiler(basequery.db).as_sql(with_col_aliases=False)
    cursor = connections[request.database].cursor()

    # Assure the item hierarchy is up to date
    Item.rebuildHierarchy(database=basequery.db)

    # Execute a query to get the backlog at the start of the horizon.
    # Fix: dates and the bucket name are passed as bind parameters (the
    # %%s markers below become %s slots after interpolation) instead of
    # being quoted into the SQL text - safer and avoids quoting issues.
    startbacklogdict = {}
    query = '''
      select items.name, sum(quantity)
      from (%s) items
      inner join item
      on item.lft between items.lft and items.rght
      inner join out_demand
      on item.name = out_demand.item
        and (plandate is null or plandate >= %%s)
        and due < %%s
      group by items.name
      ''' % basesql
    cursor.execute(
      query, baseparams + (request.report_startdate, request.report_startdate))
    for row in cursor.fetchall():
      if row[0]:
        startbacklogdict[row[0]] = float(row[1])

    # Execute the query
    query = '''
        select y.name as row1,
               y.bucket as col1, y.startdate as col2, y.enddate as col3,
               min(y.orders),
               min(y.planned)
        from (
          select x.name as name, x.lft as lft, x.rght as rght,
               x.bucket as bucket, x.startdate as startdate, x.enddate as enddate,
               coalesce(sum(demand.quantity),0) as orders,
               min(x.planned) as planned
          from (
          select items.name as name, items.lft as lft, items.rght as rght,
                 d.bucket as bucket, d.startdate as startdate, d.enddate as enddate,
                 coalesce(sum(out_demand.quantity),0) as planned
          from (%s) items
          -- Multiply with buckets
          cross join (
             select name as bucket, startdate, enddate
             from common_bucketdetail
             where bucket_id = %%s and enddate > %%s and startdate < %%s
             ) d
          -- Include hierarchical children
          inner join item
          on item.lft between items.lft and items.rght
          -- Planned quantity
          left join out_demand
          on item.name = out_demand.item
          and d.startdate <= out_demand.plandate
          and d.enddate > out_demand.plandate
          and out_demand.plandate >= %%s
          and out_demand.plandate < %%s
          -- Grouping
          group by items.name, items.lft, items.rght, d.bucket, d.startdate, d.enddate
        ) x
        -- Requested quantity
        inner join item
        on item.lft between x.lft and x.rght
        left join demand
        on item.name = demand.item_id
        and x.startdate <= demand.due
        and x.enddate > demand.due
        and demand.due >= %%s
        and demand.due < %%s
        and demand.status = 'open'
        -- Grouping
        group by x.name, x.lft, x.rght, x.bucket, x.startdate, x.enddate
        ) y
        -- Ordering and grouping
        group by y.name, y.lft, y.rght, y.bucket, y.startdate, y.enddate
        order by %s, y.startdate
       ''' % (basesql, sortsql)
    # Bind parameters in the textual order of the placeholders:
    # basesql params, bucket/horizon window, plandate window, due window.
    cursor.execute(
      query,
      baseparams + (
        request.report_bucket,
        request.report_startdate, request.report_enddate,
        request.report_startdate, request.report_enddate,
        request.report_startdate, request.report_enddate))

    # Build the python result
    previtem = None
    for row in cursor.fetchall():
      if row[0] != previtem:
        # New item: restart from the backlog at the horizon start
        backlog = startbacklogdict.get(row[0], 0)
        previtem = row[0]
      # Demand minus supply of this bucket rolls into the backlog
      backlog += float(row[4]) - float(row[5])
      yield {
        'item': row[0],
        'bucket': row[1],
        'startdate': python_date(row[2]),
        'enddate': python_date(row[3]),
        'demand': round(row[4], 1),
        'supply': round(row[5], 1),
        'backlog': round(backlog, 1)
        }
Пример #6
0
    def query(reportclass, request, basequery, sortsql='1 asc'):
        '''
        Generate one record per item and reporting bucket with the demanded
        ("orders"), planned (delivered) and backlog quantities, plus the
        configured item attributes, aggregated over the item hierarchy
        (lft/rght nested-set columns).
        '''
        basesql, baseparams = basequery.query.get_compiler(
            basequery.db).as_sql(with_col_aliases=False)

        # Assure the item hierarchy is up to date
        Item.rebuildHierarchy(database=basequery.db)

        # Execute a query to get the backlog at the start of the horizon.
        # NOTE: the %%s markers survive the string interpolation below as %s
        # bind-parameter slots for cursor_chunked.execute().
        startbacklogdict = {}
        query = '''
      select name, sum(qty) from
        (
        select parent.name, sum(demand.quantity) qty from (%s) item
        inner join item parent on item.lft between parent.lft and parent.rght
        inner join demand on demand.item_id = item.name and demand.status in ('open','quote') and due < %%s
        group by parent.name
        union all
        select parent.name, sum(operationplanmaterial.quantity) qty
        from operationplanmaterial
        inner join operationplan on operationplan.reference = operationplanmaterial.operationplan_id
          and operationplan.type = 'DLVR'
          and operationplan.enddate < %%s
        inner join (%s) item on operationplanmaterial.item_id = item.name
        inner join item parent on item.lft between parent.lft and parent.rght
        group by parent.name
        ) t
        group by name
      ''' % (basesql, basesql)
        with connections[request.database].chunked_cursor() as cursor_chunked:
            # Parameters follow the textual order of the placeholders:
            # first basesql params, then the two dates, then basesql again.
            cursor_chunked.execute(
                query, baseparams +
                (request.report_startdate, request.report_startdate) +
                baseparams)
            for row in cursor_chunked:
                if row[0]:
                    startbacklogdict[row[0]] = float(row[1])

        # Execute the query
        query = '''
      select
      parent.name, parent.description, parent.category, parent.subcategory,
      parent.owner_id, parent.cost, parent.source, parent.lastmodified,
      %s
      d.bucket,
      d.startdate,
      d.enddate,
      sum(coalesce((select sum(quantity) from demand
       where demand.item_id = child.name and status in ('open','quote') and due >= greatest(%%s,d.startdate) and due < d.enddate),0)) orders,
      sum(coalesce((select sum(-operationplanmaterial.quantity) from operationplanmaterial
      inner join operationplan on operationplan.reference = operationplanmaterial.operationplan_id and operationplan.type = 'DLVR'
      where operationplanmaterial.item_id = child.name
      and operationplanmaterial.flowdate >= greatest(%%s,d.startdate)
      and operationplanmaterial.flowdate < d.enddate),0)) planned
      from (%s) parent
      inner join item child on child.lft between parent.lft and parent.rght
      cross join (
                   select name as bucket, startdate, enddate
                   from common_bucketdetail
                   where bucket_id = %%s and enddate > %%s and startdate < %%s
                   ) d
      group by
        parent.name, parent.description, parent.category, parent.subcategory,
        parent.owner_id, parent.cost, parent.source, parent.lastmodified,
        %s
        d.bucket, d.startdate, d.enddate
      order by %s, d.startdate
    ''' % (reportclass.attr_sql, basesql, reportclass.attr_sql, sortsql)

        # Build the python result
        with connections[request.database].chunked_cursor() as cursor_chunked:
            cursor_chunked.execute(
                query,
                (request.report_startdate, request.report_startdate)
                +  # orders planned
                baseparams + (request.report_bucket, request.report_startdate,
                              request.report_enddate)  # buckets
            )
            previtem = None
            for row in cursor_chunked:
                numfields = len(row)
                if row[0] != previtem:
                    # New item: restart from the backlog at horizon start
                    backlog = startbacklogdict.get(row[0], 0)
                    previtem = row[0]
                # Demand minus supply of this bucket rolls into the backlog
                backlog += float(row[numfields - 2]) - float(
                    row[numfields - 1])
                res = {
                    'item': row[0],
                    'description': row[1],
                    'category': row[2],
                    'subcategory': row[3],
                    'owner': row[4],
                    'cost': row[5],
                    'source': row[6],
                    'lastmodified': row[7],
                    'bucket': row[numfields - 5],
                    'startdate': row[numfields - 4].date(),
                    'enddate': row[numfields - 3].date(),
                    'demand': row[numfields - 2],
                    'supply': row[numfields - 1],
                    'backlog': backlog,
                }
                # Custom item attributes follow the 8 fixed columns
                idx = 8
                for f in getAttributeFields(Item):
                    res[f.field_name] = row[idx]
                    idx += 1
                yield res
Пример #7
0
    def query(reportclass, request, basequery, sortsql='1 asc'):
        '''
        Generate one record per item and reporting bucket with the demanded
        ("orders"), planned (delivered) and backlog quantities, plus the
        configured item attributes, aggregated over the item hierarchy
        (lft/rght nested-set columns).
        '''
        basesql, baseparams = basequery.query.get_compiler(
            basequery.db).as_sql(with_col_aliases=False)

        # Assure the item hierarchy is up to date
        Item.rebuildHierarchy(database=basequery.db)

        # Execute a query to get the backlog at the start of the horizon:
        # demand due before the horizon minus deliveries before the horizon.
        startbacklogdict = {}
        query = '''
      select items.name, coalesce(req.qty, 0) - coalesce(pln.qty, 0)
      from (%s) items
      left outer join (
        select parent.name, sum(quantity) qty
        from demand
        inner join item on demand.item_id = item.name
        inner join item parent on item.lft between parent.lft and parent.rght
        where status in ('open', 'quote')
        and due < %%s
        group by parent.name
        ) req
      on req.name = items.name
      left outer join (
        select parent.name, sum(operationplan.quantity) qty
        from operationplan
        inner join demand on operationplan.demand_id = demand.name
          and operationplan.owner_id is null
          and operationplan.enddate < %%s
        inner join item on demand.item_id = item.name
        inner join item parent on item.lft between parent.lft and parent.rght
        group by parent.name
        ) pln
      on pln.name = items.name
      ''' % basesql
        with connections[request.database].chunked_cursor() as cursor_chunked:
            cursor_chunked.execute(
                query, baseparams +
                (request.report_startdate, request.report_startdate))
            for row in cursor_chunked:
                if row[0]:
                    startbacklogdict[row[0]] = float(row[1])

        # Execute the query
        query = '''
      select
      parent.name, %s
      d.bucket,
      d.startdate,
      d.enddate,
      sum(coalesce((select sum(quantity) from demand
       where demand.item_id = child.name and status in ('open','quote') and due >= greatest(%%s,d.startdate) and due < d.enddate),0)) orders,
      sum(coalesce((select sum(-operationplanmaterial.quantity) from operationplanmaterial
      inner join operationplan on operationplan.id = operationplanmaterial.operationplan_id and operationplan.type = 'DLVR'
      where operationplanmaterial.item_id = child.name
      and operationplanmaterial.flowdate >= greatest(%%s,d.startdate)
      and operationplanmaterial.flowdate < d.enddate),0)) planned
      from (%s) parent
      inner join item child on child.lft between parent.lft and parent.rght
      cross join (
                   select name as bucket, startdate, enddate
                   from common_bucketdetail
                   where bucket_id = %%s and enddate > %%s and startdate < %%s
                   ) d
      group by
      parent.name, %s
      d.bucket,
      d.startdate,
      d.enddate
      order by %s, d.startdate
    ''' % (reportclass.attr_sql, basesql, reportclass.attr_sql, sortsql)

        # Build the python result
        with connections[request.database].chunked_cursor() as cursor_chunked:
            cursor_chunked.execute(
                query,
                # Bug fix: bind parameters must follow the textual order of
                # the %s placeholders in the SQL. The two greatest(%s,...)
                # date parameters appear BEFORE the placeholders contributed
                # by basesql ("from (%s) parent"), so they must precede
                # baseparams in the tuple.
                (
                    request.report_startdate,  # orders
                    request.report_startdate,  # planned
                ) + baseparams + (
                    request.report_bucket,
                    request.report_startdate,
                    request.report_enddate  # buckets
                ))
            previtem = None
            for row in cursor_chunked:
                numfields = len(row)
                if row[0] != previtem:
                    # New item: restart from the backlog at horizon start
                    backlog = startbacklogdict.get(row[0], 0)
                    previtem = row[0]
                # Demand minus supply of this bucket rolls into the backlog
                backlog += float(row[numfields - 2]) - float(
                    row[numfields - 1])
                res = {
                    'item': row[0],
                    'bucket': row[numfields - 5],
                    'startdate': row[numfields - 4].date(),
                    'enddate': row[numfields - 3].date(),
                    'demand': round(row[numfields - 2], 1),
                    'supply': round(row[numfields - 1], 1),
                    'backlog': round(backlog, 1),
                }
                # Custom item attributes start right after the item name
                idx = 1
                for f in getAttributeFields(Item):
                    res[f.field_name] = row[idx]
                    idx += 1
                yield res
Example #8
0
    def query(reportclass, request, basequery, sortsql='1 asc'):
        """Generate one result dict per item/bucket with demand, supply and a
        running backlog.

        The backlog starts from the pre-horizon value (orders due, minus
        deliveries ended, before ``request.report_startdate``) and is then
        rolled forward bucket by bucket as demand minus supply.

        :param basequery: queryset selecting the item rows to report on; it is
            compiled to SQL and embedded as a subquery.
        :param sortsql: SQL fragment controlling the ordering of the result.
        :returns: generator of dicts with keys 'item', 'bucket', 'startdate',
            'enddate', 'demand', 'supply', 'backlog' plus one entry per
            custom item attribute.
        """
        # Compile the base queryset once; basesql is spliced into the raw SQL
        # below and baseparams supplies its bind parameters.
        basesql, baseparams = basequery.query.get_compiler(
            basequery.db).as_sql(with_col_aliases=False)

        # Assure the item hierarchy is up to date
        Item.rebuildHierarchy(database=basequery.db)

        # Execute a query to get the backlog at the start of the horizon.
        # Note: %%s survives the string interpolation below as a %s bind
        # placeholder for the database driver.
        startbacklogdict = {}
        query = '''
      select items.name, coalesce(req.qty, 0) - coalesce(pln.qty, 0)
      from (%s) items
      left outer join (
        select parent.name, sum(quantity) qty
        from demand
        inner join item on demand.item_id = item.name
        inner join item parent on item.lft between parent.lft and parent.rght
        where status in ('open', 'quote')
        and due < %%s
        group by parent.name
        ) req
      on req.name = items.name
      left outer join (
        select parent.name, sum(operationplan.quantity) qty
        from operationplan
        inner join demand on operationplan.demand_id = demand.name
          and operationplan.owner_id is null
          and operationplan.enddate < %%s
        inner join item on demand.item_id = item.name
        inner join item parent on item.lft between parent.lft and parent.rght
        group by parent.name
        ) pln
      on pln.name = items.name
      ''' % basesql
        with connections[request.database].chunked_cursor() as cursor_chunked:
            # Parameter order follows the placeholders: basequery params
            # first, then the two report_startdate bounds.
            cursor_chunked.execute(
                query, baseparams +
                (request.report_startdate, request.report_startdate))
            for row in cursor_chunked:
                if row[0]:
                    startbacklogdict[row[0]] = float(row[1])

        # Execute the query.
        # NOTE(review): this assumes reportclass.attr_sql is either empty or a
        # fragment ending in a comma, so the select and group-by lists stay
        # syntactically valid — confirm against the report base class.
        query = '''
        select y.name, %s
               y.bucket, y.startdate, y.enddate,
               min(y.orders),
               min(y.planned)
        from (
          select x.name as name, x.lft as lft, x.rght as rght,
               x.bucket as bucket, x.startdate as startdate, x.enddate as enddate,
               coalesce(sum(demand.quantity),0) as orders,
               min(x.planned) as planned
          from (
          select items.name as name, items.lft as lft, items.rght as rght,
                 d.bucket as bucket, d.startdate as startdate, d.enddate as enddate,
                 coalesce(sum(operationplan.quantity),0) as planned
          from (%s) items
          -- Multiply with buckets
          cross join (
             select name as bucket, startdate, enddate
             from common_bucketdetail
             where bucket_id = %%s and enddate > %%s and startdate < %%s
             ) d
          -- Include hierarchical children
          inner join item
          on item.lft between items.lft and items.rght
          -- Planned quantity
          left outer join operationplan
          on operationplan.type = 'DLVR'
          and operationplan.item_id = item.name
          and d.startdate <= operationplan.enddate
          and d.enddate > operationplan.enddate
          and operationplan.enddate >= %%s
          and operationplan.enddate < %%s
          -- Grouping
          group by items.name, items.lft, items.rght, d.bucket, d.startdate, d.enddate
        ) x
        -- Requested quantity
        inner join item
        on item.lft between x.lft and x.rght
        left join demand
        on item.name = demand.item_id
        and x.startdate <= demand.due
        and x.enddate > demand.due
        and demand.due >= %%s
        and demand.due < %%s
        and demand.status in ('open', 'quote')
        -- Grouping
        group by x.name, x.lft, x.rght, x.bucket, x.startdate, x.enddate
        ) y
        -- Ordering and grouping
        group by %s y.name, y.lft, y.rght, y.bucket, y.startdate, y.enddate
        order by %s, y.startdate
       ''' % (reportclass.attr_sql, basesql, reportclass.attr_sql, sortsql)

        # Build the python result
        with connections[request.database].chunked_cursor() as cursor_chunked:
            # Parameter order matches placeholder order in the SQL text:
            # basequery params, bucket selection, then the date windows for
            # planned deliveries and open demand.
            cursor_chunked.execute(
                query,
                baseparams + (request.report_bucket, request.report_startdate,
                              request.report_enddate, request.report_startdate,
                              request.report_enddate, request.report_startdate,
                              request.report_enddate))
            previtem = None
            for row in cursor_chunked:
                numfields = len(row)
                # Rows arrive grouped per item; reset the running backlog to
                # the pre-horizon value when a new item starts.
                if row[0] != previtem:
                    backlog = startbacklogdict.get(row[0], 0)
                    previtem = row[0]
                # Roll the backlog forward: demand minus supply of this bucket.
                backlog += float(row[numfields - 2]) - float(
                    row[numfields - 1])
                # Fixed columns are addressed from the end of the row because
                # the number of attr_sql columns in the middle is variable.
                res = {
                    'item': row[0],
                    'bucket': row[numfields - 5],
                    'startdate': row[numfields - 4].date(),
                    'enddate': row[numfields - 3].date(),
                    'demand': round(row[numfields - 2], 1),
                    'supply': round(row[numfields - 1], 1),
                    'backlog': round(backlog, 1),
                }
                # Custom item attributes sit right after the item name column.
                idx = 1
                for f in getAttributeFields(Item):
                    res[f.field_name] = row[idx]
                    idx += 1
                yield res
Example #9
0
    def basequeryset(reportclass, request, *args, **kwargs):
        """Build the base queryset of purchase orders for this report.

        Applies optional calendar-window filters from the request, narrows the
        result by the entity encoded in the URL path (item / location /
        supplier / operationplanmaterial / produced), and annotates each row
        with costs, totals and the matching itemsupplier record's fields.
        """
        q = PurchaseOrder.objects.all()
        # Calendar window: keep orders overlapping the requested start/end.
        if "calendarstart" in request.GET:
            q = q.filter(
                Q(enddate__gte=request.GET["calendarstart"])
                | (
                    Q(enddate__isnull=True)
                    & Q(startdate__gte=request.GET["calendarstart"])
                )
            )
        if "calendarend" in request.GET:
            q = q.filter(
                Q(startdate__lte=request.GET["calendarend"])
                | (
                    Q(startdate__isnull=True)
                    & Q(enddate__lte=request.GET["calendarend"])
                )
            )
        if args and args[0]:
            # The 5th path segment selects the drill-down mode.
            paths = request.path.split("/")
            path = paths[4]
            # NOTE(review): uses paths[4] here but the alias `path` in the
            # other branches — same value, purely a consistency wart.
            if paths[4] == "operationplanmaterial":
                q = q.filter(
                    location__name=args[1],
                    item__name=args[0],
                    startdate__lt=args[2],
                    enddate__gte=args[2],
                )
            elif path == "produced":
                q = q.filter(
                    location__name=args[1],
                    item__name=args[0],
                    enddate__gte=args[2],
                    enddate__lt=args[3],
                )
            elif path == "supplier" or request.path.startswith(
                "/detail/input/supplier/"
            ):
                try:
                    Supplier.rebuildHierarchy(database=request.database)
                    sup = (
                        Supplier.objects.all().using(request.database).get(name=args[0])
                    )
                    lft = sup.lft
                    rght = sup.rght
                except Supplier.DoesNotExist:
                    # Unknown supplier: lft = rght = 1 matches no rows below.
                    lft = 1
                    rght = 1
                # Include the whole subtree of the selected supplier.
                q = q.filter(supplier__lft__gte=lft, supplier__rght__lte=rght)
            elif path == "location" or request.path.startswith(
                "/detail/input/location/"
            ):
                try:
                    Location.rebuildHierarchy(database=request.database)
                    loc = (
                        Location.objects.all().using(request.database).get(name=args[0])
                    )
                    lft = loc.lft
                    rght = loc.rght
                except Location.DoesNotExist:
                    lft = 1
                    rght = 1
                q = q.filter(location__lft__gte=lft, location__rght__lte=rght)
            elif path == "item" or request.path.startswith("/detail/input/item/"):
                try:
                    Item.rebuildHierarchy(database=request.database)
                    itm = Item.objects.all().using(request.database).get(name=args[0])
                    lft = itm.lft
                    rght = itm.rght
                except Item.DoesNotExist:
                    lft = 1
                    rght = 1
                q = q.filter(item__lft__gte=lft, item__rght__lte=rght)

        q = reportclass.operationplanExtraBasequery(q.select_related("item"), request)
        # All itemsupplier_* annotations below pick fields from the same
        # "best matching" itemsupplier row: identical where-clause and
        # ordering (effectivity around the order's end date, then priority,
        # with priority 0 sorted last).
        return q.annotate(
            # Unit cost: itemsupplier cost if one matches, else the item cost,
            # else 0.
            unit_cost=Cast(
                RawSQL(
                    """
                    coalesce((
                      select cost
                      from itemsupplier
                      where itemsupplier.item_id = operationplan.item_id
                        and (itemsupplier.location_id is null or itemsupplier.location_id = operationplan.location_id)
                        and itemsupplier.supplier_id = operationplan.supplier_id
                      order by operationplan.enddate < itemsupplier.effective_end desc nulls first,
                         operationplan.enddate >= itemsupplier.effective_start desc nulls first,
                         priority <> 0,
                         priority
                      limit 1),
                     (select cost from item where item.name = operationplan.item_id), 0)
                    """,
                    [],
                ),
                output_field=FloatField(),
            ),
            total_cost=Cast(F("unit_cost") * F("quantity"), output_field=FloatField()),
            total_volume=Cast(
                F("item__volume") * F("quantity"), output_field=FloatField()
            ),
            total_weight=Cast(
                F("item__weight") * F("quantity"), output_field=FloatField()
            ),
            # Feasibility flag stored in the plan JSON; missing means feasible.
            feasible=RawSQL(
                "coalesce((operationplan.plan->>'feasible')::boolean, true)", []
            ),
            # Color used by the UI: orders at/above the 999999 sentinel are
            # re-scored by their delay (in days); others keep their raw color.
            computed_color=RawSQL(
                """
                case when operationplan.color >= 999999 and operationplan.plan ? 'item' then
                999999
                - extract(epoch from operationplan.delay)/86400.0
                + 1000000
                when operationplan.color >= 999999 and not(operationplan.plan ? 'item') then
                999999
                - extract(epoch from operationplan.delay)/86400.0
                else operationplan.color
                end
                """,
                [],
            ),
            itemsupplier_sizeminimum=Cast(
                RawSQL(
                    """
                    select sizeminimum
                    from itemsupplier
                    where itemsupplier.item_id = operationplan.item_id
                      and (itemsupplier.location_id is null or itemsupplier.location_id = operationplan.location_id)
                      and itemsupplier.supplier_id = operationplan.supplier_id
                    order by operationplan.enddate < itemsupplier.effective_end desc nulls first,
                       operationplan.enddate >= itemsupplier.effective_start desc nulls first,
                       priority <> 0,
                       priority
                    limit 1
                    """,
                    [],
                ),
                output_field=FloatField(),
            ),
            itemsupplier_sizemultiple=Cast(
                RawSQL(
                    """
                    select sizemultiple
                    from itemsupplier
                    where itemsupplier.item_id = operationplan.item_id
                      and (itemsupplier.location_id is null or itemsupplier.location_id = operationplan.location_id)
                      and itemsupplier.supplier_id = operationplan.supplier_id
                    order by operationplan.enddate < itemsupplier.effective_end desc nulls first,
                       operationplan.enddate >= itemsupplier.effective_start desc nulls first,
                       priority <> 0,
                       priority
                    limit 1
                    """,
                    [],
                ),
                output_field=FloatField(),
            ),
            itemsupplier_sizemaximum=Cast(
                RawSQL(
                    """
                    select sizemaximum
                    from itemsupplier
                    where itemsupplier.item_id = operationplan.item_id
                      and (itemsupplier.location_id is null or itemsupplier.location_id = operationplan.location_id)
                      and itemsupplier.supplier_id = operationplan.supplier_id
                    order by operationplan.enddate < itemsupplier.effective_end desc nulls first,
                       operationplan.enddate >= itemsupplier.effective_start desc nulls first,
                       priority <> 0,
                       priority
                    limit 1
                    """,
                    [],
                ),
                output_field=FloatField(),
            ),
            itemsupplier_priority=Cast(
                RawSQL(
                    """
                    select priority
                    from itemsupplier
                    where itemsupplier.item_id = operationplan.item_id
                      and (itemsupplier.location_id is null or itemsupplier.location_id = operationplan.location_id)
                      and itemsupplier.supplier_id = operationplan.supplier_id
                    order by operationplan.enddate < itemsupplier.effective_end desc nulls first,
                       operationplan.enddate >= itemsupplier.effective_start desc nulls first,
                       priority <> 0,
                       priority
                    limit 1
                    """,
                    [],
                ),
                output_field=FloatField(),
            ),
            itemsupplier_effective_start=Cast(
                RawSQL(
                    """
                    select effective_start
                    from itemsupplier
                    where itemsupplier.item_id = operationplan.item_id
                      and (itemsupplier.location_id is null or itemsupplier.location_id = operationplan.location_id)
                      and itemsupplier.supplier_id = operationplan.supplier_id
                    order by operationplan.enddate < itemsupplier.effective_end desc nulls first,
                       operationplan.enddate >= itemsupplier.effective_start desc nulls first,
                       priority <> 0,
                       priority
                    limit 1
                    """,
                    [],
                ),
                output_field=DateTimeField(),
            ),
            itemsupplier_effective_end=Cast(
                RawSQL(
                    """
                    select effective_end
                    from itemsupplier
                    where itemsupplier.item_id = operationplan.item_id
                      and (itemsupplier.location_id is null or itemsupplier.location_id = operationplan.location_id)
                      and itemsupplier.supplier_id = operationplan.supplier_id
                    order by operationplan.enddate < itemsupplier.effective_end desc nulls first,
                       operationplan.enddate >= itemsupplier.effective_start desc nulls first,
                       priority <> 0,
                       priority
                    limit 1
                    """,
                    [],
                ),
                output_field=DateTimeField(),
            ),
        )
Example #10
0
  def query(reportclass, request, basequery, sortsql='1 asc'):
    """Generate one result dict per item/bucket with demand ("orders"),
    supply ("planned" deliveries) and a running backlog.

    The backlog starts from the pre-horizon value (orders due, plus delivery
    material flows, before ``request.report_startdate``) and is rolled forward
    bucket by bucket as demand minus supply. Demand of an item includes all
    its hierarchical children.

    :param basequery: queryset selecting the item rows; compiled to SQL and
        embedded as a subquery.
    :param sortsql: SQL fragment controlling the result ordering.
    :returns: generator of dicts with the fixed item columns, bucket window,
        'demand', 'supply', 'backlog' and one entry per custom item attribute.
    """
    # Compile the base queryset once; basesql is interpolated into the raw
    # SQL strings below and baseparams supplies its bind parameters.
    basesql, baseparams = basequery.query.get_compiler(basequery.db).as_sql(with_col_aliases=False)

    # Assure the item hierarchy is up to date
    Item.rebuildHierarchy(database=basequery.db)

    # Execute a query to get the backlog at the start of the horizon.
    # %%s survives the %-interpolation as a %s bind placeholder.
    # The union adds open/quoted demand and (negative) delivered material
    # quantities; summing the union yields the net pre-horizon backlog.
    startbacklogdict = {}
    query = '''
      select name, sum(qty) from
        (
        select parent.name, sum(demand.quantity) qty from (%s) item
        inner join item parent on item.lft between parent.lft and parent.rght
        inner join demand on demand.item_id = item.name and demand.status in ('open','quote') and due < %%s
        group by parent.name
        union all
        select parent.name, sum(operationplanmaterial.quantity) qty
        from operationplanmaterial
        inner join operationplan on operationplan.reference = operationplanmaterial.operationplan_id
          and operationplan.type = 'DLVR'
          and operationplan.enddate < %%s
        inner join (%s) item on operationplanmaterial.item_id = item.name
        inner join item parent on item.lft between parent.lft and parent.rght
        group by parent.name
        ) t
        group by name
      ''' % (basesql, basesql)
    with connections[request.database].chunked_cursor() as cursor_chunked:
      # Parameter order follows the placeholders: first basesql occurrence,
      # the two date bounds, then the second basesql occurrence.
      cursor_chunked.execute(query, baseparams + (request.report_startdate, request.report_startdate) + baseparams)
      for row in cursor_chunked:
        if row[0]:
          startbacklogdict[row[0]] = float(row[1])

    # Execute the query.
    # NOTE(review): assumes reportclass.attr_sql is empty or ends with a
    # comma so the select/group-by lists stay valid — confirm in base class.
    query = '''
      select
      parent.name, parent.description, parent.category, parent.subcategory,
      parent.owner_id, parent.cost, parent.source, parent.lastmodified,
      %s
      d.bucket,
      d.startdate,
      d.enddate,
      sum(coalesce((select sum(quantity) from demand
       where demand.item_id = child.name and status in ('open','quote') and due >= greatest(%%s,d.startdate) and due < d.enddate),0)) orders,
      sum(coalesce((select sum(-operationplanmaterial.quantity) from operationplanmaterial
      inner join operationplan on operationplan.reference = operationplanmaterial.operationplan_id and operationplan.type = 'DLVR'
      where operationplanmaterial.item_id = child.name
      and operationplanmaterial.flowdate >= greatest(%%s,d.startdate)
      and operationplanmaterial.flowdate < d.enddate),0)) planned
      from (%s) parent
      inner join item child on child.lft between parent.lft and parent.rght
      cross join (
                   select name as bucket, startdate, enddate
                   from common_bucketdetail
                   where bucket_id = %%s and enddate > %%s and startdate < %%s
                   ) d
      group by
        parent.name, parent.description, parent.category, parent.subcategory,
        parent.owner_id, parent.cost, parent.source, parent.lastmodified,
        %s
        d.bucket, d.startdate, d.enddate
      order by %s, d.startdate
    ''' % (reportclass.attr_sql, basesql, reportclass.attr_sql, sortsql)

    # Build the python result
    with connections[request.database].chunked_cursor() as cursor_chunked:
      # Parameter order matches the textual placeholder order: the two
      # subquery date bounds come before the interpolated basesql params.
      cursor_chunked.execute(
        query,
        (request.report_startdate, request.report_startdate) +  # orders planned
        baseparams +
        (request.report_bucket, request.report_startdate, request.report_enddate)  # buckets
        )
      previtem = None
      for row in cursor_chunked:
        numfields = len(row)
        # Rows arrive grouped per item; reset the running backlog to the
        # pre-horizon value when a new item starts.
        if row[0] != previtem:
          backlog = startbacklogdict.get(row[0], 0)
          previtem = row[0]
        backlog += float(row[numfields - 2]) - float(row[numfields - 1])
        # The trailing columns are addressed from the end of the row because
        # the number of attr_sql columns in the middle is variable.
        res = {
          'item': row[0],
          'description': row[1],
          'category': row[2],
          'subcategory': row[3],
          'owner': row[4],
          'cost': row[5],
          'source': row[6],
          'lastmodified': row[7],
          'bucket': row[numfields - 5],
          'startdate': row[numfields - 4].date(),
          'enddate': row[numfields - 3].date(),
          'demand': row[numfields - 2],
          'supply': row[numfields - 1],
          'backlog': backlog,
          }
        # Custom item attributes follow the 8 fixed item columns.
        idx = 8
        for f in getAttributeFields(Item):
          res[f.field_name] = row[idx]
          idx += 1
        yield res