Ejemplo n.º 1
0
    def download_queryset(self, queryset, export_format):
        """Export the filtered BuildOrder queryset to a downloadable file."""
        # Serialise the queryset via the admin resource class
        exported = build.admin.BuildResource().export(queryset=queryset)
        payload = exported.export(export_format)

        return DownloadFile(payload, f"InvenTree_BuildOrders.{export_format}")
Ejemplo n.º 2
0
    def get(self, request, *args, **kwargs):
        """Render the selected StockItemLabel against the requested stock items and return the PDF."""
        label_pk = request.GET.get('label', None)

        try:
            label = StockItemLabel.objects.get(pk=label_pk)
        except (ValueError, StockItemLabel.DoesNotExist):
            raise ValidationError({'label': 'Invalid label ID'})

        items = []

        # Collect the valid StockItem objects, silently skipping invalid pks
        for pk in request.GET.getlist('items[]'):
            try:
                items.append(StockItem.objects.get(pk=pk))
            except (ValueError, StockItem.DoesNotExist):
                pass

        if not items:
            raise ValidationError({'items': 'Must provide valid stockitems'})

        pdf = label.render(items).getbuffer()

        return DownloadFile(pdf, 'stock_labels.pdf', content_type='application/pdf')
Ejemplo n.º 3
0
    def download_queryset(self, queryset, export_format):
        """Export the filtered PurchaseOrderLineItem queryset to a downloadable file."""
        resource = PurchaseOrderLineItemResource()
        payload = resource.export(queryset=queryset).export(export_format)

        return DownloadFile(payload, f"InvenTree_PurchaseOrderItems.{export_format}")
Ejemplo n.º 4
0
    def get(self, request, *args, **kwargs):
        """Export the selected parts as a CSV file download."""
        parts = self.get_parts(request)

        # Serialise to CSV via the import/export resource
        # (local renamed so it does not shadow the stdlib 'csv' module name)
        filedata = PartResource().export(queryset=parts).export('csv')

        return DownloadFile(filedata, 'InvenTree_Parts.csv')
Ejemplo n.º 5
0
    def download_queryset(self, queryset, export_format):
        """Export the filtered SalesOrder queryset to a downloadable file."""
        exported = SalesOrderResource().export(queryset=queryset)
        payload = exported.export(export_format)

        return DownloadFile(payload, f"InvenTree_SalesOrders.{export_format}")
Ejemplo n.º 6
0
    def download_queryset(self, queryset, export_format):
        """Download the filtered queryset as a file"""
        payload = PurchaseOrderResource().export(
            queryset=queryset).export(export_format)

        return DownloadFile(payload, f"InvenTree_PurchaseOrders.{export_format}")
Ejemplo n.º 7
0
    def get(self, request, *args, **kwargs):
        """Export a single PurchaseOrder to file, in the requested format."""
        order = get_object_or_404(PurchaseOrder, pk=self.kwargs['pk'])

        export_format = request.GET.get('format', 'csv')

        # e.g. "PO0001 - ACME Corp.csv"
        filename = f"{order} - {order.supplier.name}.{export_format}"

        filedata = order.export_to_file(format=export_format)

        return DownloadFile(filedata, filename)
Ejemplo n.º 8
0
    def get(self, request, *args, **kwargs):
        """Export the BOM for a single Part as a downloadable file.

        The export format is taken from the 'format' query parameter
        (default = 'csv').
        """
        part = get_object_or_404(Part, pk=self.kwargs['pk'])

        export_format = request.GET.get('format', 'csv')

        # BUG FIX: the filename was previously wrapped in literal
        # double-quote characters ('"...BOM.csv"'), which ended up
        # embedded in the downloaded file's name. Build it plainly,
        # consistent with the other export views.
        filename = f"{part.name}_BOM.{export_format}"

        filedata = part.export_bom(format=export_format)

        return DownloadFile(filedata, filename)
Ejemplo n.º 9
0
    def download_queryset(self, queryset, export_format):
        """Download this queryset as a file.

        Uses the APIDownloadMixin mixin class
        """
        payload = StockItemResource().export(
            queryset=queryset).export(export_format)

        # Stamp the filename with today's date, e.g. "01-Jan-2024"
        today = datetime.now().strftime("%d-%b-%Y")
        filename = f"InvenTree_StockItems_{today}.{export_format}"

        return DownloadFile(payload, filename)
Ejemplo n.º 10
0
    def get(self, request, *args, **kwargs):
        """Perform GET request to export SalesOrder dataset"""
        order = get_object_or_404(SalesOrder, pk=self.kwargs.get('pk', None))

        export_format = request.GET.get('format', 'csv')

        filename = f"{order} - {order.customer.name}.{export_format}"

        # Export every line item belonging to this order
        exported = SalesOrderLineItemResource().export(queryset=order.lines.all())
        payload = exported.export(format=export_format)

        return DownloadFile(payload, filename)
Ejemplo n.º 11
0
    def get(self, request, *args, **kwargs):
        """Export the line items of a single PurchaseOrder to file."""
        order = get_object_or_404(PurchaseOrder, pk=self.kwargs['pk'])

        export_format = request.GET.get('format', 'csv')

        filename = f"{order} - {order.supplier.name}.{export_format}"

        # Export every line item belonging to this order
        exported = POLineItemResource().export(queryset=order.lines.all())
        payload = exported.export(format=export_format)

        return DownloadFile(payload, filename)
Ejemplo n.º 12
0
def ExportBom(part, fmt='csv', cascade=False, max_levels=None):
    """Export a BOM (Bill of Materials) for a given part.

    Args:
        part: Part instance whose BOM is exported
        fmt: File format (default = 'csv')
        cascade: If True, multi-level BOM output is supported. Otherwise, a flat top-level-only BOM is exported.
        max_levels: Maximum BOM depth to recurse into (None = unlimited)

    Returns:
        A DownloadFile response containing the exported BOM data.
    """

    if not IsValidBOMFormat(fmt):
        fmt = 'csv'

    bom_items = []

    # Primary keys of BomItem rows already visited,
    # used to guard against circular BOM references
    uids = set()

    def add_items(items, level):
        # Add items at a given layer
        for item in items:

            item.level = str(int(level))

            # Avoid circular BOM references
            if item.pk in uids:
                continue

            # BUG FIX: previously nothing was ever added to 'uids', so the
            # circular-reference check above could never trigger and a
            # circular BOM would recurse without bound
            uids.add(item.pk)

            bom_items.append(item)

            if item.sub_part.assembly:
                if max_levels is None or level < max_levels:
                    add_items(item.sub_part.bom_items.all().order_by('id'), level + 1)

    if cascade:
        # Cascading (multi-level) BOM

        # Start with the top level
        items_to_process = part.bom_items.all().order_by('id')

        add_items(items_to_process, 1)

    else:
        # No cascading needed - just the top-level items
        bom_items = list(part.bom_items.all().order_by('id'))

    dataset = BomItemResource().export(queryset=bom_items, cascade=cascade)
    data = dataset.export(fmt)

    filename = '{n}_BOM.{fmt}'.format(n=part.full_name, fmt=fmt)

    return DownloadFile(data, filename)
Ejemplo n.º 13
0
def MakeBomTemplate(fmt):
    """Generate a Bill of Materials upload template file (for user download)."""
    fmt = fmt.strip().lower()

    if not IsValidBOMFormat(fmt):
        fmt = 'csv'

    # An empty dataset containing only the column headers
    headers = ['Part', 'Quantity', 'Overage', 'Reference', 'Notes']
    data = tablib.Dataset(headers=headers).export(fmt)

    return DownloadFile(data, 'InvenTree_BOM_Template.' + fmt)
Ejemplo n.º 14
0
def ExportBom(part, fmt='csv'):
    """Export a BOM (Bill of Materials) for a given part."""

    if not IsValidBOMFormat(fmt):
        fmt = 'csv'

    # Top-level BOM lines only, in creation order
    items = part.bom_items.all().order_by('id')

    filedata = BomItemResource().export(queryset=items).export(fmt)

    filename = '{n}_BOM.{fmt}'.format(n=part.full_name, fmt=fmt)

    return DownloadFile(filedata, filename)
Ejemplo n.º 15
0
def MakeBomTemplate(fmt):
    """Generate a Bill of Materials upload template file (for user download)."""

    fmt = fmt.strip().lower()

    if not IsValidBOMFormat(fmt):
        fmt = 'csv'

    # An empty queryset yields a dataset containing only the column headers
    empty = BomItem.objects.filter(pk=None)
    data = BomItemResource().export(queryset=empty).export(fmt)

    return DownloadFile(data, 'InvenTree_BOM_Template.' + fmt)
Ejemplo n.º 16
0
def MakeBomTemplate(fmt):
    """Generate a Bill of Materials upload template file (for user download)."""
    fmt = fmt.strip().lower()

    if not IsValidBOMFormat(fmt):
        fmt = 'csv'

    # An "empty" queryset: exporting it produces just the row headers.
    empty_query = BomItem.objects.filter(pk=None)

    data = BomItemResource().export(
        queryset=empty_query, importing=True).export(fmt)

    return DownloadFile(data, 'InvenTree_BOM_Template.' + fmt)
Ejemplo n.º 17
0
def ExportBom(part,
              fmt='csv',
              cascade=False,
              max_levels=None,
              parameter_data=False,
              stock_data=False,
              supplier_data=False,
              manufacturer_data=False):
    """Export a BOM (Bill of Materials) for a given part.

    Args:
        part: Part instance whose BOM is exported
        fmt: File format (default = 'csv')
        cascade: If True, multi-level BOM output is supported. Otherwise, a flat top-level-only BOM is exported.
        max_levels: Maximum BOM depth to export (None = unlimited)
        parameter_data: If True, add a column for each associated PartParameter
        stock_data: If True, add columns for default location and available stock
        supplier_data: If True, add columns for each associated SupplierPart
        manufacturer_data: If True, add columns for each associated ManufacturerPart

    Returns:
        A DownloadFile response containing the exported BOM data.
    """

    if not IsValidBOMFormat(fmt):
        fmt = 'csv'

    bom_items = []

    # Primary keys of BomItem rows already visited,
    # used to guard against circular BOM references
    uids = set()

    def add_items(items, level, cascade=True):
        # Add items at a given layer
        for item in items:

            item.level = str(int(level))

            # Avoid circular BOM references
            if item.pk in uids:
                continue

            # BUG FIX: previously nothing was ever added to 'uids', so the
            # circular-reference check above could never trigger and a
            # circular BOM would recurse without bound
            uids.add(item.pk)

            bom_items.append(item)

            if cascade and item.sub_part.assembly:
                if max_levels is None or level < max_levels:
                    add_items(item.sub_part.bom_items.all().order_by('id'),
                              level + 1)

    top_level_items = part.get_bom_items().order_by('id')

    add_items(top_level_items, 1, cascade)

    dataset = BomItemResource().export(queryset=bom_items, cascade=cascade)

    def add_columns_to_dataset(columns, column_size):
        # Append each extra column to the dataset, padding rows with no value
        try:
            for header, column_dict in columns.items():
                # Construct column tuple
                col = tuple(
                    column_dict.get(c_idx, '') for c_idx in range(column_size))
                # Add column to dataset
                dataset.append_col(col, header=header)
        except AttributeError:
            pass

    if parameter_data:
        # Add extra columns for each PartParameter associated with each line item

        parameter_cols = {}

        for b_idx, bom_item in enumerate(bom_items):
            # Get part parameters
            parameters = bom_item.sub_part.get_parameters()
            # Add parameters to columns
            if parameters:
                for parameter in parameters:
                    name = parameter.template.name
                    value = parameter.data

                    try:
                        parameter_cols[name].update({b_idx: value})
                    except KeyError:
                        parameter_cols[name] = {b_idx: value}

        # Add parameter columns to dataset, sorted alphabetically by name
        parameter_cols_ordered = OrderedDict(
            sorted(parameter_cols.items(), key=lambda x: x[0]))
        add_columns_to_dataset(parameter_cols_ordered, len(bom_items))

    if stock_data:
        # Add extra columns for stock data associated with each line item

        stock_headers = [
            _('Default Location'),
            _('Available Stock'),
        ]

        stock_cols = {}

        for b_idx, bom_item in enumerate(bom_items):
            # Renamed from 'stock_data' - the original clobbered the
            # 'stock_data' function argument inside this loop
            stock_row = []
            # Get part default location
            try:
                loc = bom_item.sub_part.get_default_location()

                if loc is not None:
                    stock_row.append(str(loc.name))
                else:
                    stock_row.append('')
            except AttributeError:
                stock_row.append('')

            # Get part current stock
            stock_row.append(str(normalize(
                bom_item.sub_part.available_stock)))

            for s_idx, header in enumerate(stock_headers):
                try:
                    stock_cols[header].update({b_idx: stock_row[s_idx]})
                except KeyError:
                    stock_cols[header] = {b_idx: stock_row[s_idx]}

        # Add stock columns to dataset
        add_columns_to_dataset(stock_cols, len(bom_items))

    if manufacturer_data or supplier_data:
        # Add extra columns for each SupplierPart and ManufacturerPart
        # associated with each line item

        # Keep track of the supplier parts we have already exported
        supplier_parts_used = set()

        manufacturer_cols = {}

        for bom_idx, bom_item in enumerate(bom_items):
            # Get part instance
            b_part = bom_item.sub_part

            # Include manufacturer data for each BOM item
            if manufacturer_data:

                # Filter manufacturer parts
                manufacturer_parts = ManufacturerPart.objects.filter(
                    part__pk=b_part.pk).prefetch_related('supplier_parts')

                for mp_idx, mp_part in enumerate(manufacturer_parts):

                    # Extract the "name" field of the Manufacturer (Company)
                    if mp_part and mp_part.manufacturer:
                        manufacturer_name = mp_part.manufacturer.name
                    else:
                        manufacturer_name = ''

                    # Extract the "MPN" field from the Manufacturer Part
                    if mp_part:
                        manufacturer_mpn = mp_part.MPN
                    else:
                        manufacturer_mpn = ''

                    # Generate a column name for this manufacturer
                    k_man = f'{_("Manufacturer")}_{mp_idx}'
                    k_mpn = f'{_("MPN")}_{mp_idx}'

                    try:
                        manufacturer_cols[k_man].update(
                            {bom_idx: manufacturer_name})
                        manufacturer_cols[k_mpn].update(
                            {bom_idx: manufacturer_mpn})
                    except KeyError:
                        manufacturer_cols[k_man] = {bom_idx: manufacturer_name}
                        manufacturer_cols[k_mpn] = {bom_idx: manufacturer_mpn}

                    # We wish to include supplier data for this manufacturer part
                    if supplier_data:

                        for sp_idx, sp_part in enumerate(
                                mp_part.supplier_parts.all()):

                            supplier_parts_used.add(sp_part)

                            # FIX: original tested 'sp_part.supplier' twice
                            # ('sp_part.supplier and sp_part.supplier');
                            # a single check is equivalent
                            if sp_part.supplier:
                                supplier_name = sp_part.supplier.name
                            else:
                                supplier_name = ''

                            if sp_part:
                                supplier_sku = sp_part.SKU
                            else:
                                supplier_sku = ''

                            # Generate column names for this supplier
                            k_sup = str(_("Supplier")) + "_" + str(
                                mp_idx) + "_" + str(sp_idx)
                            k_sku = str(_("SKU")) + "_" + str(
                                mp_idx) + "_" + str(sp_idx)

                            try:
                                manufacturer_cols[k_sup].update(
                                    {bom_idx: supplier_name})
                                manufacturer_cols[k_sku].update(
                                    {bom_idx: supplier_sku})
                            except KeyError:
                                manufacturer_cols[k_sup] = {
                                    bom_idx: supplier_name
                                }
                                manufacturer_cols[k_sku] = {
                                    bom_idx: supplier_sku
                                }

            if supplier_data:
                # Add in any extra supplier parts, which are not associated
                # with a manufacturer part

                for sp_idx, sp_part in enumerate(
                        SupplierPart.objects.filter(part__pk=b_part.pk)):

                    if sp_part in supplier_parts_used:
                        continue

                    supplier_parts_used.add(sp_part)

                    if sp_part.supplier:
                        supplier_name = sp_part.supplier.name
                    else:
                        supplier_name = ''

                    supplier_sku = sp_part.SKU

                    # Generate column names for this supplier
                    k_sup = str(_("Supplier")) + "_" + str(sp_idx)
                    k_sku = str(_("SKU")) + "_" + str(sp_idx)

                    try:
                        manufacturer_cols[k_sup].update(
                            {bom_idx: supplier_name})
                        manufacturer_cols[k_sku].update(
                            {bom_idx: supplier_sku})
                    except KeyError:
                        manufacturer_cols[k_sup] = {bom_idx: supplier_name}
                        manufacturer_cols[k_sku] = {bom_idx: supplier_sku}

        # Add supplier columns to dataset
        add_columns_to_dataset(manufacturer_cols, len(bom_items))

    data = dataset.export(fmt)

    filename = f"{part.full_name}_BOM.{fmt}"

    return DownloadFile(data, filename)
Ejemplo n.º 18
0
def ExportBom(part,
              fmt='csv',
              cascade=False,
              max_levels=None,
              parameter_data=False,
              stock_data=False,
              supplier_data=False,
              manufacturer_data=False):
    """Export a BOM (Bill of Materials) for a given part.

    Args:
        part: Part instance whose BOM is exported
        fmt: File format (default = 'csv')
        cascade: If True, multi-level BOM output is supported. Otherwise, a flat top-level-only BOM is exported.
        max_levels: Maximum BOM depth to export (None = unlimited)
        parameter_data: If True, add a column for each associated PartParameter
        stock_data: If True, add columns for default location and available stock
        supplier_data: If True, add columns for each associated SupplierPart
        manufacturer_data: If True, add columns for each associated ManufacturerPart

    Returns:
        A DownloadFile response containing the exported BOM data.
    """

    if not IsValidBOMFormat(fmt):
        fmt = 'csv'

    bom_items = []

    # Primary keys of BomItem rows already visited,
    # used to guard against circular BOM references
    uids = set()

    def add_items(items, level):
        # Add items at a given layer
        for item in items:

            item.level = str(int(level))

            # Avoid circular BOM references
            if item.pk in uids:
                continue

            # BUG FIX: previously nothing was ever added to 'uids', so the
            # circular-reference check above could never trigger and a
            # circular BOM would recurse without bound
            uids.add(item.pk)

            bom_items.append(item)

            if item.sub_part.assembly:
                if max_levels is None or level < max_levels:
                    add_items(item.sub_part.bom_items.all().order_by('id'),
                              level + 1)

    if cascade:
        # Cascading (multi-level) BOM

        # Start with the top level
        items_to_process = part.bom_items.all().order_by('id')

        add_items(items_to_process, 1)

    else:
        # No cascading needed - just the top-level items
        bom_items = [item for item in part.bom_items.all().order_by('id')]

    dataset = BomItemResource().export(queryset=bom_items, cascade=cascade)

    def add_columns_to_dataset(columns, column_size):
        # Append each extra column to the dataset, padding rows with no value
        try:
            for header, column_dict in columns.items():
                # Construct column tuple
                col = tuple(
                    column_dict.get(c_idx, '') for c_idx in range(column_size))
                # Add column to dataset
                dataset.append_col(col, header=header)
        except AttributeError:
            pass

    if parameter_data:
        # Add extra columns for each PartParameter associated with each line item

        parameter_cols = {}

        for b_idx, bom_item in enumerate(bom_items):
            # Get part parameters
            parameters = bom_item.sub_part.get_parameters()
            # Add parameters to columns
            if parameters:
                for parameter in parameters:
                    name = parameter.template.name
                    value = parameter.data

                    try:
                        parameter_cols[name].update({b_idx: value})
                    except KeyError:
                        parameter_cols[name] = {b_idx: value}

        # Add parameter columns to dataset, sorted alphabetically by name
        parameter_cols_ordered = OrderedDict(
            sorted(parameter_cols.items(), key=lambda x: x[0]))
        add_columns_to_dataset(parameter_cols_ordered, len(bom_items))

    if stock_data:
        # Add extra columns for stock data associated with each line item

        stock_headers = [
            _('Default Location'),
            _('Available Stock'),
        ]

        stock_cols = {}

        for b_idx, bom_item in enumerate(bom_items):
            # Renamed from 'stock_data' - the original clobbered the
            # 'stock_data' function argument inside this loop
            stock_row = []
            # Get part default location
            try:
                loc = bom_item.sub_part.get_default_location()

                if loc is not None:
                    stock_row.append(str(loc.name))
                else:
                    stock_row.append('')
            except AttributeError:
                stock_row.append('')
            # Get part current stock
            stock_row.append(str(bom_item.sub_part.available_stock))

            for s_idx, header in enumerate(stock_headers):
                try:
                    stock_cols[header].update({b_idx: stock_row[s_idx]})
                except KeyError:
                    stock_cols[header] = {b_idx: stock_row[s_idx]}

        # Add stock columns to dataset
        add_columns_to_dataset(stock_cols, len(bom_items))

    if manufacturer_data and supplier_data:
        # Add extra columns for each SupplierPart and ManufacturerPart
        # associated with each line item

        # Expand dataset with manufacturer parts
        manufacturer_headers = [
            _('Manufacturer'),
            _('MPN'),
        ]

        supplier_headers = [
            _('Supplier'),
            _('SKU'),
        ]

        manufacturer_cols = {}

        for b_idx, bom_item in enumerate(bom_items):
            # Get part instance
            b_part = bom_item.sub_part

            # Filter manufacturer parts
            manufacturer_parts = ManufacturerPart.objects.filter(
                part__pk=b_part.pk)
            manufacturer_parts = manufacturer_parts.prefetch_related(
                'supplier_parts')

            # Process manufacturer part
            for manufacturer_idx, manufacturer_part in enumerate(
                    manufacturer_parts):

                if manufacturer_part and manufacturer_part.manufacturer:
                    manufacturer_name = manufacturer_part.manufacturer.name
                else:
                    manufacturer_name = ''

                if manufacturer_part:
                    manufacturer_mpn = manufacturer_part.MPN
                else:
                    manufacturer_mpn = ''

                # Generate column names for this manufacturer
                k_man = manufacturer_headers[0] + "_" + str(manufacturer_idx)
                k_mpn = manufacturer_headers[1] + "_" + str(manufacturer_idx)

                try:
                    manufacturer_cols[k_man].update({b_idx: manufacturer_name})
                    manufacturer_cols[k_mpn].update({b_idx: manufacturer_mpn})
                except KeyError:
                    manufacturer_cols[k_man] = {b_idx: manufacturer_name}
                    manufacturer_cols[k_mpn] = {b_idx: manufacturer_mpn}

                # Process supplier parts
                for supplier_idx, supplier_part in enumerate(
                        manufacturer_part.supplier_parts.all()):

                    # FIX: original tested 'supplier_part.supplier' twice;
                    # a single check is equivalent
                    if supplier_part.supplier:
                        supplier_name = supplier_part.supplier.name
                    else:
                        supplier_name = ''

                    if supplier_part:
                        supplier_sku = supplier_part.SKU
                    else:
                        supplier_sku = ''

                    # Generate column names for this supplier
                    k_sup = str(supplier_headers[0]) + "_" + str(
                        manufacturer_idx) + "_" + str(supplier_idx)
                    k_sku = str(supplier_headers[1]) + "_" + str(
                        manufacturer_idx) + "_" + str(supplier_idx)

                    try:
                        manufacturer_cols[k_sup].update({b_idx: supplier_name})
                        manufacturer_cols[k_sku].update({b_idx: supplier_sku})
                    except KeyError:
                        manufacturer_cols[k_sup] = {b_idx: supplier_name}
                        manufacturer_cols[k_sku] = {b_idx: supplier_sku}

        # Add manufacturer columns to dataset
        add_columns_to_dataset(manufacturer_cols, len(bom_items))

    elif manufacturer_data:
        # Add extra columns for each ManufacturerPart associated with each line item

        # Expand dataset with manufacturer parts
        manufacturer_headers = [
            _('Manufacturer'),
            _('MPN'),
        ]

        manufacturer_cols = {}

        for b_idx, bom_item in enumerate(bom_items):
            # Get part instance
            b_part = bom_item.sub_part

            # Filter manufacturer parts
            manufacturer_parts = ManufacturerPart.objects.filter(
                part__pk=b_part.pk)

            for idx, manufacturer_part in enumerate(manufacturer_parts):

                # CONSISTENCY FIX: also guard against a null 'manufacturer'
                # link, matching the combined branch above (the original
                # only checked 'manufacturer_part' here)
                if manufacturer_part and manufacturer_part.manufacturer:
                    manufacturer_name = manufacturer_part.manufacturer.name
                else:
                    manufacturer_name = ''

                manufacturer_mpn = manufacturer_part.MPN

                # Add manufacturer data to the manufacturer columns

                # Generate column names for this manufacturer
                k_man = manufacturer_headers[0] + "_" + str(idx)
                k_mpn = manufacturer_headers[1] + "_" + str(idx)

                try:
                    manufacturer_cols[k_man].update({b_idx: manufacturer_name})
                    manufacturer_cols[k_mpn].update({b_idx: manufacturer_mpn})
                except KeyError:
                    manufacturer_cols[k_man] = {b_idx: manufacturer_name}
                    manufacturer_cols[k_mpn] = {b_idx: manufacturer_mpn}

        # Add manufacturer columns to dataset
        add_columns_to_dataset(manufacturer_cols, len(bom_items))

    elif supplier_data:
        # Add extra columns for each SupplierPart associated with each line item

        # Expand dataset with supplier parts
        # (renamed from the misleading 'manufacturer_headers')
        supplier_headers = [
            _('Supplier'),
            _('SKU'),
        ]

        supplier_cols = {}

        for b_idx, bom_item in enumerate(bom_items):
            # Get part instance
            b_part = bom_item.sub_part

            # Filter supplier parts
            supplier_parts = SupplierPart.objects.filter(part__pk=b_part.pk)

            for idx, supplier_part in enumerate(supplier_parts):

                if supplier_part.supplier:
                    supplier_name = supplier_part.supplier.name
                else:
                    supplier_name = ''

                supplier_sku = supplier_part.SKU

                # Generate column names for this supplier
                k_sup = supplier_headers[0] + "_" + str(idx)
                k_sku = supplier_headers[1] + "_" + str(idx)

                try:
                    supplier_cols[k_sup].update({b_idx: supplier_name})
                    supplier_cols[k_sku].update({b_idx: supplier_sku})
                except KeyError:
                    supplier_cols[k_sup] = {b_idx: supplier_name}
                    supplier_cols[k_sku] = {b_idx: supplier_sku}

        # Add supplier columns to dataset
        add_columns_to_dataset(supplier_cols, len(bom_items))

    data = dataset.export(fmt)

    filename = f"{part.full_name}_BOM.{fmt}"

    return DownloadFile(data, filename)
Ejemplo n.º 19
0
def ExportBom(part,
              fmt='csv',
              cascade=False,
              max_levels=None,
              supplier_data=False):
    """ Export a BOM (Bill of Materials) for a given part.

    Args:
        part: Part object for which the BOM will be exported
        fmt: File format (default = 'csv')
        cascade: If True, multi-level BOM output is supported. Otherwise, a flat top-level-only BOM is exported.
        max_levels: Maximum BOM depth to recurse into (None = unlimited). Only used when cascade=True.
        supplier_data: If True, add extra columns for each SupplierPart associated with each line item

    Returns:
        DownloadFile response containing the exported BOM data
    """

    if not IsValidBOMFormat(fmt):
        fmt = 'csv'

    bom_items = []

    # Track the pk of each BomItem already processed,
    # to prevent infinite recursion on circular BOM references
    uids = []

    def add_items(items, level):
        # Recursively add BOM items at the given depth
        for item in items:

            item.level = str(int(level))

            # Avoid circular BOM references
            if item.pk in uids:
                continue

            # Record this item as processed.
            # NOTE: previously nothing was ever added to 'uids',
            # so the circular-reference guard above was dead code
            # and a circular BOM would recurse forever.
            uids.append(item.pk)

            bom_items.append(item)

            if item.sub_part.assembly:
                if max_levels is None or level < max_levels:
                    add_items(item.sub_part.bom_items.all().order_by('id'),
                              level + 1)

    if cascade:
        # Cascading (multi-level) BOM

        # Start with the top level
        items_to_process = part.bom_items.all().order_by('id')

        add_items(items_to_process, 1)

    else:
        # No cascading needed - just the top-level items
        bom_items = list(part.bom_items.all().order_by('id'))

    dataset = BomItemResource().export(queryset=bom_items, cascade=cascade)

    if supplier_data:
        # If requested, add extra columns for each SupplierPart
        # associated with each line item

        # Expand dataset with supplier / manufacturer part data
        manufacturer_headers = [
            _('Supplier'),
            _('SKU'),
            _('Manufacturer'),
            _('MPN'),
        ]

        # Maps column header -> {row_index: value}
        manufacturer_cols = {}

        for b_idx, bom_item in enumerate(bom_items):
            # Get part instance
            b_part = bom_item.sub_part

            # Filter supplier parts
            supplier_parts = SupplierPart.objects.filter(part__pk=b_part.pk)

            for idx, supplier_part in enumerate(supplier_parts):

                # Supplier link is nullable - fall back to empty string
                if supplier_part.supplier:
                    supplier_name = supplier_part.supplier.name
                else:
                    supplier_name = ''

                supplier_sku = supplier_part.SKU

                # Manufacturer link is nullable - fall back to empty string
                if supplier_part.manufacturer:
                    manufacturer_name = supplier_part.manufacturer.name
                else:
                    manufacturer_name = ''

                manufacturer_mpn = supplier_part.MPN

                # Generate column names for this supplier index
                k_sup = manufacturer_headers[0] + "_" + str(idx)
                k_sku = manufacturer_headers[1] + "_" + str(idx)
                k_man = manufacturer_headers[2] + "_" + str(idx)
                k_mpn = manufacturer_headers[3] + "_" + str(idx)

                # Store the data, creating each column dict on first use
                manufacturer_cols.setdefault(k_sup, {})[b_idx] = supplier_name
                manufacturer_cols.setdefault(k_sku, {})[b_idx] = supplier_sku
                manufacturer_cols.setdefault(k_man, {})[b_idx] = manufacturer_name
                manufacturer_cols.setdefault(k_mpn, {})[b_idx] = manufacturer_mpn

        # Add the collected columns to the dataset
        for header, col_dict in manufacturer_cols.items():
            # Construct column tuple, filling missing rows with ''
            col = tuple(
                col_dict.get(c_idx, '') for c_idx in range(len(bom_items)))
            # Add column to dataset
            dataset.append_col(col, header=header)

    data = dataset.export(fmt)

    filename = f"{part.full_name}_BOM.{fmt}"

    return DownloadFile(data, filename)
Ejemplo n.º 20
0
    def get(self, request, *args, **kwargs):
        """Export data for a selection of Part objects as a CSV file download.

        The 'parts' query parameter supplies a comma-separated list of Part
        primary keys; invalid or non-existent IDs are silently skipped.
        """
        # Renamed from 'part' to avoid shadowing by the loop variables below
        part_ids = request.GET.get('parts', '')
        parts = []

        for pk in part_ids.split(','):
            try:
                parts.append(Part.objects.get(pk=int(pk)))
            except (Part.DoesNotExist, ValueError):
                continue

        headers = [
            'ID',
            'Name',
            'Description',
            'Category',
            'Category ID',
            'IPN',
            'Revision',
            'URL',
            'Keywords',
            'Notes',
            'Assembly',
            'Component',
            'Template',
            'Trackable',
            'Salable',
            'Active',
            'Virtual',
            'Stock Info',  # Spacer between part data and stock data
            'In Stock',
            'Allocated',
            'Building',
            'On Order',
        ]

        # Construct list of suppliers for each part
        supplier_names = set()

        for part in parts:
            supplier_parts = part.supplier_parts.all()
            part.suppliers = {}

            for sp in supplier_parts:
                name = sp.supplier.name
                supplier_names.add(name)
                part.suppliers[name] = sp

        if len(supplier_names) > 0:
            # 'Suppliers' acts as a spacer column before the per-supplier SKUs
            headers.append('Suppliers')
            for name in supplier_names:
                headers.append(name)

        data = tablib.Dataset(headers=headers)

        for part in parts:
            line = []

            line.append(part.pk)
            line.append(part.name)
            line.append(part.description)

            # Category is nullable: previously 'part.category.pk' raised
            # AttributeError for parts without an assigned category
            if part.category:
                line.append(str(part.category))
                line.append(part.category.pk)
            else:
                line.append('')
                line.append('')

            line.append(part.IPN)
            line.append(part.revision)
            line.append(part.URL)
            line.append(part.keywords)
            line.append(part.notes)
            line.append(part.assembly)
            line.append(part.component)
            line.append(part.is_template)
            line.append(part.trackable)
            line.append(part.salable)
            line.append(part.active)
            line.append(part.virtual)

            # Stock information (empty cell for the 'Stock Info' spacer)
            line.append('')
            line.append(part.total_stock)
            line.append(part.allocation_count)
            line.append(part.quantity_being_built)
            line.append(part.on_order)

            if len(supplier_names) > 0:
                # Empty cell for the 'Suppliers' spacer column
                line.append('')

                # One SKU column per known supplier (blank if no link)
                for name in supplier_names:
                    sp = part.suppliers.get(name, None)
                    if sp:
                        line.append(sp.SKU)
                    else:
                        line.append('')

            data.append(line)

        csv = data.export('csv')
        return DownloadFile(csv, 'InvenTree_Parts.csv')
Ejemplo n.º 21
0
    def get(self, request, *args, **kwargs):
        """Export a stocktake report, filtered by the provided query parameters.

        Query parameters:
            format: Export file format (default = 'csv')
            location: Optional StockLocation ID to filter by
            cascade: If True (default), include stock in sub-locations of 'location'
            supplier: Optional Company (supplier) ID to filter by
            supplier_part: Optional SupplierPart ID to filter by
            part: Optional Part ID to filter by

        Invalid filter IDs are silently ignored.
        """

        export_format = request.GET.get('format', 'csv').lower()

        # Check if a particular location was specified
        loc_id = request.GET.get('location', None)
        location = None

        if loc_id:
            try:
                location = StockLocation.objects.get(pk=loc_id)
            except (ValueError, StockLocation.DoesNotExist):
                pass

        # Check if a particular supplier was specified
        sup_id = request.GET.get('supplier', None)
        supplier = None

        if sup_id:
            try:
                supplier = Company.objects.get(pk=sup_id)
            except (ValueError, Company.DoesNotExist):
                pass

        # Check if a particular supplier_part was specified
        sup_part_id = request.GET.get('supplier_part', None)
        supplier_part = None

        if sup_part_id:
            try:
                supplier_part = SupplierPart.objects.get(pk=sup_part_id)
            except (ValueError, SupplierPart.DoesNotExist):
                pass

        # Check if a particular part was specified
        part_id = request.GET.get('part', None)
        part = None

        if part_id:
            try:
                part = Part.objects.get(pk=part_id)
            except (ValueError, Part.DoesNotExist):
                pass

        # Fall back to CSV for unsupported formats
        if export_format not in GetExportFormats():
            export_format = 'csv'

        filename = f"InvenTree_Stocktake_{datetime.now().strftime('%d-%b-%Y')}.{export_format}"

        if location:
            # Check if sub-locations should be cascaded into the report
            cascade = str2bool(request.GET.get('cascade', True))
            stock_items = location.get_stock_items(cascade)
        else:
            # No location specified - consider all stock items
            stock_items = StockItem.objects.all()

        if part:
            stock_items = stock_items.filter(part=part)

        if supplier:
            stock_items = stock_items.filter(supplier_part__supplier=supplier)

        if supplier_part:
            stock_items = stock_items.filter(supplier_part=supplier_part)

        # Filter out stock items that are not 'in stock'
        stock_items = stock_items.filter(StockItem.IN_STOCK_FILTER)

        # Pre-fetch related fields to reduce DB queries
        stock_items = stock_items.prefetch_related('part',
                                                   'supplier_part__supplier',
                                                   'location',
                                                   'purchase_order', 'build')

        dataset = StockItemResource().export(queryset=stock_items)

        filedata = dataset.export(export_format)

        return DownloadFile(filedata, filename)
Ejemplo n.º 22
0
    def get(self, request, *args, **kwargs):
        """Export a stocktake report as a manually-constructed dataset.

        Query parameters:
            format: Export file format (default = 'csv')
            location: Optional StockLocation ID to filter by
            cascade: If True (default), include stock in sub-locations of 'location'
            supplier: Optional Company (supplier) ID to filter by
            part: Optional Part ID to filter by

        Invalid filter IDs are silently ignored.
        """

        export_format = request.GET.get('format', 'csv').lower()

        # Check if a particular location was specified
        loc_id = request.GET.get('location', None)
        location = None

        if loc_id:
            try:
                location = StockLocation.objects.get(pk=loc_id)
            except (ValueError, StockLocation.DoesNotExist):
                pass

        # Check if a particular supplier was specified
        sup_id = request.GET.get('supplier', None)
        supplier = None

        if sup_id:
            try:
                supplier = Company.objects.get(pk=sup_id)
            except (ValueError, Company.DoesNotExist):
                pass

        # Check if a particular part was specified
        part_id = request.GET.get('part', None)
        part = None

        if part_id:
            try:
                part = Part.objects.get(pk=part_id)
            except (ValueError, Part.DoesNotExist):
                pass

        # Fall back to CSV for unsupported formats
        if export_format not in GetExportFormats():
            export_format = 'csv'

        filename = f"InvenTree_Stocktake_{datetime.now().strftime('%d-%b-%Y')}.{export_format}"

        if location:
            # Check if sub-locations should be cascaded into the report
            cascade = str2bool(request.GET.get('cascade', True))
            stock_items = location.get_stock_items(cascade)
        else:
            # No location specified - consider all stock items
            stock_items = StockItem.objects.all()

        if part:
            stock_items = stock_items.filter(part=part)

        if supplier:
            stock_items = stock_items.filter(supplier_part__supplier=supplier)

        # Filter out stock items that are not 'in stock'
        # (i.e. not sent to a customer and not installed in another item)
        stock_items = stock_items.filter(customer=None)
        stock_items = stock_items.filter(belongs_to=None)

        # Pre-fetch related fields to reduce DB queries
        stock_items = stock_items.prefetch_related('part',
                                                   'supplier_part__supplier',
                                                   'location',
                                                   'purchase_order', 'build')

        # Column headers
        headers = [
            _('Stock ID'),
            _('Part ID'),
            _('Part'),
            _('Supplier Part ID'),
            _('Supplier ID'),
            _('Supplier'),
            _('Location ID'),
            _('Location'),
            _('Quantity'),
            _('Batch'),
            _('Serial'),
            _('Status'),
            _('Notes'),
            _('Review Needed'),
            _('Last Updated'),
            _('Last Stocktake'),
            _('Purchase Order ID'),
            _('Build ID'),
        ]

        data = tablib.Dataset(headers=headers)

        for item in stock_items:
            line = []

            line.append(item.pk)
            line.append(item.part.pk)
            line.append(item.part.full_name)

            if item.supplier_part:
                line.append(item.supplier_part.pk)
                # Supplier link is nullable: previously this raised
                # AttributeError for supplier parts with no supplier
                if item.supplier_part.supplier:
                    line.append(item.supplier_part.supplier.pk)
                    line.append(item.supplier_part.supplier.name)
                else:
                    line.append('')
                    line.append('')
            else:
                line.append('')
                line.append('')
                line.append('')

            if item.location:
                line.append(item.location.pk)
                line.append(item.location.name)
            else:
                line.append('')
                line.append('')

            line.append(item.quantity)
            line.append(item.batch)
            line.append(item.serial)
            line.append(StockStatus.label(item.status))
            line.append(item.notes)
            line.append(item.review_needed)
            line.append(item.updated)
            line.append(item.stocktake_date)

            if item.purchase_order:
                line.append(item.purchase_order.pk)
            else:
                line.append('')

            if item.build:
                line.append(item.build.pk)
            else:
                line.append('')

            data.append(line)

        filedata = data.export(export_format)

        return DownloadFile(filedata, filename)