mirror of
https://github.com/inventree/InvenTree
synced 2024-08-30 18:33:04 +00:00
* Squashed commit of the following: commit f5cf7b2e7872fc19633321713965763d1890b495 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 20:36:57 2024 +0100 fixed reqs commit 9d845bee98befa4e53c2ac3c783bd704369e3ad2 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 20:32:35 2024 +0100 disable autofix/format commit aff5f271484c3500df7ddde043767c008ce4af21 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 20:28:50 2024 +0100 adjust checks commit 47271cf1efa848ec8374a0d83b5646d06fffa6e7 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 20:28:22 2024 +0100 reorder order of operations commit e1bf178b40b3f0d2d59ba92209156c43095959d2 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 20:01:09 2024 +0100 adapted ruff settings to better fit code base commit ad7d88a6f4f15c9552522131c4e207256fc2bbf6 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 19:59:45 2024 +0100 auto fixed docstring commit a2e54a760e17932dbbc2de0dec23906107f2cda9 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 19:46:35 2024 +0100 fix getattr useage commit cb80c73bc6c0be7f5d2ed3cc9b2ac03fdefd5c41 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 19:25:09 2024 +0100 fix requirements file commit b7780bbd21a32007f3b0ce495b519bf59bb19bf5 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:42:28 2024 +0100 fix removed sections commit 71f1681f55c15f62c16c1d7f30a745adc496db97 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:41:21 2024 +0100 fix djlint syntax commit a0bcf1bccef8a8ffd482f38e2063bc9066e1d759 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:35:28 2024 +0100 remove flake8 from code base commit 22475b31cc06919785be046e007915e43f356793 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:34:56 2024 +0100 remove flake8 from code base commit 0413350f14773ac6161473e0cfb069713c13c691 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:24:39 2024 +0100 moved ruff section commit d90c48a0bf98befdfacbbb093ee56cdb28afb40d 
Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:24:24 2024 +0100 move djlint config to pyproject commit c5ce55d5119bf2e35e429986f62f875c86178ae1 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:20:39 2024 +0100 added isort again commit 42a41d23afc280d4ee6f0e640148abc6f460f05a Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:19:02 2024 +0100 move config section commit 85692331816348cb1145570340d1f6488a8265cc Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:17:52 2024 +0100 fix codespell error commit 2897c6704d1311a800ce5aa47878d96d6980b377 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 17:29:21 2024 +0100 replaced flake8 with ruff mostly for speed improvements * enable autoformat * added autofixes * switched to single quotes everywhere * switched to ruff for import sorting * fix wrong url response * switched to pathlib for lookup * fixed lookup * Squashed commit of the following: commit d3b795824b5d6d1c0eda67150b45b5cd672b3f6b Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 22:56:17 2024 +0100 fixed source path commit 0bac0c19b88897a19d5c995e4ff50427718b827e Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 22:47:53 2024 +0100 fixed req commit 9f61f01d9cc01f1fb7123102f3658c890469b8ce Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 22:45:18 2024 +0100 added missing toml req commit 91b71ed24a6761b629768d0ad8829fec2819a966 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 20:49:50 2024 +0100 moved isort config commit 12460b04196b12d0272d40552402476d5492fea5 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 20:43:22 2024 +0100 remove flake8 section from setup.cfg commit f5cf7b2e7872fc19633321713965763d1890b495 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 20:36:57 2024 +0100 fixed reqs commit 9d845bee98befa4e53c2ac3c783bd704369e3ad2 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 20:32:35 2024 +0100 disable autofix/format commit aff5f271484c3500df7ddde043767c008ce4af21 
Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 20:28:50 2024 +0100 adjust checks commit 47271cf1efa848ec8374a0d83b5646d06fffa6e7 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 20:28:22 2024 +0100 reorder order of operations commit e1bf178b40b3f0d2d59ba92209156c43095959d2 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 20:01:09 2024 +0100 adapted ruff settings to better fit code base commit ad7d88a6f4f15c9552522131c4e207256fc2bbf6 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 19:59:45 2024 +0100 auto fixed docstring commit a2e54a760e17932dbbc2de0dec23906107f2cda9 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 19:46:35 2024 +0100 fix getattr useage commit cb80c73bc6c0be7f5d2ed3cc9b2ac03fdefd5c41 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 19:25:09 2024 +0100 fix requirements file commit b7780bbd21a32007f3b0ce495b519bf59bb19bf5 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:42:28 2024 +0100 fix removed sections commit 71f1681f55c15f62c16c1d7f30a745adc496db97 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:41:21 2024 +0100 fix djlint syntax commit a0bcf1bccef8a8ffd482f38e2063bc9066e1d759 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:35:28 2024 +0100 remove flake8 from code base commit 22475b31cc06919785be046e007915e43f356793 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:34:56 2024 +0100 remove flake8 from code base commit 0413350f14773ac6161473e0cfb069713c13c691 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:24:39 2024 +0100 moved ruff section commit d90c48a0bf98befdfacbbb093ee56cdb28afb40d Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:24:24 2024 +0100 move djlint config to pyproject commit c5ce55d5119bf2e35e429986f62f875c86178ae1 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:20:39 2024 +0100 added isort again commit 42a41d23afc280d4ee6f0e640148abc6f460f05a Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:19:02 2024 
+0100 move config section commit 85692331816348cb1145570340d1f6488a8265cc Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 18:17:52 2024 +0100 fix codespell error commit 2897c6704d1311a800ce5aa47878d96d6980b377 Author: Matthias Mair <code@mjmair.com> Date: Sun Jan 7 17:29:21 2024 +0100 replaced flake8 with ruff mostly for speed improvements * fix coverage souce format --------- Co-authored-by: Oliver Walters <oliver.henry.walters@gmail.com>
328 lines
12 KiB
Python
328 lines
12 KiB
Python
"""Functionality for Bill of Material (BOM) management.
|
|
|
|
Primarily BOM upload tools.
|
|
"""
|
|
|
|
from collections import OrderedDict
|
|
|
|
from django.utils.translation import gettext as _
|
|
|
|
from company.models import ManufacturerPart, SupplierPart
|
|
from InvenTree.helpers import DownloadFile, GetExportFormats, normalize, str2bool
|
|
|
|
from .admin import BomItemResource
|
|
from .models import BomItem, BomItemSubstitute, Part
|
|
|
|
|
|
def IsValidBOMFormat(fmt):
    """Check whether the supplied format specifier names a supported BOM file format."""
    normalized = fmt.strip().lower()
    return normalized in GetExportFormats()
|
|
|
|
|
|
def MakeBomTemplate(fmt):
    """Generate a Bill of Materials upload template file (for user download)."""
    fmt = fmt.strip().lower()

    # Unrecognized formats silently fall back to CSV
    if not IsValidBOMFormat(fmt):
        fmt = 'csv'

    # Deliberately empty queryset: exporting it produces only the header row,
    # which is exactly what an upload template needs
    empty_query = BomItem.objects.filter(pk=None)

    dataset = BomItemResource().export(queryset=empty_query, importing=True)
    data = dataset.export(fmt)

    filename = f'InvenTree_BOM_Template.{fmt}'
    return DownloadFile(data, filename)
|
|
|
|
|
|
def ExportBom(
    part: Part, fmt='csv', cascade: bool = False, max_levels: int = None, **kwargs
):
    """Export a BOM (Bill of Materials) for a given part.

    Args:
        part (Part): Part for which the BOM should be exported
        fmt (str, optional): file format. Defaults to 'csv'.
        cascade (bool, optional): If True, multi-level BOM output is supported. Otherwise, a flat top-level-only BOM is exported. Defaults to False.
        max_levels (int, optional): Levels of items that should be included. None for no sublevels. Defaults to None.

    kwargs:
        parameter_data (bool, optional): Additional data that should be added. Defaults to False.
        stock_data (bool, optional): Additional data that should be added. Defaults to False.
        supplier_data (bool, optional): Additional data that should be added. Defaults to False.
        manufacturer_data (bool, optional): Additional data that should be added. Defaults to False.
        pricing_data (bool, optional): Include pricing data in exported BOM. Defaults to False
        substitute_part_data (bool, optional): Include substitute part numbers in exported BOM. Defaults to False

    Returns:
        StreamingHttpResponse: Response that can be passed to the endpoint
    """
    # kwargs may arrive as strings (e.g. HTTP query parameters), so coerce
    # each optional-section flag through str2bool
    parameter_data = str2bool(kwargs.get('parameter_data', False))

    stock_data = str2bool(kwargs.get('stock_data', False))

    supplier_data = str2bool(kwargs.get('supplier_data', False))

    manufacturer_data = str2bool(kwargs.get('manufacturer_data', False))

    pricing_data = str2bool(kwargs.get('pricing_data', False))

    substitute_part_data = str2bool(kwargs.get('substitute_part_data', False))

    # Unrecognized formats silently fall back to CSV
    if not IsValidBOMFormat(fmt):
        fmt = 'csv'

    # Flattened list of BOM items, in export order (depth-first when cascading)
    bom_items = []

    # NOTE(review): 'uids' is checked in add_items() to break circular BOM
    # references, but nothing visible here ever adds to it — confirm whether
    # the circular-reference guard is actually effective
    uids = []

    def add_items(items, level, cascade=True):
        # Add items at a given layer; recurses into sub-assemblies when
        # cascade is enabled and max_levels has not been reached
        for item in items:
            # 'level' column is exported as a string
            item.level = str(int(level))

            # Avoid circular BOM references
            if item.pk in uids:
                continue

            bom_items.append(item)

            if cascade and item.sub_part.assembly:
                if max_levels is None or level < max_levels:
                    add_items(item.sub_part.bom_items.all().order_by('id'), level + 1)

    top_level_items = part.get_bom_items().order_by('id')

    # Level numbering starts at 1 for the top-level items
    add_items(top_level_items, 1, cascade)

    dataset = BomItemResource().export(
        queryset=bom_items, cascade=cascade, include_pricing=pricing_data
    )

    def add_columns_to_dataset(columns, column_size):
        # Append each extra column to the dataset.
        # 'columns' maps header -> {row_index: value}; rows without a value
        # for a given column are filled with ''.
        try:
            for header, column_dict in columns.items():
                # Construct column tuple
                col = tuple(column_dict.get(c_idx, '') for c_idx in range(column_size))
                # Add column to dataset
                dataset.append_col(col, header=header)
        except AttributeError:
            # NOTE(review): silently ignores malformed 'columns' input —
            # presumably deliberate best-effort behavior; confirm
            pass

    if substitute_part_data:
        """If requested, add extra columns for all substitute part numbers associated with each line item."""

        # col_index is the row index (one row per BOM item)
        col_index = 0
        substitute_cols = {}

        for bom_item in bom_items:
            substitutes = BomItemSubstitute.objects.filter(bom_item=bom_item)
            for s_idx, substitute in enumerate(substitutes):
                """Create substitute part name column"""
                # Column headers are numbered per-substitute: "Substitute Part1", ...
                name = f'{_("Substitute Part")}{s_idx + 1}'
                value = substitute.part.name
                try:
                    substitute_cols[name].update({col_index: value})
                except KeyError:
                    substitute_cols[name] = {col_index: value}

                """Create substitute part description column"""
                name = f'{_("Substitute Description")}{s_idx + 1}'
                value = substitute.part.description
                try:
                    substitute_cols[name].update({col_index: value})
                except KeyError:
                    substitute_cols[name] = {col_index: value}

            # Advance to the next row regardless of how many substitutes were found
            col_index = col_index + 1

        # Add substitute columns to dataset
        add_columns_to_dataset(substitute_cols, len(bom_items))

    if parameter_data:
        """If requested, add extra columns for each PartParameter associated with each line item."""

        parameter_cols = {}

        for b_idx, bom_item in enumerate(bom_items):
            # Get part parameters
            parameters = bom_item.sub_part.get_parameters()
            # Add parameters to columns
            if parameters:
                for parameter in parameters:
                    name = parameter.template.name
                    value = parameter.data

                    try:
                        parameter_cols[name].update({b_idx: value})
                    except KeyError:
                        parameter_cols[name] = {b_idx: value}

        # Add parameter columns to dataset, sorted alphabetically by header
        parameter_cols_ordered = OrderedDict(
            sorted(parameter_cols.items(), key=lambda x: x[0])
        )
        add_columns_to_dataset(parameter_cols_ordered, len(bom_items))

    if stock_data:
        """If requested, add extra columns for stock data associated with each line item."""

        stock_headers = [
            _('Default Location'),
            _('Total Stock'),
            _('Available Stock'),
            _('On Order'),
        ]

        stock_cols = {}

        for b_idx, bom_item in enumerate(bom_items):
            # NOTE(review): this rebinding shadows the 'stock_data' boolean flag
            # read by the enclosing 'if' — harmless here since the flag is not
            # re-read afterwards, but a rename would be clearer.
            # Values below are appended in the same order as 'stock_headers'.
            stock_data = []

            sub_part = bom_item.sub_part

            # Get part default location
            try:
                loc = sub_part.get_default_location()

                if loc is not None:
                    stock_data.append(str(loc.name))
                else:
                    stock_data.append('')
            except AttributeError:
                stock_data.append('')

            # Total "in stock" quantity for this part
            stock_data.append(str(normalize(sub_part.total_stock)))

            # Total "available stock" quantity for this part
            stock_data.append(str(normalize(sub_part.available_stock)))

            # Total "on order" quantity for this part
            stock_data.append(str(normalize(sub_part.on_order)))

            # Positional pairing: stock_data[s_idx] corresponds to stock_headers[s_idx]
            for s_idx, header in enumerate(stock_headers):
                try:
                    stock_cols[header].update({b_idx: stock_data[s_idx]})
                except KeyError:
                    stock_cols[header] = {b_idx: stock_data[s_idx]}

        # Add stock columns to dataset
        add_columns_to_dataset(stock_cols, len(bom_items))

    if manufacturer_data or supplier_data:
        """If requested, add extra columns for each SupplierPart and ManufacturerPart associated with each line item."""

        # Keep track of the supplier parts we have already exported
        supplier_parts_used = set()

        # Holds manufacturer AND supplier columns (header -> {row_index: value})
        manufacturer_cols = {}

        for bom_idx, bom_item in enumerate(bom_items):
            # Get part instance
            b_part = bom_item.sub_part

            # Include manufacturer data for each BOM item
            if manufacturer_data:
                # Filter manufacturer parts
                manufacturer_parts = ManufacturerPart.objects.filter(
                    part__pk=b_part.pk
                ).prefetch_related('supplier_parts')

                for mp_idx, mp_part in enumerate(manufacturer_parts):
                    # Extract the "name" field of the Manufacturer (Company)
                    if mp_part and mp_part.manufacturer:
                        manufacturer_name = mp_part.manufacturer.name
                    else:
                        manufacturer_name = ''

                    # Extract the "MPN" field from the Manufacturer Part
                    if mp_part:
                        manufacturer_mpn = mp_part.MPN
                    else:
                        manufacturer_mpn = ''

                    # Generate a column name for this manufacturer
                    k_man = f'{_("Manufacturer")}_{mp_idx}'
                    k_mpn = f'{_("MPN")}_{mp_idx}'

                    try:
                        manufacturer_cols[k_man].update({bom_idx: manufacturer_name})
                        manufacturer_cols[k_mpn].update({bom_idx: manufacturer_mpn})
                    except KeyError:
                        manufacturer_cols[k_man] = {bom_idx: manufacturer_name}
                        manufacturer_cols[k_mpn] = {bom_idx: manufacturer_mpn}

                    # We wish to include supplier data for this manufacturer part
                    if supplier_data:
                        for sp_idx, sp_part in enumerate(mp_part.supplier_parts.all()):
                            supplier_parts_used.add(sp_part)

                            if sp_part.supplier:
                                supplier_name = sp_part.supplier.name
                            else:
                                supplier_name = ''

                            if sp_part:
                                supplier_sku = sp_part.SKU
                            else:
                                supplier_sku = ''

                            # Generate column names for this supplier
                            # (keyed by both manufacturer index and supplier index)
                            k_sup = (
                                str(_('Supplier'))
                                + '_'
                                + str(mp_idx)
                                + '_'
                                + str(sp_idx)
                            )
                            k_sku = (
                                str(_('SKU')) + '_' + str(mp_idx) + '_' + str(sp_idx)
                            )

                            try:
                                manufacturer_cols[k_sup].update({
                                    bom_idx: supplier_name
                                })
                                manufacturer_cols[k_sku].update({bom_idx: supplier_sku})
                            except KeyError:
                                manufacturer_cols[k_sup] = {bom_idx: supplier_name}
                                manufacturer_cols[k_sku] = {bom_idx: supplier_sku}

            if supplier_data:
                # Add in any extra supplier parts, which are not associated with a manufacturer part

                for sp_idx, sp_part in enumerate(
                    SupplierPart.objects.filter(part__pk=b_part.pk)
                ):
                    # Skip supplier parts already exported via a manufacturer part
                    if sp_part in supplier_parts_used:
                        continue

                    supplier_parts_used.add(sp_part)

                    if sp_part.supplier:
                        supplier_name = sp_part.supplier.name
                    else:
                        supplier_name = ''

                    supplier_sku = sp_part.SKU

                    # Generate column names for this supplier
                    # NOTE(review): these headers use only sp_idx, so they can
                    # collide with the manufacturer-linked "Supplier_<mp>_<sp>"
                    # naming only by design of the differing suffixes — verify
                    # the intended column layout against an exported file
                    k_sup = str(_('Supplier')) + '_' + str(sp_idx)
                    k_sku = str(_('SKU')) + '_' + str(sp_idx)

                    try:
                        manufacturer_cols[k_sup].update({bom_idx: supplier_name})
                        manufacturer_cols[k_sku].update({bom_idx: supplier_sku})
                    except KeyError:
                        manufacturer_cols[k_sup] = {bom_idx: supplier_name}
                        manufacturer_cols[k_sku] = {bom_idx: supplier_sku}

        # Add supplier columns to dataset
        add_columns_to_dataset(manufacturer_cols, len(bom_items))

    # Serialize the dataset in the requested format and wrap it for download
    data = dataset.export(fmt)

    filename = f'{part.full_name}_BOM.{fmt}'

    return DownloadFile(data, filename)
|