From 24f1491b8e2cdde37073e44c82313786e8f38268 Mon Sep 17 00:00:00 2001 From: Oliver Date: Wed, 16 Feb 2022 11:32:40 +1100 Subject: [PATCH 01/14] Adds generic API endpoint for extracting data from a tabulated file --- InvenTree/InvenTree/serializers.py | 174 ++++++++++++++++++++++++++++- InvenTree/InvenTree/version.py | 5 +- 2 files changed, 177 insertions(+), 2 deletions(-) diff --git a/InvenTree/InvenTree/serializers.py b/InvenTree/InvenTree/serializers.py index ffc84a5f71..a4415c0826 100644 --- a/InvenTree/InvenTree/serializers.py +++ b/InvenTree/InvenTree/serializers.py @@ -5,8 +5,8 @@ Serializers used in various InvenTree apps # -*- coding: utf-8 -*- from __future__ import unicode_literals - import os +import tablib from decimal import Decimal @@ -332,3 +332,175 @@ class InvenTreeDecimalField(serializers.FloatField): return Decimal(str(data)) except: raise serializers.ValidationError(_("Invalid value")) + + +class DataFileUploadSerializer(serializers.Serializer): + """ + Generic serializer for uploading a data file, and extracting a dataset. + + - Validates uploaded file + - Extracts column names + - Extracts data rows + """ + + class Meta: + fields = [ + 'bom_file', + ] + + data_file = serializers.FileField( + label=_("Data File"), + help_text=_("Select data file for upload"), + required=True, + allow_empty_file=False, + ) + + def validate_data_file(self, data_file): + """ + Perform validation checks on the uploaded data file. + """ + + self.filename = data_file.name + + name, ext = os.path.splitext(data_file.name) + + # Remove the leading . from the extension + ext = ext[1:] + + accepted_file_types = [ + 'xls', 'xlsx', + 'csv', 'tsv', + 'xml', + ] + + if ext not in accepted_file_types: + raise serializers.ValidationError(_("Unsupported file type")) + + # Impose a 50MB limit on uploaded BOM files + max_upload_file_size = 50 * 1024 * 1024 + + if data_file.size > max_upload_file_size: + raise serializers.ValidationError(_("File is too large")) + + # Read file data into memory (bytes object) + try: + data = data_file.read() + except Exception as e: + raise serializers.ValidationError(str(e)) + + if ext in ['csv', 'tsv', 'xml']: + try: + data = data.decode() + except Exception as e: + raise serializers.ValidationError(str(e)) + + # Convert to a tablib dataset (we expect headers) + try: + self.dataset = tablib.Dataset().load(data, ext, headers=True) + except Exception as e: + raise serializers.ValidationError(str(e)) + + if len(self.dataset) == 0: + raise serializers.ValidationError(_("No data rows found in file")) + + return data_file + + def extract_data(self): + """ + Returns dataset extracted from the file + """ + + return { + 'headers': self.dataset.headers, + 'rows': [row.values() for row in self.dataset.dict], + 'filename': self.filename, + } + + +class DataFileExtractSerializer(serializers.Serializer): + """ + Generic serializer for extracting data from an imported dataset. 
+ + - User provides an array of matched headers + - User provides an array of raw data rows + """ + + # Provide a dict of expected columns for this importer + EXPECTED_COLUMNS = {} + + # Provide a list of required columns for this importer + REQUIRED_COLUMNS = [] + + class Meta: + fields = [ + 'raw_headers', + 'mapped_headers', + 'rows', + ] + + raw_headers = serializers.ListField( + child=serializers.CharField(), + ) + + mapped_headers = serializers.ListField( + child=serializers.CharField(), + ) + + rows = serializers.ListField( + child=serializers.ListField( + child=serializers.CharField( + allow_blank=True, + ), + ) + ) + + def validate(self, data): + + data = super().validate(data) + + self.raw_headers = data.get('raw_headers', []) + self.mapped_headers = data.get('mapped_headers', []) + self.rows = data.get('rows', []) + + if len(self.rows) == 0: + raise serializers.ValidationError(_("No data rows provided")) + + if len(self.raw_headers) == 0: + raise serializers.ValidationError(_("File headers not supplied")) + + if len(self.mapped_headers) == 0: + raise serializers.ValidationError(_("Mapped headers not supplied")) + + if len(self.raw_headers) != len(self.mapped_headers): + raise serializers.ValidationError(_("Supplied header list has incorrect length")) + + self.validate_headers() + + return self.extract_data(data) + + def extract_data(self, data): + """ + Extract row data based on the provided fields. + Returns an array of mapped column:value values + """ + + return data + + def validate_headers(self): + """ + Perform custom validation of header mapping. + """ + + print("validate_headers()") + + for col in self.REQUIRED_COLUMNS: + print("checking col:", col) + if col not in self.mapped_headers: + raise serializers.ValidationError(_("Missing required column") + f": {col}") + + + def save(self): + """ + No "save" action for this serializer + """ + ... \ No newline at end of file diff --git a/InvenTree/InvenTree/version.py b/InvenTree/InvenTree/version.py index 19235f0e0a..feaf1558d1 100644 --- a/InvenTree/InvenTree/version.py +++ b/InvenTree/InvenTree/version.py @@ -12,11 +12,14 @@ import common.models INVENTREE_SW_VERSION = "0.6.0 dev" # InvenTree API version -INVENTREE_API_VERSION = 24 +INVENTREE_API_VERSION = 25 """ Increment this API version number whenever there is a significant change to the API that any clients need to know about +v25 -> 2022-02-16 + - Adds API endpoint for uploading a BOM file and extracting data + v24 -> 2022-02-10 - Adds API endpoint for deleting (cancelling) build order outputs From 58aa2adde7685512266ab68c766d4793df9fa6f8 Mon Sep 17 00:00:00 2001 From: Oliver Date: Wed, 16 Feb 2022 11:42:15 +1100 Subject: [PATCH 02/14] Adds model mixin for generically determining which fields can be imported on any particular model --- InvenTree/InvenTree/models.py | 56 +++++++++++++++++++++++++++++++++++ InvenTree/part/models.py | 33 +++++++++++++++++++-- 2 files changed, 87 insertions(+), 2 deletions(-) diff --git a/InvenTree/InvenTree/models.py b/InvenTree/InvenTree/models.py index b42d54cbe9..63a4d23ce2 100644 --- a/InvenTree/InvenTree/models.py +++ b/InvenTree/InvenTree/models.py @@ -45,6 +45,62 @@ def rename_attachment(instance, filename): return os.path.join(instance.getSubdir(), filename) +class DataImportMixin(object): + """ + Model mixin class which provides support for 'data import' functionality. 
+
+    Models which implement this mixin should provide information on the fields available for import
+    """
+
+    # Define a map of fields available for import
+    IMPORT_FIELDS = {}
+
+    @classmethod
+    def get_import_fields(cls):
+        """
+        Return all available import fields
+
+        Where information on a particular field is not explicitly provided,
+        introspect the base model to (attempt to) find that information.
+
+        """
+        fields = cls.IMPORT_FIELDS
+
+        for name, field in fields.items():
+
+            # Attempt to extract base field information from the model
+            base_field = None
+
+            for f in cls._meta.fields:
+                if f.name == name:
+                    base_field = f
+                    break
+
+            if base_field:
+                if 'label' not in field:
+                    field['label'] = base_field.verbose_name
+
+                if 'help_text' not in field:
+                    field['help_text'] = base_field.help_text
+
+            fields[name] = field
+
+        return fields
+
+    @classmethod
+    def get_required_import_fields(cls):
+        """ Return all *required* import fields """
+        fields = {}
+
+        for name, field in cls.get_import_fields().items():
+            required = field.get('required', False)
+
+            if required:
+                fields[name] = field
+
+        return fields
+
+
 class ReferenceIndexingMixin(models.Model):
     """
     A mixin for keeping track of numerical copies of the "reference" field.
diff --git a/InvenTree/part/models.py b/InvenTree/part/models.py
index b312937e30..e45fd1a783 100644
--- a/InvenTree/part/models.py
+++ b/InvenTree/part/models.py
@@ -46,7 +46,7 @@ from common.models import InvenTreeSetting
 
 from InvenTree import helpers
 from InvenTree import validators
-from InvenTree.models import InvenTreeTree, InvenTreeAttachment
+from InvenTree.models import InvenTreeTree, InvenTreeAttachment, DataImportMixin
 from InvenTree.fields import InvenTreeURLField
 from InvenTree.helpers import decimal2string, normalize, decimal2money
 import InvenTree.tasks
@@ -2550,7 +2550,7 @@ class PartCategoryParameterTemplate(models.Model):
         help_text=_('Default Parameter Value'))
 
 
-class BomItem(models.Model):
+class BomItem(models.Model, DataImportMixin):
     """ A BomItem links a part to its component items.
     A part can have a BOM (bill of materials) which defines
     which parts are required (and in what quantity) to make it.
@@ -2568,6 +2568,35 @@
     allow_variants: Stock for part variants can be substituted for this BomItem
     """
 
+    # Fields available for bulk import
+    IMPORT_FIELDS = {
+        'quantity': {
+            'required': True
+        },
+        'optional': {},
+        'reference': {},
+        'overage': {},
+        'note': {},
+        'inherited': {},
+        'allow_variants': {},
+        'part': {
+            'label': _('Part'),
+            'help_text': _('Part ID or part name'),
+        },
+        'part_id': {
+            'label': _('Part ID'),
+            'help_text': _('Unique part ID value')
+        },
+        'part_name': {
+            'label': _('Part Name'),
+            'help_text': _('Part name'),
+        },
+        'part_ipn': {
+            'label': _('Part IPN'),
+            'help_text': _('Part IPN value'),
+        }
+    }
+
     @staticmethod
     def get_api_url():
         return reverse('api-bom-list')

From d7adb6959d9cb72eec9e731fe1a31b1fe45688f5 Mon Sep 17 00:00:00 2001
From: Oliver
Date: Wed, 16 Feb 2022 14:57:13 +1100
Subject: [PATCH 03/14] Adds functionality to map file columns to model fields

---
 InvenTree/InvenTree/serializers.py | 138 ++++++++++++++++++++++-------
 InvenTree/part/api.py              |  25 +++---
 InvenTree/part/serializers.py      | 130 +++++----------------------
 3 files changed, 143 insertions(+), 150 deletions(-)

diff --git a/InvenTree/InvenTree/serializers.py b/InvenTree/InvenTree/serializers.py
index a4415c0826..4d558e50c0 100644
--- a/InvenTree/InvenTree/serializers.py
+++ b/InvenTree/InvenTree/serializers.py
@@ -343,6 +343,9 @@ class DataFileUploadSerializer(serializers.Serializer):
     - Extracts data rows
     """
 
+    # Implementing class should register a target model (database model) to be used for import
+    TARGET_MODEL = None
+
     class Meta:
         fields = [
             'bom_file',
@@ -400,18 +403,81 @@ class DataFileUploadSerializer(serializers.Serializer):
         except Exception as e:
             raise serializers.ValidationError(str(e))
 
+        if len(self.dataset.headers) == 0:
+            raise serializers.ValidationError(_("No columns found in file"))
+
         if len(self.dataset) == 0:
             raise serializers.ValidationError(_("No data rows found in file"))
 
         return data_file
 
+    def match_column(self, column_name, field_names):
+        """
+        Attempt to match a column name (from the file) to a field (defined in the model)
+
+        Order of matching is:
+        - Direct match
+        - Case insensitive match
+        - Fuzzy match
+        """
+
+        column_name = column_name.strip()
+
+        column_name_lower = column_name.lower()
+
+        if column_name in field_names:
+            return column_name
+
+        for field_name in field_names:
+            if field_name.lower() == column_name_lower:
+                return field_name
+
+        # TODO: Fuzzy pattern matching
+
+        # No matches found
+        return None
+
+
     def extract_data(self):
         """
         Returns dataset extracted from the file
         """
 
+        # Provide a dict of available import fields for the model
+        model_fields = {}
+
+        # Keep track of columns we have already extracted
+        matched_columns = set()
+
+        if self.TARGET_MODEL:
+            try:
+                model_fields = self.TARGET_MODEL.get_import_fields()
+            except:
+                pass
+
+        # Extract a list of valid model field names
+        model_field_names = [key for key in model_fields.keys()]
+
+        # Provide a dict of available columns from the dataset
+        file_columns = {}
+
+        for header in self.dataset.headers:
+            column = {}
+
+            # Attempt to "match" file columns to model fields
+            match = self.match_column(header, model_field_names)
+
+            if match is not None and match not in matched_columns:
+                matched_columns.add(match)
+                column['value'] = match
+            else:
+                column['value'] = None
+
+            file_columns[header] = column
+
         return {
-            'headers': self.dataset.headers,
+            'file_fields': file_columns,
+            'model_fields': model_fields,
             'rows': [row.values() for row in
self.dataset.dict], 'filename': self.filename, } @@ -425,25 +491,20 @@ class DataFileExtractSerializer(serializers.Serializer): - User provides an array of raw data rows """ - # Provide a dict of expected columns for this importer - EXPECTED_COLUMNS = {} - - # Provide a list of required columns for this importer - REQUIRED_COLUMNS = [] + # Implementing class should register a target model (database model) to be used for import + TARGET_MODEL = None class Meta: fields = [ - 'raw_headers', - 'mapped_headers', + 'columns', 'rows', ] - raw_headers = serializers.ListField( - child=serializers.CharField(), - ) - - mapped_headers = serializers.ListField( - child=serializers.CharField(), + # Mapping of columns + columns = serializers.ListField( + child=serializers.CharField( + allow_blank=True, + ), ) rows = serializers.ListField( @@ -458,23 +519,16 @@ class DataFileExtractSerializer(serializers.Serializer): data = super().validate(data) - self.raw_headers = data.get('raw_headers', []) - self.mapped_headers = data.get('mapped_headers', []) + self.columns = data.get('columns', []) self.rows = data.get('rows', []) if len(self.rows) == 0: raise serializers.ValidationError(_("No data rows provided")) - if len(self.raw_headers) == 0: - raise serializers.ValidationError(_("File headers not supplied")) + if len(self.columns) == 0: + raise serializers.ValidationError(_("No data columns supplied")) - if len(self.mapped_headers) == 0: - raise serializers.ValidationError(_("Mapped headers not supplied")) - - if len(self.raw_headers) != len(self.mapped_headers): - raise serializers.ValidationError(_("Supplied header list has incorrect length")) - - self.validate_headers() + self.validate_extracted_columns() return self.extract_data(data) @@ -486,18 +540,38 @@ class DataFileExtractSerializer(serializers.Serializer): return data - def validate_headers(self): + def validate_extracted_columns(self): """ Perform custom validation of header mapping. """ - print("validate_headers()") - - for col in self.REQUIRED_COLUMNS: - print("checking col:", col) - if col not in self.mapped_headers: - raise serializers.ValidationError(_("Missing required column") + f": {col}") + if self.TARGET_MODEL: + try: + model_fields = self.TARGET_MODEL.get_import_fields() + except: + model_fields = {} + cols_seen = set() + + for name, field in model_fields.items(): + + required = field.get('required', False) + + # Check for missing required columns + if required: + if name not in self.columns: + raise serializers.ValidationError(_("Missing required column") + f": '{name}'") + + for col in self.columns: + + if not col: + continue + + # Check for duplicated columns + if col in cols_seen: + raise serializers.ValidationError(_("Duplicate column") + f": '{col}'") + + cols_seen.add(col) def save(self): """ diff --git a/InvenTree/part/api.py b/InvenTree/part/api.py index 4c52b87520..7600e32780 100644 --- a/InvenTree/part/api.py +++ b/InvenTree/part/api.py @@ -1539,7 +1539,18 @@ class BomExtract(generics.CreateAPIView): """ queryset = Part.objects.none() - serializer_class = part_serializers.BomExtractSerializer + serializer_class = part_serializers.BomFileExtractSerializer + + +class BomUpload(generics.CreateAPIView): + """ + API endpoint for uploading a complete Bill of Materials. + + It is assumed that the BOM has been extracted from a file using the BomExtract endpoint. 
+ """ + + queryset = Part.objects.all() + serializer_class = part_serializers.BomFileUploadSerializer def create(self, request, *args, **kwargs): """ @@ -1556,16 +1567,6 @@ class BomExtract(generics.CreateAPIView): return Response(data, status=status.HTTP_201_CREATED, headers=headers) -class BomUpload(generics.CreateAPIView): - """ - API endpoint for uploading a complete Bill of Materials. - - It is assumed that the BOM has been extracted from a file using the BomExtract endpoint. - """ - - queryset = Part.objects.all() - serializer_class = part_serializers.BomUploadSerializer - class BomDetail(generics.RetrieveUpdateDestroyAPIView): """ API endpoint for detail view of a single BomItem object """ @@ -1719,9 +1720,9 @@ bom_api_urls = [ url(r'^.*$', BomDetail.as_view(), name='api-bom-item-detail'), ])), + url(r'^upload/', BomUpload.as_view(), name='api-bom-upload'), url(r'^extract/', BomExtract.as_view(), name='api-bom-extract'), - url(r'^upload/', BomUpload.as_view(), name='api-bom-upload'), # Catch-all url(r'^.*$', BomList.as_view(), name='api-bom-list'), diff --git a/InvenTree/part/serializers.py b/InvenTree/part/serializers.py index 195ce15e4f..cde5cc5087 100644 --- a/InvenTree/part/serializers.py +++ b/InvenTree/part/serializers.py @@ -17,7 +17,9 @@ from rest_framework import serializers from sql_util.utils import SubqueryCount, SubquerySum from djmoney.contrib.django_rest_framework import MoneyField -from InvenTree.serializers import (InvenTreeAttachmentSerializerField, +from InvenTree.serializers import (DataFileUploadSerializer, + DataFileExtractSerializer, + InvenTreeAttachmentSerializerField, InvenTreeDecimalField, InvenTreeImageSerializerField, InvenTreeModelSerializer, @@ -709,7 +711,7 @@ class PartCopyBOMSerializer(serializers.Serializer): ) -class BomExtractSerializer(serializers.Serializer): +class BomFileUploadSerializer(DataFileUploadSerializer): """ Serializer for uploading a file and extracting data from it. @@ -729,50 +731,7 @@ class BomExtractSerializer(serializers.Serializer): """ - class Meta: - fields = [ - 'bom_file', - 'part', - 'clear_existing', - ] - - # These columns must be present - REQUIRED_COLUMNS = [ - 'quantity', - ] - - # We need at least one column to specify a "part" - PART_COLUMNS = [ - 'part', - 'part_id', - 'part_name', - 'part_ipn', - ] - - # These columns are "optional" - OPTIONAL_COLUMNS = [ - 'allow_variants', - 'inherited', - 'optional', - 'overage', - 'note', - 'reference', - ] - - def find_matching_column(self, col_name, columns): - - # Direct match - if col_name in columns: - return col_name - - col_name = col_name.lower().strip() - - for col in columns: - if col.lower().strip() == col_name: - return col - - # No match - return None + TARGET_MODEL = BomItem def find_matching_data(self, row, col_name, columns): """ @@ -783,58 +742,7 @@ class BomExtractSerializer(serializers.Serializer): return row.get(col_name, None) - bom_file = serializers.FileField( - label=_("BOM File"), - help_text=_("Select Bill of Materials file"), - required=True, - allow_empty_file=False, - ) - - def validate_bom_file(self, bom_file): - """ - Perform validation checks on the uploaded BOM file - """ - - self.filename = bom_file.name - - name, ext = os.path.splitext(bom_file.name) - - # Remove the leading . 
from the extension - ext = ext[1:] - - accepted_file_types = [ - 'xls', 'xlsx', - 'csv', 'tsv', - 'xml', - ] - - if ext not in accepted_file_types: - raise serializers.ValidationError(_("Unsupported file type")) - - # Impose a 50MB limit on uploaded BOM files - max_upload_file_size = 50 * 1024 * 1024 - - if bom_file.size > max_upload_file_size: - raise serializers.ValidationError(_("File is too large")) - - # Read file data into memory (bytes object) - try: - data = bom_file.read() - except Exception as e: - raise serializers.ValidationError(str(e)) - - if ext in ['csv', 'tsv', 'xml']: - try: - data = data.decode() - except Exception as e: - raise serializers.ValidationError(str(e)) - - # Convert to a tablib dataset (we expect headers) - try: - self.dataset = tablib.Dataset().load(data, ext, headers=True) - except Exception as e: - raise serializers.ValidationError(str(e)) - + """ for header in self.REQUIRED_COLUMNS: match = self.find_matching_column(header, self.dataset.headers) @@ -861,11 +769,9 @@ class BomExtractSerializer(serializers.Serializer): raise serializers.ValidationError(_("No data rows found")) return bom_file + """ - def extract_data(self): - """ - Read individual rows out of the BOM file - """ + def dextract_data(self): rows = [] errors = [] @@ -880,9 +786,9 @@ class BomExtractSerializer(serializers.Serializer): row_error = {} - """ - If the "level" column is specified, and this is not a top-level BOM item, ignore the row! - """ + + # If the "level" column is specified, and this is not a top-level BOM item, ignore the row! + if level_column is not None: level = row.get('level', None) @@ -989,15 +895,19 @@ class BomExtractSerializer(serializers.Serializer): 'filename': self.filename, } + """ part = serializers.PrimaryKeyRelatedField(queryset=Part.objects.filter(assembly=True), required=True) clear_existing = serializers.BooleanField( label=_("Clear Existing BOM"), help_text=_("Delete existing BOM data first"), ) + """ def save(self): + ... + """ data = self.validated_data master_part = data['part'] @@ -1006,7 +916,15 @@ class BomExtractSerializer(serializers.Serializer): if clear_existing: # Remove all existing BOM items - master_part.bom_items.all().delete() + $ master_part.bom_items.all().delete() + """ + + +class BomFileExtractSerializer(DataFileExtractSerializer): + """ + """ + + TARGET_MODEL = BomItem class BomUploadSerializer(serializers.Serializer): From f399f4fa34c5b4aaa5ac47e50bb3bbb0d9c7df07 Mon Sep 17 00:00:00 2001 From: Oliver Date: Wed, 16 Feb 2022 16:57:27 +1100 Subject: [PATCH 04/14] Refactoring API endpoints - Improved URL naming scheme --- InvenTree/InvenTree/models.py | 6 +- InvenTree/InvenTree/serializers.py | 91 +++++- InvenTree/part/api.py | 37 ++- InvenTree/part/models.py | 10 +- InvenTree/part/serializers.py | 299 +++++------------- InvenTree/part/templates/part/upload_bom.html | 97 +++++- InvenTree/templates/js/translated/bom.js | 27 +- InvenTree/templates/js/translated/forms.js | 2 +- 8 files changed, 295 insertions(+), 274 deletions(-) diff --git a/InvenTree/InvenTree/models.py b/InvenTree/InvenTree/models.py index 63a4d23ce2..0fe3136871 100644 --- a/InvenTree/InvenTree/models.py +++ b/InvenTree/InvenTree/models.py @@ -59,7 +59,7 @@ class DataImportMixin(object): def get_import_fields(cls): """ Return all available import fields - + Where information on a particular field is not explicitly provided, introspect the base model to (attempt to) find that information. 
@@ -67,7 +67,7 @@ class DataImportMixin(object): fields = cls.IMPORT_FIELDS for name, field in fields.items(): - + # Attempt to extract base field information from the model base_field = None @@ -79,7 +79,7 @@ class DataImportMixin(object): if base_field: if 'label' not in field: field['label'] = base_field.verbose_name - + if 'help_text' not in field: field['help_text'] = base_field.help_text diff --git a/InvenTree/InvenTree/serializers.py b/InvenTree/InvenTree/serializers.py index 4d558e50c0..472bab30b9 100644 --- a/InvenTree/InvenTree/serializers.py +++ b/InvenTree/InvenTree/serializers.py @@ -411,7 +411,7 @@ class DataFileUploadSerializer(serializers.Serializer): return data_file - def match_column(self, column_name, field_names): + def match_column(self, column_name, field_names, exact=False): """ Attempt to match a column name (from the file) to a field (defined in the model) @@ -432,12 +432,15 @@ class DataFileUploadSerializer(serializers.Serializer): if field_name.lower() == column_name_lower: return field_name - # TODO: Fuzzy pattern matching + if exact: + # Finished available 'exact' matches + return None + + # TODO: Fuzzy pattern matching for column names # No matches found return None - def extract_data(self): """ Returns dataset extracted from the file @@ -465,7 +468,7 @@ class DataFileUploadSerializer(serializers.Serializer): column = {} # Attempt to "match" file columns to model fields - match = self.match_column(header, model_field_names) + match = self.match_column(header, model_field_names, exact=True) if match is not None and match not in matched_columns: matched_columns.add(match) @@ -482,13 +485,16 @@ class DataFileUploadSerializer(serializers.Serializer): 'filename': self.filename, } + def save(self): + ... + class DataFileExtractSerializer(serializers.Serializer): """ Generic serializer for extracting data from an imported dataset. - User provides an array of matched headers - - User provides an array of raw data rows + - User provides an array of raw data rows """ # Implementing class should register a target model (database model) to be used for import @@ -500,7 +506,7 @@ class DataFileExtractSerializer(serializers.Serializer): 'rows', ] - # Mapping of columns + # Mapping of columns columns = serializers.ListField( child=serializers.CharField( allow_blank=True, @@ -530,16 +536,69 @@ class DataFileExtractSerializer(serializers.Serializer): self.validate_extracted_columns() - return self.extract_data(data) - - def extract_data(self, data): - """ - Extract row data based on the provided fields. - Returns an array of mapped column:value values - """ - return data + @property + def data(self): + + if self.TARGET_MODEL: + try: + model_fields = self.TARGET_MODEL.get_import_fields() + except: + model_fields = {} + + rows = [] + + for row in self.rows: + """ + Optionally pre-process each row, before sending back to the client + """ + + processed_row = self.process_row(self.row_to_dict(row)) + + if processed_row: + rows.append({ + "original": row, + "data": processed_row, + }) + + return { + 'fields': model_fields, + 'columns': self.columns, + 'rows': rows, + } + + def process_row(self, row): + """ + Process a 'row' of data, which is a mapped column:value dict + + Returns either a mapped column:value dict, or None. + + If the function returns None, the column is ignored! 
+ """ + + # Default implementation simply returns the original row data + return row + + def row_to_dict(self, row): + """ + Convert a "row" to a named data dict + """ + + row_dict = { + 'errors': {}, + } + + for idx, value in enumerate(row): + + if idx < len(self.columns): + col = self.columns[idx] + + if col: + row_dict[col] = value + + return row_dict + def validate_extracted_columns(self): """ Perform custom validation of header mapping. @@ -561,7 +620,7 @@ class DataFileExtractSerializer(serializers.Serializer): if required: if name not in self.columns: raise serializers.ValidationError(_("Missing required column") + f": '{name}'") - + for col in self.columns: if not col: @@ -577,4 +636,4 @@ class DataFileExtractSerializer(serializers.Serializer): """ No "save" action for this serializer """ - ... \ No newline at end of file + ... diff --git a/InvenTree/part/api.py b/InvenTree/part/api.py index 7600e32780..7f19a38183 100644 --- a/InvenTree/part/api.py +++ b/InvenTree/part/api.py @@ -1533,16 +1533,7 @@ class BomList(generics.ListCreateAPIView): ] -class BomExtract(generics.CreateAPIView): - """ - API endpoint for extracting BOM data from a BOM file. - """ - - queryset = Part.objects.none() - serializer_class = part_serializers.BomFileExtractSerializer - - -class BomUpload(generics.CreateAPIView): +class BomImportUpload(generics.CreateAPIView): """ API endpoint for uploading a complete Bill of Materials. @@ -1550,7 +1541,7 @@ class BomUpload(generics.CreateAPIView): """ queryset = Part.objects.all() - serializer_class = part_serializers.BomFileUploadSerializer + serializer_class = part_serializers.BomImportUploadSerializer def create(self, request, *args, **kwargs): """ @@ -1567,6 +1558,23 @@ class BomUpload(generics.CreateAPIView): return Response(data, status=status.HTTP_201_CREATED, headers=headers) +class BomImportExtract(generics.CreateAPIView): + """ + API endpoint for extracting BOM data from a BOM file. 
+ """ + + queryset = Part.objects.none() + serializer_class = part_serializers.BomImportExtractSerializer + + +class BomImportSubmit(generics.CreateAPIView): + """ + API endpoint for submitting BOM data from a BOM file + """ + + queryset = BomItem.objects.none() + serializer_class = part_serializers.BomImportSubmitSerializer + class BomDetail(generics.RetrieveUpdateDestroyAPIView): """ API endpoint for detail view of a single BomItem object """ @@ -1720,9 +1728,10 @@ bom_api_urls = [ url(r'^.*$', BomDetail.as_view(), name='api-bom-item-detail'), ])), - url(r'^upload/', BomUpload.as_view(), name='api-bom-upload'), - url(r'^extract/', BomExtract.as_view(), name='api-bom-extract'), - + # API endpoint URLs for importing BOM data + url(r'^import/upload/', BomImportUpload.as_view(), name='api-bom-import-upload'), + url(r'^import/extract/', BomImportExtract.as_view(), name='api-bom-import-extract'), + url(r'^import/submit/', BomImportSubmit.as_view(), name='api-bom-import-submit'), # Catch-all url(r'^.*$', BomList.as_view(), name='api-bom-list'), diff --git a/InvenTree/part/models.py b/InvenTree/part/models.py index e45fd1a783..478c4c195c 100644 --- a/InvenTree/part/models.py +++ b/InvenTree/part/models.py @@ -2573,12 +2573,12 @@ class BomItem(models.Model, DataImportMixin): 'quantity': { 'required': True }, - 'optional': {}, 'reference': {}, 'overage': {}, - 'note': {}, - 'inherited': {}, 'allow_variants': {}, + 'inherited': {}, + 'optional': {}, + 'note': {}, 'part': { 'label': _('Part'), 'help_text': _('Part ID or part name'), @@ -2594,6 +2594,10 @@ class BomItem(models.Model, DataImportMixin): 'part_ipn': { 'label': _('Part IPN'), 'help_text': _('Part IPN value'), + }, + 'level': { + 'label': _('Level'), + 'help_text': _('BOM level'), } } diff --git a/InvenTree/part/serializers.py b/InvenTree/part/serializers.py index cde5cc5087..6b0c89ad88 100644 --- a/InvenTree/part/serializers.py +++ b/InvenTree/part/serializers.py @@ -4,8 +4,6 @@ JSON serializers for Part app import imghdr from decimal import Decimal -import os -import tablib from django.urls import reverse_lazy from django.db import models, transaction @@ -711,223 +709,100 @@ class PartCopyBOMSerializer(serializers.Serializer): ) -class BomFileUploadSerializer(DataFileUploadSerializer): +class BomImportUploadSerializer(DataFileUploadSerializer): """ Serializer for uploading a file and extracting data from it. 
- - Note: 2022-02-04 - This needs a *serious* refactor in future, probably - - When parsing the file, the following things happen: - - a) Check file format and validity - b) Look for "required" fields - c) Look for "part" fields - used to "infer" part - - Once the file itself has been validated, we iterate through each data row: - - - If the "level" column is provided, ignore anything below level 1 - - Try to "guess" the part based on part_id / part_name / part_ipn - - Extract other fields as required - - """ - - TARGET_MODEL = BomItem - - def find_matching_data(self, row, col_name, columns): - """ - Extract data from the row, based on the "expected" column name - """ - - col_name = self.find_matching_column(col_name, columns) - - return row.get(col_name, None) - - """ - for header in self.REQUIRED_COLUMNS: - - match = self.find_matching_column(header, self.dataset.headers) - - if match is None: - raise serializers.ValidationError(_("Missing required column") + f": '{header}'") - - part_column_matches = {} - - part_match = False - - for col in self.PART_COLUMNS: - col_match = self.find_matching_column(col, self.dataset.headers) - - part_column_matches[col] = col_match - - if col_match is not None: - part_match = True - - if not part_match: - raise serializers.ValidationError(_("No part column found")) - - if len(self.dataset) == 0: - raise serializers.ValidationError(_("No data rows found")) - - return bom_file - """ - - def dextract_data(self): - - rows = [] - errors = [] - - found_parts = set() - - headers = self.dataset.headers - - level_column = self.find_matching_column('level', headers) - - for row in self.dataset.dict: - - row_error = {} - - - # If the "level" column is specified, and this is not a top-level BOM item, ignore the row! - - if level_column is not None: - level = row.get('level', None) - - if level is not None: - try: - level = int(level) - if level != 1: - continue - except: - pass - - """ - Next, we try to "guess" the part, based on the provided data. - - A) If the part_id is supplied, use that! - B) If the part name and/or part_ipn are supplied, maybe we can use those? - """ - part_id = self.find_matching_data(row, 'part_id', headers) - part_name = self.find_matching_data(row, 'part_name', headers) - part_ipn = self.find_matching_data(row, 'part_ipn', headers) - - part = None - - if part_id is not None: - try: - part = Part.objects.get(pk=part_id) - except (ValueError, Part.DoesNotExist): - pass - - # Optionally, specify using field "part" - if part is None: - pk = self.find_matching_data(row, 'part', headers) - - if pk is not None: - try: - part = Part.objects.get(pk=pk) - except (ValueError, Part.DoesNotExist): - pass - - if part is None: - - if part_name or part_ipn: - queryset = Part.objects.all() - - if part_name: - queryset = queryset.filter(name=part_name) - - if part_ipn: - queryset = queryset.filter(IPN=part_ipn) - - # Only if we have a single direct match - if queryset.exists(): - if queryset.count() == 1: - part = queryset.first() - else: - # Multiple matches! 
- row_error['part'] = _('Multiple matching parts found') - - if part is None: - if 'part' not in row_error: - row_error['part'] = _('No matching part found') - else: - if part.pk in found_parts: - row_error['part'] = _("Duplicate part selected") - - elif not part.component: - row_error['part'] = _('Part is not designated as a component') - - found_parts.add(part.pk) - - row['part'] = part.pk if part is not None else None - - """ - Read out the 'quantity' column - check that it is valid - """ - quantity = self.find_matching_data(row, 'quantity', self.dataset.headers) - - # Ensure quantity field is provided - row['quantity'] = quantity - - if quantity is None: - row_error['quantity'] = _('Quantity not provided') - else: - try: - quantity = Decimal(quantity) - - if quantity <= 0: - row_error['quantity'] = _('Quantity must be greater than zero') - except: - row_error['quantity'] = _('Invalid quantity') - - # For each "optional" column, ensure the column names are allocated correctly - for field_name in self.OPTIONAL_COLUMNS: - if field_name not in row: - row[field_name] = self.find_matching_data(row, field_name, self.dataset.headers) - - rows.append(row) - errors.append(row_error) - - return { - 'rows': rows, - 'errors': errors, - 'headers': headers, - 'filename': self.filename, - } - - """ - part = serializers.PrimaryKeyRelatedField(queryset=Part.objects.filter(assembly=True), required=True) - - clear_existing = serializers.BooleanField( - label=_("Clear Existing BOM"), - help_text=_("Delete existing BOM data first"), - ) - """ - - def save(self): - - ... - """ - data = self.validated_data - - master_part = data['part'] - clear_existing = data['clear_existing'] - - if clear_existing: - - # Remove all existing BOM items - $ master_part.bom_items.all().delete() - """ - - -class BomFileExtractSerializer(DataFileExtractSerializer): - """ """ TARGET_MODEL = BomItem -class BomUploadSerializer(serializers.Serializer): +class BomImportExtractSerializer(DataFileExtractSerializer): + """ + """ + + TARGET_MODEL = BomItem + + def validate_extracted_columns(self): + super().validate_extracted_columns() + + part_columns = ['part', 'part_name', 'part_ipn', 'part_id'] + + if not any([col in self.columns for col in part_columns]): + # At least one part column is required! + raise serializers.ValidationError(_("No part column specified")) + + def process_row(self, row): + + # Skip any rows which are at a lower "level" + level = row.get('level', None) + + if level is not None: + try: + level = int(level) + if level != 1: + # Skip this row + return None + except: + pass + + # Attempt to extract a valid part based on the provided data + part_id = row.get('part_id', row.get('part', None)) + part_name = row.get('part_name', row.get('part', None)) + part_ipn = row.get('part_ipn', None) + + part = None + + if part_id is not None: + try: + part = Part.objects.get(pk=part_id) + except (ValueError, Part.DoesNotExist): + pass + + # No direct match, where else can we look? 
+ if part is None: + if part_name or part_ipn: + queryset = Part.objects.all() + + if part_name: + queryset = queryset.filter(name=part_name) + + if part_ipn: + queryset = queryset.filter(IPN=part_ipn) + + if queryset.exists(): + if queryset.count() == 1: + part = queryset.first() + else: + row['errors']['part'] = _('Multiple matching parts found') + + if part is None: + row['errors']['part'] = _('No matching part found') + else: + if not part.component: + row['errors']['part'] = _('Part is not designed as a component') + + # Update the 'part' value in the row + row['part'] = part.pk if part is not None else None + + # Check the provided 'quantity' value + quantity = row.get('quantity', None) + + if quantity is None: + row['errors']['quantity'] = _('Quantity not provided') + else: + try: + quantity = Decimal(quantity) + + if quantity <= 0: + row['errors']['quantity'] = _('Quantity must be greater than zero') + except: + row['errors']['quantity'] = _('Invalid quantity') + + return row + + +class BomImportSubmitSerializer(serializers.Serializer): """ Serializer for uploading a BOM against a specified part. diff --git a/InvenTree/part/templates/part/upload_bom.html b/InvenTree/part/templates/part/upload_bom.html index 151a4b5424..bbb69e3083 100644 --- a/InvenTree/part/templates/part/upload_bom.html +++ b/InvenTree/part/templates/part/upload_bom.html @@ -77,15 +77,10 @@ $('#bom-template-download').click(function() { $('#bom-upload').click(function() { - constructForm('{% url "api-bom-extract" %}', { + constructForm('{% url "api-bom-import-upload" %}', { method: 'POST', fields: { - bom_file: {}, - part: { - value: {{ part.pk }}, - hidden: true, - }, - clear_existing: {}, + data_file: {}, }, title: '{% trans "Upload BOM File" %}', onSuccess: function(response) { @@ -96,12 +91,92 @@ $('#bom-upload').click(function() { // Disable the "submit" button $('#bom-submit').show(); - constructBomUploadTable(response); + var fields = {}; - $('#bom-submit').click(function() { - submitBomTable({{ part.pk }}, { - bom_data: response, + var choices = []; + + // Add an "empty" value + choices.push({ + value: '', + display_name: '-----', + }); + + for (const [name, field] of Object.entries(response.model_fields)) { + choices.push({ + value: name, + display_name: field.label || name, }); + } + + var field_names = Object.keys(response.file_fields); + + for (var idx = 0; idx < field_names.length; idx++) { + + var field_name = field_names[idx]; + + // Construct a new field + fields[`column_${idx}`] = { + type: 'choice', + label: field_name, + value: response.file_fields[field_name].value, + choices: choices, + inline: true, + }; + } + + constructForm('{% url "api-bom-import-extract" %}', { + method: 'POST', + title: '{% trans "Select BOM Columns" %}', + fields: fields, + onSubmit: function(fields, opts) { + var columns = []; + + for (var idx = 0; idx < field_names.length; idx++) { + columns.push( + getFormFieldValue(`column_${idx}`, {}, {}) + ); + } + + $(opts.modal).find('#modal-progress-spinner').show(); + + inventreePut( + opts.url, + { + columns: columns, + rows: response.rows, + }, + { + method: 'POST', + success: function(r) { + handleFormSuccess(r, opts); + + constructBomUploadTable(r); + + $('#bom-submit').click(function() { + submitBomTable({{ part.pk }}, { + bom_data: response, + }); + }); + }, + error: function(xhr) { + + $(opts.modal).find('#modal-progress-spinner').hide(); + + switch (xhr.status) { + case 400: + handleFormErrors(xhr.responseJSON, fields, opts); + break; + default: + 
$(opts.modal).modal('hide'); + + console.log(`upload error at ${opts.url}`); + showApiError(xhr, opts.url); + break; + } + } + } + ); + }, }); } }); diff --git a/InvenTree/templates/js/translated/bom.js b/InvenTree/templates/js/translated/bom.js index 0c70bd3d86..5c93c7c8f2 100644 --- a/InvenTree/templates/js/translated/bom.js +++ b/InvenTree/templates/js/translated/bom.js @@ -40,12 +40,6 @@ function constructBomUploadTable(data, options={}) { function constructRow(row, idx, fields) { // Construct an individual row from the provided data - var errors = {}; - - if (data.errors && data.errors.length > idx) { - errors = data.errors[idx]; - } - var field_options = { hideLabels: true, hideClearButton: true, @@ -60,7 +54,7 @@ function constructBomUploadTable(data, options={}) { return `Cannot render field '${field_name}`; } - field.value = row[field_name]; + field.value = row.data[field_name]; return constructField(`items_${field_name}_${idx}`, field, field_options); @@ -99,19 +93,19 @@ function constructBomUploadTable(data, options={}) { $('#bom-import-table tbody').append(html); // Handle any errors raised by initial data import - if (errors.part) { - addFieldErrorMessage(`items_sub_part_${idx}`, errors.part); + if (row.data.errors.part) { + addFieldErrorMessage(`items_sub_part_${idx}`, row.data.errors.part); } - if (errors.quantity) { - addFieldErrorMessage(`items_quantity_${idx}`, errors.quantity); + if (row.data.errors.quantity) { + addFieldErrorMessage(`items_quantity_${idx}`, row.data.errors.quantity); } // Initialize the "part" selector for this row initializeRelatedField( { name: `items_sub_part_${idx}`, - value: row.part, + value: row.data.part, api_url: '{% url "api-part-list" %}', filters: { component: true, @@ -140,7 +134,12 @@ function constructBomUploadTable(data, options={}) { }); // Prettify the original import data - var pretty = JSON.stringify(row, undefined, 4); + var pretty = JSON.stringify( + { + columns: data.columns, + row: row.original, + }, undefined, 4 + ); var html = `
@@ -176,7 +175,7 @@ function submitBomTable(part_id, options={}) { var idx_values = []; - var url = '{% url "api-bom-upload" %}'; + var url = '{% url "api-bom-import-submit" %}'; $('.bom-import-row').each(function() { var idx = $(this).attr('idx'); diff --git a/InvenTree/templates/js/translated/forms.js b/InvenTree/templates/js/translated/forms.js index 7f82587d2e..b8f387328b 100644 --- a/InvenTree/templates/js/translated/forms.js +++ b/InvenTree/templates/js/translated/forms.js @@ -1219,7 +1219,7 @@ function addFieldErrorMessage(name, error_text, error_idx=0, options={}) { field_dom.append(error_html); } else { - console.log(`WARNING: addFieldErrorMessage could not locate field '${field_name}`); + console.log(`WARNING: addFieldErrorMessage could not locate field '${field_name}'`); } } From 8f6312f7f44f7095732e1eae05bfb2561e3e4eff Mon Sep 17 00:00:00 2001 From: Oliver Date: Wed, 16 Feb 2022 16:57:54 +1100 Subject: [PATCH 05/14] Adds generic javascript function for mapping file columns to model fields --- InvenTree/part/templates/part/upload_bom.html | 102 +++------------ InvenTree/templates/js/translated/forms.js | 121 +++++++++++++++++- 2 files changed, 133 insertions(+), 90 deletions(-) diff --git a/InvenTree/part/templates/part/upload_bom.html b/InvenTree/part/templates/part/upload_bom.html index bbb69e3083..1646a9b70c 100644 --- a/InvenTree/part/templates/part/upload_bom.html +++ b/InvenTree/part/templates/part/upload_bom.html @@ -88,96 +88,24 @@ $('#bom-upload').click(function() { // Clear existing entries from the table $('.bom-import-row').remove(); - // Disable the "submit" button - $('#bom-submit').show(); + selectImportFields( + '{% url "api-bom-import-extract" %}', + response, + { + success: function(response) { + constructBomUploadTable(response); - var fields = {}; + // Show the "submit" button + $('#bom-submit').show(); - var choices = []; - - // Add an "empty" value - choices.push({ - value: '', - display_name: '-----', - }); - - for (const [name, field] of Object.entries(response.model_fields)) { - choices.push({ - value: name, - display_name: field.label || name, - }); - } - - var field_names = Object.keys(response.file_fields); - - for (var idx = 0; idx < field_names.length; idx++) { - - var field_name = field_names[idx]; - - // Construct a new field - fields[`column_${idx}`] = { - type: 'choice', - label: field_name, - value: response.file_fields[field_name].value, - choices: choices, - inline: true, - }; - } - - constructForm('{% url "api-bom-import-extract" %}', { - method: 'POST', - title: '{% trans "Select BOM Columns" %}', - fields: fields, - onSubmit: function(fields, opts) { - var columns = []; - - for (var idx = 0; idx < field_names.length; idx++) { - columns.push( - getFormFieldValue(`column_${idx}`, {}, {}) - ); + $('#bom-submit').click(function() { + submitBomTable({{ part.pk }}, { + bom_data: response, + }); + }); } - - $(opts.modal).find('#modal-progress-spinner').show(); - - inventreePut( - opts.url, - { - columns: columns, - rows: response.rows, - }, - { - method: 'POST', - success: function(r) { - handleFormSuccess(r, opts); - - constructBomUploadTable(r); - - $('#bom-submit').click(function() { - submitBomTable({{ part.pk }}, { - bom_data: response, - }); - }); - }, - error: function(xhr) { - - $(opts.modal).find('#modal-progress-spinner').hide(); - - switch (xhr.status) { - case 400: - handleFormErrors(xhr.responseJSON, fields, opts); - break; - default: - $(opts.modal).modal('hide'); - - console.log(`upload error at ${opts.url}`); - showApiError(xhr, 
opts.url);
-                            break;
-                    }
-                }
-            }
-        );
-    },
-    });
+        }
+    );
     }
 });
diff --git a/InvenTree/templates/js/translated/forms.js b/InvenTree/templates/js/translated/forms.js
index b8f387328b..6ab183cc96 100644
--- a/InvenTree/templates/js/translated/forms.js
+++ b/InvenTree/templates/js/translated/forms.js
@@ -31,6 +31,7 @@
     setFormInputPlaceholder,
     setFormGroupVisibility,
     showFormInput,
+    selectImportFields,
 */
 
 /**
@@ -895,8 +896,8 @@ function getFormFieldValue(name, field={}, options={}) {
     // Find the HTML element
     var el = getFormFieldElement(name, options);
 
-    if (!el) {
-        console.log(`ERROR: getFormFieldValue could not locate field '{name}'`);
+    if (!el.exists()) {
+        console.log(`ERROR: getFormFieldValue could not locate field '${name}'`);
         return null;
     }
 
@@ -2080,7 +2081,7 @@ function constructLabel(name, parameters) {
  * - parameters: Field parameters returned by the OPTIONS method
  *
  */
-function constructInput(name, parameters, options) {
+function constructInput(name, parameters, options={}) {
 
     var html = '';
 
@@ -2422,3 +2423,117 @@ function constructHelpText(name, parameters) {
 
     return html;
 }
+
+
+/*
+ * Construct a dialog to select import fields
+ */
+function selectImportFields(url, data={}, options={}) {
+
+    if (!data.model_fields) {
+        console.log("WARNING: selectImportFields is missing 'model_fields'");
+        return;
+    }
+
+    if (!data.file_fields) {
+        console.log("WARNING: selectImportFields is missing 'file_fields'");
+        return;
+    }
+
+    var choices = [];
+
+    // Add an "empty" value
+    choices.push({
+        value: '',
+        display_name: '-----',
+    });
+
+    for (const [name, field] of Object.entries(data.model_fields)) {
+        choices.push({
+            value: name,
+            display_name: field.label || name,
+        });
+    }
+
+    var rows = '';
+
+    var field_names = Object.keys(data.file_fields);
+
+    for (var idx = 0; idx < field_names.length; idx++) {
+
+        var field_name = field_names[idx];
+
+        var choice_input = constructInput(
+            `column_${idx}`,
+            {
+                type: 'choice',
+                label: field_name,
+                value: data.file_fields[field_name].value,
+                choices: choices,
+            }
+        );
+
+        rows += `<tr><td>${field_name}</td><td>${choice_input}</td></tr>`;
+    }
+
+    var headers = `<tr><th>{% trans "File Column" %}</th><th>{% trans "Field Name" %}</th></tr>`;
+
+    var html = '';
+
+    if (options.preamble) {
+        html += options.preamble;
+    }
+
+    html += `<table class='table table-condensed'>${headers}${rows}</table>
`; + + constructForm(url, { + method: 'POST', + title: '{% trans "Select Columns" %}', + fields: {}, + preFormContent: html, + onSubmit: function(fields, opts) { + + var columns = []; + + for (var idx = 0; idx < field_names.length; idx++) { + columns.push(getFormFieldValue(`column_${idx}`, {}, opts)); + } + + $(opts.modal).find('#modal-progress-spinner').show(); + + inventreePut( + opts.url, + { + columns: columns, + rows: data.rows, + }, + { + method: 'POST', + success: function(response) { + handleFormSuccess(response, opts); + + if (options.success) { + options.success(response); + } + }, + error: function(xhr) { + + $(opts.modal).find('#modal-progress-spinner').hide(); + + switch (xhr.status) { + case 400: + handleFormErrors(xhr.responseJSON, fields, opts); + break; + default: + $(opts.modal).modal('hide'); + + console.log(`upload error at ${opts.url}`); + showApiError(xhr, opts.url); + break; + } + } + } + ) + }, + }); +} \ No newline at end of file From fbef0e1ede1a56baafc3506e7c730469d675c89f Mon Sep 17 00:00:00 2001 From: Oliver Date: Wed, 16 Feb 2022 17:18:17 +1100 Subject: [PATCH 06/14] js linting --- InvenTree/templates/js/translated/forms.js | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/InvenTree/templates/js/translated/forms.js b/InvenTree/templates/js/translated/forms.js index 6ab183cc96..9ee3dcace3 100644 --- a/InvenTree/templates/js/translated/forms.js +++ b/InvenTree/templates/js/translated/forms.js @@ -2431,12 +2431,12 @@ function constructHelpText(name, parameters) { function selectImportFields(url, data={}, options={}) { if (!data.model_fields) { - console.log("WARNING: selectImportFields is missing 'model_fields'"); + console.log(`WARNING: selectImportFields is missing 'model_fields'`); return; } if (!data.file_fields) { - console.log("WARNING: selectImportFields is missing 'file_fields'"); + console.log(`WARNING: selectImportFields is missing 'file_fields'`); return; } @@ -2533,7 +2533,7 @@ function selectImportFields(url, data={}, options={}) { } } } - ) + ); }, }); -} \ No newline at end of file +} From 9c8f15fd56d955cd2ac99a83ed89fa99fcac256c Mon Sep 17 00:00:00 2001 From: Oliver Date: Wed, 16 Feb 2022 19:43:14 +1100 Subject: [PATCH 07/14] Fix field name --- InvenTree/InvenTree/serializers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/InvenTree/InvenTree/serializers.py b/InvenTree/InvenTree/serializers.py index 472bab30b9..d527e0433a 100644 --- a/InvenTree/InvenTree/serializers.py +++ b/InvenTree/InvenTree/serializers.py @@ -348,7 +348,7 @@ class DataFileUploadSerializer(serializers.Serializer): class Meta: fields = [ - 'bom_file', + 'data_file', ] data_file = serializers.FileField( From 371af2a34ac9be5bea6422a7b2a28a1b93b34db3 Mon Sep 17 00:00:00 2001 From: Oliver Date: Wed, 16 Feb 2022 22:19:02 +1100 Subject: [PATCH 08/14] unit test fixes --- InvenTree/part/test_bom_import.py | 30 +++++++++++++----------------- 1 file changed, 13 insertions(+), 17 deletions(-) diff --git a/InvenTree/part/test_bom_import.py b/InvenTree/part/test_bom_import.py index ce622ed991..7061da77ec 100644 --- a/InvenTree/part/test_bom_import.py +++ b/InvenTree/part/test_bom_import.py @@ -41,8 +41,6 @@ class BomUploadTest(InvenTreeAPITestCase): assembly=False, ) - self.url = reverse('api-bom-extract') - def post_bom(self, filename, file_data, part=None, clear_existing=None, expected_code=None, content_type='text/plain'): bom_file = SimpleUploadedFile( @@ -58,11 +56,9 @@ class BomUploadTest(InvenTreeAPITestCase): clear_existing = False 
response = self.post( - self.url, + reverse('api-bom-import-upload'), data={ - 'bom_file': bom_file, - 'part': part, - 'clear_existing': clear_existing, + 'data_file': bom_file, }, expected_code=expected_code, format='multipart', @@ -76,14 +72,12 @@ class BomUploadTest(InvenTreeAPITestCase): """ response = self.post( - self.url, + reverse('api-bom-import-upload'), data={}, expected_code=400 ) - self.assertIn('No file was submitted', str(response.data['bom_file'])) - self.assertIn('This field is required', str(response.data['part'])) - self.assertIn('This field is required', str(response.data['clear_existing'])) + self.assertIn('No file was submitted', str(response.data['data_file'])) def test_unsupported_file(self): """ @@ -96,7 +90,7 @@ class BomUploadTest(InvenTreeAPITestCase): expected_code=400, ) - self.assertIn('Unsupported file type', str(response.data['bom_file'])) + self.assertIn('Unsupported file type', str(response.data['data_file'])) def test_broken_file(self): """ @@ -109,7 +103,7 @@ class BomUploadTest(InvenTreeAPITestCase): expected_code=400, ) - self.assertIn('The submitted file is empty', str(response.data['bom_file'])) + self.assertIn('The submitted file is empty', str(response.data['data_file'])) response = self.post_bom( 'test.xls', @@ -118,11 +112,11 @@ class BomUploadTest(InvenTreeAPITestCase): content_type='application/xls', ) - self.assertIn('Unsupported format, or corrupt file', str(response.data['bom_file'])) + self.assertIn('Unsupported format, or corrupt file', str(response.data['data_file'])) - def test_invalid_upload(self): + def test_missing_rows(self): """ - Test upload of an invalid file + Test upload of an invalid file (without data rows) """ dataset = tablib.Dataset() @@ -139,7 +133,7 @@ class BomUploadTest(InvenTreeAPITestCase): expected_code=400, ) - self.assertIn("Missing required column: 'quantity'", str(response.data)) + self.assertIn('No data rows found in file', str(response.data)) # Try again, with an .xlsx file response = self.post_bom( @@ -149,7 +143,9 @@ class BomUploadTest(InvenTreeAPITestCase): expected_code=400, ) - self.assertIn("Missing required column: 'quantity'", str(response.data)) + self.assertIn('No data rows found in file', str(response.data)) + + def test_something(self): # Add the quantity field (or close enough) dataset.headers.append('quAntiTy ') From e298a3adbf6995a2144b4c2fad436bb2ba9bace2 Mon Sep 17 00:00:00 2001 From: Oliver Date: Thu, 17 Feb 2022 11:45:44 +1100 Subject: [PATCH 09/14] Implement unit test for missing columns --- InvenTree/part/test_bom_import.py | 57 +++++++++++++++++++++++-------- 1 file changed, 42 insertions(+), 15 deletions(-) diff --git a/InvenTree/part/test_bom_import.py b/InvenTree/part/test_bom_import.py index 7061da77ec..06ff2a79d0 100644 --- a/InvenTree/part/test_bom_import.py +++ b/InvenTree/part/test_bom_import.py @@ -145,31 +145,58 @@ class BomUploadTest(InvenTreeAPITestCase): self.assertIn('No data rows found in file', str(response.data)) - def test_something(self): + def test_missing_columns(self): + """ + Upload extracted data, but with missing columns + """ - # Add the quantity field (or close enough) - dataset.headers.append('quAntiTy ') + url = reverse('api-bom-import-extract') + + rows = [ + ['1', 'test'], + ['2', 'test'], + ] - response = self.post_bom( - 'test.csv', - bytes(dataset.csv, 'utf8'), - content_type='text/csv', + # Post without columns + response = self.post( + url, + {}, expected_code=400, ) - self.assertIn('No part column found', str(response.data)) + self.assertIn('This 
field is required', str(response.data['rows'])) + self.assertIn('This field is required', str(response.data['columns'])) - dataset.headers.append('part_id') - dataset.headers.append('part_name') + response = self.post( + url, + { + 'rows': rows, + 'columns': ['part', 'reference'], + }, + expected_code=400 + ) - response = self.post_bom( - 'test.csv', - bytes(dataset.csv, 'utf8'), - content_type='text/csv', + self.assertIn("Missing required column: 'quantity'", str(response.data)) + + response = self.post( + url, + { + 'rows': rows, + 'columns': ['quantity', 'reference'], + }, expected_code=400, ) - self.assertIn('No data rows found', str(response.data)) + self.assertIn('No part column specified', str(response.data)) + + response = self.post( + url, + { + 'rows': rows, + 'columns': ['quantity', 'part'], + }, + expected_code=201, + ) def test_invalid_data(self): """ From 47f6b709c91b5202c603fbeae174aa9a4b8ba8c2 Mon Sep 17 00:00:00 2001 From: Oliver Date: Thu, 17 Feb 2022 12:10:48 +1100 Subject: [PATCH 10/14] Improve unit testing --- InvenTree/part/serializers.py | 2 +- InvenTree/part/test_bom_import.py | 34 ++++++++++++++++++------------- 2 files changed, 21 insertions(+), 15 deletions(-) diff --git a/InvenTree/part/serializers.py b/InvenTree/part/serializers.py index 6b0c89ad88..cc489cf5c9 100644 --- a/InvenTree/part/serializers.py +++ b/InvenTree/part/serializers.py @@ -780,7 +780,7 @@ class BomImportExtractSerializer(DataFileExtractSerializer): row['errors']['part'] = _('No matching part found') else: if not part.component: - row['errors']['part'] = _('Part is not designed as a component') + row['errors']['part'] = _('Part is not designated as a component') # Update the 'part' value in the row row['part'] = part.pk if part is not None else None diff --git a/InvenTree/part/test_bom_import.py b/InvenTree/part/test_bom_import.py index 06ff2a79d0..8cf66a183b 100644 --- a/InvenTree/part/test_bom_import.py +++ b/InvenTree/part/test_bom_import.py @@ -218,25 +218,31 @@ class BomUploadTest(InvenTreeAPITestCase): dataset.append([cmp.pk, idx]) - # Add a duplicate part too - dataset.append([components.first().pk, 'invalid']) + url = reverse('api-bom-import-extract') - response = self.post_bom( - 'test.csv', - bytes(dataset.csv, 'utf8'), - content_type='text/csv', - expected_code=201 + response = self.post( + url, + { + 'columns': dataset.headers, + 'rows': [row for row in dataset], + }, ) - errors = response.data['errors'] + rows = response.data['rows'] - self.assertIn('Quantity must be greater than zero', str(errors[0])) - self.assertIn('Part is not designated as a component', str(errors[5])) - self.assertIn('Duplicate part selected', str(errors[-1])) - self.assertIn('Invalid quantity', str(errors[-1])) + # Returned data must be the same as the original dataset + self.assertEqual(len(rows), len(dataset)) - for idx, row in enumerate(response.data['rows'][:-1]): - self.assertEqual(str(row['part']), str(components[idx].pk)) + for idx, row in enumerate(rows): + data = row['data'] + cmp = components[idx] + + # Should have guessed the correct part + data['part'] = cmp.pk + + # Check some specific error messages + self.assertEqual(rows[0]['data']['errors']['quantity'], 'Quantity must be greater than zero') + self.assertEqual(rows[5]['data']['errors']['part'], 'Part is not designated as a component') def test_part_guess(self): """ From 82cfc5423ac628139151e7931f1305c9706224a4 Mon Sep 17 00:00:00 2001 From: Oliver Date: Thu, 17 Feb 2022 13:07:16 +1100 Subject: [PATCH 11/14] Further improvements to unit tests 
--- InvenTree/part/test_bom_import.py | 43 ++++++++++++++++++++++--------- 1 file changed, 31 insertions(+), 12 deletions(-) diff --git a/InvenTree/part/test_bom_import.py b/InvenTree/part/test_bom_import.py index 8cf66a183b..20d434cbf5 100644 --- a/InvenTree/part/test_bom_import.py +++ b/InvenTree/part/test_bom_import.py @@ -151,7 +151,7 @@ class BomUploadTest(InvenTreeAPITestCase): """ url = reverse('api-bom-import-extract') - + rows = [ ['1', 'test'], ['2', 'test'], @@ -262,9 +262,14 @@ class BomUploadTest(InvenTreeAPITestCase): 10, ]) - response = self.post_bom( - 'test.csv', - bytes(dataset.csv, 'utf8'), + url = reverse('api-bom-import-extract') + + response = self.post( + url, + { + 'columns': dataset.headers, + 'rows': [row for row in dataset], + }, expected_code=201, ) @@ -273,7 +278,7 @@ class BomUploadTest(InvenTreeAPITestCase): self.assertEqual(len(rows), 10) for idx in range(10): - self.assertEqual(rows[idx]['part'], components[idx].pk) + self.assertEqual(rows[idx]['data']['part'], components[idx].pk) # Should also be able to 'guess' part by the IPN value dataset = tablib.Dataset() @@ -286,9 +291,12 @@ class BomUploadTest(InvenTreeAPITestCase): 10, ]) - response = self.post_bom( - 'test.csv', - bytes(dataset.csv, 'utf8'), + response = self.post( + url, + { + 'columns': dataset.headers, + 'rows': [row for row in dataset], + }, expected_code=201, ) @@ -297,13 +305,15 @@ class BomUploadTest(InvenTreeAPITestCase): self.assertEqual(len(rows), 10) for idx in range(10): - self.assertEqual(rows[idx]['part'], components[idx].pk) + self.assertEqual(rows[idx]['data']['part'], components[idx].pk) def test_levels(self): """ Test that multi-level BOMs are correctly handled during upload """ + url = reverse('api-bom-import-extract') + dataset = tablib.Dataset() dataset.headers = ['level', 'part', 'quantity'] @@ -317,11 +327,20 @@ class BomUploadTest(InvenTreeAPITestCase): 2, ]) - response = self.post_bom( - 'test.csv', - bytes(dataset.csv, 'utf8'), + response = self.post( + url, + { + 'rows': [row for row in dataset], + 'columns': dataset.headers, + }, expected_code=201, ) + rows = response.data['rows'] + # Only parts at index 1, 4, 7 should have been returned self.assertEqual(len(response.data['rows']), 3) + + self.assertEqual(rows[0]['data']['part'], 3) + self.assertEqual(rows[1]['data']['part'], 6) + self.assertEqual(rows[2]['data']['part'], 9) From 31fd69fc7696ff5e7114bb0f0d88ba4e7d553355 Mon Sep 17 00:00:00 2001 From: Oliver Date: Thu, 17 Feb 2022 14:00:16 +1100 Subject: [PATCH 12/14] Ensure unit tests are more resilient --- InvenTree/part/test_bom_import.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/InvenTree/part/test_bom_import.py b/InvenTree/part/test_bom_import.py index 20d434cbf5..8903660f39 100644 --- a/InvenTree/part/test_bom_import.py +++ b/InvenTree/part/test_bom_import.py @@ -341,6 +341,7 @@ class BomUploadTest(InvenTreeAPITestCase): # Only parts at index 1, 4, 7 should have been returned self.assertEqual(len(response.data['rows']), 3) - self.assertEqual(rows[0]['data']['part'], 3) - self.assertEqual(rows[1]['data']['part'], 6) - self.assertEqual(rows[2]['data']['part'], 9) + # Check the returned PK values + self.assertEqual(rows[0]['data']['part'], components[1].pk) + self.assertEqual(rows[1]['data']['part'], components[4].pk) + self.assertEqual(rows[2]['data']['part'], components[7].pk) From 1b6dacd5bae353db2d44f9ef296f743067485bf1 Mon Sep 17 00:00:00 2001 From: Oliver Date: Thu, 17 Feb 2022 22:48:15 +1100 Subject: [PATCH 13/14] Allow processing 
of "null" cells (caused by xls / xlsx import) --- InvenTree/InvenTree/serializers.py | 1 + 1 file changed, 1 insertion(+) diff --git a/InvenTree/InvenTree/serializers.py b/InvenTree/InvenTree/serializers.py index d527e0433a..98e03fdc0e 100644 --- a/InvenTree/InvenTree/serializers.py +++ b/InvenTree/InvenTree/serializers.py @@ -517,6 +517,7 @@ class DataFileExtractSerializer(serializers.Serializer): child=serializers.ListField( child=serializers.CharField( allow_blank=True, + allow_null=True, ), ) ) From 6e6f9d6c2f3e4a2a43e4c9fe9856c464b4f89dcb Mon Sep 17 00:00:00 2001 From: Oliver Date: Thu, 17 Feb 2022 22:54:02 +1100 Subject: [PATCH 14/14] Reintroduce option to clear (delete) BOM before uploading new data --- InvenTree/part/serializers.py | 29 +++++++++++++++++++ InvenTree/part/templates/part/upload_bom.html | 5 ++++ 2 files changed, 34 insertions(+) diff --git a/InvenTree/part/serializers.py b/InvenTree/part/serializers.py index cc489cf5c9..549b546a5b 100644 --- a/InvenTree/part/serializers.py +++ b/InvenTree/part/serializers.py @@ -716,6 +716,35 @@ class BomImportUploadSerializer(DataFileUploadSerializer): TARGET_MODEL = BomItem + class Meta: + fields = [ + 'data_file', + 'part', + 'clear_existing_bom', + ] + + part = serializers.PrimaryKeyRelatedField( + queryset=Part.objects.all(), + required=True, + allow_null=False, + many=False, + ) + + clear_existing_bom = serializers.BooleanField( + label=_('Clear Existing BOM'), + help_text=_('Delete existing BOM items before uploading') + ) + + def save(self): + + data = self.validated_data + + if data.get('clear_existing_bom', False): + part = data['part'] + + with transaction.atomic(): + part.bom_items.all().delete() + class BomImportExtractSerializer(DataFileExtractSerializer): """ diff --git a/InvenTree/part/templates/part/upload_bom.html b/InvenTree/part/templates/part/upload_bom.html index 1646a9b70c..9db26f7b39 100644 --- a/InvenTree/part/templates/part/upload_bom.html +++ b/InvenTree/part/templates/part/upload_bom.html @@ -81,6 +81,11 @@ $('#bom-upload').click(function() { method: 'POST', fields: { data_file: {}, + part: { + value: {{ part.pk }}, + hidden: true, + }, + clear_existing_bom: {}, }, title: '{% trans "Upload BOM File" %}', onSuccess: function(response) {