Merge pull request #2634 from SchrodingersGat/match-fields

Match fields
Oliver 2022-02-18 08:33:48 +11:00 committed by GitHub
commit 85ab8b5098
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
10 changed files with 789 additions and 388 deletions

View File

@@ -45,6 +45,62 @@ def rename_attachment(instance, filename):
return os.path.join(instance.getSubdir(), filename)
class DataImportMixin(object):
"""
Model mixin class which provides support for 'data import' functionality.
Models which implement this mixin should provide information on the fields available for import
"""
# Define a map of fields available for import
IMPORT_FIELDS = {}
@classmethod
def get_import_fields(cls):
"""
Return all available import fields
Where information on a particular field is not explicitly provided,
introspect the base model to (attempt to) find that information.
"""
fields = cls.IMPORT_FIELDS
for name, field in fields.items():
# Attempt to extract base field information from the model
base_field = None
for f in cls._meta.fields:
if f.name == name:
base_field = f
break
if base_field:
if 'label' not in field:
field['label'] = base_field.verbose_name
if 'help_text' not in field:
field['help_text'] = base_field.help_text
fields[name] = field
return fields
@classmethod
def get_required_import_fields(cls):
""" Return all *required* import fields """
fields = {}
for name, field in cls.get_import_fields().items():
required = field.get('required', False)
if required:
fields[name] = field
return fields
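A usage sketch (the Widget model below is hypothetical; in this commit only BomItem adopts the mixin): a model declares IMPORT_FIELDS, and get_import_fields() back-fills 'label' and 'help_text' by introspecting the matching model fields.

    from django.db import models
    from InvenTree.models import DataImportMixin

    class Widget(models.Model, DataImportMixin):
        # Only 'quantity' is mandatory for import
        IMPORT_FIELDS = {
            'quantity': {'required': True},
            'notes': {},
        }

        quantity = models.IntegerField(verbose_name='Quantity', help_text='Number of widgets')
        notes = models.CharField(max_length=250, blank=True)

    # Widget.get_import_fields()
    # -> {'quantity': {'required': True, 'label': 'Quantity', 'help_text': 'Number of widgets'},
    #     'notes': {'label': 'notes', 'help_text': ''}}
    # Widget.get_required_import_fields() returns only the 'quantity' entry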
class ReferenceIndexingMixin(models.Model):
"""
A mixin for keeping track of numerical copies of the "reference" field.

View File

@@ -5,8 +5,8 @@ Serializers used in various InvenTree apps
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import tablib
from decimal import Decimal
@@ -332,3 +332,309 @@ class InvenTreeDecimalField(serializers.FloatField):
return Decimal(str(data))
except Exception:
raise serializers.ValidationError(_("Invalid value"))
class DataFileUploadSerializer(serializers.Serializer):
"""
Generic serializer for uploading a data file, and extracting a dataset.
- Validates uploaded file
- Extracts column names
- Extracts data rows
"""
# Implementing class should register a target model (database model) to be used for import
TARGET_MODEL = None
class Meta:
fields = [
'data_file',
]
data_file = serializers.FileField(
label=_("Data File"),
help_text=_("Select data file for upload"),
required=True,
allow_empty_file=False,
)
def validate_data_file(self, data_file):
"""
Perform validation checks on the uploaded data file.
"""
self.filename = data_file.name
name, ext = os.path.splitext(data_file.name)
# Remove the leading . from the extension
ext = ext[1:]
accepted_file_types = [
'xls', 'xlsx',
'csv', 'tsv',
'xml',
]
if ext not in accepted_file_types:
raise serializers.ValidationError(_("Unsupported file type"))
# Impose a 50MB limit on uploaded data files
max_upload_file_size = 50 * 1024 * 1024
if data_file.size > max_upload_file_size:
raise serializers.ValidationError(_("File is too large"))
# Read file data into memory (bytes object)
try:
data = data_file.read()
except Exception as e:
raise serializers.ValidationError(str(e))
if ext in ['csv', 'tsv', 'xml']:
try:
data = data.decode()
except Exception as e:
raise serializers.ValidationError(str(e))
# Convert to a tablib dataset (we expect headers)
try:
self.dataset = tablib.Dataset().load(data, ext, headers=True)
except Exception as e:
raise serializers.ValidationError(str(e))
if len(self.dataset.headers) == 0:
raise serializers.ValidationError(_("No columns found in file"))
if len(self.dataset) == 0:
raise serializers.ValidationError(_("No data rows found in file"))
return data_file
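For reference, a standalone sketch of the tablib call this validator relies on (data values are illustrative):

    import tablib

    # A small CSV document with a header row, as the serializer expects
    csv_data = "quantity,part\n5,101\n2,102\n"

    dataset = tablib.Dataset().load(csv_data, 'csv', headers=True)

    print(dataset.headers)  # ['quantity', 'part']
    print(dataset.dict[0])  # {'quantity': '5', 'part': '101'}
    print(len(dataset))     # 2 (data rows, excluding the header)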
def match_column(self, column_name, field_names, exact=False):
"""
Attempt to match a column name (from the file) to a field (defined in the model)
Order of matching is:
- Direct match
- Case insensitive match
- Fuzzy match
"""
column_name = column_name.strip()
column_name_lower = column_name.lower()
if column_name in field_names:
return column_name
for field_name in field_names:
if field_name.lower() == column_name_lower:
return field_name
if exact:
# No exact match found, and only exact matching was requested
return None
# TODO: Fuzzy pattern matching for column names
# No matches found
return None
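Illustratively, for model fields ['quantity', 'reference'] the matching order works out as follows (the serializer instance and column names are hypothetical):

    serializer.match_column('quantity', ['quantity', 'reference'])         # -> 'quantity' (direct match)
    serializer.match_column(' QUANTITY ', ['quantity', 'reference'])       # -> 'quantity' (stripped, case-insensitive)
    serializer.match_column('qty', ['quantity', 'reference'], exact=True)  # -> None (fuzzy matching is a TODO)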
def extract_data(self):
"""
Returns dataset extracted from the file
"""
# Provide a dict of available import fields for the model
model_fields = {}
# Keep track of columns we have already extracted
matched_columns = set()
if self.TARGET_MODEL:
try:
model_fields = self.TARGET_MODEL.get_import_fields()
except Exception:
pass
# Extract a list of valid model field names
model_field_names = list(model_fields.keys())
# Provide a dict of available columns from the dataset
file_columns = {}
for header in self.dataset.headers:
column = {}
# Attempt to "match" file columns to model fields
match = self.match_column(header, model_field_names, exact=True)
if match is not None and match not in matched_columns:
matched_columns.add(match)
column['value'] = match
else:
column['value'] = None
file_columns[header] = column
return {
'file_fields': file_columns,
'model_fields': model_fields,
'rows': [row.values() for row in self.dataset.dict],
'filename': self.filename,
}
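As a concrete illustration (filename, headers and values are hypothetical), uploading a CSV with columns 'Quantity' and 'Vendor' against TARGET_MODEL = BomItem would produce something like:

    {
        'file_fields': {
            'Quantity': {'value': 'quantity'},  # matched (case-insensitively) to a model field
            'Vendor': {'value': None},          # no matching model field
        },
        'model_fields': {
            'quantity': {'required': True, 'label': 'Quantity', 'help_text': '...'},
            # ... the remaining BomItem.IMPORT_FIELDS entries
        },
        'rows': [['5', 'Acme'], ['2', 'Initech']],
        'filename': 'bom.csv',
    }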
def save(self):
...
class DataFileExtractSerializer(serializers.Serializer):
"""
Generic serializer for extracting data from an imported dataset.
- User provides an array of matched headers
- User provides an array of raw data rows
"""
# Implementing class should register a target model (database model) to be used for import
TARGET_MODEL = None
class Meta:
fields = [
'columns',
'rows',
]
# Mapping of columns
columns = serializers.ListField(
child=serializers.CharField(
allow_blank=True,
),
)
rows = serializers.ListField(
child=serializers.ListField(
child=serializers.CharField(
allow_blank=True,
allow_null=True,
),
)
)
def validate(self, data):
data = super().validate(data)
self.columns = data.get('columns', [])
self.rows = data.get('rows', [])
if len(self.rows) == 0:
raise serializers.ValidationError(_("No data rows provided"))
if len(self.columns) == 0:
raise serializers.ValidationError(_("No data columns supplied"))
self.validate_extracted_columns()
return data
@property
def data(self):
# Ensure model_fields is always defined, even if no TARGET_MODEL is registered
model_fields = {}
if self.TARGET_MODEL:
try:
model_fields = self.TARGET_MODEL.get_import_fields()
except Exception:
pass
rows = []
for row in self.rows:
"""
Optionally pre-process each row, before sending back to the client
"""
processed_row = self.process_row(self.row_to_dict(row))
if processed_row:
rows.append({
"original": row,
"data": processed_row,
})
return {
'fields': model_fields,
'columns': self.columns,
'rows': rows,
}
def process_row(self, row):
"""
Process a 'row' of data, which is a mapped column:value dict
Returns either a mapped column:value dict, or None.
If the function returns None, the column is ignored!
"""
# Default implementation simply returns the original row data
return row
def row_to_dict(self, row):
"""
Convert a "row" to a named data dict
"""
row_dict = {
'errors': {},
}
for idx, value in enumerate(row):
if idx < len(self.columns):
col = self.columns[idx]
if col:
row_dict[col] = value
return row_dict
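For example, with self.columns = ['quantity', '', 'part'] (the empty string marking an unmapped file column), a raw row converts as follows (values hypothetical):

    serializer.row_to_dict(['5', 'ignored', '101'])
    # -> {'errors': {}, 'quantity': '5', 'part': '101'}
    # The middle value is dropped, since its column was not mapped to any field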
def validate_extracted_columns(self):
"""
Perform custom validation of header mapping.
"""
# Ensure model_fields is always defined, even if no TARGET_MODEL is registered
model_fields = {}
if self.TARGET_MODEL:
try:
model_fields = self.TARGET_MODEL.get_import_fields()
except Exception:
pass
cols_seen = set()
for name, field in model_fields.items():
required = field.get('required', False)
# Check for missing required columns
if required:
if name not in self.columns:
raise serializers.ValidationError(_("Missing required column") + f": '{name}'")
for col in self.columns:
if not col:
continue
# Check for duplicated columns
if col in cols_seen:
raise serializers.ValidationError(_("Duplicate column") + f": '{col}'")
cols_seen.add(col)
def save(self):
"""
No "save" action for this serializer
"""
...

View File

@@ -12,11 +12,14 @@ import common.models
INVENTREE_SW_VERSION = "0.6.0 dev"
# InvenTree API version
INVENTREE_API_VERSION = 25
INVENTREE_API_VERSION = 26
"""
Increment this API version number whenever there is a significant change to the API that any clients need to know about
v26 -> 2022-02-17
- Adds API endpoint for uploading a BOM file and extracting data
v25 -> 2022-02-17
- Adds ability to filter "part" list endpoint by "in_bom_for" argument

View File

@@ -1550,13 +1550,15 @@ class BomList(generics.ListCreateAPIView):
]
class BomExtract(generics.CreateAPIView):
class BomImportUpload(generics.CreateAPIView):
"""
API endpoint for extracting BOM data from a BOM file.
API endpoint for uploading a complete Bill of Materials.
It is assumed that the BOM has been extracted from a file using the BomExtract endpoint.
"""
queryset = Part.objects.none()
serializer_class = part_serializers.BomExtractSerializer
queryset = Part.objects.all()
serializer_class = part_serializers.BomImportUploadSerializer
def create(self, request, *args, **kwargs):
"""
@@ -1573,15 +1575,22 @@ class BomExtract(generics.CreateAPIView):
return Response(data, status=status.HTTP_201_CREATED, headers=headers)
class BomUpload(generics.CreateAPIView):
class BomImportExtract(generics.CreateAPIView):
"""
API endpoint for uploading a complete Bill of Materials.
It is assumed that the BOM has been extracted from a file using the BomExtract endpoint.
API endpoint for extracting BOM data from a BOM file.
"""
queryset = Part.objects.all()
serializer_class = part_serializers.BomUploadSerializer
queryset = Part.objects.none()
serializer_class = part_serializers.BomImportExtractSerializer
class BomImportSubmit(generics.CreateAPIView):
"""
API endpoint for submitting BOM data from a BOM file
"""
queryset = BomItem.objects.none()
serializer_class = part_serializers.BomImportSubmitSerializer
class BomDetail(generics.RetrieveUpdateDestroyAPIView):
@@ -1736,9 +1745,10 @@ bom_api_urls = [
url(r'^.*$', BomDetail.as_view(), name='api-bom-item-detail'),
])),
url(r'^extract/', BomExtract.as_view(), name='api-bom-extract'),
url(r'^upload/', BomUpload.as_view(), name='api-bom-upload'),
# API endpoint URLs for importing BOM data
url(r'^import/upload/', BomImportUpload.as_view(), name='api-bom-import-upload'),
url(r'^import/extract/', BomImportExtract.as_view(), name='api-bom-import-extract'),
url(r'^import/submit/', BomImportSubmit.as_view(), name='api-bom-import-submit'),
# Catch-all
url(r'^.*$', BomList.as_view(), name='api-bom-list'),
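Together these three endpoints form a sequential import flow. A rough client-side sketch (server URL, credentials and file content are placeholders; the exact submit payload is defined by BomImportSubmitSerializer, which this diff truncates):

    import requests

    API = "http://localhost:8000/api/bom"
    auth = ("username", "password")  # placeholder credentials

    # 1: upload the raw file; the response describes file columns, model fields and raw rows
    with open("bom.csv", "rb") as f:
        upload = requests.post(f"{API}/import/upload/", files={"data_file": f}, auth=auth).json()

    # 2: extract; send back one mapped column name per file column, plus the raw rows
    extract = requests.post(
        f"{API}/import/extract/",
        json={"columns": ["quantity", "part"], "rows": upload["rows"]},
        auth=auth,
    ).json()

    # 3: POST the (possibly user-corrected) rows to f"{API}/import/submit/" in the
    #    format expected by BomImportSubmitSerializer (not shown in full in this diff)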

View File

@@ -46,7 +46,7 @@ from common.models import InvenTreeSetting
from InvenTree import helpers
from InvenTree import validators
from InvenTree.models import InvenTreeTree, InvenTreeAttachment
from InvenTree.models import InvenTreeTree, InvenTreeAttachment, DataImportMixin
from InvenTree.fields import InvenTreeURLField
from InvenTree.helpers import decimal2string, normalize, decimal2money
import InvenTree.tasks
@@ -2580,7 +2580,7 @@ class PartCategoryParameterTemplate(models.Model):
help_text=_('Default Parameter Value'))
class BomItem(models.Model):
class BomItem(models.Model, DataImportMixin):
""" A BomItem links a part to its component items.
A part can have a BOM (bill of materials) which defines
which parts are required (and in what quantity) to make it.
@@ -2598,6 +2598,39 @@ class BomItem(models.Model):
allow_variants: Stock for part variants can be substituted for this BomItem
"""
# Fields available for bulk import
IMPORT_FIELDS = {
'quantity': {
'required': True
},
'reference': {},
'overage': {},
'allow_variants': {},
'inherited': {},
'optional': {},
'note': {},
'part': {
'label': _('Part'),
'help_text': _('Part ID or part name'),
},
'part_id': {
'label': _('Part ID'),
'help_text': _('Unique part ID value')
},
'part_name': {
'label': _('Part Name'),
'help_text': _('Part name'),
},
'part_ipn': {
'label': _('Part IPN'),
'help_text': _('Part IPN value'),
},
'level': {
'label': _('Level'),
'help_text': _('BOM level'),
}
}
@staticmethod
def get_api_url():
return reverse('api-bom-list')

View File

@@ -4,8 +4,6 @@ JSON serializers for Part app
import imghdr
from decimal import Decimal
import os
import tablib
from django.urls import reverse_lazy
from django.db import models, transaction
@@ -17,7 +15,9 @@ from rest_framework import serializers
from sql_util.utils import SubqueryCount, SubquerySum
from djmoney.contrib.django_rest_framework import MoneyField
from InvenTree.serializers import (InvenTreeAttachmentSerializerField,
from InvenTree.serializers import (DataFileUploadSerializer,
DataFileExtractSerializer,
InvenTreeAttachmentSerializerField,
InvenTreeDecimalField,
InvenTreeImageSerializerField,
InvenTreeModelSerializer,
@@ -709,307 +709,129 @@ class PartCopyBOMSerializer(serializers.Serializer):
)
class BomExtractSerializer(serializers.Serializer):
class BomImportUploadSerializer(DataFileUploadSerializer):
"""
Serializer for uploading a file and extracting data from it.
Note: 2022-02-04 - This needs a *serious* refactor in future, probably
When parsing the file, the following things happen:
a) Check file format and validity
b) Look for "required" fields
c) Look for "part" fields - used to "infer" part
Once the file itself has been validated, we iterate through each data row:
- If the "level" column is provided, ignore anything below level 1
- Try to "guess" the part based on part_id / part_name / part_ipn
- Extract other fields as required
"""
TARGET_MODEL = BomItem
class Meta:
fields = [
'bom_file',
'data_file',
'part',
'clear_existing',
'clear_existing_bom',
]
# These columns must be present
REQUIRED_COLUMNS = [
'quantity',
]
# We need at least one column to specify a "part"
PART_COLUMNS = [
'part',
'part_id',
'part_name',
'part_ipn',
]
# These columns are "optional"
OPTIONAL_COLUMNS = [
'allow_variants',
'inherited',
'optional',
'overage',
'note',
'reference',
]
def find_matching_column(self, col_name, columns):
# Direct match
if col_name in columns:
return col_name
col_name = col_name.lower().strip()
for col in columns:
if col.lower().strip() == col_name:
return col
# No match
return None
def find_matching_data(self, row, col_name, columns):
"""
Extract data from the row, based on the "expected" column name
"""
col_name = self.find_matching_column(col_name, columns)
return row.get(col_name, None)
bom_file = serializers.FileField(
label=_("BOM File"),
help_text=_("Select Bill of Materials file"),
part = serializers.PrimaryKeyRelatedField(
queryset=Part.objects.all(),
required=True,
allow_empty_file=False,
allow_null=False,
many=False,
)
def validate_bom_file(self, bom_file):
"""
Perform validation checks on the uploaded BOM file
"""
self.filename = bom_file.name
name, ext = os.path.splitext(bom_file.name)
# Remove the leading . from the extension
ext = ext[1:]
accepted_file_types = [
'xls', 'xlsx',
'csv', 'tsv',
'xml',
]
if ext not in accepted_file_types:
raise serializers.ValidationError(_("Unsupported file type"))
# Impose a 50MB limit on uploaded BOM files
max_upload_file_size = 50 * 1024 * 1024
if bom_file.size > max_upload_file_size:
raise serializers.ValidationError(_("File is too large"))
# Read file data into memory (bytes object)
try:
data = bom_file.read()
except Exception as e:
raise serializers.ValidationError(str(e))
if ext in ['csv', 'tsv', 'xml']:
try:
data = data.decode()
except Exception as e:
raise serializers.ValidationError(str(e))
# Convert to a tablib dataset (we expect headers)
try:
self.dataset = tablib.Dataset().load(data, ext, headers=True)
except Exception as e:
raise serializers.ValidationError(str(e))
for header in self.REQUIRED_COLUMNS:
match = self.find_matching_column(header, self.dataset.headers)
if match is None:
raise serializers.ValidationError(_("Missing required column") + f": '{header}'")
part_column_matches = {}
part_match = False
for col in self.PART_COLUMNS:
col_match = self.find_matching_column(col, self.dataset.headers)
part_column_matches[col] = col_match
if col_match is not None:
part_match = True
if not part_match:
raise serializers.ValidationError(_("No part column found"))
if len(self.dataset) == 0:
raise serializers.ValidationError(_("No data rows found"))
return bom_file
def extract_data(self):
"""
Read individual rows out of the BOM file
"""
rows = []
errors = []
found_parts = set()
headers = self.dataset.headers
level_column = self.find_matching_column('level', headers)
for row in self.dataset.dict:
row_error = {}
"""
If the "level" column is specified, and this is not a top-level BOM item, ignore the row!
"""
if level_column is not None:
level = row.get('level', None)
if level is not None:
try:
level = int(level)
if level != 1:
continue
except:
pass
"""
Next, we try to "guess" the part, based on the provided data.
A) If the part_id is supplied, use that!
B) If the part name and/or part_ipn are supplied, maybe we can use those?
"""
part_id = self.find_matching_data(row, 'part_id', headers)
part_name = self.find_matching_data(row, 'part_name', headers)
part_ipn = self.find_matching_data(row, 'part_ipn', headers)
part = None
if part_id is not None:
try:
part = Part.objects.get(pk=part_id)
except (ValueError, Part.DoesNotExist):
pass
# Optionally, specify using field "part"
if part is None:
pk = self.find_matching_data(row, 'part', headers)
if pk is not None:
try:
part = Part.objects.get(pk=pk)
except (ValueError, Part.DoesNotExist):
pass
if part is None:
if part_name or part_ipn:
queryset = Part.objects.all()
if part_name:
queryset = queryset.filter(name=part_name)
if part_ipn:
queryset = queryset.filter(IPN=part_ipn)
# Only if we have a single direct match
if queryset.exists():
if queryset.count() == 1:
part = queryset.first()
else:
# Multiple matches!
row_error['part'] = _('Multiple matching parts found')
if part is None:
if 'part' not in row_error:
row_error['part'] = _('No matching part found')
else:
if part.pk in found_parts:
row_error['part'] = _("Duplicate part selected")
elif not part.component:
row_error['part'] = _('Part is not designated as a component')
found_parts.add(part.pk)
row['part'] = part.pk if part is not None else None
"""
Read out the 'quantity' column - check that it is valid
"""
quantity = self.find_matching_data(row, 'quantity', self.dataset.headers)
# Ensure quantity field is provided
row['quantity'] = quantity
if quantity is None:
row_error['quantity'] = _('Quantity not provided')
else:
try:
quantity = Decimal(quantity)
if quantity <= 0:
row_error['quantity'] = _('Quantity must be greater than zero')
except:
row_error['quantity'] = _('Invalid quantity')
# For each "optional" column, ensure the column names are allocated correctly
for field_name in self.OPTIONAL_COLUMNS:
if field_name not in row:
row[field_name] = self.find_matching_data(row, field_name, self.dataset.headers)
rows.append(row)
errors.append(row_error)
return {
'rows': rows,
'errors': errors,
'headers': headers,
'filename': self.filename,
}
part = serializers.PrimaryKeyRelatedField(queryset=Part.objects.filter(assembly=True), required=True)
clear_existing = serializers.BooleanField(
label=_("Clear Existing BOM"),
help_text=_("Delete existing BOM data first"),
clear_existing_bom = serializers.BooleanField(
label=_('Clear Existing BOM'),
help_text=_('Delete existing BOM items before uploading')
)
def save(self):
data = self.validated_data
master_part = data['part']
clear_existing = data['clear_existing']
if data.get('clear_existing_bom', False):
part = data['part']
if clear_existing:
# Remove all existing BOM items
master_part.bom_items.all().delete()
with transaction.atomic():
part.bom_items.all().delete()
class BomUploadSerializer(serializers.Serializer):
class BomImportExtractSerializer(DataFileExtractSerializer):
"""
"""
TARGET_MODEL = BomItem
def validate_extracted_columns(self):
super().validate_extracted_columns()
part_columns = ['part', 'part_name', 'part_ipn', 'part_id']
if not any([col in self.columns for col in part_columns]):
# At least one part column is required!
raise serializers.ValidationError(_("No part column specified"))
def process_row(self, row):
# Skip any rows which are at a lower "level"
level = row.get('level', None)
if level is not None:
try:
level = int(level)
if level != 1:
# Skip this row
return None
except Exception:
pass
# Attempt to extract a valid part based on the provided data
part_id = row.get('part_id', row.get('part', None))
part_name = row.get('part_name', row.get('part', None))
part_ipn = row.get('part_ipn', None)
part = None
if part_id is not None:
try:
part = Part.objects.get(pk=part_id)
except (ValueError, Part.DoesNotExist):
pass
# No direct match, where else can we look?
if part is None:
if part_name or part_ipn:
queryset = Part.objects.all()
if part_name:
queryset = queryset.filter(name=part_name)
if part_ipn:
queryset = queryset.filter(IPN=part_ipn)
if queryset.exists():
if queryset.count() == 1:
part = queryset.first()
else:
row['errors']['part'] = _('Multiple matching parts found')
if part is None:
row['errors']['part'] = _('No matching part found')
else:
if not part.component:
row['errors']['part'] = _('Part is not designated as a component')
# Update the 'part' value in the row
row['part'] = part.pk if part is not None else None
# Check the provided 'quantity' value
quantity = row.get('quantity', None)
if quantity is None:
row['errors']['quantity'] = _('Quantity not provided')
else:
try:
quantity = Decimal(quantity)
if quantity <= 0:
row['errors']['quantity'] = _('Quantity must be greater than zero')
except Exception:
row['errors']['quantity'] = _('Invalid quantity')
return row
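To illustrate process_row (pk values are hypothetical, and assume no matching Part exists for the second row): a row below level 1 is dropped outright, while a top-level row comes back annotated with any validation errors:

    serializer.process_row({'errors': {}, 'level': '2', 'part': '10', 'quantity': '1'})
    # -> None (sub-assembly rows are skipped)

    serializer.process_row({'errors': {}, 'level': '1', 'part': '9999', 'quantity': '0'})
    # -> {'errors': {'part': 'No matching part found',
    #                'quantity': 'Quantity must be greater than zero'},
    #     'level': '1', 'part': None, 'quantity': '0'}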
class BomImportSubmitSerializer(serializers.Serializer):
"""
Serializer for uploading a BOM against a specified part.

View File

@@ -77,15 +77,15 @@ $('#bom-template-download').click(function() {
$('#bom-upload').click(function() {
constructForm('{% url "api-bom-extract" %}', {
constructForm('{% url "api-bom-import-upload" %}', {
method: 'POST',
fields: {
bom_file: {},
data_file: {},
part: {
value: {{ part.pk }},
hidden: true,
},
clear_existing: {},
clear_existing_bom: {},
},
title: '{% trans "Upload BOM File" %}',
onSuccess: function(response) {
@@ -93,16 +93,24 @@ $('#bom-upload').click(function() {
// Clear existing entries from the table
$('.bom-import-row').remove();
// Disable the "submit" button
$('#bom-submit').show();
selectImportFields(
'{% url "api-bom-import-extract" %}',
response,
{
success: function(response) {
constructBomUploadTable(response);
constructBomUploadTable(response);
// Show the "submit" button
$('#bom-submit').show();
$('#bom-submit').click(function() {
submitBomTable({{ part.pk }}, {
bom_data: response,
});
});
$('#bom-submit').click(function() {
submitBomTable({{ part.pk }}, {
bom_data: response,
});
});
}
}
);
}
});

View File

@@ -41,8 +41,6 @@ class BomUploadTest(InvenTreeAPITestCase):
assembly=False,
)
self.url = reverse('api-bom-extract')
def post_bom(self, filename, file_data, part=None, clear_existing=None, expected_code=None, content_type='text/plain'):
bom_file = SimpleUploadedFile(
@@ -58,11 +56,9 @@ class BomUploadTest(InvenTreeAPITestCase):
clear_existing = False
response = self.post(
self.url,
reverse('api-bom-import-upload'),
data={
'bom_file': bom_file,
'part': part,
'clear_existing': clear_existing,
'data_file': bom_file,
},
expected_code=expected_code,
format='multipart',
@@ -76,14 +72,12 @@ class BomUploadTest(InvenTreeAPITestCase):
"""
response = self.post(
self.url,
reverse('api-bom-import-upload'),
data={},
expected_code=400
)
self.assertIn('No file was submitted', str(response.data['bom_file']))
self.assertIn('This field is required', str(response.data['part']))
self.assertIn('This field is required', str(response.data['clear_existing']))
self.assertIn('No file was submitted', str(response.data['data_file']))
def test_unsupported_file(self):
"""
@@ -96,7 +90,7 @@ class BomUploadTest(InvenTreeAPITestCase):
expected_code=400,
)
self.assertIn('Unsupported file type', str(response.data['bom_file']))
self.assertIn('Unsupported file type', str(response.data['data_file']))
def test_broken_file(self):
"""
@@ -109,7 +103,7 @@ class BomUploadTest(InvenTreeAPITestCase):
expected_code=400,
)
self.assertIn('The submitted file is empty', str(response.data['bom_file']))
self.assertIn('The submitted file is empty', str(response.data['data_file']))
response = self.post_bom(
'test.xls',
@@ -118,11 +112,11 @@ class BomUploadTest(InvenTreeAPITestCase):
content_type='application/xls',
)
self.assertIn('Unsupported format, or corrupt file', str(response.data['bom_file']))
self.assertIn('Unsupported format, or corrupt file', str(response.data['data_file']))
def test_invalid_upload(self):
def test_missing_rows(self):
"""
Test upload of an invalid file
Test upload of an invalid file (without data rows)
"""
dataset = tablib.Dataset()
@@ -139,7 +133,7 @@ class BomUploadTest(InvenTreeAPITestCase):
expected_code=400,
)
self.assertIn("Missing required column: 'quantity'", str(response.data))
self.assertIn('No data rows found in file', str(response.data))
# Try again, with an .xlsx file
response = self.post_bom(
@@ -149,32 +143,61 @@ class BomUploadTest(InvenTreeAPITestCase):
expected_code=400,
)
self.assertIn('No data rows found in file', str(response.data))
def test_missing_columns(self):
"""
Upload extracted data, but with missing columns
"""
url = reverse('api-bom-import-extract')
rows = [
['1', 'test'],
['2', 'test'],
]
# Post without columns
response = self.post(
url,
{},
expected_code=400,
)
self.assertIn('This field is required', str(response.data['rows']))
self.assertIn('This field is required', str(response.data['columns']))
response = self.post(
url,
{
'rows': rows,
'columns': ['part', 'reference'],
},
expected_code=400
)
self.assertIn("Missing required column: 'quantity'", str(response.data))
# Add the quantity field (or close enough)
dataset.headers.append('quAntiTy ')
response = self.post_bom(
'test.csv',
bytes(dataset.csv, 'utf8'),
content_type='text/csv',
response = self.post(
url,
{
'rows': rows,
'columns': ['quantity', 'reference'],
},
expected_code=400,
)
self.assertIn('No part column found', str(response.data))
self.assertIn('No part column specified', str(response.data))
dataset.headers.append('part_id')
dataset.headers.append('part_name')
response = self.post_bom(
'test.csv',
bytes(dataset.csv, 'utf8'),
content_type='text/csv',
expected_code=400,
response = self.post(
url,
{
'rows': rows,
'columns': ['quantity', 'part'],
},
expected_code=201,
)
self.assertIn('No data rows found', str(response.data))
def test_invalid_data(self):
"""
Upload data which contains errors
@@ -195,25 +218,31 @@ class BomUploadTest(InvenTreeAPITestCase):
dataset.append([cmp.pk, idx])
# Add a duplicate part too
dataset.append([components.first().pk, 'invalid'])
url = reverse('api-bom-import-extract')
response = self.post_bom(
'test.csv',
bytes(dataset.csv, 'utf8'),
content_type='text/csv',
expected_code=201
response = self.post(
url,
{
'columns': dataset.headers,
'rows': [row for row in dataset],
},
)
errors = response.data['errors']
rows = response.data['rows']
self.assertIn('Quantity must be greater than zero', str(errors[0]))
self.assertIn('Part is not designated as a component', str(errors[5]))
self.assertIn('Duplicate part selected', str(errors[-1]))
self.assertIn('Invalid quantity', str(errors[-1]))
# Returned data must be the same as the original dataset
self.assertEqual(len(rows), len(dataset))
for idx, row in enumerate(response.data['rows'][:-1]):
self.assertEqual(str(row['part']), str(components[idx].pk))
for idx, row in enumerate(rows):
data = row['data']
cmp = components[idx]
# Should have guessed the correct part
self.assertEqual(data['part'], cmp.pk)
# Check some specific error messages
self.assertEqual(rows[0]['data']['errors']['quantity'], 'Quantity must be greater than zero')
self.assertEqual(rows[5]['data']['errors']['part'], 'Part is not designated as a component')
def test_part_guess(self):
"""
@@ -233,9 +262,14 @@ class BomUploadTest(InvenTreeAPITestCase):
10,
])
response = self.post_bom(
'test.csv',
bytes(dataset.csv, 'utf8'),
url = reverse('api-bom-import-extract')
response = self.post(
url,
{
'columns': dataset.headers,
'rows': [row for row in dataset],
},
expected_code=201,
)
@@ -244,7 +278,7 @@ class BomUploadTest(InvenTreeAPITestCase):
self.assertEqual(len(rows), 10)
for idx in range(10):
self.assertEqual(rows[idx]['part'], components[idx].pk)
self.assertEqual(rows[idx]['data']['part'], components[idx].pk)
# Should also be able to 'guess' part by the IPN value
dataset = tablib.Dataset()
@@ -257,9 +291,12 @@ class BomUploadTest(InvenTreeAPITestCase):
10,
])
response = self.post_bom(
'test.csv',
bytes(dataset.csv, 'utf8'),
response = self.post(
url,
{
'columns': dataset.headers,
'rows': [row for row in dataset],
},
expected_code=201,
)
@@ -268,13 +305,15 @@ class BomUploadTest(InvenTreeAPITestCase):
self.assertEqual(len(rows), 10)
for idx in range(10):
self.assertEqual(rows[idx]['part'], components[idx].pk)
self.assertEqual(rows[idx]['data']['part'], components[idx].pk)
def test_levels(self):
"""
Test that multi-level BOMs are correctly handled during upload
"""
url = reverse('api-bom-import-extract')
dataset = tablib.Dataset()
dataset.headers = ['level', 'part', 'quantity']
@@ -288,11 +327,21 @@ class BomUploadTest(InvenTreeAPITestCase):
2,
])
response = self.post_bom(
'test.csv',
bytes(dataset.csv, 'utf8'),
response = self.post(
url,
{
'rows': [row for row in dataset],
'columns': dataset.headers,
},
expected_code=201,
)
rows = response.data['rows']
# Only parts at index 1, 4, 7 should have been returned
self.assertEqual(len(response.data['rows']), 3)
# Check the returned PK values
self.assertEqual(rows[0]['data']['part'], components[1].pk)
self.assertEqual(rows[1]['data']['part'], components[4].pk)
self.assertEqual(rows[2]['data']['part'], components[7].pk)

View File

@@ -40,12 +40,6 @@ function constructBomUploadTable(data, options={}) {
function constructRow(row, idx, fields) {
// Construct an individual row from the provided data
var errors = {};
if (data.errors && data.errors.length > idx) {
errors = data.errors[idx];
}
var field_options = {
hideLabels: true,
hideClearButton: true,
@@ -60,7 +54,7 @@ function constructBomUploadTable(data, options={}) {
return `Cannot render field '${field_name}'`;
}
field.value = row[field_name];
field.value = row.data[field_name];
return constructField(`items_${field_name}_${idx}`, field, field_options);
@@ -99,19 +93,19 @@ function constructBomUploadTable(data, options={}) {
$('#bom-import-table tbody').append(html);
// Handle any errors raised by initial data import
if (errors.part) {
addFieldErrorMessage(`items_sub_part_${idx}`, errors.part);
if (row.data.errors.part) {
addFieldErrorMessage(`items_sub_part_${idx}`, row.data.errors.part);
}
if (errors.quantity) {
addFieldErrorMessage(`items_quantity_${idx}`, errors.quantity);
if (row.data.errors.quantity) {
addFieldErrorMessage(`items_quantity_${idx}`, row.data.errors.quantity);
}
// Initialize the "part" selector for this row
initializeRelatedField(
{
name: `items_sub_part_${idx}`,
value: row.part,
value: row.data.part,
api_url: '{% url "api-part-list" %}',
filters: {
component: true,
@@ -140,7 +134,12 @@ function constructBomUploadTable(data, options={}) {
});
// Prettify the original import data
var pretty = JSON.stringify(row, undefined, 4);
var pretty = JSON.stringify(
{
columns: data.columns,
row: row.original,
}, undefined, 4
);
var html = `
<div class='alert alert-block'>
@@ -176,7 +175,7 @@ function submitBomTable(part_id, options={}) {
var idx_values = [];
var url = '{% url "api-bom-upload" %}';
var url = '{% url "api-bom-import-submit" %}';
$('.bom-import-row').each(function() {
var idx = $(this).attr('idx');

View File

@@ -31,6 +31,7 @@
setFormInputPlaceholder,
setFormGroupVisibility,
showFormInput,
selectImportFields,
*/
/**
@@ -895,8 +896,8 @@ function getFormFieldValue(name, field={}, options={}) {
// Find the HTML element
var el = getFormFieldElement(name, options);
if (!el) {
console.log(`ERROR: getFormFieldValue could not locate field '{name}'`);
if (!el.exists()) {
console.log(`ERROR: getFormFieldValue could not locate field '${name}'`);
return null;
}
@@ -1219,7 +1220,7 @@ function addFieldErrorMessage(name, error_text, error_idx=0, options={}) {
field_dom.append(error_html);
} else {
console.log(`WARNING: addFieldErrorMessage could not locate field '${field_name}`);
console.log(`WARNING: addFieldErrorMessage could not locate field '${field_name}'`);
}
}
@@ -2080,7 +2081,7 @@ function constructLabel(name, parameters) {
* - parameters: Field parameters returned by the OPTIONS method
*
*/
function constructInput(name, parameters, options) {
function constructInput(name, parameters, options={}) {
var html = '';
@@ -2422,3 +2423,117 @@ function constructHelpText(name, parameters) {
return html;
}
/*
* Construct a dialog to select import fields
*/
function selectImportFields(url, data={}, options={}) {
if (!data.model_fields) {
console.log(`WARNING: selectImportFields is missing 'model_fields'`);
return;
}
if (!data.file_fields) {
console.log(`WARNING: selectImportFields is missing 'file_fields'`);
return;
}
var choices = [];
// Add an "empty" value
choices.push({
value: '',
display_name: '-----',
});
for (const [name, field] of Object.entries(data.model_fields)) {
choices.push({
value: name,
display_name: field.label || name,
});
}
var rows = '';
var field_names = Object.keys(data.file_fields);
for (var idx = 0; idx < field_names.length; idx++) {
var field_name = field_names[idx];
var choice_input = constructInput(
`column_${idx}`,
{
type: 'choice',
label: field_name,
value: data.file_fields[field_name].value,
choices: choices,
}
);
rows += `<tr><td><em>${field_name}</em></td><td>${choice_input}</td></tr>`;
}
var headers = `<tr><th>{% trans "File Column" %}</th><th>{% trans "Field Name" %}</th></tr>`;
var html = '';
if (options.preamble) {
html += options.preamble;
}
html += `<table class='table table-condensed'>${headers}${rows}</table>`;
constructForm(url, {
method: 'POST',
title: '{% trans "Select Columns" %}',
fields: {},
preFormContent: html,
onSubmit: function(fields, opts) {
var columns = [];
for (var idx = 0; idx < field_names.length; idx++) {
columns.push(getFormFieldValue(`column_${idx}`, {}, opts));
}
$(opts.modal).find('#modal-progress-spinner').show();
inventreePut(
opts.url,
{
columns: columns,
rows: data.rows,
},
{
method: 'POST',
success: function(response) {
handleFormSuccess(response, opts);
if (options.success) {
options.success(response);
}
},
error: function(xhr) {
$(opts.modal).find('#modal-progress-spinner').hide();
switch (xhr.status) {
case 400:
handleFormErrors(xhr.responseJSON, fields, opts);
break;
default:
$(opts.modal).modal('hide');
console.log(`upload error at ${opts.url}`);
showApiError(xhr, opts.url);
break;
}
}
}
);
},
});
}