Mirror of https://github.com/inventree/InvenTree (synced 2024-08-30 18:33:04 +00:00)
POST request now returns extracted data rows (as an array of dicts)
This commit is contained in:
parent 707787d82c
commit 137c9ff2f2
@@ -1541,6 +1541,21 @@ class BomExtract(generics.CreateAPIView):
     queryset = Part.objects.none()
 
     serializer_class = part_serializers.BomExtractSerializer
 
+    def create(self, request, *args, **kwargs):
+        """
+        Custom create function to return the extracted data
+        """
+
+        serializer = self.get_serializer(data=request.data)
+        serializer.is_valid(raise_exception=True)
+        self.perform_create(serializer)
+        headers = self.get_success_headers(serializer.data)
+
+        data = serializer.extract_data()
+
+        return Response(data, status=status.HTTP_201_CREATED, headers=headers)
+
+
 class BomDetail(generics.RetrieveUpdateDestroyAPIView):
     """ API endpoint for detail view of a single BomItem object """
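With this change the endpoint behaves like a standard DRF CreateAPIView, except that the response body is the dictionary built by the serializer's extract_data() method rather than the serializer's own data. A minimal client-side sketch, assuming the requests package, a placeholder token and a hypothetical /api/bom/extract/ route (the URL configuration is not part of this diff):

import requests

# Placeholder URL and token -- neither appears in this commit.
API_URL = "http://localhost:8000/api/bom/extract/"
TOKEN = "your-api-token"

with open("my_bom.csv", "rb") as fh:
    response = requests.post(
        API_URL,
        headers={"Authorization": f"Token {TOKEN}"},
        files={"bom_file": fh},  # matches the serializer's 'bom_file' field
    )

# On success (HTTP 201) the body is the dict returned by extract_data():
# {'rows': [...], 'headers': [...], 'filename': 'my_bom.csv'}
payload = response.json()
for row in payload["rows"]:
    # Row keys are the column headers taken from the uploaded file itself
    print(row)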
@@ -724,6 +724,44 @@ class BomExtractSerializer(serializers.Serializer):
 
     """
 
+    # These columns must be present
+    REQUIRED_COLUMNS = [
+        'quantity',
+    ]
+
+    # We need at least one column to specify a "part"
+    PART_COLUMNS = [
+        'part',
+        'part_id',
+        'part_name',
+        'part_ipn',
+    ]
+
+    # These columns are "optional"
+    OPTIONAL_COLUMNS = [
+        'allow_variants',
+        'inherited',
+        'optional',
+        'overage',
+        'note',
+        'reference',
+    ]
+
+    def find_matching_column(self, col_name, columns):
+
+        # Direct match
+        if col_name in columns:
+            return col_name
+
+        col_name = col_name.lower().strip()
+
+        for col in columns:
+            if col.lower().strip() == col_name:
+                return col
+
+        # No match
+        return None
+
     bom_file = serializers.FileField(
         label=_("BOM File"),
         help_text=_("Select Bill of Materials file"),
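The find_matching_column() helper tries a direct lookup first, then falls back to a case-insensitive, whitespace-trimmed comparison. A standalone sketch of the same matching rule (not the serializer itself), showing how a header such as 'Quantity ' still resolves:

def find_matching_column(col_name, columns):
    # Direct match first
    if col_name in columns:
        return col_name

    # Fall back to a case-insensitive, whitespace-trimmed comparison
    col_name = col_name.lower().strip()

    for col in columns:
        if col.lower().strip() == col_name:
            return col

    # No match
    return None


headers = ['Part Name', 'Quantity ', 'Reference']
print(find_matching_column('quantity', headers))  # -> 'Quantity '
print(find_matching_column('part_id', headers))   # -> None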
@@ -736,6 +774,8 @@ class BomExtractSerializer(serializers.Serializer):
         Perform validation checks on the uploaded BOM file
         """
 
+        self.filename = bom_file.name
+
         name, ext = os.path.splitext(bom_file.name)
 
         # Remove the leading . from the extension
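Storing the filename lets the serializer echo it back to the client, while the extension decides how the raw data is parsed. A small illustration of the extension handling referred to by the comment above, using only the standard library (the lower-casing is illustrative; the diff itself only shows the leading '.' being removed):

import os

name, ext = os.path.splitext("uploaded_bom.XLSX")

# splitext keeps the leading dot, so remove it before comparing formats
ext = ext[1:].lower()

print(name)  # 'uploaded_bom'
print(ext)   # 'xlsx'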
@@ -765,47 +805,9 @@ class BomExtractSerializer(serializers.Serializer):
         # Convert to a tablib dataset (we expect headers)
         self.dataset = tablib.Dataset().load(data, ext, headers=True)
 
-        # These columns must be present
-        required_columns = [
-            'quantity',
-        ]
-
-        # We need at least one column to specify a "part"
-        part_columns = [
-            'part',
-            'part_id',
-            'part_name',
-            'part_ipn',
-        ]
-
-        # These columns are "optional"
-        optional_columns = [
-            'allow_variants',
-            'inherited',
-            'optional',
-            'overage',
-            'note',
-            'reference',
-        ]
-
-        def find_matching_column(col_name, columns):
-
-            # Direct match
-            if col_name in columns:
-                return col_name
-
-            col_name = col_name.lower().strip()
-
-            for col in columns:
-                if col.lower().strip() == col_name:
-                    return col
-
-            # No match
-            return None
-
-        for header in required_columns:
+        for header in self.REQUIRED_COLUMNS:
 
-            match = find_matching_column(header, self.dataset.headers)
+            match = self.find_matching_column(header, self.dataset.headers)
 
             if match is None:
                 raise serializers.ValidationError(_("Missing required column") + f": '{header}'")
@ -814,8 +816,8 @@ class BomExtractSerializer(serializers.Serializer):
|
||||
|
||||
part_match = False
|
||||
|
||||
for col in part_columns:
|
||||
col_match = find_matching_column(col, self.dataset.headers)
|
||||
for col in self.PART_COLUMNS:
|
||||
col_match = self.find_matching_column(col, self.dataset.headers)
|
||||
|
||||
part_column_matches[col] = col_match
|
||||
|
||||
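Taken together, the validation now checks that every required column is present and that at least one part-identifying column can be matched. A compressed, self-contained sketch of those two checks (a plain ValueError stands in for DRF's ValidationError, and the normalisation mirrors find_matching_column above):

REQUIRED_COLUMNS = ['quantity']
PART_COLUMNS = ['part', 'part_id', 'part_name', 'part_ipn']


def validate_headers(headers):
    # Normalise the file's headers the same way find_matching_column does
    normalised = {h.lower().strip(): h for h in headers}

    # Every required column must be present
    for col in REQUIRED_COLUMNS:
        if col not in normalised:
            raise ValueError(f"Missing required column: '{col}'")

    # At least one column must identify the part
    part_column_matches = {col: normalised.get(col) for col in PART_COLUMNS}

    if not any(part_column_matches.values()):
        raise ValueError("No part column found")

    return part_column_matches


print(validate_headers(['Quantity', 'Part_IPN', 'Reference']))
# {'part': None, 'part_id': None, 'part_name': None, 'part_ipn': 'Part_IPN'}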
@@ -827,6 +829,22 @@ class BomExtractSerializer(serializers.Serializer):
 
         return bom_file
 
+    def extract_data(self):
+        """
+        Read individual rows out of the BOM file
+        """
+
+        rows = []
+
+        for row in self.dataset.dict:
+            rows.append(row)
+
+        return {
+            'rows': rows,
+            'headers': self.dataset.headers,
+            'filename': self.filename,
+        }
+
     class Meta:
         fields = [
             'bom_file',
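extract_data() leans on tablib's Dataset.dict property, which yields each data row as a mapping from header name to cell value, so the response carries the rows, the header list and the original filename. A quick standalone sketch of the same pattern, assuming the tablib package is installed:

import tablib

csv_data = (
    "part_name,quantity,reference\n"
    "Resistor 10k,2,R1 R2\n"
    "Capacitor 100n,1,C1\n"
)

# Load the raw text the same way the serializer does: format + headers
dataset = tablib.Dataset().load(csv_data, 'csv', headers=True)

rows = [row for row in dataset.dict]

result = {
    'rows': rows,                # [{'part_name': 'Resistor 10k', 'quantity': '2', ...}, ...]
    'headers': dataset.headers,  # ['part_name', 'quantity', 'reference']
    'filename': 'example.csv',   # the serializer stores the uploaded file's name here
}
print(result)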