Merge pull request #3020 from SchrodingersGat/download-unit-tests

Download unit tests
Oliver 2022-05-19 13:34:41 +10:00 committed by GitHub
commit 2ddaa9c04a
10 changed files with 450 additions and 4 deletions

View File

@@ -2,6 +2,11 @@
Helper functions for performing API unit tests
"""
import csv
import io
import re
from django.http.response import StreamingHttpResponse
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from rest_framework.test import APITestCase
@@ -165,3 +170,87 @@ class InvenTreeAPITestCase(APITestCase):
self.assertEqual(response.status_code, expected_code)
return response
def download_file(self, url, data, expected_code=None, expected_fn=None, decode=True):
"""
Download a file from the server, and return an in-memory file
"""
response = self.client.get(url, data=data, format='json')
if expected_code is not None:
self.assertEqual(response.status_code, expected_code)
# Check that the response is of the correct type
if not isinstance(response, StreamingHttpResponse):
raise ValueError("Response is not a StreamingHttpResponse object as expected")
# Extract filename
disposition = response.headers['Content-Disposition']
result = re.search(r'attachment; filename="([\w.]+)"', disposition)
fn = result.groups()[0]
if expected_fn is not None:
self.assertEqual(expected_fn, fn)
if decode:
# Decode data and return as StringIO file object
fo = io.StringIO()
fo.name = fn
fo.write(response.getvalue().decode('UTF-8'))
else:
# Return a BytesIO file object
fo = io.BytesIO()
fo.name = fn
fo.write(response.getvalue())
fo.seek(0)
return fo
def process_csv(self, fo, delimiter=',', required_cols=None, excluded_cols=None, required_rows=None):
"""
Helper function to process and validate a downloaded csv file
"""
# Check that the correct object type has been passed
self.assertTrue(isinstance(fo, io.StringIO))
fo.seek(0)
reader = csv.reader(fo, delimiter=delimiter)
headers = []
rows = []
for idx, row in enumerate(reader):
if idx == 0:
headers = row
else:
rows.append(row)
if required_cols is not None:
for col in required_cols:
self.assertIn(col, headers)
if excluded_cols is not None:
for col in excluded_cols:
self.assertNotIn(col, headers)
if required_rows is not None:
self.assertEqual(len(rows), required_rows)
# Return the file data as a list of dict items, based on the headers
data = []
for row in rows:
entry = {}
for idx, col in enumerate(headers):
entry[col] = row[idx]
data.append(entry)
return data
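
These two helpers are intended to be used together from inside a test case: download_file fetches a list endpoint as a file attachment and returns an in-memory file object, and process_csv validates its structure. A minimal usage sketch follows; the endpoint name, expected filename and column names are hypothetical placeholders for illustration only, not part of this commit:

def test_download_example(self):
    """Sketch of a download test on a subclass of InvenTreeAPITestCase"""
    # Hypothetical list endpoint; any view supporting the 'export' parameter works the same way
    url = reverse('api-example-list')
    with self.download_file(
        url,
        {'export': 'csv'},
        expected_code=200,
        expected_fn='Example.csv',  # illustrative filename
    ) as fo:
        # Validate headers and row count, then inspect individual rows
        data = self.process_csv(
            fo,
            required_cols=['id', 'name'],  # illustrative column names
            excluded_cols=['metadata'],
        )
        for row in data:
            self.assertTrue(row['id'])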

View File

@@ -16,7 +16,7 @@ class BuildResource(ModelResource):
# but we don't for other ones.
# TODO: 2022-05-12 - Need to investigate why this is the case!
-pk = Field(attribute='pk')
+id = Field(attribute='pk')
reference = Field(attribute='reference')
@@ -45,6 +45,7 @@ class BuildResource(ModelResource):
clean_model_instances = True
exclude = [
'lft', 'rght', 'tree_id', 'level',
'metadata',
]

View File

@@ -511,6 +511,50 @@ class BuildTest(BuildAPITest):
self.assertIn('This build output has already been completed', str(response.data))
def test_download_build_orders(self):
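"""Test that the list of Build orders can be downloaded as a CSV file"""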
required_cols = [
'reference',
'status',
'completed',
'batch',
'notes',
'title',
'part',
'part_name',
'id',
'quantity',
]
excluded_cols = [
'lft', 'rght', 'tree_id', 'level',
'metadata',
]
with self.download_file(
reverse('api-build-list'),
{
'export': 'csv',
}
) as fo:
data = self.process_csv(
fo,
required_cols=required_cols,
excluded_cols=excluded_cols,
required_rows=Build.objects.count()
)
for row in data:
build = Build.objects.get(pk=row['id'])
self.assertEqual(str(build.part.pk), row['part'])
self.assertEqual(build.part.full_name, row['part_name'])
self.assertEqual(build.reference, row['reference'])
self.assertEqual(build.title, row['title'])
class BuildAllocationTest(BuildAPITest):
"""

View File

@@ -105,6 +105,9 @@ class PurchaseOrderResource(ModelResource):
model = PurchaseOrder
skip_unchanged = True
clean_model_instances = True
exclude = [
'metadata',
]
class PurchaseOrderLineItemResource(ModelResource):
@@ -147,6 +150,9 @@ class SalesOrderResource(ModelResource):
model = SalesOrder
skip_unchanged = True
clean_model_instances = True
exclude = [
'metadata',
]
class SalesOrderLineItemResource(ModelResource):

View File

@@ -667,9 +667,9 @@ class SalesOrderList(APIDownloadMixin, generics.ListCreateAPIView):
outstanding = str2bool(outstanding)
if outstanding:
-queryset = queryset.filter(status__in=models.SalesOrderStatus.OPEN)
+queryset = queryset.filter(status__in=SalesOrderStatus.OPEN)
else:
-queryset = queryset.exclude(status__in=models.SalesOrderStatus.OPEN)
+queryset = queryset.exclude(status__in=SalesOrderStatus.OPEN)
# Filter by 'overdue' status
overdue = params.get('overdue', None)

View File

@@ -2,6 +2,8 @@
Tests for the Order API
"""
import io
from datetime import datetime, timedelta
from rest_framework import status
@@ -323,6 +325,77 @@ class PurchaseOrderTest(OrderTest):
self.assertEqual(order.get_metadata('yam'), 'yum')
class PurchaseOrderDownloadTest(OrderTest):
"""Unit tests for downloading PurchaseOrder data via the API endpoint"""
required_cols = [
'id',
'line_items',
'description',
'issue_date',
'notes',
'reference',
'status',
'supplier_reference',
]
excluded_cols = [
'metadata',
]
def test_download_wrong_format(self):
"""Incorrect format should default raise an error"""
url = reverse('api-po-list')
with self.assertRaises(ValueError):
self.download_file(
url,
{
'export': 'xyz',
}
)
def test_download_csv(self):
"""Download PurchaseOrder data as .csv"""
with self.download_file(
reverse('api-po-list'),
{
'export': 'csv',
},
expected_code=200,
expected_fn='InvenTree_PurchaseOrders.csv',
) as fo:
data = self.process_csv(
fo,
required_cols=self.required_cols,
excluded_cols=self.excluded_cols,
required_rows=models.PurchaseOrder.objects.count()
)
for row in data:
order = models.PurchaseOrder.objects.get(pk=row['id'])
self.assertEqual(order.description, row['description'])
self.assertEqual(order.reference, row['reference'])
def test_download_line_items(self):
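"""Test that PurchaseOrderLineItem data can be downloaded in xlsx format"""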
with self.download_file(
reverse('api-po-line-list'),
{
'export': 'xlsx',
},
decode=False,
expected_code=200,
expected_fn='InvenTree_PurchaseOrderItems.xlsx',
) as fo:
self.assertTrue(isinstance(fo, io.BytesIO))
class PurchaseOrderReceiveTest(OrderTest):
"""
Unit tests for receiving items against a PurchaseOrder
@@ -908,6 +981,177 @@ class SalesOrderTest(OrderTest):
self.assertEqual(order.get_metadata('xyz'), 'abc')
class SalesOrderLineItemTest(OrderTest):
"""
Tests for the SalesOrderLineItem API
"""
def setUp(self):
super().setUp()
# List of salable parts
parts = Part.objects.filter(salable=True)
# Create a bunch of SalesOrderLineItems for each order
for idx, so in enumerate(models.SalesOrder.objects.all()):
for part in parts:
models.SalesOrderLineItem.objects.create(
order=so,
part=part,
quantity=(idx + 1) * 5,
reference=f"Order {so.reference} - line {idx}",
)
self.url = reverse('api-so-line-list')
def test_so_line_list(self):
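"""Test filtering and pagination of the SalesOrderLineItem list API endpoint"""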
# List *all* lines
response = self.get(
self.url,
{},
expected_code=200,
)
n = models.SalesOrderLineItem.objects.count()
# We should have received *all* lines
self.assertEqual(len(response.data), n)
# List *all* lines, but paginate
response = self.get(
self.url,
{
"limit": 5,
},
expected_code=200,
)
self.assertEqual(response.data['count'], n)
self.assertEqual(len(response.data['results']), 5)
n_orders = models.SalesOrder.objects.count()
n_parts = Part.objects.filter(salable=True).count()
# List by part
for part in Part.objects.filter(salable=True):
response = self.get(
self.url,
{
'part': part.pk,
'limit': 10,
}
)
self.assertEqual(response.data['count'], n_orders)
# List by order
for order in models.SalesOrder.objects.all():
response = self.get(
self.url,
{
'order': order.pk,
'limit': 10,
}
)
self.assertEqual(response.data['count'], n_parts)
class SalesOrderDownloadTest(OrderTest):
"""Unit tests for downloading SalesOrder data via the API endpoint"""
def test_download_fail(self):
"""Test that downloading without the 'export' option fails"""
url = reverse('api-so-list')
with self.assertRaises(ValueError):
self.download_file(url, {}, expected_code=200)
def test_download_xls(self):
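"""Test download of SalesOrder data as an .xls file"""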
url = reverse('api-so-list')
# Download .xls file
with self.download_file(
url,
{
'export': 'xls',
},
expected_code=200,
expected_fn='InvenTree_SalesOrders.xls',
decode=False,
) as fo:
self.assertTrue(isinstance(fo, io.BytesIO))
def test_download_csv(self):
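"""Test download of SalesOrder data in CSV and TSV formats"""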
url = reverse('api-so-list')
required_cols = [
'line_items',
'id',
'reference',
'customer',
'status',
'shipment_date',
'notes',
'description',
]
excluded_cols = [
'metadata'
]
# Download .csv file
with self.download_file(
url,
{
'export': 'csv',
},
expected_code=200,
expected_fn='InvenTree_SalesOrders.csv',
decode=True
) as fo:
data = self.process_csv(
fo,
required_cols=required_cols,
excluded_cols=excluded_cols,
required_rows=models.SalesOrder.objects.count()
)
for line in data:
order = models.SalesOrder.objects.get(pk=line['id'])
self.assertEqual(line['description'], order.description)
self.assertEqual(line['status'], str(order.status))
# Download only outstanding sales orders
with self.download_file(
url,
{
'export': 'tsv',
'outstanding': True,
},
expected_code=200,
expected_fn='InvenTree_SalesOrders.tsv',
decode=True,
) as fo:
self.process_csv(
fo,
required_cols=required_cols,
excluded_cols=excluded_cols,
required_rows=models.SalesOrder.objects.filter(status__in=SalesOrderStatus.OPEN).count(),
delimiter='\t',
)
class SalesOrderAllocateTest(OrderTest):
"""
Unit tests for allocating stock items against a SalesOrder

View File

@@ -45,6 +45,7 @@ class PartResource(ModelResource):
exclude = [
'bom_checksum', 'bom_checked_by', 'bom_checked_date',
'lft', 'rght', 'tree_id', 'level',
'metadata',
]
def get_queryset(self):
@@ -98,6 +99,7 @@ class PartCategoryResource(ModelResource):
exclude = [
# Exclude MPTT internal model fields
'lft', 'rght', 'tree_id', 'level',
'metadata',
]
def after_import(self, dataset, result, using_transactions, dry_run, **kwargs):

View File

@@ -822,6 +822,58 @@ class PartAPITest(InvenTreeAPITestCase):
response = self.get('/api/part/10004/', {})
self.assertEqual(response.data['variant_stock'], 500)
def test_part_download(self):
"""Test download of part data via the API"""
url = reverse('api-part-list')
required_cols = [
'id',
'name',
'description',
'in_stock',
'category_name',
'keywords',
'is_template',
'virtual',
'trackable',
'active',
'notes',
'creation_date',
]
excluded_cols = [
'lft', 'rght', 'level', 'tree_id',
'metadata',
]
with self.download_file(
url,
{
'export': 'csv',
},
expected_fn='InvenTree_Parts.csv',
) as fo:
data = self.process_csv(
fo,
excluded_cols=excluded_cols,
required_cols=required_cols,
required_rows=Part.objects.count(),
)
for row in data:
part = Part.objects.get(pk=row['id'])
if part.IPN:
self.assertEqual(part.IPN, row['IPN'])
self.assertEqual(part.name, row['name'])
self.assertEqual(part.description, row['description'])
if part.category:
self.assertEqual(part.category.name, row['category_name'])
class PartDetailTests(InvenTreeAPITestCase):
"""

View File

@@ -31,6 +31,7 @@ class LocationResource(ModelResource):
exclude = [
# Exclude MPTT internal model fields
'lft', 'rght', 'tree_id', 'level',
'metadata',
]
def after_import(self, dataset, result, using_transactions, dry_run, **kwargs):
@@ -119,7 +120,7 @@ class StockItemResource(ModelResource):
# Exclude MPTT internal model fields
'lft', 'rght', 'tree_id', 'level',
# Exclude internal fields
-'serial_int',
+'serial_int', 'metadata',
]

View File

@@ -344,6 +344,13 @@ class StockItemListTest(StockAPITestCase):
for h in headers:
self.assertIn(h, dataset.headers)
excluded_headers = [
'metadata',
]
for h in excluded_headers:
self.assertNotIn(h, dataset.headers)
# Now, add a filter to the results
dataset = self.export_data({'location': 1})