Coverage for backend/django/core/auxiliary/views/UploadMSSData.py: 89%

64 statements  

« prev     ^ index     » next       coverage.py v7.10.7, created at 2026-05-13 02:47 +0000

1from rest_framework.response import Response 

2from core.auxiliary.models.DataCell import DataCell 

3from core.auxiliary.models.DataRow import DataRow 

4from drf_spectacular.utils import extend_schema 

5from core.auxiliary.models.DataColumn import DataColumn 

6from rest_framework.decorators import api_view 

7from rest_framework import serializers, status 

8from core.validation import api_view_validate 

9from core.managers import get_flowsheet_access 

10 

class UploadDataSerializer(serializers.Serializer):
    """Payload for the upload_data view.

    The expected request body is e.g.:
    {
        "data": {
            "heater_enthalpy": [1, 2, 3, 4, 5],
            "heater_temperature": [1, 2, 3, 4, 5]
        },
        "flowsheet": 1,
        "scenario": 2
    }
    Every list under "data" represents one column of cell values; all
    columns are expected to have the same length (one entry per row).
    """
    flowsheet = serializers.IntegerField()
    scenario=serializers.IntegerField()
    data = serializers.DictField( # keyed by column name
        child=serializers.ListField( # list of values for that column
            child=serializers.FloatField() # one cell value
        )
    )

27 

28 

@api_view_validate
@extend_schema(request=UploadDataSerializer, responses=None)
@api_view(['POST'])
def upload_data(request) -> Response:
    """Bulk-upload tabular float data into a flowsheet scenario.

    The body must match UploadDataSerializer: a flowsheet id, a scenario
    id, and a mapping of column name -> list of values. Missing
    DataColumn/DataRow records are created, then a DataCell is inserted
    or updated for every (row, column) position.

    Returns:
        200 on success; 400 on a malformed payload; 403 when the caller
        only has read access to the target flowsheet.
    """
    # Validate first so the access check runs against the flowsheet the
    # client is actually writing to. (Previously the check used the
    # "flowsheet" query parameter, which could name a different flowsheet
    # than the one in the request body.)
    try:
        serializer = UploadDataSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        validated_data = serializer.validated_data
        data = validated_data.get('data')
        flowsheet_id = validated_data.get('flowsheet')
        scenario_id = validated_data.get('scenario')
    except Exception as e:
        return Response(status=400, data=f"Invalid csv data: {e}")

    access_state = get_flowsheet_access(request.user, flowsheet_id)
    # NOTE(review): this only rejects read-only sharing; a caller with
    # neither read nor write access falls through. Confirm that
    # get_flowsheet_access (or middleware) rejects the no-access case.
    if access_state.has_read_access and not access_state.has_write_access:
        return Response(
            {"error": "This flowsheet is shared with read-only access."},
            status=status.HTTP_403_FORBIDDEN,
        )

    # Guard against payloads that would previously have crashed with a 500:
    # an empty column mapping, or columns of unequal length.
    if not data:
        return Response(status=400, data="Invalid csv data: no columns supplied")
    if len({len(values) for values in data.values()}) > 1:
        return Response(status=400, data="Invalid csv data: columns have unequal lengths")

    # Step 1: Create any missing data columns for this scenario.
    DataColumn.objects.bulk_create(
        [
            DataColumn(name=name, scenario_id=scenario_id, flowsheet_id=flowsheet_id)
            for name in data
        ],
        ignore_conflicts=True,
    )

    # Step 2: Number of rows (all columns have the same length, checked above).
    num_rows = len(next(iter(data.values())))

    # Step 3: Find which row indices already exist for this scenario.
    existing_indices = set(
        DataRow.objects.filter(scenario_id=scenario_id).values_list("index", flat=True)
    )

    # Step 4: Create the missing data rows.
    new_rows = [
        DataRow(index=i, flowsheet_id=flowsheet_id, scenario_id=scenario_id)
        for i in range(num_rows)
        if i not in existing_indices
    ]
    if new_rows:
        DataRow.objects.bulk_create(new_rows)

    # Re-fetch so newly created rows carry their primary keys.
    data_row_map = {
        row.index: row for row in DataRow.objects.filter(scenario_id=scenario_id)
    }

    # Step 5: Map column names to their (possibly just-created) DataColumn.
    data_columns = DataColumn.objects.filter(scenario_id=scenario_id).prefetch_related("dataCells")
    column_map = {column.name: column for column in data_columns}

    # Step 6: Existing cells keyed by column name, then by row index.
    existing_values = {
        column.name: {cell.data_row.index: cell for cell in column.dataCells.all()}
        for column in data_columns
    }

    # Step 7: Insert or update one DataCell per (row, column). The previous
    # implementation duplicated this inner loop, saving every existing cell
    # twice and scanning all columns twice per row; new cells are now
    # collected and bulk-created, consistent with steps 1 and 4.
    new_cells = []
    for i in range(num_rows):
        data_row = data_row_map[i]
        for column_name, values in data.items():
            value = values[i]
            existing = existing_values.get(column_name, {}).get(i)
            if existing:
                existing.value = value
                existing.save()
            else:
                new_cells.append(
                    DataCell(
                        value=value,
                        data_column=column_map[column_name],
                        data_row=data_row,
                        flowsheet_id=flowsheet_id,
                    )
                )
    if new_cells:
        DataCell.objects.bulk_create(new_cells)

    return Response(status=200, data="Data uploaded successfully")