Coverage for src/paperap/models/abstract/queryset.py: 74%
304 statements
coverage.py v7.6.12, created at 2025-03-20 13:17 -0400
1"""
2----------------------------------------------------------------------------
4 METADATA:
6 File: queryset.py
7 Project: paperap
8 Created: 2025-03-04
9 Version: 0.0.8
10 Author: Jess Mann
11 Email: jess@jmann.me
12 Copyright (c) 2025 Jess Mann
14----------------------------------------------------------------------------
16 LAST MODIFIED:
18 2025-03-04 By Jess Mann
20"""
22from __future__ import annotations
24import copy
25import logging
26from datetime import datetime
27from string import Template
28from typing import TYPE_CHECKING, Any, Final, Generic, Iterable, Iterator, Optional, Self, TypeAlias, Union, override
30from pydantic import HttpUrl
31from typing_extensions import TypeVar
33from paperap.exceptions import FilterDisabledError, MultipleObjectsFoundError, ObjectNotFoundError
35if TYPE_CHECKING:
36 from paperap.models.abstract.model import BaseModel, StandardModel
37 from paperap.resources.base import BaseResource, StandardResource
39logger = logging.getLogger(__name__)
41# _BaseResource = TypeVar("_BaseResource", bound="BaseResource", default="BaseResource")
44class BaseQuerySet[_Model: BaseModel](Iterable[_Model]):
45 """
46 A lazy-loaded, chainable query interface for Paperless NGX resources.
48 BaseQuerySet provides pagination, filtering, and caching functionality similar to Django's QuerySet.
49 It's designed to be lazy - only fetching data when it's actually needed.
51 Args:
52 resource: The BaseResource instance.
53 filters: Initial filter parameters.
54 _cache: Optional internal result cache.
55 _fetch_all: Whether all results have been fetched.
56 _next_url: URL for the next page of results.
57 _last_response: Optional last response from the API.
58 _iter: Optional iterator for the results.
60 Returns:
61 A new instance of BaseQuerySet.
63 Examples:
64 # Create a QuerySet for documents
65 >>> docs = client.documents()
66 >>> for doc in docs:
67 ... print(doc.id)
68 1
69 2
70 3
72 """
74 resource: "BaseResource[_Model, Self]"
75 filters: dict[str, Any]
76 _last_response: dict[str, Any] | None = None
77 _result_cache: list[_Model] = []
78 _fetch_all: bool = False
79 _next_url: str | None = None
80 _urls_fetched: list[str] = []
81 _iter: Iterator[_Model] | None
83 def __init__(
84 self,
85 resource: "BaseResource[_Model, Self]",
86 filters: Optional[dict[str, Any]] = None,
87 _cache: Optional[list[_Model]] = None,
88 _fetch_all: bool = False,
89 _next_url: str | None = None,
90 _last_response: Optional[dict[str, Any]] = None,
91 _iter: Optional[Iterator[_Model]] = None,
92 _urls_fetched: Optional[list[str]] = None,
93 ) -> None:
94 self.resource = resource
95 self.filters = filters or {}
96 self._result_cache = _cache or []
97 self._fetch_all = _fetch_all
98 self._next_url = _next_url
99 self._urls_fetched = _urls_fetched or []
100 self._last_response = _last_response
101 self._iter = _iter
103 super().__init__()
105 @property
106 def _model(self) -> type[_Model]:
107 """
108 Return the model class associated with the resource.
110 Returns:
111 The model class
113 Examples:
114 # Create a model instance
115 >>> model = queryset._model(**params)
117 """
118 return self.resource.model_class
120 @property
121 def _meta(self) -> "BaseModel.Meta":
122 """
123 Return the model's metadata.
125 Returns:
126 The model's metadata
128 Examples:
129 # Get the model's metadata
130 >>> queryset._meta.read_only_fields
131 {'id', 'added', 'modified'}
133 """
134 return self._model._meta # pyright: ignore[reportPrivateUsage] # pylint: disable=protected-access
136 def _reset(self) -> None:
137 """
138 Reset the QuerySet to its initial state.
140 This clears the result cache and resets the fetch state.
141 """
142 self._result_cache = []
143 self._fetch_all = False
144 self._next_url = None
145 self._urls_fetched = []
146 self._last_response = None
147 self._iter = None
149 def _update_filters(self, values: dict[str, Any]) -> None:
150 """
151 Update the current filters with new values.
153 This updates the current queryset instance. It does not return a new instance. For that reason,
154 do not call this directly. Call filter() or exclude() instead.
156 Args:
157 values: New filter values to add
159 Raises:
160 FilterDisabledError: If a filter is not allowed by the resource
162 Examples:
163 # Update filters with new values
164 queryset._update_filters({"correspondent": 1})
166 # Update filters with multiple values
167 queryset._update_filters({"correspondent": 1, "document_type": 2})
169 """
170 for key, _value in values.items():
171 if not self._meta.filter_allowed(key):
172 raise FilterDisabledError(
173 f"Filtering by {key} for {self.resource.name} does not appear to be supported by the API."
174 )
176 if values:
177 # Reset the cache if filters change
178 self._reset()
179 self.filters.update(**values)
181 def filter(self, **kwargs: Any) -> Self:
182 """
183 Return a new QuerySet with the given filters applied.
185 Args:
186 **kwargs: Filters to apply, where keys are field names and values are desired values.
187 Supports Django-style lookups like field__contains, field__in, etc.
189 Returns:
190 A new QuerySet with the additional filters applied
192 Examples:
193 # Get documents with specific correspondent
194 docs = client.documents.filter(correspondent=1)
196 # Get documents with specific correspondent and document type
197 docs = client.documents.filter(correspondent=1, document_type=2)
199 # Get documents with title containing "invoice"
200 docs = client.documents.filter(title__contains="invoice")
202 # Get documents with IDs in a list
203 docs = client.documents.filter(id__in=[1, 2, 3])
205 """
206 processed_filters = {}
208 for key, value in kwargs.items():
209 # Handle list values for __in lookups
210 if isinstance(value, (list, set, tuple)):
211 # Convert list to comma-separated string for the API
212 processed_value = ",".join(str(item) for item in value)
213 processed_filters[key] = processed_value
214 # Handle boolean values
215 elif isinstance(value, bool):
216 processed_filters[key] = str(value).lower()
217 # Handle normal values
218 else:
219 processed_filters[key] = value
221 return self._chain(filters={**self.filters, **processed_filters})
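# Illustrative sketch of how filter() normalizes values before they are sent to
# the API (uses the same hypothetical `client.documents` resource as the docstring):
#   client.documents.filter(id__in=[1, 2, 3])   # list  -> {"id__in": "1,2,3"}
#   client.documents.filter(is_tagged=True)     # bool  -> {"is_tagged": "true"}
#   client.documents.filter(correspondent=1)    # other -> passed through unchanged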
223 def exclude(self, **kwargs: Any) -> Self:
224 """
225 Return a new QuerySet excluding objects with the given filters.
227 Args:
228 **kwargs: Filters to exclude, where keys are field names and values are excluded values
230 Returns:
231 A new QuerySet excluding objects that match the filters
233 Examples:
234 # Get documents with any correspondent except ID 1
235 docs = client.documents.exclude(correspondent=1)
237 """
238 # Transform each key to its "not" equivalent
239 exclude_filters = {}
240 for key, value in kwargs.items():
241 if "__" in key:
242 field, lookup = key.split("__", 1)
243 # If it already has a "not" prefix, remove it
244 if lookup.startswith("not_"):
245 exclude_filters[f"{field}__{lookup[4:]}"] = value
246 else:
247 exclude_filters[f"{field}__not_{lookup}"] = value
248 else:
249 exclude_filters[f"{key}__not"] = value
251 return self._chain(filters={**self.filters, **exclude_filters})
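# Illustrative sketch of the filter keys exclude() generates (same hypothetical
# `client.documents` resource; a double negation is collapsed back to a positive lookup):
#   client.documents.exclude(correspondent=1)            # -> {"correspondent__not": 1}
#   client.documents.exclude(title__contains="draft")    # -> {"title__not_contains": "draft"}
#   client.documents.exclude(title__not_contains="old")  # -> {"title__contains": "old"}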
253 def get(self, pk: Any) -> _Model:
254 """
255 Retrieve a single object from the API.
257 Raises NotImplementedError. Subclasses may implement this.
259 Args:
260 pk: The primary key (e.g. the id) of the object to retrieve
262 Returns:
263 A single object matching the query
265 Raises:
266 ObjectNotFoundError: If no object or multiple objects are found
267 NotImplementedError: If the method is not implemented by the subclass
269 Examples:
270 # Get document with ID 123
271 doc = client.documents.get(123)
273 """
274 raise NotImplementedError("Getting a single resource is not defined by BaseModels without an id.")
276 def count(self) -> int:
277 """
278 Return the total number of objects in the queryset.
280 Returns:
281 The total count of objects matching the filters
283 Raises:
284 NotImplementedError: If the response does not have a count attribute
286 """
287 # If we have a last response, we can use the "count" field
288 if self._last_response:
289 if (count := self._last_response.get("count")) is not None:
290 return count
291 raise NotImplementedError("Response does not have a count attribute.")
293 # Get one page of results, to populate last response
294 _iter = self._request_iter(params=self.filters)
296 # TODO Hack
297 for _ in _iter:
298 break
300 if not self._last_response:
301 # I don't think this should ever occur, but just in case.
302 raise NotImplementedError("Requested iter, but no last response")
304 if (count := self._last_response.get("count")) is not None:
305 return count
307 # I don't think this should ever occur, but just in case.
308 raise NotImplementedError(
309 f"Unexpected Error: Could not determine count of objects. Last response: {self._last_response}"
310 )
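# Illustrative note: count() (and len() via __len__) needs at most one page of
# results, because the Paperless NGX list response carries a total "count" field:
#   total = client.documents.filter(correspondent=1).count()
#   total = len(client.documents.filter(correspondent=1))  # equivalent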
312 def count_this_page(self) -> int:
313 """
314 Return the number of objects on the current page.
316 Returns:
317 The count of objects on the current page
319 Raises:
320 NotImplementedError: If _last_response is not set
322 """
323 # If we have a last response, we can count it without a new request
324 if self._last_response:
325 results = self._last_response.get("results", [])
326 return len(results)
328 # Get one page of results, to populate last response
329 _iter = self._request_iter(params=self.filters)
331 # TODO Hack
332 for _ in _iter:
333 break
335 if not self._last_response:
336 # I don't think this should ever occur, but just in case.
337 raise NotImplementedError("Requested iter, but no last response")
339 results = self._last_response.get("results", [])
340 return len(results)
342 def all(self) -> Self:
343 """
344 Return a new QuerySet that copies the current one.
346 Returns:
347 A copy of the current BaseQuerySet
349 """
350 return self._chain()
352 def order_by(self, *fields: str) -> Self:
353 """
354 Return a new QuerySet ordered by the specified fields.
356 Args:
357 *fields: Field names to order by. Prefix with '-' for descending order.
359 Returns:
360 A new QuerySet with the ordering applied
362 Examples:
363 # Order documents by title ascending
364 docs = client.documents.order_by('title')
366 # Order documents by added date descending
367 docs = client.documents.order_by('-added')
369 """
370 if not fields:
371 return self
373 # Combine with existing ordering if any
374 ordering = self.filters.get("ordering", [])
375 if isinstance(ordering, str):
376 ordering = [ordering]
377 elif not isinstance(ordering, list):
378 ordering = list(ordering)
380 # Add new ordering fields
381 new_ordering = ordering + list(fields)
383 # Join with commas for API
384 ordering_param = ",".join(new_ordering)
386 return self._chain(filters={**self.filters, "ordering": ordering_param})
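# Illustrative sketch: chained order_by() calls accumulate into one comma-separated
# "ordering" parameter (field names are the same as in the docstring examples):
#   client.documents.order_by("title").order_by("-added")
#   # -> filters include {"ordering": "title,-added"}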
388 def first(self) -> _Model | None:
389 """
390 Return the first object in the QuerySet, or None if empty.
392 Returns:
393 The first object or None if no objects match
395 """
396 if self._result_cache and len(self._result_cache) > 0:
397 return self._result_cache[0]
399 # If not cached, create a copy limited to 1 result
400 results = list(self._chain(filters={**self.filters, "limit": 1}))
401 return results[0] if results else None
403 def last(self) -> _Model | None:
404 """
405 Return the last object in the QuerySet, or None if empty.
407 Note: This requires fetching all results to determine the last one.
409 Returns:
410 The last object or None if no objects match
412 """
413 # If we have all results, we can just return the last one
414 if self._fetch_all:
415 if self._result_cache and len(self._result_cache) > 0:
416 return self._result_cache[-1]
417 return None
419 # We need all results to get the last one
420 self._fetch_all_results()
422 if self._result_cache and len(self._result_cache) > 0:
423 return self._result_cache[-1]
424 return None
426 def exists(self) -> bool:
427 """
428 Return True if the QuerySet contains any results.
430 Returns:
431 True if there are any objects matching the filters
433 """
434 # Check the cache before potentially making a new request
435 if self._fetch_all or self._result_cache:
436 return len(self._result_cache) > 0
438 # Check if there's at least one result
439 return self.first() is not None
441 def none(self) -> Self:
442 """
443 Return an empty QuerySet.
445 Returns:
446 An empty QuerySet
448 """
449 return self._chain(filters={"limit": 0})
451 def filter_field_by_str(self, field: str, value: str, *, exact: bool = True, case_insensitive: bool = True) -> Self:
452 """
453 Filter a queryset based on a given field.
455 This allows subclasses to easily implement custom filter methods.
457 Args:
458 field: The field name to filter by.
459 value: The value to filter against.
460 exact: Whether to filter by an exact match.
461 case_insensitive: Whether the filter should be case-insensitive.
463 Returns:
464 A new QuerySet instance with the filter applied.
466 """
467 if exact:
468 lookup = f"{field}__iexact" if case_insensitive else field
469 else:
470 lookup = f"{field}__icontains" if case_insensitive else f"{field}__contains"
472 return self.filter(**{lookup: value})
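# Minimal sketch of how a subclass might expose a convenience filter built on
# filter_field_by_str (DocumentQuerySet and the title() helper are hypothetical):
#   class DocumentQuerySet(StandardQuerySet["Document"]):
#       def title(self, value: str, *, exact: bool = True) -> "DocumentQuerySet":
#           return self.filter_field_by_str("title", value, exact=exact)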
474 def _fetch_all_results(self) -> None:
475 """
476 Fetch all results from the API and populate the cache.
478 Returns:
479 None
481 """
482 if self._fetch_all:
483 return
485 # Clear existing cache if any
486 self._result_cache = []
488 # Initial fetch
489 iterator = self._request_iter(params=self.filters)
491 # Collect results from initial page
492 # TODO: Consider itertools chain for performance reasons (?)
493 self._result_cache.extend(list(iterator))
self._get_next()  # advance the pagination cursor; _next_url is not updated anywhere else in this method
495 # Fetch additional pages if available
496 while self._last_response and self._next_url:
497 iterator = self._request_iter(url=self._next_url)
498 self._result_cache.extend(list(iterator))
self._get_next()  # move to the following page (or clear _next_url when exhausted)
500 self._fetch_all = True
502 def _request_iter(
503 self, url: str | HttpUrl | Template | None = None, params: Optional[dict[str, Any]] = None
504 ) -> Iterator[_Model]:
505 """
506 Get an iterator of resources.
508 Args:
509 url: The URL to request, if different from the resource's default.
510 params: Query parameters.
512 Returns:
513 An iterator over the resources.
515 Raises:
516 NotImplementedError: If the request cannot be completed.
518 Examples:
519 # Iterate over documents
520 for doc in queryset._request_iter():
521 print(doc)
523 """
524 if not (response := self.resource.request_raw(url=url, params=params)):
525 logger.debug("No response from request.")
526 return
528 self._last_response = response
530 yield from self.resource.handle_response(**response)
532 def _get_next(self, response: dict[str, Any] | None = None) -> str | None:
533 """
534 Get the next url, and adjust our references accordingly.
535 """
536 # Allow passing a different response
537 if response is None:
538 response = self._last_response
540 # Last response is not set
541 if not response or not (next_url := response.get("next")):
542 self._next_url = None
543 return None
545 # For safety, check both instance attributes, even though the first check isn't strictly necessary;
546 # this hopefully future-proofs any changes to the implementation.
547 if next_url == self._next_url or next_url in self._urls_fetched:
548 logger.debug(
549 "Next URL was previously fetched. Stopping iteration. URL: %s, Already Fetched: %s",
550 next_url,
551 self._urls_fetched,
552 )
553 self._next_url = None
554 return None
556 # Cache it
557 self._next_url = next_url
558 self._urls_fetched.append(next_url)
559 return self._next_url
561 def _chain(self, **kwargs: Any) -> Self:
562 """
563 Return a copy of the current BaseQuerySet with updated attributes.
565 Args:
566 **kwargs: Attributes to update in the new BaseQuerySet
568 Returns:
569 A new QuerySet with the updated attributes
571 """
572 # Create a new BaseQuerySet with copied attributes
573 clone = self.__class__(self.resource) # type: ignore # pyright not handling Self correctly
575 # Copy attributes from self
576 clone.filters = copy.deepcopy(self.filters)
577 # Do not copy the cache, _fetch_all, etc., since new filters would invalidate them
579 # Update with provided kwargs
580 for key, value in kwargs.items():
581 if key == "filters" and value:
582 clone._update_filters(value) # pylint: disable=protected-access
583 else:
584 setattr(clone, key, value)
586 return clone
588 @override
589 def __iter__(self) -> Iterator[_Model]:
590 """
591 Iterate over the objects in the QuerySet.
593 Returns:
594 An iterator over the objects
596 """
597 # If we have a fully populated cache, use it
598 if self._fetch_all:
599 yield from self._result_cache
600 return
602 if not self._iter:
603 # Start a new iteration
604 self._iter = self._request_iter(params=self.filters)
606 # Yield objects from the current page
607 for obj in self._iter:
608 self._result_cache.append(obj)
609 yield obj
611 self._get_next()
613 # If there are more pages, keep going
614 count = 0
615 while self._next_url:
616 count += 1
617 self._iter = self._request_iter(url=self._next_url)
619 # Yield objects from the current page
620 for obj in self._iter:
621 self._result_cache.append(obj)
622 yield obj
624 self._get_next()
626 # We've fetched everything
627 self._fetch_all = True
628 self._iter = None
630 def __len__(self) -> int:
631 """
632 Return the number of objects in the QuerySet.
634 Returns:
635 The count of objects
637 """
638 return self.count()
640 def __bool__(self) -> bool:
641 """
642 Return True if the QuerySet has any results.
644 Returns:
645 True if there are any objects matching the filters
647 """
648 return self.exists()
650 def __getitem__(self, key: int | slice) -> _Model | list[_Model]:
651 """
652 Retrieve an item or slice of items from the QuerySet.
654 Args:
655 key: An integer index or slice
657 Returns:
658 A single object or list of objects
660 Raises:
661 IndexError: If the index is out of range
663 """
664 if isinstance(key, slice):
665 # Handle slicing
666 start = key.start if key.start is not None else 0
667 stop = key.stop
669 if start < 0 or (stop is not None and stop < 0):
670 # Negative indexing requires knowing the full size
671 self._fetch_all_results()
672 return self._result_cache[key]
674 # Optimize by using limit/offset if available
675 if start == 0 and stop is not None:
676 # Simple limit
677 clone = self._chain(filters={**self.filters, "limit": stop})
678 results = list(clone)
679 return results
681 if start > 0 and stop is not None:
682 # Limit with offset
683 clone = self._chain(
684 filters={
685 **self.filters,
686 "limit": stop - start,
687 "offset": start,
688 }
689 )
690 results = list(clone)
691 return results
693 if start > 0 and stop is None:
694 # Just offset - return everything from the offset onward
695 clone = self._chain(filters={**self.filters, "offset": start})
696 return list(clone)
699 # Default to fetching all and slicing
700 self._fetch_all_results()
701 return self._result_cache[key]
703 # Handle integer indexing
704 if key < 0:
705 # Negative indexing requires the full result set
706 self._fetch_all_results()
707 return self._result_cache[key]
709 # Positive indexing - we can optimize with limit/offset
710 if len(self._result_cache) > key:
711 # Already have this item cached
712 return self._result_cache[key]
714 # Fetch specific item by position
715 clone = self._chain(filters={**self.filters, "limit": 1, "offset": key})
716 results = list(clone)
717 if not results:
718 raise IndexError(f"BaseQuerySet index {key} out of range")
719 return results[0]
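# Illustrative sketch of how indexing and slicing map onto API parameters for a
# hypothetical `docs` queryset (negative indices always force a full fetch):
#   docs[10]     # -> limit=1, offset=10 (single request, unless already cached)
#   docs[:20]    # -> limit=20
#   docs[20:40]  # -> limit=20, offset=20
#   docs[-1]     # -> fetches all results, then indexes the local cache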
721 def __contains__(self, item: Any) -> bool:
722 """
723 Return True if the QuerySet contains the given object.
725 Args:
726 item: The object to check for
728 Returns:
729 True if the object is in the QuerySet
731 """
732 if not isinstance(item, self._model):
733 return False
735 return any(obj == item for obj in self)
738class StandardQuerySet[_Model: StandardModel](BaseQuerySet[_Model]):
739 """
740 A queryset for StandardModel instances (i.e. BaseModels with standard fields, like id).
742 Returns:
743 A new instance of StandardModel.
745 Raises:
746 ValueError: If resource is not provided.
748 Examples:
749 # Create a StandardModel instance
750 model = StandardModel(id=1)
752 Args:
753 resource: The BaseResource instance.
754 filters: Initial filter parameters.
756 Returns:
757 A new instance of StandardQuerySet.
759 Raises:
760 ObjectNotFoundError: If no object or multiple objects are found.
762 Examples:
763 # Create a StandardQuerySet for documents
764 docs = StandardQuerySet(resource=client.documents)
766 """
768 resource: "StandardResource[_Model, Self]" # type: ignore # pyright is getting inheritance wrong
770 @override
771 def get(self, pk: int) -> _Model:
772 """
773 Retrieve a single object from the API.
775 Args:
776 pk: The ID of the object to retrieve
778 Returns:
779 A single object matching the query
781 Raises:
782 ObjectNotFoundError: If no object or multiple objects are found
784 Examples:
785 # Get document with ID 123
786 doc = client.documents.get(123)
788 """
789 # Attempt to find it in the result cache
790 if self._result_cache:
791 for obj in self._result_cache:
792 if obj.id == pk:
793 return obj
795 # Direct lookup by ID - use the resource's get method
796 return self.resource.get(pk)
798 def id(self, value: int | list[int]) -> Self:
799 """
800 Filter models by ID.
802 Args:
803 value: The ID or list of IDs to filter by
805 Returns:
806 Filtered QuerySet
808 """
809 if isinstance(value, list):
810 return self.filter(id__in=value)
811 return self.filter(id=value)
813 @override
814 def __contains__(self, item: Any) -> bool:
815 """
816 Return True if the QuerySet contains the given object.
818 NOTE: This method only ensures a match by ID, not by full object equality.
819 This is intentional, as the object may be outdated or not fully populated.
821 Args:
822 item: The object or ID to check for
824 Returns:
825 True if the object is in the QuerySet
827 """
828 # Handle integers directly
829 if isinstance(item, int):
830 return any(obj.id == item for obj in self)
832 # Handle model objects that have an id attribute
833 try:
834 if hasattr(item, "id"):
835 return any(obj.id == item.id for obj in self)
836 except (AttributeError, TypeError):
837 pass
839 # For any other type, it's not in the queryset
840 return False
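# Illustrative sketch: membership checks accept either a raw ID or a model
# instance, and both compare by ID only (the queryset below is hypothetical):
#   123 in client.documents.filter(correspondent=1)   # True if any result has id == 123
#   doc in client.documents.filter(correspondent=1)   # True if any result has id == doc.id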
842 def bulk_action(self, action: str, **kwargs: Any) -> dict[str, Any]:
843 """
844 Perform a bulk action on all objects in the queryset.
846 This method fetches all IDs in the queryset and passes them to the resource's bulk_action method.
848 Args:
849 action: The action to perform
850 **kwargs: Additional parameters for the action
852 Returns:
853 The API response
855 Raises:
856 NotImplementedError: If the resource doesn't support bulk actions
858 """
859 if not hasattr(self.resource, "bulk_action"):
860 raise NotImplementedError(f"Resource {self.resource.name} does not support bulk actions")
862 # Fetch all IDs in the queryset
864 # We only need the IDs; requesting just the ID field (if the API supports it) would be a possible optimization
864 ids = [obj.id for obj in self]
866 if not ids:
867 return {"success": True, "count": 0}
869 return self.resource.bulk_action(action, ids, **kwargs)
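# Illustrative usage sketch: the bulk_* helpers below collect the IDs of every
# object matching the current filters and pass them to the resource in a single
# call (assumes the resource implements the corresponding bulk methods):
#   client.documents.filter(correspondent=1).bulk_update(document_type=2)
#   client.documents.filter(title__contains="draft").bulk_delete()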
871 def bulk_delete(self) -> dict[str, Any]:
872 """
873 Delete all objects in the queryset.
875 Returns:
876 The API response
878 """
879 return self.bulk_action("delete")
881 def bulk_update(self, **kwargs: Any) -> dict[str, Any]:
882 """
883 Update all objects in the queryset with the given values.
885 Args:
886 **kwargs: Fields to update
888 Returns:
889 The API response
891 """
892 if not hasattr(self.resource, "bulk_update"):
893 raise NotImplementedError(f"Resource {self.resource.name} does not support bulk updates")
895 # Fetch all IDs in the queryset
896 ids = [obj.id for obj in self]
898 if not ids:
899 return {"success": True, "count": 0}
901 return self.resource.bulk_update(ids, **kwargs)
903 def bulk_assign_tags(self, tag_ids: list[int], remove_existing: bool = False) -> dict[str, Any]:
904 """
905 Assign tags to all objects in the queryset.
907 Args:
908 tag_ids: List of tag IDs to assign
909 remove_existing: If True, remove existing tags before assigning new ones
911 Returns:
912 The API response
914 """
915 if not hasattr(self.resource, "bulk_assign_tags"):
916 raise NotImplementedError(f"Resource {self.resource.name} does not support bulk tag assignment")
918 # Fetch all IDs in the queryset
919 ids = [obj.id for obj in self]
921 if not ids:
922 return {"success": True, "count": 0}
924 return self.resource.bulk_assign_tags(ids, tag_ids, remove_existing)
926 def bulk_assign_correspondent(self, correspondent_id: int) -> dict[str, Any]:
927 """
928 Assign a correspondent to all objects in the queryset.
930 Args:
931 correspondent_id: Correspondent ID to assign
933 Returns:
934 The API response
936 """
937 if not hasattr(self.resource, "bulk_assign_correspondent"):
938 raise NotImplementedError(f"Resource {self.resource.name} does not support bulk correspondent assignment")
940 # Fetch all IDs in the queryset
941 ids = [obj.id for obj in self]
943 if not ids:
944 return {"success": True, "count": 0}
946 return self.resource.bulk_assign_correspondent(ids, correspondent_id)
948 def bulk_assign_document_type(self, document_type_id: int) -> dict[str, Any]:
949 """
950 Assign a document type to all objects in the queryset.
952 Args:
953 document_type_id: Document type ID to assign
955 Returns:
956 The API response
958 """
959 if not hasattr(self.resource, "bulk_assign_document_type"):
960 raise NotImplementedError(f"Resource {self.resource.name} does not support bulk document type assignment")
962 # Fetch all IDs in the queryset
963 ids = [obj.id for obj in self]
965 if not ids:
966 return {"success": True, "count": 0}
968 return self.resource.bulk_assign_document_type(ids, document_type_id)
970 def bulk_assign_storage_path(self, storage_path_id: int) -> dict[str, Any]:
971 """
972 Assign a storage path to all objects in the queryset.
974 Args:
975 storage_path_id: Storage path ID to assign
977 Returns:
978 The API response
980 """
981 if not hasattr(self.resource, "bulk_assign_storage_path"):
982 raise NotImplementedError(f"Resource {self.resource.name} does not support bulk storage path assignment")
984 # Fetch all IDs in the queryset
985 ids = [obj.id for obj in self]
987 if not ids:
988 return {"success": True, "count": 0}
990 return self.resource.bulk_assign_storage_path(ids, storage_path_id)
992 def bulk_assign_owner(self, owner_id: int) -> dict[str, Any]:
993 """
994 Assign an owner to all objects in the queryset.
996 Args:
997 owner_id: Owner ID to assign
999 Returns:
1000 The API response
1002 """
1003 if not hasattr(self.resource, "bulk_assign_owner"):
1004 raise NotImplementedError(f"Resource {self.resource.name} does not support bulk owner assignment")
1006 # Fetch all IDs in the queryset
1007 ids = [obj.id for obj in self]
1009 if not ids:
1010 return {"success": True, "count": 0}
1012 return self.resource.bulk_assign_owner(ids, owner_id)