Coverage for src/paperap/models/abstract/queryset.py: 74%

304 statements  

coverage.py v7.6.12, created at 2025-03-20 13:17 -0400

"""
----------------------------------------------------------------------------

    METADATA:

        File:    queryset.py
        Project: paperap
        Created: 2025-03-04
        Version: 0.0.8
        Author:  Jess Mann
        Email:   jess@jmann.me
        Copyright (c) 2025 Jess Mann

----------------------------------------------------------------------------

    LAST MODIFIED:

        2025-03-04 By Jess Mann

"""

from __future__ import annotations

import copy
import logging
from datetime import datetime
from string import Template
from typing import TYPE_CHECKING, Any, Final, Generic, Iterable, Iterator, Optional, Self, TypeAlias, Union, override

from pydantic import HttpUrl
from typing_extensions import TypeVar

from paperap.exceptions import FilterDisabledError, MultipleObjectsFoundError, ObjectNotFoundError

if TYPE_CHECKING:
    from paperap.models.abstract.model import BaseModel, StandardModel
    from paperap.resources.base import BaseResource, StandardResource

logger = logging.getLogger(__name__)

# _BaseResource = TypeVar("_BaseResource", bound="BaseResource", default="BaseResource")

class BaseQuerySet[_Model: BaseModel](Iterable[_Model]):
    """
    A lazy-loaded, chainable query interface for Paperless NGX resources.

    BaseQuerySet provides pagination, filtering, and caching functionality similar to Django's QuerySet.
    It's designed to be lazy - only fetching data when it's actually needed.

    Args:
        resource: The BaseResource instance.
        filters: Initial filter parameters.
        _cache: Optional internal result cache.
        _fetch_all: Whether all results have been fetched.
        _next_url: URL for the next page of results.
        _last_response: Optional last response from the API.
        _iter: Optional iterator for the results.

    Returns:
        A new instance of BaseQuerySet.

    Examples:
        # Create a QuerySet for documents
        >>> docs = client.documents()
        >>> for doc in docs:
        ...     print(doc.id)
        1
        2
        3

    """

    resource: "BaseResource[_Model, Self]"
    filters: dict[str, Any]
    _last_response: dict[str, Any] | None = None
    # Mutable containers are declared without class-level defaults so instances never
    # share state; __init__ assigns a fresh list to each of these.
    _result_cache: list[_Model]
    _fetch_all: bool = False
    _next_url: str | None = None
    _urls_fetched: list[str]
    _iter: Iterator[_Model] | None
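
    # Illustrative note (not part of the original source): querysets are lazy, so building
    # and chaining filters sends no HTTP requests; data is fetched only on iteration.
    # Assuming a `client.documents` resource:
    #
    #     docs = client.documents.filter(correspondent=1)   # no request yet
    #     recent = docs.order_by("-added")                  # still no request
    #     for doc in recent:                                # first page fetched here
    #         print(doc.id)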

    def __init__(
        self,
        resource: "BaseResource[_Model, Self]",
        filters: Optional[dict[str, Any]] = None,
        _cache: Optional[list[_Model]] = None,
        _fetch_all: bool = False,
        _next_url: str | None = None,
        _last_response: Optional[dict[str, Any]] = None,
        _iter: Optional[Iterator[_Model]] = None,
        _urls_fetched: Optional[list[str]] = None,
    ) -> None:
        self.resource = resource
        self.filters = filters or {}
        self._result_cache = _cache or []
        self._fetch_all = _fetch_all
        self._next_url = _next_url
        self._urls_fetched = _urls_fetched or []
        self._last_response = _last_response
        self._iter = _iter

        super().__init__()

    @property
    def _model(self) -> type[_Model]:
        """
        Return the model class associated with the resource.

        Returns:
            The model class

        Examples:
            # Create a model instance
            >>> model = queryset._model(**params)

        """
        return self.resource.model_class

    @property
    def _meta(self) -> "BaseModel.Meta":
        """
        Return the model's metadata.

        Returns:
            The model's metadata

        Examples:
            # Get the model's metadata
            >>> queryset._meta.read_only_fields
            {'id', 'added', 'modified'}

        """
        return self._model._meta  # pyright: ignore[reportPrivateUsage] # pylint: disable=protected-access

    def _reset(self) -> None:
        """
        Reset the QuerySet to its initial state.

        This clears the result cache and resets the fetch state.
        """
        self._result_cache = []
        self._fetch_all = False
        self._next_url = None
        self._urls_fetched = []
        self._last_response = None
        self._iter = None

    def _update_filters(self, values: dict[str, Any]) -> None:
        """
        Update the current filters with new values.

        This updates the current queryset instance. It does not return a new instance. For that reason,
        do not call this directly. Call filter() or exclude() instead.

        Args:
            values: New filter values to add

        Raises:
            FilterDisabledError: If a filter is not allowed by the resource

        Examples:
            # Update filters with new values
            queryset._update_filters({"correspondent": 1})

            # Update filters with multiple values
            queryset._update_filters({"correspondent": 1, "document_type": 2})

        """
        for key, _value in values.items():
            if not self._meta.filter_allowed(key):
                raise FilterDisabledError(
                    f"Filtering by {key} for {self.resource.name} does not appear to be supported by the API."
                )

        if values:
            # Reset the cache if filters change
            self._reset()
            self.filters.update(**values)

    def filter(self, **kwargs: Any) -> Self:
        """
        Return a new QuerySet with the given filters applied.

        Args:
            **kwargs: Filters to apply, where keys are field names and values are desired values.
                Supports Django-style lookups like field__contains, field__in, etc.

        Returns:
            A new QuerySet with the additional filters applied

        Examples:
            # Get documents with specific correspondent
            docs = client.documents.filter(correspondent=1)

            # Get documents with specific correspondent and document type
            docs = client.documents.filter(correspondent=1, document_type=2)

            # Get documents with title containing "invoice"
            docs = client.documents.filter(title__contains="invoice")

            # Get documents with IDs in a list
            docs = client.documents.filter(id__in=[1, 2, 3])

        """
        processed_filters = {}

        for key, value in kwargs.items():
            # Handle list values for __in lookups
            if isinstance(value, (list, set, tuple)):
                # Convert list to comma-separated string for the API
                processed_value = ",".join(str(item) for item in value)
                processed_filters[key] = processed_value
            # Handle boolean values
            elif isinstance(value, bool):
                processed_filters[key] = str(value).lower()
            # Handle normal values
            else:
                processed_filters[key] = value

        return self._chain(filters={**self.filters, **processed_filters})
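
    # Illustrative note (not part of the original source): filter() normalizes values before
    # they reach the API: lists, sets, and tuples become comma-separated strings, and booleans
    # become lowercase strings. Assuming a `client.documents` resource and a hypothetical
    # boolean filter `is_tagged`:
    #
    #     qs = client.documents.filter(id__in=[1, 2, 3], is_tagged=True)
    #     # resulting query parameters: {"id__in": "1,2,3", "is_tagged": "true"}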

    def exclude(self, **kwargs: Any) -> Self:
        """
        Return a new QuerySet excluding objects with the given filters.

        Args:
            **kwargs: Filters to exclude, where keys are field names and values are excluded values

        Returns:
            A new QuerySet excluding objects that match the filters

        Examples:
            # Get documents with any correspondent except ID 1
            docs = client.documents.exclude(correspondent=1)

        """
        # Transform each key to its "not" equivalent
        exclude_filters = {}
        for key, value in kwargs.items():
            if "__" in key:
                field, lookup = key.split("__", 1)
                # If it already has a "not" prefix, remove it
                if lookup.startswith("not_"):
                    exclude_filters[f"{field}__{lookup[4:]}"] = value
                else:
                    exclude_filters[f"{field}__not_{lookup}"] = value
            else:
                exclude_filters[f"{key}__not"] = value

        return self._chain(filters={**self.filters, **exclude_filters})
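
    # Illustrative note (not part of the original source): exclude() rewrites each lookup
    # into its negated form and applies it as a filter. For example:
    #
    #     client.documents.exclude(title__contains="draft")
    #     # equivalent to client.documents.filter(title__not_contains="draft")
    #
    #     client.documents.exclude(correspondent=1)
    #     # equivalent to client.documents.filter(correspondent__not=1)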

    def get(self, pk: Any) -> _Model:
        """
        Retrieve a single object from the API.

        Raises NotImplementedError. Subclasses may implement this.

        Args:
            pk: The primary key (e.g. the id) of the object to retrieve

        Returns:
            A single object matching the query

        Raises:
            ObjectNotFoundError: If no object or multiple objects are found
            NotImplementedError: If the method is not implemented by the subclass

        Examples:
            # Get document with ID 123
            doc = client.documents.get(123)

        """
        raise NotImplementedError("Getting a single resource is not defined by BaseModels without an id.")

    def count(self) -> int:
        """
        Return the total number of objects in the queryset.

        Returns:
            The total count of objects matching the filters

        Raises:
            NotImplementedError: If the response does not have a count attribute

        """
        # If we have a last response, we can use the "count" field
        if self._last_response:
            if (count := self._last_response.get("count")) is not None:
                return count
            raise NotImplementedError("Response does not have a count attribute.")

        # Get one page of results, to populate last response
        _iter = self._request_iter(params=self.filters)

        # TODO Hack
        for _ in _iter:
            break

        if not self._last_response:
            # I don't think this should ever occur, but just in case.
            raise NotImplementedError("Requested iter, but no last response")

        if (count := self._last_response.get("count")) is not None:
            return count

        # I don't think this should ever occur, but just in case.
        raise NotImplementedError(
            f"Unexpected Error: Could not determine count of objects. Last response: {self._last_response}"
        )
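
    # Illustrative usage (not part of the original source): count() reads the "count" field
    # of the last API response, fetching a single page first if necessary.
    #
    #     invoices = client.documents.filter(title__contains="invoice")
    #     total = invoices.count()   # len(invoices) returns the same value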

    def count_this_page(self) -> int:
        """
        Return the number of objects on the current page.

        Returns:
            The count of objects on the current page

        Raises:
            NotImplementedError: If _last_response cannot be populated by a request

        """
        # If we have a last response, we can count it without a new request
        if self._last_response:
            results = self._last_response.get("results", [])
            return len(results)

        # Get one page of results, to populate last response
        _iter = self._request_iter(params=self.filters)

        # TODO Hack
        for _ in _iter:
            break

        if not self._last_response:
            # I don't think this should ever occur, but just in case.
            raise NotImplementedError("Requested iter, but no last response")

        results = self._last_response.get("results", [])
        return len(results)

    def all(self) -> Self:
        """
        Return a new QuerySet that copies the current one.

        Returns:
            A copy of the current BaseQuerySet

        """
        return self._chain()

    def order_by(self, *fields: str) -> Self:
        """
        Return a new QuerySet ordered by the specified fields.

        Args:
            *fields: Field names to order by. Prefix with '-' for descending order.

        Returns:
            A new QuerySet with the ordering applied

        Examples:
            # Order documents by title ascending
            docs = client.documents.order_by('title')

            # Order documents by added date descending
            docs = client.documents.order_by('-added')

        """
        if not fields:
            return self

        # Combine with existing ordering if any
        ordering = self.filters.get("ordering", [])
        if isinstance(ordering, str):
            ordering = [ordering]
        elif not isinstance(ordering, list):
            ordering = list(ordering)

        # Add new ordering fields
        new_ordering = ordering + list(fields)

        # Join with commas for API
        ordering_param = ",".join(new_ordering)

        return self._chain(filters={**self.filters, "ordering": ordering_param})

    def first(self) -> _Model | None:
        """
        Return the first object in the QuerySet, or None if empty.

        Returns:
            The first object or None if no objects match

        """
        if self._result_cache and len(self._result_cache) > 0:
            return self._result_cache[0]

        # If not cached, create a copy limited to 1 result
        results = list(self._chain(filters={**self.filters, "limit": 1}))
        return results[0] if results else None

    def last(self) -> _Model | None:
        """
        Return the last object in the QuerySet, or None if empty.

        Note: This requires fetching all results to determine the last one.

        Returns:
            The last object or None if no objects match

        """
        # If we have all results, we can just return the last one
        if self._fetch_all:
            if self._result_cache and len(self._result_cache) > 0:
                return self._result_cache[-1]
            return None

        # We need all results to get the last one
        self._fetch_all_results()

        if self._result_cache and len(self._result_cache) > 0:
            return self._result_cache[-1]
        return None

    def exists(self) -> bool:
        """
        Return True if the QuerySet contains any results.

        Returns:
            True if there are any objects matching the filters

        """
        # Check the cache before potentially making a new request
        if self._fetch_all or self._result_cache:
            return len(self._result_cache) > 0

        # Check if there's at least one result
        return self.first() is not None

    def none(self) -> Self:
        """
        Return an empty QuerySet.

        Returns:
            An empty QuerySet

        """
        return self._chain(filters={"limit": 0})
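
    # Illustrative usage (not part of the original source), assuming a `client.documents` resource:
    #
    #     newest = client.documents.order_by("-added").first()   # fetches at most one result
    #     if client.documents.filter(correspondent=1).exists():
    #         ...
    #     empty = client.documents.none()                        # queryset filtered with limit=0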

    def filter_field_by_str(self, field: str, value: str, *, exact: bool = True, case_insensitive: bool = True) -> Self:
        """
        Filter a queryset based on a given field.

        This allows subclasses to easily implement custom filter methods.

        Args:
            field: The field name to filter by.
            value: The value to filter against.
            exact: Whether to filter by an exact match.
            case_insensitive: Whether the filter should be case-insensitive.

        Returns:
            A new QuerySet instance with the filter applied.

        """
        if exact:
            lookup = f"{field}__iexact" if case_insensitive else field
        else:
            lookup = f"{field}__icontains" if case_insensitive else f"{field}__contains"

        return self.filter(**{lookup: value})
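
    # Illustrative usage (not part of the original source): the lookup used depends on the
    # `exact` and `case_insensitive` flags. Assuming a Paperless NGX `title` field:
    #
    #     qs.filter_field_by_str("title", "invoice")                            # title__iexact
    #     qs.filter_field_by_str("title", "invoice", exact=False)               # title__icontains
    #     qs.filter_field_by_str("title", "Invoice", case_insensitive=False)    # title (exact match)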

    def _fetch_all_results(self) -> None:
        """
        Fetch all results from the API and populate the cache.

        Returns:
            None

        """
        if self._fetch_all:
            return

        # Clear existing cache if any
        self._result_cache = []

        # Initial fetch
        iterator = self._request_iter(params=self.filters)

        # Collect results from initial page
        # TODO: Consider itertools chain for performance reasons (?)
        self._result_cache.extend(list(iterator))

        # Advance _next_url based on the response we just received
        self._get_next()

        # Fetch additional pages if available
        while self._last_response and self._next_url:
            iterator = self._request_iter(url=self._next_url)
            self._result_cache.extend(list(iterator))
            self._get_next()

        self._fetch_all = True

    def _request_iter(
        self, url: str | HttpUrl | Template | None = None, params: Optional[dict[str, Any]] = None
    ) -> Iterator[_Model]:
        """
        Get an iterator of resources.

        Args:
            url: The URL to request, if different from the resource's default.
            params: Query parameters.

        Returns:
            An iterator over the resources.

        Raises:
            NotImplementedError: If the request cannot be completed.

        Examples:
            # Iterate over documents
            for doc in queryset._request_iter():
                print(doc)

        """
        if not (response := self.resource.request_raw(url=url, params=params)):
            logger.debug("No response from request.")
            return

        self._last_response = response

        yield from self.resource.handle_response(**response)

    def _get_next(self, response: dict[str, Any] | None = None) -> str | None:
        """
        Get the next URL, and adjust our references accordingly.
        """
        # Allow passing a different response
        if response is None:
            response = self._last_response

        # Last response is not set
        if not response or not (next_url := response.get("next")):
            self._next_url = None
            return None

        # For safety, check both instance attributes; the first check isn't strictly necessary,
        # but it should future-proof changes to the implementation.
        if next_url == self._next_url or next_url in self._urls_fetched:
            logger.debug(
                "Next URL was previously fetched. Stopping iteration. URL: %s, Already Fetched: %s",
                next_url,
                self._urls_fetched,
            )
            self._next_url = None
            return None

        # Cache it
        self._next_url = next_url
        self._urls_fetched.append(next_url)
        return self._next_url

    def _chain(self, **kwargs: Any) -> Self:
        """
        Return a copy of the current BaseQuerySet with updated attributes.

        Args:
            **kwargs: Attributes to update in the new BaseQuerySet

        Returns:
            A new QuerySet with the updated attributes

        """
        # Create a new BaseQuerySet with copied attributes
        clone = self.__class__(self.resource)  # type: ignore # pyright not handling Self correctly

        # Copy attributes from self
        clone.filters = copy.deepcopy(self.filters)
        # Do not copy the cache, _fetch_all, etc., since new filters may invalidate them

        # Update with provided kwargs
        for key, value in kwargs.items():
            if key == "filters" and value:
                clone._update_filters(value)  # pylint: disable=protected-access
            else:
                setattr(clone, key, value)

        return clone

    @override
    def __iter__(self) -> Iterator[_Model]:
        """
        Iterate over the objects in the QuerySet.

        Returns:
            An iterator over the objects

        """
        # If we have a fully populated cache, use it
        if self._fetch_all:
            yield from self._result_cache
            return

        if not self._iter:
            # Start a new iteration
            self._iter = self._request_iter(params=self.filters)

        # Yield objects from the current page
        for obj in self._iter:
            self._result_cache.append(obj)
            yield obj

        self._get_next()

        # If there are more pages, keep going
        while self._next_url:
            self._iter = self._request_iter(url=self._next_url)

            # Yield objects from the current page
            for obj in self._iter:
                self._result_cache.append(obj)
                yield obj

            self._get_next()

        # We've fetched everything
        self._fetch_all = True
        self._iter = None

    def __len__(self) -> int:
        """
        Return the number of objects in the QuerySet.

        Returns:
            The count of objects

        """
        return self.count()

    def __bool__(self) -> bool:
        """
        Return True if the QuerySet has any results.

        Returns:
            True if there are any objects matching the filters

        """
        return self.exists()

    def __getitem__(self, key: int | slice) -> _Model | list[_Model]:
        """
        Retrieve an item or slice of items from the QuerySet.

        Args:
            key: An integer index or slice

        Returns:
            A single object or list of objects

        Raises:
            IndexError: If the index is out of range

        """
        if isinstance(key, slice):
            # Handle slicing
            start = key.start if key.start is not None else 0
            stop = key.stop

            if start < 0 or (stop is not None and stop < 0):
                # Negative indexing requires knowing the full size
                self._fetch_all_results()
                return self._result_cache[key]

            # Optimize by using limit/offset if available
            if start == 0 and stop is not None:
                # Simple limit
                clone = self._chain(filters={**self.filters, "limit": stop})
                results = list(clone)
                return results

            if start > 0 and stop is not None:
                # Limit with offset
                clone = self._chain(
                    filters={
                        **self.filters,
                        "limit": stop - start,
                        "offset": start,
                    }
                )
                results = list(clone)
                return results

            if start > 0 and stop is None:
                # Just offset: fetch everything after the offset via the offset-filtered clone
                clone = self._chain(filters={**self.filters, "offset": start})
                return list(clone)

            # Default to fetching all and slicing
            self._fetch_all_results()
            return self._result_cache[key]

        # Handle integer indexing
        if key < 0:
            # Negative indexing requires the full result set
            self._fetch_all_results()
            return self._result_cache[key]

        # Positive indexing - we can optimize with limit/offset
        if len(self._result_cache) > key:
            # Already have this item cached
            return self._result_cache[key]

        # Fetch specific item by position
        clone = self._chain(filters={**self.filters, "limit": 1, "offset": key})
        results = list(clone)
        if not results:
            raise IndexError(f"BaseQuerySet index {key} out of range")
        return results[0]
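
    # Illustrative usage (not part of the original source): indexing and slicing map onto
    # the API's limit/offset parameters where possible, so only the requested window is fetched.
    #
    #     docs = client.documents.order_by("-added")
    #     latest = docs[0]        # limit=1, offset=0
    #     page = docs[10:20]      # limit=10, offset=10
    #     everything = docs[:]    # fetches and caches all results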

    def __contains__(self, item: Any) -> bool:
        """
        Return True if the QuerySet contains the given object.

        Args:
            item: The object to check for

        Returns:
            True if the object is in the QuerySet

        """
        if not isinstance(item, self._model):
            return False

        return any(obj == item for obj in self)

class StandardQuerySet[_Model: StandardModel](BaseQuerySet[_Model]):
    """
    A queryset for StandardModel instances (i.e. BaseModels with standard fields, like id).

    Args:
        resource: The StandardResource instance.
        filters: Initial filter parameters.

    Returns:
        A new instance of StandardQuerySet.

    Raises:
        ObjectNotFoundError: If no object or multiple objects are found.

    Examples:
        # Create a StandardQuerySet for documents
        docs = StandardQuerySet(resource=client.documents)

    """

    resource: "StandardResource[_Model, Self]"  # type: ignore # pyright is getting inheritance wrong

    @override
    def get(self, pk: int) -> _Model:
        """
        Retrieve a single object from the API.

        Args:
            pk: The ID of the object to retrieve

        Returns:
            A single object matching the query

        Raises:
            ObjectNotFoundError: If no object or multiple objects are found

        Examples:
            # Get document with ID 123
            doc = client.documents.get(123)

        """
        # Attempt to find it in the result cache
        if self._result_cache:
            for obj in self._result_cache:
                if obj.id == pk:
                    return obj

        # Direct lookup by ID - use the resource's get method
        return self.resource.get(pk)

    def id(self, value: int | list[int]) -> Self:
        """
        Filter models by ID.

        Args:
            value: The ID or list of IDs to filter by

        Returns:
            Filtered QuerySet

        """
        if isinstance(value, list):
            return self.filter(id__in=value)
        return self.filter(id=value)
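
    # Illustrative usage (not part of the original source):
    #
    #     client.documents.id(123)          # same as .filter(id=123)
    #     client.documents.id([1, 2, 3])    # same as .filter(id__in=[1, 2, 3])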

    @override
    def __contains__(self, item: Any) -> bool:
        """
        Return True if the QuerySet contains the given object.

        NOTE: This method only ensures a match by ID, not by full object equality.
        This is intentional, as the object may be outdated or not fully populated.

        Args:
            item: The object or ID to check for

        Returns:
            True if the object is in the QuerySet

        """
        # Handle integers directly
        if isinstance(item, int):
            return any(obj.id == item for obj in self)

        # Handle model objects that have an id attribute
        try:
            if hasattr(item, "id"):
                return any(obj.id == item.id for obj in self)
        except (AttributeError, TypeError):
            pass

        # For any other type, it's not in the queryset
        return False
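
    # Illustrative usage (not part of the original source): membership is checked by ID,
    # so an integer or a model instance with a matching `id` both work.
    #
    #     123 in client.documents.filter(correspondent=1)
    #     doc in client.documents.filter(correspondent=1)   # compares doc.id only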

    def bulk_action(self, action: str, **kwargs: Any) -> dict[str, Any]:
        """
        Perform a bulk action on all objects in the queryset.

        This method fetches all IDs in the queryset and passes them to the resource's bulk_action method.

        Args:
            action: The action to perform
            **kwargs: Additional parameters for the action

        Returns:
            The API response

        Raises:
            NotImplementedError: If the resource doesn't support bulk actions

        """
        if not hasattr(self.resource, "bulk_action"):
            raise NotImplementedError(f"Resource {self.resource.name} does not support bulk actions")

        # Fetch all IDs in the queryset
        # We only need IDs, so optimize by requesting just the ID field if possible
        ids = [obj.id for obj in self]

        if not ids:
            return {"success": True, "count": 0}

        return self.resource.bulk_action(action, ids, **kwargs)

    def bulk_delete(self) -> dict[str, Any]:
        """
        Delete all objects in the queryset.

        Returns:
            The API response

        """
        return self.bulk_action("delete")
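
    # Illustrative usage (not part of the original source): bulk helpers collect the IDs of
    # every object matching the current filters and send a single bulk request.
    #
    #     client.documents.filter(correspondent=1).bulk_delete()
    #     client.documents.filter(title__contains="draft").bulk_action("delete")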

    def bulk_update(self, **kwargs: Any) -> dict[str, Any]:
        """
        Update all objects in the queryset with the given values.

        Args:
            **kwargs: Fields to update

        Returns:
            The API response

        Raises:
            NotImplementedError: If the resource doesn't support bulk updates

        """
        if not hasattr(self.resource, "bulk_update"):
            raise NotImplementedError(f"Resource {self.resource.name} does not support bulk updates")

        # Fetch all IDs in the queryset
        ids = [obj.id for obj in self]

        if not ids:
            return {"success": True, "count": 0}

        return self.resource.bulk_update(ids, **kwargs)
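
    # Illustrative usage (not part of the original source), assuming the documents resource
    # supports bulk updates:
    #
    #     client.documents.filter(correspondent=1).bulk_update(document_type=2)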

    def bulk_assign_tags(self, tag_ids: list[int], remove_existing: bool = False) -> dict[str, Any]:
        """
        Assign tags to all objects in the queryset.

        Args:
            tag_ids: List of tag IDs to assign
            remove_existing: If True, remove existing tags before assigning new ones

        Returns:
            The API response

        """
        if not hasattr(self.resource, "bulk_assign_tags"):
            raise NotImplementedError(f"Resource {self.resource.name} does not support bulk tag assignment")

        # Fetch all IDs in the queryset
        ids = [obj.id for obj in self]

        if not ids:
            return {"success": True, "count": 0}

        return self.resource.bulk_assign_tags(ids, tag_ids, remove_existing)
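
    # Illustrative usage (not part of the original source): tag IDs 5 and 7 are placeholders.
    #
    #     client.documents.filter(correspondent=1).bulk_assign_tags([5, 7])
    #     client.documents.filter(correspondent=1).bulk_assign_tags([5], remove_existing=True)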

    def bulk_assign_correspondent(self, correspondent_id: int) -> dict[str, Any]:
        """
        Assign a correspondent to all objects in the queryset.

        Args:
            correspondent_id: Correspondent ID to assign

        Returns:
            The API response

        """
        if not hasattr(self.resource, "bulk_assign_correspondent"):
            raise NotImplementedError(f"Resource {self.resource.name} does not support bulk correspondent assignment")

        # Fetch all IDs in the queryset
        ids = [obj.id for obj in self]

        if not ids:
            return {"success": True, "count": 0}

        return self.resource.bulk_assign_correspondent(ids, correspondent_id)
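
    # Illustrative usage (not part of the original source): the same pattern applies to the
    # bulk_assign_document_type, bulk_assign_storage_path, and bulk_assign_owner helpers below.
    #
    #     client.documents.filter(title__contains="invoice").bulk_assign_correspondent(3)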

    def bulk_assign_document_type(self, document_type_id: int) -> dict[str, Any]:
        """
        Assign a document type to all objects in the queryset.

        Args:
            document_type_id: Document type ID to assign

        Returns:
            The API response

        """
        if not hasattr(self.resource, "bulk_assign_document_type"):
            raise NotImplementedError(f"Resource {self.resource.name} does not support bulk document type assignment")

        # Fetch all IDs in the queryset
        ids = [obj.id for obj in self]

        if not ids:
            return {"success": True, "count": 0}

        return self.resource.bulk_assign_document_type(ids, document_type_id)

    def bulk_assign_storage_path(self, storage_path_id: int) -> dict[str, Any]:
        """
        Assign a storage path to all objects in the queryset.

        Args:
            storage_path_id: Storage path ID to assign

        Returns:
            The API response

        """
        if not hasattr(self.resource, "bulk_assign_storage_path"):
            raise NotImplementedError(f"Resource {self.resource.name} does not support bulk storage path assignment")

        # Fetch all IDs in the queryset
        ids = [obj.id for obj in self]

        if not ids:
            return {"success": True, "count": 0}

        return self.resource.bulk_assign_storage_path(ids, storage_path_id)

    def bulk_assign_owner(self, owner_id: int) -> dict[str, Any]:
        """
        Assign an owner to all objects in the queryset.

        Args:
            owner_id: Owner ID to assign

        Returns:
            The API response

        """
        if not hasattr(self.resource, "bulk_assign_owner"):
            raise NotImplementedError(f"Resource {self.resource.name} does not support bulk owner assignment")

        # Fetch all IDs in the queryset
        ids = [obj.id for obj in self]

        if not ids:
            return {"success": True, "count": 0}

        return self.resource.bulk_assign_owner(ids, owner_id)