Hide keyboard shortcuts

Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1# -*- coding: utf-8 -*- 

2"""Task implementation: request context and the task base class.""" 

3from __future__ import absolute_import, unicode_literals 

4 

5import sys 

6 

7from billiard.einfo import ExceptionInfo 

8from kombu import serialization 

9from kombu.exceptions import OperationalError 

10from kombu.utils.uuid import uuid 

11 

12from celery import current_app, group, states 

13from celery._state import _task_stack 

14from celery.canvas import signature 

15from celery.exceptions import (Ignore, ImproperlyConfigured, 

16 MaxRetriesExceededError, Reject, Retry) 

17from celery.five import items, python_2_unicode_compatible 

18from celery.local import class_property 

19from celery.result import EagerResult, denied_join_result 

20from celery.utils import abstract 

21from celery.utils.functional import mattrgetter, maybe_list 

22from celery.utils.imports import instantiate 

23from celery.utils.nodenames import gethostname 

24from celery.utils.serialization import raise_with_context 

25from .annotations import resolve_all as resolve_all_annotations 

26from .registry import _unpickle_task_v2 

27from .utils import appstr 

28 

__all__ = ('Context', 'Task')

#: extracts attributes related to publishing a message from an object.
extract_exec_options = mattrgetter(
    'queue', 'routing_key', 'exchange', 'priority', 'expires',
    'serializer', 'delivery_mode', 'compression', 'time_limit',
    'soft_time_limit', 'immediate', 'mandatory',  # imm+man is deprecated
)

# We take __repr__ very seriously around here ;)
# Format templates for task class/instance representations; ``{0}`` is the
# task (class or instance), ``app``/``flags`` are supplied by _reprtask.
R_BOUND_TASK = '<class {0.__name__} of {app}{flags}>'
R_UNBOUND_TASK = '<unbound {0.__name__}{flags}>'
R_INSTANCE = '<@task: {0.name} of {app}{flags}>'

#: Here for backwards compatibility as tasks no longer use a custom meta-class.
TaskType = type

45 

46 

47def _strflags(flags, default=''): 

48 if flags: 

49 return ' ({0})'.format(', '.join(flags)) 

50 return default 

51 

52 

def _reprtask(task, fmt=None, flags=None):
    """Return a text representation of *task* (class or instance).

    Arguments:
        task: Task class or instance; must expose ``__v2_compat__``
            and ``_app``.
        fmt (str): Optional format template; defaults to
            :data:`R_BOUND_TASK` or :data:`R_UNBOUND_TASK` depending on
            whether the task is bound to an app.
        flags (Sequence[str]): Extra flags to include in the repr.
    """
    flags = list(flags) if flags is not None else []
    # Use a plain ``if`` statement instead of the original conditional
    # expression evaluated only for its ``append`` side effect.
    if task.__v2_compat__:
        flags.append('v2 compatible')
    if not fmt:
        fmt = R_BOUND_TASK if task._app else R_UNBOUND_TASK
    return fmt.format(
        task, flags=_strflags(flags),
        app=appstr(task._app) if task._app else None,
    )

62 

63 

@python_2_unicode_compatible
class Context(object):
    """Task request variables (Task.request)."""

    # Class-level defaults for every request attribute; instances shadow
    # these through ``self.__dict__`` (see :meth:`update`).
    logfile = None
    loglevel = None
    hostname = None
    id = None
    args = None
    kwargs = None
    retries = 0
    eta = None
    expires = None
    is_eager = False
    headers = None
    delivery_info = None
    reply_to = None
    root_id = None
    parent_id = None
    correlation_id = None
    taskset = None   # compat alias to group
    group = None
    chord = None
    chain = None
    utc = None
    called_directly = True
    callbacks = None
    errbacks = None
    timelimit = None
    origin = None
    _children = None   # see property
    _protected = 0

    def __init__(self, *args, **kwargs):
        # Accepts the same argument forms as ``dict.update``.
        self.update(*args, **kwargs)

    def update(self, *args, **kwargs):
        return self.__dict__.update(*args, **kwargs)

    def clear(self):
        return self.__dict__.clear()

    def get(self, key, default=None):
        # Uses attribute access so class-level defaults above are honored.
        return getattr(self, key, default)

    def __repr__(self):
        return '<Context: {0!r}>'.format(vars(self))

    def as_execution_options(self):
        # Translate request attributes back into apply_async-style options.
        limit_hard, limit_soft = self.timelimit or (None, None)
        return {
            'task_id': self.id,
            'root_id': self.root_id,
            'parent_id': self.parent_id,
            'group_id': self.group,
            'chord': self.chord,
            'chain': self.chain,
            'link': self.callbacks,
            'link_error': self.errbacks,
            'expires': self.expires,
            'soft_time_limit': limit_soft,
            'time_limit': limit_hard,
            'headers': self.headers,
            'retries': self.retries,
            'reply_to': self.reply_to,
            'origin': self.origin,
        }

    @property
    def children(self):
        # children must be an empty list for every thread
        if self._children is None:
            self._children = []
        return self._children

139 

@abstract.CallableTask.register
@python_2_unicode_compatible
class Task(object):
    """Task base class.

    Note:
        When called tasks apply the :meth:`run` method. This method must
        be defined by all tasks (that is unless the :meth:`__call__` method
        is overridden).
    """

    __trace__ = None
    __v2_compat__ = False  # set by old base in celery.task.base

    MaxRetriesExceededError = MaxRetriesExceededError
    OperationalError = OperationalError

    #: Execution strategy used, or the qualified name of one.
    Strategy = 'celery.worker.strategy:default'

    #: Request class used, or the qualified name of one.
    Request = 'celery.worker.request:Request'

    #: The application instance associated with this task class.
    _app = None

    #: Name of the task.
    name = None

    #: Enable argument checking.
    #: You can set this to false if you don't want the signature to be
    #: checked when calling the task.
    #: Defaults to :attr:`app.strict_typing <@Celery.strict_typing>`.
    typing = None

    #: Maximum number of retries before giving up. If set to :const:`None`,
    #: it will **never** stop retrying.
    max_retries = 3

    #: Default time in seconds before a retry of the task should be
    #: executed. 3 minutes by default.
    default_retry_delay = 3 * 60

    #: Rate limit for this task type. Examples: :const:`None` (no rate
    #: limit), `'100/s'` (hundred tasks a second), `'100/m'` (hundred tasks
    #: a minute), `'100/h'` (hundred tasks an hour)
    rate_limit = None

    #: If enabled the worker won't store task state and return values
    #: for this task. Defaults to the :setting:`task_ignore_result`
    #: setting.
    ignore_result = None

    #: If enabled the request will keep track of subtasks started by
    #: this task, and this information will be sent with the result
    #: (``result.children``).
    trail = True

    #: If enabled the worker will send monitoring events related to
    #: this task (but only if the worker is configured to send
    #: task related events).
    #: Note that this has no effect on the task-failure event case
    #: where a task is not registered (as it will have no task class
    #: to check this flag).
    send_events = True

    #: When enabled errors will be stored even if the task is otherwise
    #: configured to ignore results.
    store_errors_even_if_ignored = None

    #: The name of a serializer that are registered with
    #: :mod:`kombu.serialization.registry`. Default is `'json'`.
    serializer = None

    #: Hard time limit.
    #: Defaults to the :setting:`task_time_limit` setting.
    time_limit = None

    #: Soft time limit.
    #: Defaults to the :setting:`task_soft_time_limit` setting.
    soft_time_limit = None

    #: The result store backend used for this task.
    backend = None

    #: If disabled this task won't be registered automatically.
    autoregister = True

    #: If enabled the task will report its status as 'started' when the task
    #: is executed by a worker. Disabled by default as the normal behavior
    #: is to not report that level of granularity. Tasks are either pending,
    #: finished, or waiting to be retried.
    #:
    #: Having a 'started' status can be useful for when there are long
    #: running tasks and there's a need to report what task is currently
    #: running.
    #:
    #: The application default can be overridden using the
    #: :setting:`task_track_started` setting.
    track_started = None

    #: When enabled messages for this task will be acknowledged **after**
    #: the task has been executed, and not *just before* (the
    #: default behavior).
    #:
    #: Please note that this means the task may be executed twice if the
    #: worker crashes mid execution.
    #:
    #: The application default can be overridden with the
    #: :setting:`task_acks_late` setting.
    acks_late = None

    #: When enabled messages for this task will be acknowledged even if it
    #: fails or times out.
    #:
    #: Configuring this setting only applies to tasks that are
    #: acknowledged **after** they have been executed and only if
    #: :setting:`task_acks_late` is enabled.
    #:
    #: The application default can be overridden with the
    #: :setting:`task_acks_on_failure_or_timeout` setting.
    acks_on_failure_or_timeout = None

    #: Even if :attr:`acks_late` is enabled, the worker will
    #: acknowledge tasks when the worker process executing them abruptly
    #: exits or is signaled (e.g., :sig:`KILL`/:sig:`INT`, etc).
    #:
    #: Setting this to true allows the message to be re-queued instead,
    #: so that the task will execute again by the same worker, or another
    #: worker.
    #:
    #: Warning: Enabling this can cause message loops; make sure you know
    #: what you're doing.
    reject_on_worker_lost = None

    #: Tuple of expected exceptions.
    #:
    #: These are errors that are expected in normal operation
    #: and that shouldn't be regarded as a real error by the worker.
    #: Currently this means that the state will be updated to an error
    #: state, but the worker won't log the event as an error.
    throws = ()

    #: Default task expiry time.
    expires = None

    #: Default task priority.
    priority = None

    #: Max length of result representation used in logs and events.
    resultrepr_maxsize = 1024

    #: Task request stack, the current request will be the topmost.
    request_stack = None

    #: Some may expect a request to exist even if the task hasn't been
    #: called. This should probably be deprecated.
    _default_request = None

    #: Deprecated attribute ``abstract`` here for compatibility.
    abstract = True

    # Cached result of _get_exec_options; cleared by bind().
    _exec_options = None

    # True once bind() has run for this class.
    __bound__ = False

    #: (attribute name, configuration key) pairs filled in from app
    #: configuration by bind() when the attribute is unset (None).
    from_config = (
        ('serializer', 'task_serializer'),
        ('rate_limit', 'task_default_rate_limit'),
        ('priority', 'task_default_priority'),
        ('track_started', 'task_track_started'),
        ('acks_late', 'task_acks_late'),
        ('acks_on_failure_or_timeout', 'task_acks_on_failure_or_timeout'),
        ('reject_on_worker_lost', 'task_reject_on_worker_lost'),
        ('ignore_result', 'task_ignore_result'),
        ('store_errors_even_if_ignored', 'task_store_errors_even_if_ignored'),
    )

    _backend = None  # set by backend property.

    # - Tasks are lazily bound, so that configuration is not set
    # - until the task is actually used

    @classmethod
    def bind(cls, app):
        """Bind the task class to *app*, filling in configuration defaults.

        Called lazily on first use (see :meth:`_get_app`).  Returns *app*.
        """
        was_bound, cls.__bound__ = cls.__bound__, True
        cls._app = app
        conf = app.conf
        cls._exec_options = None  # clear option cache

        if cls.typing is None:
            cls.typing = app.strict_typing

        # Fill any still-unset attribute from its configuration key.
        for attr_name, config_name in cls.from_config:
            if getattr(cls, attr_name, None) is None:
                setattr(cls, attr_name, conf[config_name])

        # decorate with annotations from config.
        if not was_bound:
            cls.annotate()

        from celery.utils.threads import LocalStack
        cls.request_stack = LocalStack()

        # PeriodicTask uses this to add itself to the PeriodicTask schedule.
        cls.on_bound(app)

        return app

348 

    @classmethod
    def on_bound(cls, app):
        """Called when the task is bound to an app.

        Note:
            This class method can be defined to do additional actions when
            the task class is bound to an app.
        """
        # Intentionally a no-op hook for subclasses.

357 

    @classmethod
    def _get_app(cls):
        """Return the bound app, lazily binding to the current app first."""
        if cls._app is None:
            cls._app = current_app
        if not cls.__bound__:
            # The app property's __set__ method is not called
            # if Task.app is set (on the class), so must bind on use.
            cls.bind(cls._app)
        return cls._app
    # ``Task.app`` reads via _get_app and writes via bind.
    app = class_property(_get_app, bind)

368 

    @classmethod
    def annotate(cls):
        """Apply configured task annotations to this task class."""
        for d in resolve_all_annotations(cls.app.annotations, cls):
            for key, value in items(d):
                if key.startswith('@'):
                    # '@name' values wrap the existing attribute.
                    cls.add_around(key[1:], value)
                else:
                    setattr(cls, key, value)

377 

378 @classmethod 

379 def add_around(cls, attr, around): 

380 orig = getattr(cls, attr) 

381 if getattr(orig, '__wrapped__', None): 

382 orig = orig.__wrapped__ 

383 meth = around(orig) 

384 meth.__wrapped__ = orig 

385 setattr(cls, attr, meth) 

386 

    def __call__(self, *args, **kwargs):
        """Directly execute the task body, tracking task/request stacks."""
        _task_stack.push(self)
        self.push_request(args=args, kwargs=kwargs)
        try:
            return self.run(*args, **kwargs)
        finally:
            # Restore previous request/task state even if run() raises.
            self.pop_request()
            _task_stack.pop()

395 

396 def __reduce__(self): 

397 # - tasks are pickled into the name of the task only, and the receiver 

398 # - simply grabs it from the local registry. 

399 # - in later versions the module of the task is also included, 

400 # - and the receiving side tries to import that module so that 

401 # - it will work even if the task hasn't been registered. 

402 mod = type(self).__module__ 

403 mod = mod if mod and mod in sys.modules else None 

404 return (_unpickle_task_v2, (self.name, mod), None) 

405 

    def run(self, *args, **kwargs):
        """The body of the task executed by workers.

        Must be overridden (or :meth:`__call__` replaced) by every task.
        """
        raise NotImplementedError('Tasks must define the run method.')

409 

    def start_strategy(self, app, consumer, **kwargs):
        """Instantiate this task's execution strategy (see :attr:`Strategy`)."""
        return instantiate(self.Strategy, self, app, consumer, **kwargs)

412 

    def delay(self, *args, **kwargs):
        """Star argument version of :meth:`apply_async`.

        Does not support the extra options enabled by :meth:`apply_async`.

        Arguments:
            *args (Any): Positional arguments passed on to the task.
            **kwargs (Any): Keyword arguments passed on to the task.
        Returns:
            celery.result.AsyncResult: Future promise.
        """
        return self.apply_async(args, kwargs)

425 

    def apply_async(self, args=None, kwargs=None, task_id=None, producer=None,
                    link=None, link_error=None, shadow=None, **options):
        """Apply tasks asynchronously by sending a message.

        Arguments:
            args (Tuple): The positional arguments to pass on to the task.

            kwargs (Dict): The keyword arguments to pass on to the task.

            countdown (float): Number of seconds into the future that the
                task should execute. Defaults to immediate execution.

            eta (~datetime.datetime): Absolute time and date of when the task
                should be executed. May not be specified if `countdown`
                is also supplied.

            expires (float, ~datetime.datetime): Datetime or
                seconds in the future for the task should expire.
                The task won't be executed after the expiration time.

            shadow (str): Override task name used in logs/monitoring.
                Default is retrieved from :meth:`shadow_name`.

            connection (kombu.Connection): Re-use existing broker connection
                instead of acquiring one from the connection pool.

            retry (bool): If enabled sending of the task message will be
                retried in the event of connection loss or failure.
                Default is taken from the :setting:`task_publish_retry`
                setting. Note that you need to handle the
                producer/connection manually for this to work.

            retry_policy (Mapping): Override the retry policy used.
                See the :setting:`task_publish_retry_policy` setting.

            queue (str, kombu.Queue): The queue to route the task to.
                This must be a key present in :setting:`task_queues`, or
                :setting:`task_create_missing_queues` must be
                enabled. See :ref:`guide-routing` for more
                information.

            exchange (str, kombu.Exchange): Named custom exchange to send the
                task to. Usually not used in combination with the ``queue``
                argument.

            routing_key (str): Custom routing key used to route the task to a
                worker server. If in combination with a ``queue`` argument
                only used to specify custom routing keys to topic exchanges.

            priority (int): The task priority, a number between 0 and 9.
                Defaults to the :attr:`priority` attribute.

            serializer (str): Serialization method to use.
                Can be `pickle`, `json`, `yaml`, `msgpack` or any custom
                serialization method that's been registered
                with :mod:`kombu.serialization.registry`.
                Defaults to the :attr:`serializer` attribute.

            compression (str): Optional compression method
                to use. Can be one of ``zlib``, ``bzip2``,
                or any custom compression methods registered with
                :func:`kombu.compression.register`.
                Defaults to the :setting:`task_compression` setting.

            link (Signature): A single, or a list of tasks signatures
                to apply if the task returns successfully.

            link_error (Signature): A single, or a list of task signatures
                to apply if an error occurs while executing the task.

            producer (kombu.Producer): custom producer to use when publishing
                the task.

            add_to_parent (bool): If set to True (default) and the task
                is applied while executing another task, then the result
                will be appended to the parent tasks ``request.children``
                attribute. Trailing can also be disabled by default using the
                :attr:`trail` attribute

            publisher (kombu.Producer): Deprecated alias to ``producer``.

            headers (Dict): Message headers to be included in the message.

        Returns:
            celery.result.AsyncResult: Promise of future evaluation.

        Raises:
            TypeError: If not enough arguments are passed, or too many
                arguments are passed. Note that signature checks may
                be disabled by specifying ``@task(typing=False)``.
            kombu.exceptions.OperationalError: If a connection to the
                transport cannot be made, or if the connection is lost.

        Note:
            Also supports all keyword arguments supported by
            :meth:`kombu.Producer.publish`.
        """
        if self.typing:
            try:
                check_arguments = self.__header__
            except AttributeError:  # pragma: no cover
                pass
            else:
                check_arguments(*(args or ()), **(kwargs or {}))

        if self.__v2_compat__:
            shadow = shadow or self.shadow_name(self(), args, kwargs, options)
        else:
            shadow = shadow or self.shadow_name(args, kwargs, options)

        preopts = self._get_exec_options()
        options = dict(preopts, **options) if options else preopts

        options.setdefault('ignore_result', self.ignore_result)
        # NOTE(review): a task-level priority of 0 is falsy and thus never
        # applied here -- confirm whether that is intended.
        if self.priority:
            options.setdefault('priority', self.priority)

        app = self._get_app()
        if app.conf.task_always_eager:
            with app.producer_or_acquire(producer) as eager_producer:
                serializer = options.get('serializer')
                if serializer is None:
                    if eager_producer.serializer:
                        serializer = eager_producer.serializer
                    else:
                        serializer = app.conf.task_serializer
                # Round-trip args/kwargs through the serializer so eager
                # execution surfaces the same serialization errors as a
                # real dispatch would.
                body = args, kwargs
                content_type, content_encoding, data = serialization.dumps(
                    body, serializer,
                )
                args, kwargs = serialization.loads(
                    data, content_type, content_encoding,
                    accept=[content_type]
                )
            with denied_join_result():
                return self.apply(args, kwargs, task_id=task_id or uuid(),
                                  link=link, link_error=link_error, **options)
        else:
            return app.send_task(
                self.name, args, kwargs, task_id=task_id, producer=producer,
                link=link, link_error=link_error, result_cls=self.AsyncResult,
                shadow=shadow, task_type=self,
                **options
            )

570 

    def shadow_name(self, args, kwargs, options):
        """Override for custom task name in worker logs/monitoring.

        Example:
            .. code-block:: python

                from celery.utils.imports import qualname

                def shadow_name(task, args, kwargs, options):
                    return qualname(args[0])

                @app.task(shadow_name=shadow_name, serializer='pickle')
                def apply_function_async(fun, *args, **kwargs):
                    return fun(*args, **kwargs)

        Arguments:
            args (Tuple): Task positional arguments.
            kwargs (Dict): Task keyword arguments.
            options (Dict): Task execution options.
        """
        # Default implementation returns None (no shadow name).

591 

    def signature_from_request(self, request=None, args=None, kwargs=None,
                               queue=None, **extra_options):
        """Create a signature that replays the current request.

        Used by :meth:`retry` to re-send the task with the same routing
        and execution options as the original delivery.
        """
        request = self.request if request is None else request
        args = request.args if args is None else args
        kwargs = request.kwargs if kwargs is None else kwargs
        options = request.as_execution_options()
        delivery_info = request.delivery_info or {}
        priority = delivery_info.get('priority')
        if priority is not None:
            options['priority'] = priority
        if queue:
            options['queue'] = queue
        else:
            exchange = delivery_info.get('exchange')
            routing_key = delivery_info.get('routing_key')
            if exchange == '' and routing_key:
                # sent to anon-exchange
                options['queue'] = routing_key
            else:
                options.update(delivery_info)
        return self.signature(
            args, kwargs, options, type=self, **extra_options
        )
    subtask_from_request = signature_from_request  # XXX compat

616 

    def retry(self, args=None, kwargs=None, exc=None, throw=True,
              eta=None, countdown=None, max_retries=None, **options):
        """Retry the task, adding it to the back of the queue.

        Example:
            >>> from imaginary_twitter_lib import Twitter
            >>> from proj.celery import app

            >>> @app.task(bind=True)
            ... def tweet(self, auth, message):
            ...     twitter = Twitter(oauth=auth)
            ...     try:
            ...         twitter.post_status_update(message)
            ...     except twitter.FailWhale as exc:
            ...         # Retry in 5 minutes.
            ...         self.retry(countdown=60 * 5, exc=exc)

        Note:
            Although the task will never return above as `retry` raises an
            exception to notify the worker, we use `raise` in front of the
            retry to convey that the rest of the block won't be executed.

        Arguments:
            args (Tuple): Positional arguments to retry with.
            kwargs (Dict): Keyword arguments to retry with.
            exc (Exception): Custom exception to report when the max retry
                limit has been exceeded (default:
                :exc:`~@MaxRetriesExceededError`).

                If this argument is set and retry is called while
                an exception was raised (``sys.exc_info()`` is set)
                it will attempt to re-raise the current exception.

                If no exception was raised it will raise the ``exc``
                argument provided.
            countdown (float): Time in seconds to delay the retry for.
            eta (~datetime.datetime): Explicit time and date to run the
                retry at.
            max_retries (int): If set, overrides the default retry limit for
                this execution. Changes to this parameter don't propagate to
                subsequent task retry attempts. A value of :const:`None`,
                means "use the default", so if you want infinite retries you'd
                have to set the :attr:`max_retries` attribute of the task to
                :const:`None` first.
            time_limit (int): If set, overrides the default time limit.
            soft_time_limit (int): If set, overrides the default soft
                time limit.
            throw (bool): If this is :const:`False`, don't raise the
                :exc:`~@Retry` exception, that tells the worker to mark
                the task as being retried. Note that this means the task
                will be marked as failed if the task raises an exception,
                or successful if it returns after the retry call.
            **options (Any): Extra options to pass on to :meth:`apply_async`.

        Raises:

            celery.exceptions.Retry:
                To tell the worker that the task has been re-sent for retry.
                This always happens, unless the `throw` keyword argument
                has been explicitly set to :const:`False`, and is considered
                normal operation.
        """
        request = self.request
        retries = request.retries + 1
        max_retries = self.max_retries if max_retries is None else max_retries

        # Not in worker or emulated by (apply/always_eager),
        # so just raise the original exception.
        if request.called_directly:
            # raises orig stack if PyErr_Occurred,
            # and augments with exc' if that argument is defined.
            raise_with_context(exc or Retry('Task can be retried', None))

        if not eta and countdown is None:
            countdown = self.default_retry_delay

        is_eager = request.is_eager
        S = self.signature_from_request(
            request, args, kwargs,
            countdown=countdown, eta=eta, retries=retries,
            **options
        )

        if max_retries is not None and retries > max_retries:
            if exc:
                # On Py3: will augment any current exception with
                # the exc' argument provided (raise exc from orig)
                raise_with_context(exc)
            raise self.MaxRetriesExceededError(
                "Can't retry {0}[{1}] args:{2} kwargs:{3}".format(
                    self.name, request.id, S.args, S.kwargs
                ), task_args=S.args, task_kwargs=S.kwargs
            )

        ret = Retry(exc=exc, when=eta or countdown, is_eager=is_eager, sig=S)

        if is_eager:
            # if task was executed eagerly using apply(),
            # then the retry must also be executed eagerly in apply method
            if throw:
                raise ret
            return ret

        try:
            S.apply_async()
        except Exception as exc:  # NOTE(review): shadows the ``exc`` argument
            raise Reject(exc, requeue=False)
        if throw:
            raise ret
        return ret

727 

    def apply(self, args=None, kwargs=None,
              link=None, link_error=None,
              task_id=None, retries=None, throw=None,
              logfile=None, loglevel=None, headers=None, **options):
        """Execute this task locally, by blocking until the task returns.

        Arguments:
            args (Tuple): positional arguments passed on to the task.
            kwargs (Dict): keyword arguments passed on to the task.
            throw (bool): Re-raise task exceptions.
                Defaults to the :setting:`task_eager_propagates` setting.

        Returns:
            celery.result.EagerResult: pre-evaluated result.
        """
        # trace imports Task, so need to import inline.
        from celery.app.trace import build_tracer

        app = self._get_app()
        args = args or ()
        kwargs = kwargs or {}
        task_id = task_id or uuid()
        retries = retries or 0
        if throw is None:
            throw = app.conf.task_eager_propagates

        # Make sure we get the task instance, not class.
        task = app._tasks[self.name]

        # Synthetic request dict mirroring what a worker would build.
        request = {
            'id': task_id,
            'retries': retries,
            'is_eager': True,
            'logfile': logfile,
            'loglevel': loglevel or 0,
            'hostname': gethostname(),
            'callbacks': maybe_list(link),
            'errbacks': maybe_list(link_error),
            'headers': headers,
            'delivery_info': {'is_eager': True},
        }
        tb = None
        tracer = build_tracer(
            task.name, task, eager=True,
            propagate=throw, app=self._get_app(),
        )
        ret = tracer(task_id, args, kwargs, request)
        retval = ret.retval
        if isinstance(retval, ExceptionInfo):
            retval, tb = retval.exception, retval.traceback
        if isinstance(retval, Retry) and retval.sig is not None:
            # Eager retry: apply the retry signature immediately.
            return retval.sig.apply(retries=retries + 1)
        state = states.SUCCESS if ret.info is None else ret.info.state
        return EagerResult(task_id, retval, state, traceback=tb)

782 

    def AsyncResult(self, task_id, **kwargs):
        """Get AsyncResult instance for the specified task.

        Arguments:
            task_id (str): Task id to get result for.
        """
        return self._get_app().AsyncResult(task_id, backend=self.backend,
                                           task_name=self.name, **kwargs)

791 

    def signature(self, args=None, *starargs, **starkwargs):
        """Create signature.

        Returns:
            :class:`~celery.signature`:  object for
                this task, wrapping arguments and execution options
                for a single task invocation.
        """
        starkwargs.setdefault('app', self.app)
        return signature(self, args, *starargs, **starkwargs)
    subtask = signature  # compat alias

803 

    def s(self, *args, **kwargs):
        """Create signature.

        Shortcut for ``.s(*a, **k) -> .signature(a, k)``.
        """
        return self.signature(args, kwargs)

810 

    def si(self, *args, **kwargs):
        """Create immutable signature.

        Shortcut for ``.si(*a, **k) -> .signature(a, k, immutable=True)``.
        """
        return self.signature(args, kwargs, immutable=True)

817 

    def chunks(self, it, n):
        """Create a :class:`~celery.canvas.chunks` task for this task."""
        from celery import chunks
        return chunks(self.s(), it, n, app=self.app)

822 

    def map(self, it):
        """Create a :class:`~celery.canvas.xmap` task from ``it``."""
        from celery import xmap
        return xmap(self.s(), it, app=self.app)

827 

    def starmap(self, it):
        """Create a :class:`~celery.canvas.xstarmap` task from ``it``."""
        from celery import xstarmap
        return xstarmap(self.s(), it, app=self.app)

832 

    def send_event(self, type_, retry=True, retry_policy=None, **fields):
        """Send monitoring event message.

        This can be used to add custom event types in :pypi:`Flower`
        and other monitors.

        Arguments:
            type_ (str):  Type of event, e.g. ``"task-failed"``.

        Keyword Arguments:
            retry (bool):  Retry sending the message
                if the connection is lost. Default is taken from the
                :setting:`task_publish_retry` setting.
            retry_policy (Mapping): Retry settings. Default is taken
                from the :setting:`task_publish_retry_policy` setting.
            **fields (Any): Map containing information about the event.
                Must be JSON serializable.
        """
        req = self.request
        if retry_policy is None:
            retry_policy = self.app.conf.task_publish_retry_policy
        with self.app.events.default_dispatcher(hostname=req.hostname) as d:
            return d.send(
                type_,
                uuid=req.id, retry=retry, retry_policy=retry_policy, **fields)

858 

    def replace(self, sig):
        """Replace this task, with a new task inheriting the task id.

        Execution of the host task ends immediately and no subsequent statements
        will be run.

        .. versionadded:: 4.0

        Arguments:
            sig (~@Signature): signature to replace with.

        Raises:
            ~@Ignore: This is always raised when called in asynchronous context.
                It is best to always use ``return self.replace(...)`` to convey
                to the reader that the task won't continue after being replaced.
        """
        chord = self.request.chord
        if 'chord' in sig.options:
            raise ImproperlyConfigured(
                "A signature replacing a task must not be part of a chord"
            )

        if isinstance(sig, group):
            # Give the group an accumulate body so callers still get one
            # result; the original task's callbacks move onto that body.
            sig |= self.app.tasks['celery.accumulate'].s(index=0).set(
                link=self.request.callbacks,
                link_error=self.request.errbacks,
            )

        if self.request.chain:
            # Append the rest of the chain so it runs after the replacement.
            for t in reversed(self.request.chain):
                sig |= signature(t, app=self.app)

        # The replacement inherits this request's chord/group/root ids.
        sig.set(
            chord=chord,
            group_id=self.request.group,
            root_id=self.request.root_id,
        )
        sig.freeze(self.request.id)

        if self.request.is_eager:
            return sig.apply().get()
        else:
            sig.delay()
            raise Ignore('Replaced by new task')

903 

904 def add_to_chord(self, sig, lazy=False): 

905 """Add signature to the chord the current task is a member of. 

906 

907 .. versionadded:: 4.0 

908 

909 Currently only supported by the Redis result backend. 

910 

911 Arguments: 

912 sig (~@Signature): Signature to extend chord with. 

913 lazy (bool): If enabled the new task won't actually be called, 

914 and ``sig.delay()`` must be called manually. 

915 """ 

916 if not self.request.chord: 

917 raise ValueError('Current task is not member of any chord') 

918 sig.set( 

919 group_id=self.request.group, 

920 chord=self.request.chord, 

921 root_id=self.request.root_id, 

922 ) 

923 result = sig.freeze() 

924 self.backend.add_to_chord(self.request.group, result) 

925 return sig.delay() if not lazy else sig 

926 

927 def update_state(self, task_id=None, state=None, meta=None, **kwargs): 

928 """Update task state. 

929 

930 Arguments: 

931 task_id (str): Id of the task to update. 

932 Defaults to the id of the current task. 

933 state (str): New state. 

934 meta (Dict): State meta-data. 

935 """ 

936 if task_id is None: 

937 task_id = self.request.id 

938 self.backend.store_result(task_id, meta, state, request=self.request, **kwargs) 

939 

940 def on_success(self, retval, task_id, args, kwargs): 

941 """Success handler. 

942 

943 Run by the worker if the task executes successfully. 

944 

945 Arguments: 

946 retval (Any): The return value of the task. 

947 task_id (str): Unique id of the executed task. 

948 args (Tuple): Original arguments for the executed task. 

949 kwargs (Dict): Original keyword arguments for the executed task. 

950 

951 Returns: 

952 None: The return value of this handler is ignored. 

953 """ 

954 

955 def on_retry(self, exc, task_id, args, kwargs, einfo): 

956 """Retry handler. 

957 

958 This is run by the worker when the task is to be retried. 

959 

960 Arguments: 

961 exc (Exception): The exception sent to :meth:`retry`. 

962 task_id (str): Unique id of the retried task. 

963 args (Tuple): Original arguments for the retried task. 

964 kwargs (Dict): Original keyword arguments for the retried task. 

965 einfo (~billiard.einfo.ExceptionInfo): Exception information. 

966 

967 Returns: 

968 None: The return value of this handler is ignored. 

969 """ 

970 

971 def on_failure(self, exc, task_id, args, kwargs, einfo): 

972 """Error handler. 

973 

974 This is run by the worker when the task fails. 

975 

976 Arguments: 

977 exc (Exception): The exception raised by the task. 

978 task_id (str): Unique id of the failed task. 

979 args (Tuple): Original arguments for the task that failed. 

980 kwargs (Dict): Original keyword arguments for the task that failed. 

981 einfo (~billiard.einfo.ExceptionInfo): Exception information. 

982 

983 Returns: 

984 None: The return value of this handler is ignored. 

985 """ 

986 

987 def after_return(self, status, retval, task_id, args, kwargs, einfo): 

988 """Handler called after the task returns. 

989 

990 Arguments: 

991 status (str): Current task state. 

992 retval (Any): Task return value/exception. 

993 task_id (str): Unique id of the task. 

994 args (Tuple): Original arguments for the task. 

995 kwargs (Dict): Original keyword arguments for the task. 

996 einfo (~billiard.einfo.ExceptionInfo): Exception information. 

997 

998 Returns: 

999 None: The return value of this handler is ignored. 

1000 """ 

1001 

1002 def add_trail(self, result): 

1003 if self.trail: 

1004 self.request.children.append(result) 

1005 return result 

1006 

1007 def push_request(self, *args, **kwargs): 

1008 self.request_stack.push(Context(*args, **kwargs)) 

1009 

1010 def pop_request(self): 

1011 self.request_stack.pop() 

1012 

    def __repr__(self):
        """``repr(task)``."""
        # Delegates to the module-level helper using the bound-instance
        # template (``R_INSTANCE``), e.g. ``<@task: name of app>``.
        return _reprtask(self, R_INSTANCE)

1016 

1017 def _get_request(self): 

1018 """Get current request object.""" 

1019 req = self.request_stack.top 

1020 if req is None: 

1021 # task was not called, but some may still expect a request 

1022 # to be there, perhaps that should be deprecated. 

1023 if self._default_request is None: 

1024 self._default_request = Context() 

1025 return self._default_request 

1026 return req 

1027 request = property(_get_request) 

1028 

1029 def _get_exec_options(self): 

1030 if self._exec_options is None: 

1031 self._exec_options = extract_exec_options(self) 

1032 return self._exec_options 

1033 

1034 @property 

1035 def backend(self): 

1036 backend = self._backend 

1037 if backend is None: 

1038 return self.app.backend 

1039 return backend 

1040 

1041 @backend.setter 

1042 def backend(self, value): # noqa 

1043 self._backend = value 

1044 

1045 @property 

1046 def __name__(self): 

1047 return self.__class__.__name__ 

1048 

1049 

#: Backwards-compatible alias: older Celery releases exposed the task base
#: class as ``BaseTask``; new code should use :class:`Task` directly.
BaseTask = Task  # noqa: E305 XXX compat alias