import math
import attr
import trio
from . import _core
from ._core import enable_ki_protection, ParkingLot
from ._util import Final
@attr.s(frozen=True)
class _EventStatistics:
tasks_waiting = attr.ib()
@attr.s(repr=False, eq=False, hash=False, slots=True)
class Event(metaclass=Final):
"""A waitable boolean value useful for inter-task synchronization,
inspired by :class:`threading.Event`.
An event object has an internal boolean flag, representing whether
the event has happened yet. The flag is initially False, and the
:meth:`wait` method waits until the flag is True. If the flag is
already True, then :meth:`wait` returns immediately. (If the event has
already happened, there's nothing to wait for.) The :meth:`set` method
sets the flag to True, and wakes up any waiters.
This behavior is useful because it helps avoid race conditions and
lost wakeups: it doesn't matter whether :meth:`set` gets called just
before or after :meth:`wait`. If you want a lower-level wakeup
primitive that doesn't have this protection, consider :class:`Condition`
or :class:`trio.lowlevel.ParkingLot`.
.. note:: Unlike `threading.Event`, `trio.Event` has no
`~threading.Event.clear` method. In Trio, once an `Event` has happened,
it cannot un-happen. If you need to represent a series of events,
consider creating a new `Event` object for each one (they're cheap!),
or other synchronization methods like :ref:`channels <channels>` or
`trio.lowlevel.ParkingLot`.
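
    For example, here's a sketch of one task signalling another (the
    function names are illustrative; only the `Event` calls are Trio API)::

        async def waiter(event):
            await event.wait()  # blocks until event.set() is called
            print("the event happened!")

        async def main():
            event = trio.Event()
            async with trio.open_nursery() as nursery:
                nursery.start_soon(waiter, event)
                event.set()  # wakes the waiting task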
"""
_tasks = attr.ib(factory=set, init=False)
_flag = attr.ib(default=False, init=False)
def is_set(self):
"""Return the current value of the internal flag."""
return self._flag
@enable_ki_protection
def set(self):
"""Set the internal flag value to True, and wake any waiting tasks."""
if not self._flag:
self._flag = True
for task in self._tasks:
_core.reschedule(task)
self._tasks.clear()
async def wait(self):
"""Block until the internal flag value becomes True.
If it's already True, then this method returns immediately.
"""
if self._flag:
await trio.lowlevel.checkpoint()
else:
task = _core.current_task()
self._tasks.add(task)
def abort_fn(_):
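                # If the wait is cancelled, stop tracking this task so that
                # a later set() won't try to reschedule it.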
self._tasks.remove(task)
return _core.Abort.SUCCEEDED
await _core.wait_task_rescheduled(abort_fn)
def statistics(self):
"""Return an object containing debugging information.
Currently the following fields are defined:
* ``tasks_waiting``: The number of tasks blocked on this event's
:meth:`wait` method.
"""
return _EventStatistics(tasks_waiting=len(self._tasks))
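# Shared "async with" behavior for the synchronization classes below:
# acquire() on entry, release() on exit.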
class AsyncContextManagerMixin:
@enable_ki_protection
async def __aenter__(self):
await self.acquire()
@enable_ki_protection
async def __aexit__(self, *args):
self.release()
@attr.s(frozen=True)
class _CapacityLimiterStatistics:
borrowed_tokens = attr.ib()
total_tokens = attr.ib()
borrowers = attr.ib()
tasks_waiting = attr.ib()
class CapacityLimiter(AsyncContextManagerMixin, metaclass=Final):
"""An object for controlling access to a resource with limited capacity.
Sometimes you need to put a limit on how many tasks can do something at
the same time. For example, you might want to use some threads to run
multiple blocking I/O operations in parallel... but if you use too many
threads at once, then your system can become overloaded and it'll actually
make things slower. One popular solution is to impose a policy like "run
up to 40 threads at the same time, but no more". But how do you implement
a policy like this?
That's what :class:`CapacityLimiter` is for. You can think of a
:class:`CapacityLimiter` object as a sack that starts out holding some fixed
number of tokens::
limit = trio.CapacityLimiter(40)
Then tasks can come along and borrow a token out of the sack::
# Borrow a token:
async with limit:
# We are holding a token!
await perform_expensive_operation()
# Exiting the 'async with' block puts the token back into the sack
    And crucially, if you try to borrow a token but the sack is empty, then
    you have to wait for another task to finish what it's doing and put its
    token back before you can take it and continue.
Another way to think of it: a :class:`CapacityLimiter` is like a sofa with a
fixed number of seats, and if they're all taken then you have to wait for
someone to get up before you can sit down.
By default, :func:`trio.to_thread.run_sync` uses a
:class:`CapacityLimiter` to limit the number of threads running at once;
see `trio.to_thread.current_default_thread_limiter` for details.
If you're familiar with semaphores, then you can think of this as a
restricted semaphore that's specialized for one common use case, with
additional error checking. For a more traditional semaphore, see
:class:`Semaphore`.
.. note::
Don't confuse this with the `"leaky bucket"
<https://en.wikipedia.org/wiki/Leaky_bucket>`__ or `"token bucket"
<https://en.wikipedia.org/wiki/Token_bucket>`__ algorithms used to
limit bandwidth usage on networks. The basic idea of using tokens to
track a resource limit is similar, but this is a very simple sack where
tokens aren't automatically created or destroyed over time; they're
just borrowed and then put back.
"""
def __init__(self, total_tokens):
self._lot = ParkingLot()
self._borrowers = set()
# Maps tasks attempting to acquire -> borrower, to handle on-behalf-of
self._pending_borrowers = {}
# invoke the property setter for validation
self.total_tokens = total_tokens
assert self._total_tokens == total_tokens
def __repr__(self):
return "<trio.CapacityLimiter at {:#x}, {}/{} with {} waiting>".format(
id(self), len(self._borrowers), self._total_tokens, len(self._lot)
)
@property
def total_tokens(self):
"""The total capacity available.
You can change :attr:`total_tokens` by assigning to this attribute. If
you make it larger, then the appropriate number of waiting tasks will
be woken immediately to take the new tokens. If you decrease
total_tokens below the number of tasks that are currently using the
resource, then all current tasks will be allowed to finish as normal,
but no new tasks will be allowed in until the total number of tasks
drops below the new total_tokens.
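
        For example (the sizes here are illustrative)::

            limit = trio.CapacityLimiter(10)
            # ... later, under load ...
            limit.total_tokens += 5  # wakes up to 5 waiting tasks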
"""
return self._total_tokens
@total_tokens.setter
def total_tokens(self, new_total_tokens):
if not isinstance(new_total_tokens, int) and new_total_tokens != math.inf:
raise TypeError("total_tokens must be an int or math.inf")
if new_total_tokens < 1:
raise ValueError("total_tokens must be >= 1")
self._total_tokens = new_total_tokens
self._wake_waiters()
def _wake_waiters(self):
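        # Wake as many waiters as we have spare tokens; each woken task's
        # pending borrower becomes an actual borrower.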
available = self._total_tokens - len(self._borrowers)
for woken in self._lot.unpark(count=available):
self._borrowers.add(self._pending_borrowers.pop(woken))
@property
def borrowed_tokens(self):
"""The amount of capacity that's currently in use."""
return len(self._borrowers)
@property
def available_tokens(self):
"""The amount of capacity that's available to use."""
return self.total_tokens - self.borrowed_tokens
@enable_ki_protection
def acquire_nowait(self):
"""Borrow a token from the sack, without blocking.
Raises:
WouldBlock: if no tokens are available.
RuntimeError: if the current task already holds one of this sack's
tokens.
"""
self.acquire_on_behalf_of_nowait(trio.lowlevel.current_task())
@enable_ki_protection
def acquire_on_behalf_of_nowait(self, borrower):
"""Borrow a token from the sack on behalf of ``borrower``, without
blocking.
Args:
            borrower: A :class:`trio.lowlevel.Task` or arbitrary opaque object
                used to record who is borrowing this token. This is used by
                :func:`trio.to_thread.run_sync` to allow threads to "hold
                tokens", with the intention of using it in the future to `allow
                deadlock detection and other useful things
                <https://github.com/python-trio/trio/issues/182>`__.
Raises:
WouldBlock: if no tokens are available.
RuntimeError: if ``borrower`` already holds one of this sack's
tokens.
"""
if borrower in self._borrowers:
raise RuntimeError(
"this borrower is already holding one of this "
"CapacityLimiter's tokens"
)
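        # Only hand out a token if one is free *and* no one is already
        # waiting in line, so newcomers can't jump ahead of parked tasks.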
if len(self._borrowers) < self._total_tokens and not self._lot:
self._borrowers.add(borrower)
else:
raise trio.WouldBlock
@enable_ki_protection
async def acquire(self):
"""Borrow a token from the sack, blocking if necessary.
Raises:
RuntimeError: if the current task already holds one of this sack's
tokens.
"""
await self.acquire_on_behalf_of(trio.lowlevel.current_task())
@enable_ki_protection
async def acquire_on_behalf_of(self, borrower):
"""Borrow a token from the sack on behalf of ``borrower``, blocking if
necessary.
Args:
borrower: A :class:`trio.lowlevel.Task` or arbitrary opaque object
used to record who is borrowing this token; see
:meth:`acquire_on_behalf_of_nowait` for details.
Raises:
            RuntimeError: if ``borrower`` already holds one of this sack's
tokens.
"""
await trio.lowlevel.checkpoint_if_cancelled()
try:
self.acquire_on_behalf_of_nowait(borrower)
except trio.WouldBlock:
task = trio.lowlevel.current_task()
self._pending_borrowers[task] = borrower
try:
await self._lot.park()
except trio.Cancelled:
self._pending_borrowers.pop(task)
raise
else:
await trio.lowlevel.cancel_shielded_checkpoint()
@enable_ki_protection
def release(self):
"""Put a token back into the sack.
Raises:
RuntimeError: if the current task has not acquired one of this
sack's tokens.
"""
self.release_on_behalf_of(trio.lowlevel.current_task())
@enable_ki_protection
def release_on_behalf_of(self, borrower):
"""Put a token back into the sack on behalf of ``borrower``.
Raises:
RuntimeError: if the given borrower has not acquired one of this
sack's tokens.
"""
if borrower not in self._borrowers:
raise RuntimeError(
"this borrower isn't holding any of this CapacityLimiter's tokens"
)
self._borrowers.remove(borrower)
self._wake_waiters()
def statistics(self):
"""Return an object containing debugging information.
Currently the following fields are defined:
* ``borrowed_tokens``: The number of tokens currently borrowed from
the sack.
* ``total_tokens``: The total number of tokens in the sack. Usually
          this will be larger than ``borrowed_tokens``, but it's possible for
it to be smaller if :attr:`total_tokens` was recently decreased.
* ``borrowers``: A list of all tasks or other entities that currently
hold a token.
* ``tasks_waiting``: The number of tasks blocked on this
:class:`CapacityLimiter`\'s :meth:`acquire` or
:meth:`acquire_on_behalf_of` methods.
"""
return _CapacityLimiterStatistics(
borrowed_tokens=len(self._borrowers),
total_tokens=self._total_tokens,
# Use a list instead of a frozenset just in case we start to allow
# one borrower to hold multiple tokens in the future
borrowers=list(self._borrowers),
tasks_waiting=len(self._lot),
)
class Semaphore(AsyncContextManagerMixin, metaclass=Final):
"""A `semaphore <https://en.wikipedia.org/wiki/Semaphore_(programming)>`__.
A semaphore holds an integer value, which can be incremented by
calling :meth:`release` and decremented by calling :meth:`acquire` – but
the value is never allowed to drop below zero. If the value is zero, then
:meth:`acquire` will block until someone calls :meth:`release`.
If you're looking for a :class:`Semaphore` to limit the number of tasks
that can access some resource simultaneously, then consider using a
:class:`CapacityLimiter` instead.
This object's interface is similar to, but different from, that of
:class:`threading.Semaphore`.
A :class:`Semaphore` object can be used as an async context manager; it
blocks on entry but not on exit.
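
    For example, a sketch of limiting concurrency to two tasks at a time
    (``do_work`` is a placeholder for your own function)::

        sem = trio.Semaphore(2)

        async def worker():
            async with sem:  # acquire() on entry, release() on exit
                await do_work()
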
Args:
        initial_value (int): A non-negative integer giving the semaphore's
            initial value.
max_value (int or None): If given, makes this a "bounded" semaphore that
raises an error if the value is about to exceed the given
``max_value``.
"""
def __init__(self, initial_value, *, max_value=None):
if not isinstance(initial_value, int):
raise TypeError("initial_value must be an int")
if initial_value < 0:
raise ValueError("initial value must be >= 0")
if max_value is not None:
if not isinstance(max_value, int):
raise TypeError("max_value must be None or an int")
if max_value < initial_value:
raise ValueError("max_values must be >= initial_value")
# Invariants:
# bool(self._lot) implies self._value == 0
# (or equivalently: self._value > 0 implies not self._lot)
self._lot = trio.lowlevel.ParkingLot()
self._value = initial_value
self._max_value = max_value
def __repr__(self):
if self._max_value is None:
max_value_str = ""
else:
max_value_str = ", max_value={}".format(self._max_value)
return "<trio.Semaphore({}{}) at {:#x}>".format(
self._value, max_value_str, id(self)
)
@property
def value(self):
"""The current value of the semaphore."""
return self._value
@property
def max_value(self):
"""The maximum allowed value. May be None to indicate no limit."""
return self._max_value
@enable_ki_protection
def acquire_nowait(self):
"""Attempt to decrement the semaphore value, without blocking.
Raises:
WouldBlock: if the value is zero.
"""
if self._value > 0:
assert not self._lot
self._value -= 1
else:
raise trio.WouldBlock
@enable_ki_protection
async def acquire(self):
"""Decrement the semaphore value, blocking if necessary to avoid
letting it drop below zero.
"""
await trio.lowlevel.checkpoint_if_cancelled()
try:
self.acquire_nowait()
except trio.WouldBlock:
await self._lot.park()
else:
await trio.lowlevel.cancel_shielded_checkpoint()
@enable_ki_protection
def release(self):
"""Increment the semaphore value, possibly waking a task blocked in
:meth:`acquire`.
Raises:
ValueError: if incrementing the value would cause it to exceed
:attr:`max_value`.
"""
if self._lot:
assert self._value == 0
self._lot.unpark(count=1)
else:
if self._max_value is not None and self._value == self._max_value:
raise ValueError("semaphore released too many times")
self._value += 1
def statistics(self):
"""Return an object containing debugging information.
Currently the following fields are defined:
* ``tasks_waiting``: The number of tasks blocked on this semaphore's
:meth:`acquire` method.
"""
return self._lot.statistics()
@attr.s(frozen=True)
class _LockStatistics:
locked = attr.ib()
owner = attr.ib()
tasks_waiting = attr.ib()
@attr.s(eq=False, hash=False, repr=False)
class _LockImpl(AsyncContextManagerMixin):
_lot = attr.ib(factory=ParkingLot, init=False)
_owner = attr.ib(default=None, init=False)
def __repr__(self):
if self.locked():
s1 = "locked"
s2 = " with {} waiters".format(len(self._lot))
else:
s1 = "unlocked"
s2 = ""
return "<{} {} object at {:#x}{}>".format(
s1, self.__class__.__name__, id(self), s2
)
def locked(self):
"""Check whether the lock is currently held.
Returns:
bool: True if the lock is held, False otherwise.
"""
return self._owner is not None
@enable_ki_protection
def acquire_nowait(self):
"""Attempt to acquire the lock, without blocking.
Raises:
WouldBlock: if the lock is held.
"""
task = trio.lowlevel.current_task()
if self._owner is task:
raise RuntimeError("attempt to re-acquire an already held Lock")
elif self._owner is None and not self._lot:
# No-one owns it
self._owner = task
else:
raise trio.WouldBlock
@enable_ki_protection
async def acquire(self):
"""Acquire the lock, blocking if necessary."""
await trio.lowlevel.checkpoint_if_cancelled()
try:
self.acquire_nowait()
except trio.WouldBlock:
# NOTE: it's important that the contended acquire path is just
# "_lot.park()", because that's how Condition.wait() acquires the
# lock as well.
await self._lot.park()
else:
await trio.lowlevel.cancel_shielded_checkpoint()
@enable_ki_protection
def release(self):
"""Release the lock.
Raises:
RuntimeError: if the calling task does not hold the lock.
"""
task = trio.lowlevel.current_task()
if task is not self._owner:
raise RuntimeError("can't release a Lock you don't own")
if self._lot:
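            # Direct handoff: wake the next waiter and make it the owner,
            # instead of briefly marking the lock as unlocked.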
(self._owner,) = self._lot.unpark(count=1)
else:
self._owner = None
def statistics(self):
"""Return an object containing debugging information.
Currently the following fields are defined:
* ``locked``: boolean indicating whether the lock is held.
* ``owner``: the :class:`trio.lowlevel.Task` currently holding the lock,
or None if the lock is not held.
* ``tasks_waiting``: The number of tasks blocked on this lock's
:meth:`acquire` method.
"""
return _LockStatistics(
locked=self.locked(), owner=self._owner, tasks_waiting=len(self._lot)
)
class Lock(_LockImpl, metaclass=Final):
"""A classic `mutex
<https://en.wikipedia.org/wiki/Lock_(computer_science)>`__.
This is a non-reentrant, single-owner lock. Unlike
:class:`threading.Lock`, only the owner of the lock is allowed to release
it.
A :class:`Lock` object can be used as an async context manager; it
blocks on entry but not on exit.
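
    For example, a minimal sketch of protecting a critical section::

        lock = trio.Lock()

        async def critical_section():
            async with lock:  # only one task at a time runs this block
                ...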
"""
class StrictFIFOLock(_LockImpl, metaclass=Final):
r"""A variant of :class:`Lock` where tasks are guaranteed to acquire the
lock in strict first-come-first-served order.
An example of when this is useful is if you're implementing something like
:class:`trio.SSLStream` or an HTTP/2 server using `h2
<https://hyper-h2.readthedocs.io/>`__, where you have multiple concurrent
tasks that are interacting with a shared state machine, and at
unpredictable moments the state machine requests that a chunk of data be
    sent over the network. (For example, when using h2, simply reading incoming
data can occasionally `create outgoing data to send
<https://http2.github.io/http2-spec/#PING>`__.) The challenge is to make
sure that these chunks are sent in the correct order, without being
garbled.
One option would be to use a regular :class:`Lock`, and wrap it around
every interaction with the state machine::
# This approach is sometimes workable but often sub-optimal; see below
async with lock:
state_machine.do_something()
if state_machine.has_data_to_send():
await conn.sendall(state_machine.get_data_to_send())
But this can be problematic. If you're using h2 then *usually* reading
incoming data doesn't create the need to send any data, so we don't want
to force every task that tries to read from the network to sit and wait
a potentially long time for ``sendall`` to finish. And in some situations
this could even potentially cause a deadlock, if the remote peer is
waiting for you to read some data before it accepts the data you're
sending.
:class:`StrictFIFOLock` provides an alternative. We can rewrite our
example like::
# Note: no awaits between when we start using the state machine and
# when we block to take the lock!
state_machine.do_something()
if state_machine.has_data_to_send():
# Notice that we fetch the data to send out of the state machine
# *before* sleeping, so that other tasks won't see it.
chunk = state_machine.get_data_to_send()
async with strict_fifo_lock:
await conn.sendall(chunk)
First we do all our interaction with the state machine in a single
scheduling quantum (notice there are no ``await``\s in there), so it's
automatically atomic with respect to other tasks. And then if and only if
we have data to send, we get in line to send it – and
:class:`StrictFIFOLock` guarantees that each task will send its data in
the same order that the state machine generated it.
Currently, :class:`StrictFIFOLock` is identical to :class:`Lock`,
but (a) this may not always be true in the future, especially if Trio ever
implements `more sophisticated scheduling policies
<https://github.com/python-trio/trio/issues/32>`__, and (b) the above code
is relying on a pretty subtle property of its lock. Using a
:class:`StrictFIFOLock` acts as an executable reminder that you're relying
on this property.
"""
@attr.s(frozen=True)
class _ConditionStatistics:
tasks_waiting = attr.ib()
lock_statistics = attr.ib()
class Condition(AsyncContextManagerMixin, metaclass=Final):
"""A classic `condition variable
<https://en.wikipedia.org/wiki/Monitor_(synchronization)>`__, similar to
:class:`threading.Condition`.
A :class:`Condition` object can be used as an async context manager to
acquire the underlying lock; it blocks on entry but not on exit.
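
    A sketch of the classic wait-for-a-condition pattern (``data`` and the
    function names are illustrative)::

        cond = trio.Condition()
        data = []

        async def consumer():
            async with cond:
                while not data:
                    await cond.wait()
                print("got:", data.pop())

        async def producer():
            async with cond:
                data.append("hello")
                cond.notify()
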
Args:
lock (Lock): the lock object to use. If given, must be a
:class:`trio.Lock`. If None, a new :class:`Lock` will be allocated
and used.
"""
def __init__(self, lock=None):
if lock is None:
lock = Lock()
        if type(lock) is not Lock:
raise TypeError("lock must be a trio.Lock")
self._lock = lock
self._lot = trio.lowlevel.ParkingLot()
def locked(self):
"""Check whether the underlying lock is currently held.
Returns:
bool: True if the lock is held, False otherwise.
"""
return self._lock.locked()
def acquire_nowait(self):
"""Attempt to acquire the underlying lock, without blocking.
Raises:
WouldBlock: if the lock is currently held.
"""
return self._lock.acquire_nowait()
async def acquire(self):
"""Acquire the underlying lock, blocking if necessary."""
await self._lock.acquire()
def release(self):
"""Release the underlying lock."""
self._lock.release()
@enable_ki_protection
async def wait(self):
"""Wait for another task to call :meth:`notify` or
:meth:`notify_all`.
When calling this method, you must hold the lock. It releases the lock
while waiting, and then re-acquires it before waking up.
There is a subtlety with how this method interacts with cancellation:
when cancelled it will block to re-acquire the lock before raising
:exc:`Cancelled`. This may cause cancellation to be less prompt than
expected. The advantage is that it makes code like this work::
async with condition:
await condition.wait()
If we didn't re-acquire the lock before waking up, and :meth:`wait`
were cancelled here, then we'd crash in ``condition.__aexit__`` when
we tried to release the lock we no longer held.
Raises:
RuntimeError: if the calling task does not hold the lock.
"""
if trio.lowlevel.current_task() is not self._lock._owner:
raise RuntimeError("must hold the lock to wait")
self.release()
# NOTE: we go to sleep on self._lot, but we'll wake up on
# self._lock._lot. That's all that's required to acquire a Lock.
try:
await self._lot.park()
        except BaseException:
with trio.CancelScope(shield=True):
await self.acquire()
raise
def notify(self, n=1):
"""Wake one or more tasks that are blocked in :meth:`wait`.
Args:
n (int): The number of tasks to wake.
Raises:
RuntimeError: if the calling task does not hold the lock.
"""
if trio.lowlevel.current_task() is not self._lock._owner:
raise RuntimeError("must hold the lock to notify")
self._lot.repark(self._lock._lot, count=n)
def notify_all(self):
"""Wake all tasks that are currently blocked in :meth:`wait`.
Raises:
RuntimeError: if the calling task does not hold the lock.
"""
if trio.lowlevel.current_task() is not self._lock._owner:
raise RuntimeError("must hold the lock to notify")
self._lot.repark_all(self._lock._lot)
def statistics(self):
r"""Return an object containing debugging information.
Currently the following fields are defined:
* ``tasks_waiting``: The number of tasks blocked on this condition's
:meth:`wait` method.
* ``lock_statistics``: The result of calling the underlying
          :class:`Lock`\'s :meth:`~Lock.statistics` method.
"""
return _ConditionStatistics(
tasks_waiting=len(self._lot), lock_statistics=self._lock.statistics()
)
|