cpython
Commit 7cbb78cd authored Feb 17, 2015 by Victor Stinner
Merge 3.4 (asyncio)
Parents: 8391b728, 4088ad9d
Showing 4 changed files with 60 additions and 66 deletions.
Lib/asyncio/base_subprocess.py            +2   -0
Lib/asyncio/queues.py                     +43  -59
Lib/test/test_asyncio/test_queues.py      +5   -5
Lib/test/test_asyncio/test_subprocess.py  +10  -2
Lib/asyncio/base_subprocess.py
@@ -57,6 +57,8 @@ class BaseSubprocessTransport(transports.SubprocessTransport):
         info.append('pid=%s' % self._pid)
         if self._returncode is not None:
             info.append('returncode=%s' % self._returncode)
+        else:
+            info.append('running')

         stdin = self._pipes.get(0)
         if stdin is not None:
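Note (not part of the commit): the base_subprocess.py hunk only makes the transport repr say 'running' while the child has not exited yet, instead of omitting any status. Below is a hypothetical, minimal sketch of observing that repr with the 3.4-era coroutine style; the ExitWaiter protocol class and the sleep command are illustrative assumptions, not taken from the diff.

# Hypothetical sketch: print the subprocess transport repr before and after
# the child exits; expect '... pid=<n> running ...' first, then
# '... pid=<n> returncode=0 ...'.
import asyncio

class ExitWaiter(asyncio.SubprocessProtocol):
    def __init__(self, exited):
        self.exited = exited
    def process_exited(self):
        self.exited.set_result(None)

@asyncio.coroutine
def main(loop):
    exited = asyncio.Future(loop=loop)
    transport, protocol = yield from loop.subprocess_exec(
        lambda: ExitWaiter(exited), 'sleep', '0.2')
    print(transport)   # repr includes 'running' while the child is alive
    yield from exited
    print(transport)   # repr now includes 'returncode=0'
    transport.close()

loop = asyncio.get_event_loop()
loop.run_until_complete(main(loop))
loop.close()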
Lib/asyncio/queues.py
"""Queues"""
__all__
=
[
'Queue'
,
'PriorityQueue'
,
'LifoQueue'
,
'
JoinableQueue
'
,
'
QueueFull'
,
'QueueEmpty
'
]
__all__
=
[
'Queue'
,
'PriorityQueue'
,
'LifoQueue'
,
'
QueueFull'
,
'QueueEmpty
'
,
'
JoinableQueue
'
]
import
collections
import
heapq
...
...
@@ -49,6 +49,9 @@ class Queue:
         self._getters = collections.deque()
         # Pairs of (item, Future).
         self._putters = collections.deque()
+        self._unfinished_tasks = 0
+        self._finished = locks.Event(loop=self._loop)
+        self._finished.set()
         self._init(maxsize)

     def _init(self, maxsize):
@@ -59,6 +62,8 @@ class Queue:

     def _put(self, item):
         self._queue.append(item)
+        self._unfinished_tasks += 1
+        self._finished.clear()

     def __repr__(self):
         return '<{} at {:#x} {}>'.format(
@@ -75,6 +80,8 @@ class Queue:
             result += ' _getters[{}]'.format(len(self._getters))
         if self._putters:
             result += ' _putters[{}]'.format(len(self._putters))
+        if self._unfinished_tasks:
+            result += ' tasks={}'.format(self._unfinished_tasks)
         return result

     def _consume_done_getters(self):
@@ -126,9 +133,6 @@ class Queue:
                 'queue non-empty, why are getters waiting?')

             getter = self._getters.popleft()
-
-            # Use _put and _get instead of passing item straight to getter, in
-            # case a subclass has logic that must run (e.g. JoinableQueue).
             self._put(item)

             # getter cannot be cancelled, we just removed done getters
@@ -154,9 +158,6 @@ class Queue:
                 'queue non-empty, why are getters waiting?')

             getter = self._getters.popleft()
-
-            # Use _put and _get instead of passing item straight to getter, in
-            # case a subclass has logic that must run (e.g. JoinableQueue).
             self._put(item)

             # getter cannot be cancelled, we just removed done getters
@@ -219,6 +220,38 @@ class Queue:
         else:
             raise QueueEmpty

+    def task_done(self):
+        """Indicate that a formerly enqueued task is complete.
+
+        Used by queue consumers. For each get() used to fetch a task,
+        a subsequent call to task_done() tells the queue that the processing
+        on the task is complete.
+
+        If a join() is currently blocking, it will resume when all items have
+        been processed (meaning that a task_done() call was received for every
+        item that had been put() into the queue).
+
+        Raises ValueError if called more times than there were items placed in
+        the queue.
+        """
+        if self._unfinished_tasks <= 0:
+            raise ValueError('task_done() called too many times')
+        self._unfinished_tasks -= 1
+        if self._unfinished_tasks == 0:
+            self._finished.set()
+
+    @coroutine
+    def join(self):
+        """Block until all items in the queue have been gotten and processed.
+
+        The count of unfinished tasks goes up whenever an item is added to the
+        queue. The count goes down whenever a consumer calls task_done() to
+        indicate that the item was retrieved and all work on it is complete.
+        When the count of unfinished tasks drops to zero, join() unblocks.
+        """
+        if self._unfinished_tasks > 0:
+            yield from self._finished.wait()
+

 class PriorityQueue(Queue):
     """A subclass of Queue; retrieves entries in priority order (lowest first).
@@ -249,54 +282,5 @@ class LifoQueue(Queue):
         return self._queue.pop()


-class JoinableQueue(Queue):
-    """A subclass of Queue with task_done() and join() methods."""
-
-    def __init__(self, maxsize=0, *, loop=None):
-        super().__init__(maxsize=maxsize, loop=loop)
-        self._unfinished_tasks = 0
-        self._finished = locks.Event(loop=self._loop)
-        self._finished.set()
-
-    def _format(self):
-        result = Queue._format(self)
-        if self._unfinished_tasks:
-            result += ' tasks={}'.format(self._unfinished_tasks)
-        return result
-
-    def _put(self, item):
-        super()._put(item)
-        self._unfinished_tasks += 1
-        self._finished.clear()
-
-    def task_done(self):
-        """Indicate that a formerly enqueued task is complete.
-
-        Used by queue consumers. For each get() used to fetch a task,
-        a subsequent call to task_done() tells the queue that the processing
-        on the task is complete.
-
-        If a join() is currently blocking, it will resume when all items have
-        been processed (meaning that a task_done() call was received for every
-        item that had been put() into the queue).
-
-        Raises ValueError if called more times than there were items placed in
-        the queue.
-        """
-        if self._unfinished_tasks <= 0:
-            raise ValueError('task_done() called too many times')
-        self._unfinished_tasks -= 1
-        if self._unfinished_tasks == 0:
-            self._finished.set()
-
-    @coroutine
-    def join(self):
-        """Block until all items in the queue have been gotten and processed.
-
-        The count of unfinished tasks goes up whenever an item is added to the
-        queue. The count goes down whenever a consumer thread calls task_done()
-        to indicate that the item was retrieved and all work on it is complete.
-        When the count of unfinished tasks drops to zero, join() unblocks.
-        """
-        if self._unfinished_tasks > 0:
-            yield from self._finished.wait()
+
+JoinableQueue = Queue
+"""Deprecated alias for Queue."""
Lib/test/test_asyncio/test_queues.py
@@ -408,14 +408,14 @@ class PriorityQueueTests(_QueueTestBase):
         self.assertEqual([1, 2, 3], items)


-class JoinableQueueTests(_QueueTestBase):
+class QueueJoinTests(_QueueTestBase):

     def test_task_done_underflow(self):
-        q = asyncio.JoinableQueue(loop=self.loop)
+        q = asyncio.Queue(loop=self.loop)
         self.assertRaises(ValueError, q.task_done)

     def test_task_done(self):
-        q = asyncio.JoinableQueue(loop=self.loop)
+        q = asyncio.Queue(loop=self.loop)
         for i in range(100):
             q.put_nowait(i)

@@ -452,7 +452,7 @@ class JoinableQueueTests(_QueueTestBase):
         self.loop.run_until_complete(asyncio.wait(tasks, loop=self.loop))

     def test_join_empty_queue(self):
-        q = asyncio.JoinableQueue(loop=self.loop)
+        q = asyncio.Queue(loop=self.loop)

         # Test that a queue join()s successfully, and before anything else
         # (done twice for insurance).
@@ -465,7 +465,7 @@ class JoinableQueueTests(_QueueTestBase):
         self.loop.run_until_complete(join())

     def test_format(self):
-        q = asyncio.JoinableQueue(loop=self.loop)
+        q = asyncio.Queue(loop=self.loop)
         self.assertEqual(q._format(), 'maxsize=0')

         q._unfinished_tasks = 2
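Note (not part of the commit): because the module now ends with JoinableQueue = Queue, test code (and user code) that still spells out JoinableQueue keeps running against the merged Queue; the rename to QueueJoinTests above is cosmetic. A hypothetical one-line check, assuming the post-merge asyncio of this era:

# Hypothetical check: the deprecated alias resolves to the same class.
import asyncio
assert asyncio.JoinableQueue is asyncio.Queue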
Lib/test/test_asyncio/test_subprocess.py
@@ -355,11 +355,19 @@ class SubprocessMixin:
             create = self.loop.subprocess_exec(asyncio.SubprocessProtocol,
                                                *PROGRAM_BLOCKED)
             transport, protocol = yield from create
+
+            kill_called = False
+            def kill():
+                nonlocal kill_called
+                kill_called = True
+                orig_kill()
+
             proc = transport.get_extra_info('subprocess')
-            proc.kill = mock.Mock()
+            orig_kill = proc.kill
+            proc.kill = kill
             returncode = transport.get_returncode()
             transport.close()
-            return (returncode, proc.kill.called)
+            return (returncode, kill_called)

         # Ignore "Close running child process: kill ..." log
         with test_utils.disable_logger():
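Note (not part of the commit): the test change replaces proc.kill = mock.Mock() with a small wrapper because transport.close() must still really kill the blocked child; a bare Mock would swallow the call and leave the process running. Below is a standalone, hypothetical sketch of that wrap-and-record pattern outside asyncio; the sleep command stands in for PROGRAM_BLOCKED and the function name is illustrative.

# Hypothetical sketch: record that kill() was called while still delegating
# to the real Popen.kill(), mirroring the pattern used in the updated test.
import subprocess

def close_and_check():
    proc = subprocess.Popen(['sleep', '60'])
    kill_called = False
    orig_kill = proc.kill

    def kill():
        nonlocal kill_called
        kill_called = True
        orig_kill()              # the child really gets killed

    proc.kill = kill             # instance attribute shadows the bound method
    proc.kill()
    proc.wait()
    return kill_called

print(close_and_check())         # True, and no leftover child process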