Batuhan Osman TASKAYA / cpython / Commits

Commit 79bf8998 authored Feb 18, 2014 by Terry Jan Reedy

Merge with 3.3

Parents: 5a8e5796 5b8d2c3a
Showing 3 changed files with 27 additions and 13 deletions:

  Lib/test/test_tokenize.py  +13 -0
  Lib/tokenize.py            +11 -13
  Misc/NEWS                  +3  -0
Lib/test/test_tokenize.py

@@ -1165,6 +1165,19 @@ class UntokenizeTest(TestCase):
             'start (1,3) precedes previous end (2,2)')
         self.assertRaises(ValueError, u.add_whitespace, (2,1))
 
+    def test_iter_compat(self):
+        u = Untokenizer()
+        token = (NAME, 'Hello')
+        tokens = [(ENCODING, 'utf-8'), token]
+        u.compat(token, iter([]))
+        self.assertEqual(u.tokens, ["Hello "])
+        u = Untokenizer()
+        self.assertEqual(u.untokenize(iter([token])), 'Hello ')
+        u = Untokenizer()
+        self.assertEqual(u.untokenize(iter(tokens)), 'Hello ')
+        self.assertEqual(u.encoding, 'utf-8')
+        self.assertEqual(untokenize(iter(tokens)), b'Hello ')
+
 __test__ = {"doctests" : doctests, 'decistmt': decistmt}
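The new test pins down the behavior being fixed: untokenize() must emit the first 2-tuple token even when the input is an iterator rather than a list. A minimal sketch of that guarantee, assuming Python 3.4 with this patch applied (NAME reaches tokenize via its `from token import *`):

    from tokenize import untokenize, NAME

    token = (NAME, 'Hello')

    # Before the fix, untokenize()'s own loop consumed the first token and
    # compat() never re-emitted it, so iterator input produced ''.
    # With the fix, list and iterator input agree:
    assert untokenize(iter([token])) == 'Hello '
    assert untokenize([token]) == 'Hello '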
Lib/tokenize.py

@@ -25,12 +25,14 @@ __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
                'Skip Montanaro, Raymond Hettinger, Trent Nelson, '
                'Michael Foord')
 import builtins
-import re
-import sys
-from token import *
 from codecs import lookup, BOM_UTF8
 import collections
 from io import TextIOWrapper
+from itertools import chain
+import re
+import sys
+from token import *
 
 cookie_re = re.compile(r'^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)', re.ASCII)
 blank_re = re.compile(br'^[ \t\f]*(?:[#\r\n]|$)', re.ASCII)
@@ -237,9 +239,10 @@ class Untokenizer:
             self.tokens.append(" " * col_offset)
 
     def untokenize(self, iterable):
-        for t in iterable:
+        it = iter(iterable)
+        for t in it:
             if len(t) == 2:
-                self.compat(t, iterable)
+                self.compat(t, it)
                 break
             tok_type, token, start, end, line = t
             if tok_type == ENCODING:
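The `it = iter(iterable)` line is the crux of this hunk: iter() returns an iterator unchanged, so the for-loop and the compat() call now share a single cursor whether the caller passed a list or a generator. A quick standalone illustration of that semantics (not part of the patch):

    items = ['a', 'b', 'c']
    it = iter(items)
    assert iter(it) is it            # iter() on an iterator is the identity
    next(it)                         # the for-loop consumes 'a'
    assert list(it) == ['b', 'c']    # a consumer handed `it` resumes here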
@@ -254,17 +257,12 @@ class Untokenizer:
         return "".join(self.tokens)
 
     def compat(self, token, iterable):
-        startline = False
         indents = []
         toks_append = self.tokens.append
-        toknum, tokval = token
-        if toknum in (NAME, NUMBER):
-            tokval += ' '
-        if toknum in (NEWLINE, NL):
-            startline = True
+        startline = token[0] in (NEWLINE, NL)
         prevstring = False
-        for tok in iterable:
+        for tok in chain([token], iterable):
             toknum, tokval = tok[:2]
             if toknum == ENCODING:
                 self.encoding = tokval
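Correspondingly, chain([token], iterable) puts the first token, already consumed by untokenize()'s loop, back at the front of the stream, so compat() handles every token in one uniform loop instead of special-casing the first. A small illustration of the put-back idiom, with hypothetical token values:

    from itertools import chain

    first = ('NAME', 'Hello')          # token already taken off the iterator
    rest = iter([('NEWLINE', '\n')])
    # One loop now sees the full stream again, first token included.
    assert list(chain([first], rest)) == [('NAME', 'Hello'), ('NEWLINE', '\n')]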
Misc/NEWS

@@ -40,6 +40,9 @@ Library
 - Issue #17671: Fixed a crash when use non-initialized io.BufferedRWPair.
   Based on patch by Stephen Tu.
 
+- Issue #8478: Untokenizer.compat processes first token from iterator input.
+  Patch based on lines from Georg Brandl, Eric Snow, and Gareth Rees.
+
 - Issue #20594: Avoid name clash with the libc function posix_close.
 
 - Issue #19856: shutil.move() failed to move a directory to other directory