Skip to content
Projeler
Gruplar
Parçacıklar
Yardım
Yükleniyor...
Oturum aç / Kaydol
Gezinmeyi değiştir
C
cpython
Proje
Proje
Ayrıntılar
Etkinlik
Cycle Analytics
Depo (repository)
Depo (repository)
Dosyalar
Kayıtlar (commit)
Dallar (branch)
Etiketler
Katkıda bulunanlar
Grafik
Karşılaştır
Grafikler
Konular (issue)
0
Konular (issue)
0
Liste
Pano
Etiketler
Kilometre Taşları
Birleştirme (merge) Talepleri
0
Birleştirme (merge) Talepleri
0
CI / CD
CI / CD
İş akışları (pipeline)
İşler
Zamanlamalar
Grafikler
Paketler
Paketler
Wiki
Wiki
Parçacıklar
Parçacıklar
Üyeler
Üyeler
Collapse sidebar
Close sidebar
Etkinlik
Grafik
Grafikler
Yeni bir konu (issue) oluştur
İşler
Kayıtlar (commit)
Konu (issue) Panoları
Kenar çubuğunu aç
Batuhan Osman TASKAYA
cpython
Commits
10a428b6
Kaydet (Commit)
10a428b6
authored
Eyl 15, 2018
tarafından
Monson Shao
Kaydeden (commit)
Benjamin Peterson
Eyl 15, 2018
Dosyalara gözat
Seçenekler
Dosyalara Gözat
İndir
Eposta Yamaları
Sade Fark
closes bpo-34515: Support non-ASCII identifiers in lib2to3. (GH-8950)
üst
d2067318
Hide whitespace changes
Inline
Side-by-side
Showing
3 changed files
with
16 additions
and
6 deletions
+16
-6
tokenize.py
Lib/lib2to3/pgen2/tokenize.py
+5
-6
test_parser.py
Lib/lib2to3/tests/test_parser.py
+10
-0
2018-08-27-16-01-22.bpo-34515.S0Irst.rst
...S.d/next/Library/2018-08-27-16-01-22.bpo-34515.S0Irst.rst
+1
-0
No files found.
Lib/lib2to3/pgen2/tokenize.py
Dosyayı görüntüle @
10a428b6
...
@@ -56,7 +56,7 @@ def _combinations(*l):
...
@@ -56,7 +56,7 @@ def _combinations(*l):
Whitespace
=
r'[ \f\t]*'
Whitespace
=
r'[ \f\t]*'
Comment
=
r'#[^\r\n]*'
Comment
=
r'#[^\r\n]*'
Ignore
=
Whitespace
+
any
(
r'\\\r?\n'
+
Whitespace
)
+
maybe
(
Comment
)
Ignore
=
Whitespace
+
any
(
r'\\\r?\n'
+
Whitespace
)
+
maybe
(
Comment
)
Name
=
r'
[a-zA-Z_]\w*
'
Name
=
r'
\w+
'
Binnumber
=
r'0[bB]_?[01]+(?:_[01]+)*'
Binnumber
=
r'0[bB]_?[01]+(?:_[01]+)*'
Hexnumber
=
r'0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?'
Hexnumber
=
r'0[xX]_?[\da-fA-F]+(?:_[\da-fA-F]+)*[lL]?'
...
@@ -107,8 +107,8 @@ ContStr = group(_litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
...
@@ -107,8 +107,8 @@ ContStr = group(_litprefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
PseudoExtras
=
group
(
r'\\\r?\n'
,
Comment
,
Triple
)
PseudoExtras
=
group
(
r'\\\r?\n'
,
Comment
,
Triple
)
PseudoToken
=
Whitespace
+
group
(
PseudoExtras
,
Number
,
Funny
,
ContStr
,
Name
)
PseudoToken
=
Whitespace
+
group
(
PseudoExtras
,
Number
,
Funny
,
ContStr
,
Name
)
tokenprog
,
pseudoprog
,
single3prog
,
double3prog
=
list
(
map
(
tokenprog
,
pseudoprog
,
single3prog
,
double3prog
=
map
(
re
.
compile
,
(
Token
,
PseudoToken
,
Single3
,
Double3
))
)
re
.
compile
,
(
Token
,
PseudoToken
,
Single3
,
Double3
))
_strprefixes
=
(
_strprefixes
=
(
_combinations
(
'r'
,
'R'
,
'f'
,
'F'
)
|
_combinations
(
'r'
,
'R'
,
'f'
,
'F'
)
|
...
@@ -349,7 +349,6 @@ def generate_tokens(readline):
...
@@ -349,7 +349,6 @@ def generate_tokens(readline):
logical line; continuation lines are included.
logical line; continuation lines are included.
"""
"""
lnum
=
parenlev
=
continued
=
0
lnum
=
parenlev
=
continued
=
0
namechars
,
numchars
=
string
.
ascii_letters
+
'_'
,
'0123456789'
contstr
,
needcont
=
''
,
0
contstr
,
needcont
=
''
,
0
contline
=
None
contline
=
None
indents
=
[
0
]
indents
=
[
0
]
...
@@ -451,7 +450,7 @@ def generate_tokens(readline):
...
@@ -451,7 +450,7 @@ def generate_tokens(readline):
spos
,
epos
,
pos
=
(
lnum
,
start
),
(
lnum
,
end
),
end
spos
,
epos
,
pos
=
(
lnum
,
start
),
(
lnum
,
end
),
end
token
,
initial
=
line
[
start
:
end
],
line
[
start
]
token
,
initial
=
line
[
start
:
end
],
line
[
start
]
if
initial
in
numchar
s
or
\
if
initial
in
string
.
digit
s
or
\
(
initial
==
'.'
and
token
!=
'.'
):
# ordinary number
(
initial
==
'.'
and
token
!=
'.'
):
# ordinary number
yield
(
NUMBER
,
token
,
spos
,
epos
,
line
)
yield
(
NUMBER
,
token
,
spos
,
epos
,
line
)
elif
initial
in
'
\r\n
'
:
elif
initial
in
'
\r\n
'
:
...
@@ -501,7 +500,7 @@ def generate_tokens(readline):
...
@@ -501,7 +500,7 @@ def generate_tokens(readline):
yield
stashed
yield
stashed
stashed
=
None
stashed
=
None
yield
(
STRING
,
token
,
spos
,
epos
,
line
)
yield
(
STRING
,
token
,
spos
,
epos
,
line
)
elif
initial
in
namechars
:
# ordinary name
elif
initial
.
isidentifier
():
# ordinary name
if
token
in
(
'async'
,
'await'
):
if
token
in
(
'async'
,
'await'
):
if
async_def
:
if
async_def
:
yield
(
ASYNC
if
token
==
'async'
else
AWAIT
,
yield
(
ASYNC
if
token
==
'async'
else
AWAIT
,
...
...
Lib/lib2to3/tests/test_parser.py
Dosyayı görüntüle @
10a428b6
...
@@ -529,6 +529,16 @@ class TestSetLiteral(GrammarTest):
...
@@ -529,6 +529,16 @@ class TestSetLiteral(GrammarTest):
self
.
validate
(
"""x = {2, 3, 4,}"""
)
self
.
validate
(
"""x = {2, 3, 4,}"""
)
# Adapted from Python 3's Lib/test/test_unicode_identifiers.py and
# Lib/test/test_tokenize.py:TokenizeTest.test_non_ascii_identifiers
class
TestIdentfier
(
GrammarTest
):
def
test_non_ascii_identifiers
(
self
):
self
.
validate
(
"Örter = 'places'
\n
grün = 'green'"
)
self
.
validate
(
"蟒 = a蟒 = 锦蛇 = 1"
)
self
.
validate
(
"µ = aµ = µµ = 1"
)
self
.
validate
(
"𝔘𝔫𝔦𝔠𝔬𝔡𝔢 = a_𝔘𝔫𝔦𝔠𝔬𝔡𝔢 = 1"
)
class
TestNumericLiterals
(
GrammarTest
):
class
TestNumericLiterals
(
GrammarTest
):
def
test_new_octal_notation
(
self
):
def
test_new_octal_notation
(
self
):
self
.
validate
(
"""0o7777777777777"""
)
self
.
validate
(
"""0o7777777777777"""
)
...
...
Misc/NEWS.d/next/Library/2018-08-27-16-01-22.bpo-34515.S0Irst.rst
0 → 100644
Dosyayı görüntüle @
10a428b6
Fix parsing non-ASCII identifiers in :mod:`lib2to3.pgen2.tokenize` (PEP 3131).
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment