cpython / Commits / f4b44fa6

Commit f4b44fa6, authored Apr 06, 1998 by Guido van Rossum
Tim's latest version (supports old and new tokenize modules)
Parent: 5d97ebf2
Showing 3 changed files with 216 additions and 63 deletions.
Lib/tabnanny.py            +72 −21
Tools/idle/tabnanny.py     +72 −21
Tools/scripts/tabnanny.py  +72 −21
Lib/tabnanny.py · view file @ f4b44fa6
-#! /home/guido/python/src/sparc/python
+#! /usr/bin/env python
 
 """The Tab Nanny despises ambiguous indentation.  She knows no mercy.
-
-CAUTION: this version requires Guido's "NL" patch to lib/tokenize.py,
-posted 30-Mar-98.  This version will not run at all with an unpatched
-tokenize (it will raise AttributeError while loading), while previous
-versions will run incorrectly with the patched tokenize.
 """
 
-# Released to the public domain, by Tim Peters, 30 March 1998.
+# Released to the public domain, by Tim Peters, 4 April 1998.
 
-__version__ = "2"
+__version__ = "3"
 
 import os
 import sys
 import getopt
 import tokenize
 
-try:
-    tokenize.NL
-except AttributeError:
-    raise AttributeError, "Sorry, I need a version of tokenize.py " \
-                          "that supports the NL pseudo-token."
-
 verbose = 0
 
 def main():

@@ -235,15 +222,22 @@ def format_witnesses(w):
         prefix = prefix + "s"
     return prefix + " " + string.join(firsts, ', ')
 
-indents = []
-check_equal = 0
+# The collection of globals, the reset_globals() function, and the
+# tokeneater() function, depend on which version of tokenize is
+# in use.
 
-def reset_globals():
-    global indents, check_equal
-    check_equal = 0
-    indents = [Whitespace("")]
+if hasattr(tokenize, 'NL'):
+    # take advantage of Guido's patch!
 
-def tokeneater(type, token, start, end, line,
-               INDENT=tokenize.INDENT,
-               DEDENT=tokenize.DEDENT,
-               NEWLINE=tokenize.NEWLINE,
+    indents = []
+    check_equal = 0
+
+    def reset_globals():
+        global indents, check_equal
+        check_equal = 0
+        indents = [Whitespace("")]
+
+    def tokeneater(type, token, start, end, line,
+                   INDENT=tokenize.INDENT,
+                   DEDENT=tokenize.DEDENT,
+                   NEWLINE=tokenize.NEWLINE,

@@ -285,7 +279,7 @@ def tokeneater(type, token, start, end, line,
     elif check_equal:
         # this is the first "real token" following a NEWLINE, so it
-        # must be the first token of the next program statment, or an
+        # must be the first token of the next program statement, or an
         # ENDMARKER; the "line" argument exposes the leading whitespace
         # for this statement; in the case of ENDMARKER, line is an empty
         # string, so will properly match the empty string with which the

@@ -297,6 +291,63 @@ def tokeneater(type, token, start, end, line,
             msg = "indent not equal e.g. " + format_witnesses(witness)
             raise NannyNag(start[0], msg, line)
 
+else:
+    # unpatched version of tokenize
+
+    nesting_level = 0
+    indents = []
+    check_equal = 0
+
+    def reset_globals():
+        global nesting_level, indents, check_equal
+        nesting_level = check_equal = 0
+        indents = [Whitespace("")]
+
+    def tokeneater(type, token, start, end, line,
+                   INDENT=tokenize.INDENT,
+                   DEDENT=tokenize.DEDENT,
+                   NEWLINE=tokenize.NEWLINE,
+                   COMMENT=tokenize.COMMENT,
+                   OP=tokenize.OP):
+        global nesting_level, indents, check_equal
+
+        if type == INDENT:
+            check_equal = 0
+            thisguy = Whitespace(token)
+            if not indents[-1].less(thisguy):
+                witness = indents[-1].not_less_witness(thisguy)
+                msg = "indent not greater e.g. " + format_witnesses(witness)
+                raise NannyNag(start[0], msg, line)
+            indents.append(thisguy)
+
+        elif type == DEDENT:
+            del indents[-1]
+
+        elif type == NEWLINE:
+            if nesting_level == 0:
+                check_equal = 1
+
+        elif type == COMMENT:
+            pass
+
+        elif check_equal:
+            check_equal = 0
+            thisguy = Whitespace(line)
+            if not indents[-1].equal(thisguy):
+                witness = indents[-1].not_equal_witness(thisguy)
+                msg = "indent not equal e.g. " + format_witnesses(witness)
+                raise NannyNag(start[0], msg, line)
+
+        if type == OP and token in ('{', '[', '('):
+            nesting_level = nesting_level + 1
+        elif type == OP and token in ('}', ']', ')'):
+            if nesting_level == 0:
+                raise NannyNag(start[0],
+                               "unbalanced bracket '" + token + "'",
+                               line)
+            nesting_level = nesting_level - 1
 
 if __name__ == '__main__':
     main()
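The headline change is the hasattr(tokenize, 'NL') switch. Version 2 refused to load against an unpatched tokenize (its try/except re-raised AttributeError); version 3 instead detects at import time which tokenize it is running against and defines a matching reset_globals()/tokeneater() pair for each case. Below is a minimal sketch of that feature-detection pattern in modern syntax; describe() is a hypothetical stand-in for the commit's two module-level definitions, not code from the commit.

import tokenize

# Pick an implementation at import time based on what the running
# tokenize module supports, instead of failing on older versions.
if hasattr(tokenize, 'NL'):
    def describe():
        # NL-aware path: NL marks line breaks that do not end a
        # logical line, so the checker can lean on the tokenizer.
        return "NL-aware path (tokenize.NL == %d)" % tokenize.NL
else:
    def describe():
        # Fallback path: no NL pseudo-token, so bracket nesting has
        # to be tracked by hand (see the diff's else-branch).
        return "fallback path, tracking bracket nesting manually"

if __name__ == '__main__':
    print(describe())

On any modern Python this prints the NL-aware message, since tokenize.NL has long been part of the stdlib module.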
Tools/idle/tabnanny.py · view file @ f4b44fa6

(Same change as Lib/tabnanny.py; this copy's diff is identical: +72 −21.)
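The fallback tokeneater in the diff above only arms the indentation check (check_equal = 1) on a NEWLINE seen at nesting_level == 0, because a physical newline inside (), [], or {} does not end the logical line, and its leading whitespace is continuation alignment rather than indentation. A toy re-creation of that bookkeeping follows; bracket_depths is a hypothetical name, and unlike the real tokenizer this sketch ignores brackets inside strings and comments.

OPENERS, CLOSERS = '([{', ')]}'

def bracket_depths(text):
    # Yield (line_number, bracket_depth_at_start_of_line).  Only lines
    # that start at depth 0 begin a new logical line whose leading
    # whitespace should be indentation-checked.
    depth = 0
    for lineno, line in enumerate(text.splitlines(), 1):
        yield lineno, depth
        for ch in line:
            if ch in OPENERS:
                depth = depth + 1
            elif ch in CLOSERS:
                depth = depth - 1

src = "x = [1,\n     2,\n     3]\ny = 0\n"
for lineno, depth in bracket_depths(src):
    print(lineno, "check indentation" if depth == 0 else "skip (inside brackets)")

Lines 2 and 3 of the sample start inside the open list, so they are skipped; lines 1 and 4 start at depth 0 and get checked.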
Tools/scripts/tabnanny.py · view file @ f4b44fa6

(Same change as Lib/tabnanny.py; this copy's diff is identical: +72 −21.)
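What the nanny actually nags about is whitespace that compares differently under different tab sizes. A quick way to see it with today's stdlib tabnanny, which descends from this script: the file written below indents one line with a tab and its sibling with eight spaces, equivalent only when a tab is worth exactly eight columns. Depending on the Python version, expect either a NannyNag-style report of the offending line or an "indentation error" relayed from the tokenizer; this invocation is illustrative, not part of the commit.

import os
import subprocess
import sys
import tempfile

# One line indented with a tab, its sibling with eight spaces:
# ambiguous, since they agree only at a tab size of 8.
src = "if 1:\n\tx = 1\n        y = 2\n"

fd, path = tempfile.mkstemp(suffix=".py")
try:
    with os.fdopen(fd, "w") as f:
        f.write(src)
    # -v makes the stdlib tabnanny report what it finds.
    subprocess.run([sys.executable, "-m", "tabnanny", "-v", path])
finally:
    os.remove(path)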