Batuhan Osman TASKAYA / cpython · Commits

Commit 9b8d24b1, authored Mar 24, 2009 by Benjamin Peterson
reuse tokenize.detect_encoding in linecache instead of a custom solution
patch by Victor Stinner #4016
parent a8abe863
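For context: tokenize.detect_encoding, the API this commit reuses, takes a readline callable over raw bytes and returns the detected source encoding together with the lines it consumed while sniffing. A minimal sketch of its behavior (the sample bytes are illustrative):

    import io
    import tokenize

    # Raw bytes of a source file carrying a PEP 263 coding cookie.
    source = b"# -*- coding: latin-1 -*-\nname = 'caf\xe9'\n"

    encoding, consumed = tokenize.detect_encoding(io.BytesIO(source).readline)
    print(encoding)  # the cookie's encoding (normalized, e.g. 'iso-8859-1')
    print(consumed)  # the undecoded header line(s) read while sniffing

A UTF-8 BOM is reported as 'utf-8-sig', and a cookie naming an unknown codec raises SyntaxError, so callers get either a usable encoding or a clear failure.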
Showing 2 changed files with 8 additions and 23 deletions (+8 -23)

Lib/linecache.py  +4 -20
Lib/tokenize.py   +4 -3
Lib/linecache.py

@@ -7,7 +7,7 @@ that name.
 import sys
 import os
-import re
+import tokenize
 
 __all__ = ["getline", "clearcache", "checkcache"]

@@ -120,27 +120,11 @@ def updatecache(filename, module_globals=None):
                 pass
         else:
             # No luck
 ##          print '*** Cannot stat', filename, ':', msg
             return []
 ##    print("Refreshing cache for %s..." % fullname)
     try:
-        fp = open(fullname, 'rU')
+        with open(fullname, 'rb') as fp:
+            coding, line = tokenize.detect_encoding(fp.readline)
+        with open(fullname, 'r', encoding=coding) as fp:
             lines = fp.readlines()
-        fp.close()
     except Exception as msg:
 ##      print '*** Cannot open', fullname, ':', msg
         return []
-    coding = "utf-8"
-    for line in lines[:2]:
-        m = re.search(r"coding[:=]\s*([-\w.]+)", line)
-        if m:
-            coding = m.group(1)
-            break
-    try:
-        lines = [line if isinstance(line, str) else str(line, coding)
-                 for line in lines]
-    except:
-        pass  # Hope for the best
     size, mtime = stat.st_size, stat.st_mtime
     cache[filename] = size, mtime, lines, fullname
     return lines
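The net effect of the linecache change, restated as a standalone sketch: open the file in binary so detect_encoding can inspect the BOM and coding cookie, then reopen it in text mode with the detected encoding. The helper name read_source_lines is hypothetical, not part of the patch.

    import tokenize

    def read_source_lines(fullname):
        # Pass 1: sniff the encoding from the raw bytes (BOM / coding cookie).
        with open(fullname, 'rb') as fp:
            coding, _ = tokenize.detect_encoding(fp.readline)
        # Pass 2: decode the whole file with the encoding found above.
        with open(fullname, 'r', encoding=coding) as fp:
            return fp.readlines()

Unlike the removed two-line regex scan, this path also honors a UTF-8 BOM and fails loudly on cookies naming unknown codecs, rather than silently hoping for the best.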
Lib/tokenize.py

@@ -27,7 +27,6 @@ __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
 import re, string, sys
 from token import *
 from codecs import lookup, BOM_UTF8
-from itertools import chain, repeat
 cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
 import token

@@ -327,13 +326,15 @@ def tokenize(readline):
     which tells you which encoding was used to decode the bytes stream.
     """
     encoding, consumed = detect_encoding(readline)
-    def readline_generator():
+    def readline_generator(consumed):
+        for line in consumed:
+            yield line
         while True:
             try:
                 yield readline()
             except StopIteration:
                 return
-    chained = chain(consumed, readline_generator())
+    chained = readline_generator(consumed)
     return _tokenize(chained.__next__, encoding)
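The tokenize change folds the itertools.chain splice into one generator that first replays the already-consumed header lines and then polls the live readline, which is why the chain import can be dropped. A minimal sketch of both shapes, with hypothetical names (buffered and live_readline stand in for consumed and readline):

    from itertools import chain

    def spliced(buffered, live_readline):
        # Old shape: chain the buffered lines onto a polling generator.
        def rest():
            while True:
                try:
                    yield live_readline()
                except StopIteration:
                    return
        return chain(buffered, rest())

    def replayed(buffered, live_readline):
        # New shape: a single generator replays the buffer, then reads live.
        for line in buffered:
            yield line
        while True:
            try:
                yield live_readline()
            except StopIteration:
                return

Given the same buffered lines and the same readline callable, both produce identical sequences; the second simply needs no import and keeps the replay logic next to the polling loop.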