cpython commit b2c7affb
authored Nov 23, 2002 by Martin v. Löwis
Merge with bsddb3 2002.11.23.10.42.36
parent a797d815
Showing 6 changed files with 100 additions and 47 deletions.
Lib/bsddb/dbtables.py              +12  -11
Lib/bsddb/dbutils.py               +21  -15
Lib/bsddb/test/test_dbshelve.py     +4   -4
Lib/bsddb/test/test_dbtables.py    +35   -0
Lib/bsddb/test/test_thread.py      +12   -9
Modules/_bsddb.c                   +16   -8
Lib/bsddb/dbtables.py
#-----------------------------------------------------------------------
#
# Copyright (C) 2000, 2001 by Autonomous Zone Industries
# Copyright (C) 2002 Gregory P. Smith
#
# License: This is free software. You may use this software for any
# purpose including modification/redistribution, so long as
...
...
@@ -54,6 +55,13 @@ class PrefixCond(Cond):
     def __call__(self, s):
         return s[:len(self.prefix)] == self.prefix

+class PostfixCond(Cond):
+    """Acts as a condition function for matching a string postfix"""
+    def __init__(self, postfix):
+        self.postfix = postfix
+    def __call__(self, s):
+        return s[-len(self.postfix):] == self.postfix
+
 class LikeCond(Cond):
     """
     Acts as a function that will match using an SQL 'LIKE' style
...
...
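The PostfixCond class added above follows the same callable-condition pattern as the existing PrefixCond: each condition is a small object whose __call__ takes a column value and returns a boolean, so bsdTableDB.Select can apply an arbitrary matcher per column. A minimal standalone sketch of that pattern (illustration only, not the bsddb code itself):

    # Illustrative stand-in for the dbtables condition classes.
    class Cond:
        """Base class: a condition is just a callable over the column value."""
        def __call__(self, s):
            return True

    class PostfixCond(Cond):
        """Matches when the value ends with the given suffix."""
        def __init__(self, postfix):
            self.postfix = postfix
        def __call__(self, s):
            return s[-len(self.postfix):] == self.postfix

    ends_with_log = PostfixCond(".log")
    print [name for name in ["a.log", "b.txt", "c.log"] if ends_with_log(name)]
    # -> ['a.log', 'c.log']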
@@ -523,17 +531,10 @@ class bsdTableDB :
                     # if no condition was specified or the condition
                     # succeeds, add row to our match list.
                     if not condition or condition(data):
-                        # only create new entries in matcing_rowids on
-                        # the first pass, otherwise reject the
-                        # rowid as it must not have matched
-                        # the previous passes
-                        if column_num == 0:
-                            if not matching_rowids.has_key(rowid):
-                                matching_rowids[rowid] = {}
-                            if savethiscolumndata:
-                                matching_rowids[rowid][column] = data
-                        else:
-                            rejected_rowids[rowid] = rowid
+                        if not matching_rowids.has_key(rowid):
+                            matching_rowids[rowid] = {}
+                        if savethiscolumndata:
+                            matching_rowids[rowid][column] = data
                     else:
                         if matching_rowids.has_key(rowid):
                             del matching_rowids[rowid]
...
...
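The block replaced above matters because Select evaluates conditions one column at a time: a rowid stays in matching_rowids only if it passes every column's pass, and a single rejection removes it for good. A rough, simplified sketch of that intersection logic (plain dictionaries and illustrative names, not the actual dbtables code):

    # Simplified illustration of the per-column intersection in __Select:
    # a rowid counts as a match only if it satisfies the condition for every
    # column that has one, and one rejection removes it permanently.
    rows = {
        '0001': {'a': 'A',  'd': 'D'},
        '0002': {'a': 'A',  'd': '-D'},
        '0003': {'a': '-A', 'd': '-D'},
    }
    conditions = {
        'a': lambda s: s == 'A',        # like ExactCond('A')
        'd': lambda s: s[:1] == '-',    # like PrefixCond('-')
    }

    matching_rowids = {}
    rejected_rowids = {}
    for column, condition in conditions.items():
        for rowid, data in rows.items():
            if rowid in rejected_rowids:
                continue
            if condition(data[column]):
                matching_rowids.setdefault(rowid, {})[column] = data[column]
            else:
                matching_rowids.pop(rowid, None)
                rejected_rowids[rowid] = rowid

    print sorted(matching_rowids)   # only '0002' satisfies both conditions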
Lib/bsddb/dbutils.py
#------------------------------------------------------------------------
#
# In my performance tests, using this (as in dbtest.py test4) is
# slightly slower than simply compiling _db.c with MYDB_THREAD
# undefined to prevent multithreading support in the C module.
# Using NoDeadlockDb also prevent deadlocks from mutliple processes
# accessing the same database.
#
# Copyright (C) 2000 Autonomous Zone Industries
#
# License: This is free software. You may use this software for any
...
...
@@ -18,7 +12,7 @@
 # Author: Gregory P. Smith <greg@electricrain.com>
 #
 # Note: I don't know how useful this is in reality since when a
-#       DBDeadlockError happens the current transaction is supposed to be
+#       DBLockDeadlockError happens the current transaction is supposed to be
 #       aborted.  If it doesn't then when the operation is attempted again
 #       the deadlock is still happening...
 #       --Robin
...
...
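Robin's note above points at a limitation worth keeping in mind: retrying a single call is not enough when the deadlock happened inside a transaction, because the whole transaction has to be aborted and restarted before its locks are released. A hedged sketch of that abort-and-retry pattern, using the DBEnv transaction API of this tree (the helper itself is hypothetical and not part of this commit):

    # Hypothetical helper illustrating Robin's point: abort the whole
    # transaction on deadlock, then retry it from the top.
    from bsddb import db

    def run_in_txn(dbenv, operation, max_retries=12):
        """Run operation(txn) inside a transaction, retrying on deadlock."""
        for attempt in range(max_retries):
            txn = dbenv.txn_begin()
            try:
                result = operation(txn)
                txn.commit()
                return result
            except db.DBLockDeadlockError:
                txn.abort()     # release the locks held by this transaction
                if attempt == max_retries - 1:
                    raise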
@@ -34,35 +28,47 @@ from time import sleep
 _sleep = sleep
 del sleep

-import _db
+import _bsddb

-_deadlock_MinSleepTime = 1.0/64  # always sleep at least N seconds between retrys
-_deadlock_MaxSleepTime = 1.0     # never sleep more than N seconds between retrys
+_deadlock_MinSleepTime = 1.0/64  # always sleep at least N seconds between retrys
+_deadlock_MaxSleepTime = 3.14159 # never sleep more than N seconds between retrys

 _deadlock_VerboseFile = None     # Assign a file object to this for a "sleeping"
                                  # message to be written to it each retry

 def DeadlockWrap(function, *_args, **_kwargs):
     """DeadlockWrap(function, *_args, **_kwargs) - automatically retries
     function in case of a database deadlock.

-    This is a DeadlockWrapper method which DB calls can be made using to
-    preform infinite retrys with sleeps in between when a DBLockDeadlockError
-    exception is raised in a database call:
+    This is a function intended to be used to wrap database calls such
+    that they perform retrys with exponentially backing off sleeps in
+    between when a DBLockDeadlockError exception is raised.
+
+    A 'max_retries' parameter may optionally be passed to prevent it
+    from retrying forever (in which case the exception will be reraised).

         d = DB(...)
         d.open(...)
         DeadlockWrap(d.put, "foo", data="bar")  # set key "foo" to "bar"
     """
     sleeptime = _deadlock_MinSleepTime
-    while (1):
+    max_retries = _kwargs.get('max_retries', -1)
+    if _kwargs.has_key('max_retries'):
+        del _kwargs['max_retries']
+    while 1:
         try:
             return apply(function, _args, _kwargs)
         except _db.DBLockDeadlockError:
-            print 'DeadlockWrap sleeping ', sleeptime
+            if _deadlock_VerboseFile:
+                _deadlock_VerboseFile.write(
+                    'dbutils.DeadlockWrap: sleeping %1.3f\n' % sleeptime)
             _sleep(sleeptime)
             # exponential backoff in the sleep time
             sleeptime = sleeptime * 2
             if sleeptime > _deadlock_MaxSleepTime:
                 sleeptime = _deadlock_MaxSleepTime
+            max_retries = max_retries - 1
+            if max_retries == -1:
+                raise

 #------------------------------------------------------------------------
...
...
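The rewritten docstring above describes the intended use of DeadlockWrap: wrap a DB call so it is retried with exponentially backed-off sleeps whenever DBLockDeadlockError is raised, optionally bounded by max_retries. A usage sketch based on that docstring (Python 2 era; the filename and flags are illustrative):

    # Usage sketch based on the DeadlockWrap docstring (illustrative values).
    from bsddb import db, dbutils

    d = db.DB()
    d.open("example.db", db.DB_HASH, db.DB_CREATE)

    # Retry with exponential backoff on deadlock; give up and re-raise
    # after 12 attempts, as the test suite in this commit does.
    dbutils.DeadlockWrap(d.put, "foo", data="bar", max_retries=12)
    print dbutils.DeadlockWrap(d.get, "foo", max_retries=12)   # -> 'bar'

    d.close()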
Lib/bsddb/test/test_dbshelve.py
...
...
@@ -210,7 +210,7 @@ class BTreeShelveTestCase(BasicShelveTestCase):

 class HashShelveTestCase(BasicShelveTestCase):
-    dbtype = db.DB_BTREE
+    dbtype = db.DB_HASH
     dbflags = db.DB_CREATE
...
...
@@ -220,7 +220,7 @@ class ThreadBTreeShelveTestCase(BasicShelveTestCase):

 class ThreadHashShelveTestCase(BasicShelveTestCase):
-    dbtype = db.DB_BTREE
+    dbtype = db.DB_HASH
     dbflags = db.DB_CREATE | db.DB_THREAD
...
...
@@ -261,7 +261,7 @@ class EnvBTreeShelveTestCase(BasicEnvShelveTestCase):

 class EnvHashShelveTestCase(BasicEnvShelveTestCase):
     envflags = 0
-    dbtype = db.DB_BTREE
+    dbtype = db.DB_HASH
     dbflags = db.DB_CREATE
...
...
@@ -273,7 +273,7 @@ class EnvThreadBTreeShelveTestCase(BasicEnvShelveTestCase):

 class EnvThreadHashShelveTestCase(BasicEnvShelveTestCase):
     envflags = db.DB_THREAD
-    dbtype = db.DB_BTREE
+    dbtype = db.DB_HASH
     dbflags = db.DB_CREATE | db.DB_THREAD
...
...
Lib/bsddb/test/test_dbtables.py
...
...
@@ -5,6 +5,7 @@
 #-----------------------------------------------------------------------
 #
 # Copyright (C) 2000, 2001 by Autonomous Zone Industries
+# Copyright (C) 2002 Gregory P. Smith
 #
 # March 20, 2000
 #
...
...
@@ -159,6 +160,40 @@ class TableDBTestCase(unittest.TestCase):
         assert values[0]['b'] == "bad"

+    def test04_MultiCondSelect(self):
+        tabname = "test04_MultiCondSelect"
+        try:
+            self.tdb.Drop(tabname)
+        except dbtables.TableDBError:
+            pass
+        self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])
+
+        try:
+            self.tdb.Insert(tabname, {'a': "", 'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1),
+                                      'f': "Zero"})
+            assert 0
+        except dbtables.TableDBError:
+            pass
+
+        self.tdb.Insert(tabname, {'a': "A", 'b': "B", 'c': "C", 'd': "D", 'e': "E"})
+        self.tdb.Insert(tabname, {'a': "-A", 'b': "-B", 'c': "-C", 'd': "-D", 'e': "-E"})
+        self.tdb.Insert(tabname, {'a': "A-", 'b': "B-", 'c': "C-", 'd': "D-", 'e': "E-"})
+
+        if verbose:
+            self.tdb._db_print()
+
+        # This select should return 0 rows.  it is designed to test
+        # the bug identified and fixed in sourceforge bug # 590449
+        # (Big Thanks to "Rob Tillotson (n9mtb)" for tracking this down
+        # and supplying a fix!!  This one caused many headaches to say
+        # the least...)
+        values = self.tdb.Select(tabname, ['b', 'a', 'd'],
+                                 conditions={'e': dbtables.ExactCond('E'),
+                                             'a': dbtables.ExactCond('A'),
+                                             'd': dbtables.PrefixCond('-')})
+        assert len(values) == 0, values
+
     def test_CreateOrExtend(self):
         tabname = "test_CreateOrExtend"
...
...
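The comment in the new test explains why the Select must return zero rows: the conditions require column 'a' to be exactly "A", column 'e' to be exactly "E", and column 'd' to start with "-", and none of the three inserted rows satisfies all of these at once. A standalone check of that reasoning (plain Python, independent of bsddb):

    # Standalone check of the reasoning in test04_MultiCondSelect:
    # no inserted row passes all three conditions, so the select is empty.
    rows = [
        {'a': 'A',  'b': 'B',  'c': 'C',  'd': 'D',  'e': 'E'},
        {'a': '-A', 'b': '-B', 'c': '-C', 'd': '-D', 'e': '-E'},
        {'a': 'A-', 'b': 'B-', 'c': 'C-', 'd': 'D-', 'e': 'E-'},
    ]
    conds = {
        'e': lambda s: s == 'E',        # dbtables.ExactCond('E')
        'a': lambda s: s == 'A',        # dbtables.ExactCond('A')
        'd': lambda s: s[:1] == '-',    # dbtables.PrefixCond('-')
    }
    matches = [r for r in rows if all(c(r[k]) for k, c in conds.items())]
    assert len(matches) == 0, matches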
Lib/bsddb/test/test_thread.py
...
...
@@ -18,7 +18,7 @@ except ImportError:
 import unittest
 from test.test_support import verbose

-from bsddb import db
+from bsddb import db, dbutils

 #----------------------------------------------------------------------
...
...
@@ -31,6 +31,9 @@ class BaseThreadedTestCase(unittest.TestCase):
     def setUp(self):
+        if verbose:
+            dbutils._deadlock_VerboseFile = sys.stdout
+
         homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
         self.homeDir = homeDir
         try:
             os.mkdir(homeDir)
...
...
@@ -109,7 +112,7 @@ class ConcurrentDataStoreBase(BaseThreadedTestCase):
         for x in range(start, stop):
             key = '%04d' % x
-            d.put(key, self.makeData(key))
+            dbutils.DeadlockWrap(d.put, key, self.makeData(key), max_retries=12)
             if verbose and x % 100 == 0:
                 print "%s: records %d - %d finished" % (name, start, x)
...
...
@@ -212,7 +215,7 @@ class SimpleThreadedBase(BaseThreadedTestCase):
         # create a bunch of records
         for x in xrange(start, stop):
             key = '%04d' % x
-            d.put(key, self.makeData(key))
+            dbutils.DeadlockWrap(d.put, key, self.makeData(key), max_retries=12)
             if verbose and x % 100 == 0:
                 print "%s: records %d - %d finished" % (name, start, x)
...
...
@@ -221,12 +224,12 @@ class SimpleThreadedBase(BaseThreadedTestCase):
             if random() <= 0.05:
                 for y in xrange(start, x):
                     key = '%04d' % x
-                    data = d.get(key)
+                    data = dbutils.DeadlockWrap(d.get, key, max_retries=12)
                     assert data == self.makeData(key)

         # flush them
         try:
-            d.sync()
+            dbutils.DeadlockWrap(d.sync, max_retries=12)
         except db.DBIncompleteError, val:
             if verbose:
                 print "could not complete sync()..."
...
...
@@ -234,12 +237,12 @@ class SimpleThreadedBase(BaseThreadedTestCase):
         # read them back, deleting a few
         for x in xrange(start, stop):
             key = '%04d' % x
-            data = d.get(key)
+            data = dbutils.DeadlockWrap(d.get, key, max_retries=12)
             if verbose and x % 100 == 0:
                 print "%s: fetched record (%s, %s)" % (name, key, data)
-            assert data == self.makeData(key)
+            assert data == self.makeData(key), (key, data, self.makeData(key))
             if random() <= 0.10:
-                d.delete(key)
+                dbutils.DeadlockWrap(d.delete, key, max_retries=12)
                 if verbose:
                     print "%s: deleted record %s" % (name, key)
...
...
@@ -273,7 +276,7 @@ class BTreeSimpleThreaded(SimpleThreadedBase):

 class HashSimpleThreaded(SimpleThreadedBase):
-    dbtype = db.DB_BTREE
+    dbtype = db.DB_HASH

 #----------------------------------------------------------------------
...
...
Modules/_bsddb.c
...
...
@@ -75,12 +75,12 @@
 /* --------------------------------------------------------------------- */
 /* Various macro definitions */

-#define PY_BSDDB_VERSION "3.4.0"
+#define PY_BSDDB_VERSION "3.4.2"

 /* 40 = 4.0, 33 = 3.3; this will break if the second number is > 9 */
 #define DBVER (DB_VERSION_MAJOR * 10 + DB_VERSION_MINOR)

-static char *orig_rcs_id = "/Id: _db.c,v 1.44 2002/06/07 18:24:00 greg Exp /";
+static char *orig_rcs_id = "/Id: _db.c,v 1.48 2002/11/21 19:11:19 greg Exp /";
 static char *rcs_id = "$Id$";
...
...
@@ -1012,7 +1012,17 @@ DB_associate(DBObject* self, PyObject* args, PyObject* kwargs)
     secondaryDB->associateCallback = callback;
     secondaryDB->primaryDBType = _DB_get_type(self);

+    /* PyEval_InitThreads is called here due to a quirk in python 1.5
+     * - 2.2.1 (at least) according to Russell Williamson <merel@wt.net>:
+     * The global interepreter lock is not initialized until the first
+     * thread is created using thread.start_new_thread() or fork() is
+     * called.  that would cause the ALLOW_THREADS here to segfault due
+     * to a null pointer reference if no threads or child processes
+     * have been created.  This works around that and is a no-op if
+     * threads have already been initialized.
+     * (see pybsddb-users mailing list post on 2002-08-07)
+     */
+    PyEval_InitThreads();
     MYDB_BEGIN_ALLOW_THREADS;
     err = self->db->associate(self->db,
                               secondaryDB->db,
...
...
@@ -2323,8 +2333,6 @@ DBC_close(DBCursorObject* self, PyObject* args)
     if (!PyArg_ParseTuple(args, ":close"))
         return NULL;

-    CHECK_CURSOR_NOT_CLOSED(self);
-
     if (self->dbc != NULL) {
         MYDB_BEGIN_ALLOW_THREADS;
         err = self->dbc->c_close(self->dbc);
...
...
@@ -2413,7 +2421,7 @@ DBC_first(DBCursorObject* self, PyObject* args, PyObject* kwargs)
 static PyObject*
 DBC_get(DBCursorObject* self, PyObject* args, PyObject* kwargs)
 {
-    int err, flags;
+    int err, flags = 0;
     PyObject* keyobj = NULL;
     PyObject* dataobj = NULL;
     PyObject* retval = NULL;
...
...
@@ -3298,7 +3306,7 @@ DBEnv_lock_stat(DBEnvObject* self, PyObject* args)
     int err;
     DB_LOCK_STAT* sp;
     PyObject* d = NULL;
-    u_int32_t flags;
+    u_int32_t flags = 0;

     if (!PyArg_ParseTuple(args, "|i:lock_stat", &flags))
         return NULL;
...
...
@@ -3410,7 +3418,7 @@ DBEnv_txn_stat(DBEnvObject* self, PyObject* args)
     int err;
     DB_TXN_STAT* sp;
     PyObject* d = NULL;
-    u_int32_t flags;
+    u_int32_t flags = 0;

     if (!PyArg_ParseTuple(args, "|i:txn_stat", &flags))
         return NULL;
...
...