thread.c 10.6 KB
Newer Older
1

2 3 4 5 6
/* Thread package.
   This is intended to be usable independently from Python.
   The implementation for system foobar is in a file thread_foobar.h
   which is included by this file dependent on config settings.
   Stuff shared by all thread_*.h files is collected here. */
7

8
#include "Python.h"
9

10 11 12 13 14 15 16 17 18 19

#ifndef _POSIX_THREADS
/* This means pthreads are not implemented in libc headers, hence the macro
   not present in unistd.h. But they still can be implemented as an external
   library (e.g. gnu pth in pthread emulation) */
# ifdef HAVE_PTHREAD_H
#  include <pthread.h> /* _POSIX_THREADS */
# endif
#endif

20
#ifndef DONT_HAVE_STDIO_H
21
#include <stdio.h>
22
#endif
23

Sjoerd Mullender's avatar
Sjoerd Mullender committed
24
#include <stdlib.h>
25

26 27 28 29 30 31
#ifdef __sgi
#ifndef HAVE_PTHREAD_H /* XXX Need to check in configure.in */
#undef _POSIX_THREADS
#endif
#endif

32
#include "pythread.h"
33 34 35 36 37 38 39 40 41 42 43 44 45 46

#ifndef _POSIX_THREADS

#ifdef __sgi
#define SGI_THREADS
#endif

#ifdef HAVE_THREAD_H
#define SOLARIS_THREADS
#endif

#if defined(sun) && !defined(SOLARIS_THREADS)
#define SUN_LWP
#endif
47

48 49 50 51 52 53 54 55 56 57 58 59 60 61
/* Check if we're running on HP-UX and _SC_THREADS is defined. If so, then
   enough of the Posix threads package is implemented to support Python
   threads.

   This is valid for HP-UX 11.23 running on an ia64 system. If needed, add
   a check of __ia64 to verify that we're running on a ia64 system instead
   of a pa-risc system.
*/
#ifdef __hpux
#ifdef _SC_THREADS
#define _POSIX_THREADS
#endif
#endif

Sjoerd Mullender's avatar
Sjoerd Mullender committed
62
#endif /* _POSIX_THREADS */
63 64


65
#ifdef Py_DEBUG
66
static int thread_debug = 0;
Jeremy Hylton's avatar
Jeremy Hylton committed
67
#define dprintf(args)	(void)((thread_debug & 1) && printf args)
68 69 70 71
#define d2printf(args)	((thread_debug & 8) && printf args)
#else
#define dprintf(args)
#define d2printf(args)
Sjoerd Mullender's avatar
Sjoerd Mullender committed
72
#endif
73

74 75
static int initialized;

76
static void PyThread__init_thread(void); /* Forward */
77

78 79
void
PyThread_init_thread(void)
80
{
81
#ifdef Py_DEBUG
82
	char *p = Py_GETENV("PYTHONTHREADDEBUG");
Sjoerd Mullender's avatar
Sjoerd Mullender committed
83 84 85 86 87 88 89

	if (p) {
		if (*p)
			thread_debug = atoi(p);
		else
			thread_debug = 1;
	}
90
#endif /* Py_DEBUG */
91 92 93
	if (initialized)
		return;
	initialized = 1;
94 95
	dprintf(("PyThread_init_thread called\n"));
	PyThread__init_thread();
96 97
}

98 99 100 101 102
/* Support for runtime thread stack size tuning.
   A value of 0 means using the platform's default stack size
   or the size specified by the THREAD_STACK_SIZE macro. */
static size_t _pythread_stacksize = 0;

103 104
#ifdef SGI_THREADS
#include "thread_sgi.h"
105
#endif
106

107 108
#ifdef SOLARIS_THREADS
#include "thread_solaris.h"
109 110
#endif

111 112
#ifdef SUN_LWP
#include "thread_lwp.h"
Sjoerd Mullender's avatar
Sjoerd Mullender committed
113
#endif
114

115
#ifdef HAVE_PTH
116
#include "thread_pth.h"
117
#undef _POSIX_THREADS
118 119
#endif

120 121
#ifdef _POSIX_THREADS
#include "thread_pthread.h"
122 123
#endif

124 125
#ifdef C_THREADS
#include "thread_cthread.h"
126 127
#endif

Guido van Rossum's avatar
Guido van Rossum committed
128 129 130 131
#ifdef NT_THREADS
#include "thread_nt.h"
#endif

132 133 134 135
#ifdef OS2_THREADS
#include "thread_os2.h"
#endif

136 137 138 139
#ifdef BEOS_THREADS
#include "thread_beos.h"
#endif

140 141 142 143
#ifdef PLAN9_THREADS
#include "thread_plan9.h"
#endif

144 145 146 147
#ifdef ATHEOS_THREADS
#include "thread_atheos.h"
#endif

148
/*
149 150
#ifdef FOOBAR_THREADS
#include "thread_foobar.h"
151
#endif
152
*/
153

154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175
/* Return the stack size currently configured for newly created threads.
 * 0 means the platform default (or THREAD_STACK_SIZE) is in effect. */
size_t
PyThread_get_stacksize(void)
{
	size_t current = _pythread_stacksize;
	return current;
}

/* Request a stack size for subsequently created threads.  Only
 * platforms whose thread_<platform>.h defines a THREAD_SET_STACKSIZE()
 * macro support tuning.
 * Return 0 if stack size is valid,
 *        -1 if stack size value is invalid,
 *        -2 if setting stack size is not supported. */
int
PyThread_set_stacksize(size_t size)
{
#ifdef THREAD_SET_STACKSIZE
	return THREAD_SET_STACKSIZE(size);
#else
	return -2;
#endif
}

176 177 178 179 180 181 182
#ifndef Py_HAVE_NATIVE_TLS
/* If the platform has not supplied a platform specific
   TLS implementation, provide our own.

   This code stolen from "thread_sgi.h", where it was the only
   implementation of an existing Python TLS API.
*/
183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200
/* ------------------------------------------------------------------------
Per-thread data ("key") support.

Use PyThread_create_key() to create a new key.  This is typically shared
across threads.

Use PyThread_set_key_value(thekey, value) to associate void* value with
thekey in the current thread.  Each thread has a distinct mapping of thekey
to a void* value.  Caution:  if the current thread already has a mapping
for thekey, value is ignored.

Use PyThread_get_key_value(thekey) to retrieve the void* value associated
with thekey in the current thread.  This returns NULL if no value is
associated with thekey in the current thread.

Use PyThread_delete_key_value(thekey) to forget the current thread's associated
value for thekey.  PyThread_delete_key(thekey) forgets the values associated
with thekey across *all* threads.
201

202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222
While some of these functions have error-return values, none set any
Python exception.

None of the functions does memory management on behalf of the void* values.
You need to allocate and deallocate them yourself.  If the void* values
happen to be PyObject*, these functions don't do refcount operations on
them either.

The GIL does not need to be held when calling these functions; they supply
their own locking.  This isn't true of PyThread_create_key(), though (see
next paragraph).

There's a hidden assumption that PyThread_create_key() will be called before
any of the other functions are called.  There's also a hidden assumption
that calls to PyThread_create_key() are serialized externally.
------------------------------------------------------------------------ */

/* A singly-linked list of struct key objects remembers all the key->value
 * associations.  File static keyhead heads the list.  keymutex is used
 * to enforce exclusion internally.
 */
223
/* One (thread id, key) -> value association.  All records live on the
 * file-static keyhead list below, guarded internally by keymutex. */
struct key {
	/* Next record in the list, or NULL if this is the last record. */
	struct key *next;

	/* The thread id, according to PyThread_get_thread_ident(). */
	long id;

	/* The key and its associated value. */
	int key;
	void *value;
};

/* Head of the association list; NULL when no associations exist. */
static struct key *keyhead = NULL;
/* Lock protecting keyhead; created lazily by PyThread_create_key(). */
static PyThread_type_lock keymutex = NULL;
static int nkeys = 0;  /* PyThread_create_key() hands out nkeys+1 next */

/* Internal helper.
 * If the current thread has a mapping for key, the appropriate struct key*
 * is returned.  NB:  value is ignored in this case!
 * If there is no mapping for key in the current thread, then:
 *     If value is NULL, NULL is returned.
 *     Else a mapping of key to value is created for the current thread,
 *     and a pointer to a new struct key* is returned; except that if
 *     malloc() can't find room for a new struct key*, NULL is returned.
 * So when value==NULL, this acts like a pure lookup routine, and when
 * value!=NULL, this acts like dict.setdefault(), returning an existing
 * mapping if one exists, else creating a new mapping.
250 251 252 253 254 255 256 257 258
 *
 * Caution:  this used to be too clever, trying to hold keymutex only
 * around the "p->next = keyhead; keyhead = p" pair.  That allowed
 * another thread to mutate the list, via key deletion, concurrent with
 * find_key() crawling over the list.  Hilarity ensued.  For example, when
 * the for-loop here does "p = p->next", p could end up pointing at a
 * record that PyThread_delete_key_value() was concurrently free()'ing.
 * That could lead to anything, from failing to find a key that exists, to
 * segfaults.  Now we lock the whole routine.
259
 */
260 261
/* Look up (and possibly create) the current thread's record for key.
 * Acts as a pure lookup when value == NULL, and as a
 * dict.setdefault()-style insert when value != NULL (an existing
 * mapping wins and value is ignored).  Returns NULL on a lookup miss,
 * on malloc failure, or when keymutex was never created.  The whole
 * routine runs under keymutex; see the comment above for why partial
 * locking was abandoned.
 */
static struct key *
find_key(int key, void *value)
{
	struct key *p, *prev_p;
	long id = PyThread_get_thread_ident();

	/* No mutex means PyThread_create_key() was never called (or its
	 * lock allocation failed); treat every lookup as a miss. */
	if (!keymutex)
		return NULL;
	PyThread_acquire_lock(keymutex, 1);
	prev_p = NULL;
	for (p = keyhead; p != NULL; p = p->next) {
		if (p->id == id && p->key == key)
			goto Done;
		/* Sanity check.  These states should never happen but if
		 * they do we must abort.  Otherwise we'll end up spinning
		 * in a tight loop with the lock held.  A similar check is done
		 * in pystate.c tstate_delete_common().  */
		if (p == prev_p)
			Py_FatalError("tls find_key: small circular list(!)");
		prev_p = p;
		if (p->next == keyhead)
			Py_FatalError("tls find_key: circular list(!)");
	}
	if (value == NULL) {
		/* Pure lookup miss: p is NULL after the full traversal. */
		assert(p == NULL);
		goto Done;
	}
	/* No mapping yet and a value was supplied: prepend a new record.
	 * p stays NULL (and is returned) if malloc fails. */
	p = (struct key *)malloc(sizeof(struct key));
	if (p != NULL) {
		p->id = id;
		p->key = key;
		p->value = value;
		p->next = keyhead;
		keyhead = p;
	}
 Done:
	PyThread_release_lock(keymutex);
	return p;
}

300 301 302 303
/* Return a new key.  This must be called before any other functions in
 * this family, and callers must arrange to serialize calls to this
 * function.  No violations are detected.
 */
304 305
int
PyThread_create_key(void)
306
{
307 308 309
	/* All parts of this function are wrong if it's called by multiple
	 * threads simultaneously.
	 */
310 311 312 313 314
	if (keymutex == NULL)
		keymutex = PyThread_allocate_lock();
	return ++nkeys;
}

315
/* Forget the associations for key across *all* threads. */
316 317
void
PyThread_delete_key(int key)
318 319
{
	struct key *p, **q;
320

321 322 323 324 325 326 327 328 329 330 331 332 333 334
	PyThread_acquire_lock(keymutex, 1);
	q = &keyhead;
	while ((p = *q) != NULL) {
		if (p->key == key) {
			*q = p->next;
			free((void *)p);
			/* NB This does *not* free p->value! */
		}
		else
			q = &p->next;
	}
	PyThread_release_lock(keymutex);
}

335 336 337 338 339 340
/* Confusing:  If the current thread has an association for key,
 * value is ignored, and 0 is returned.  Else an attempt is made to create
 * an association of key to value for the current thread.  0 is returned
 * if that succeeds, but -1 is returned if there's not enough memory
 * to create the association.  value must not be NULL.
 */
341 342
int
PyThread_set_key_value(int key, void *value)
343
{
344 345 346 347
	struct key *p;

	assert(value != NULL);
	p = find_key(key, value);
348 349 350 351 352 353
	if (p == NULL)
		return -1;
	else
		return 0;
}

354 355 356
/* Retrieve the value associated with key in the current thread, or NULL
 * if the current thread doesn't have an association for key.
 */
357 358
void *
PyThread_get_key_value(int key)
359 360
{
	struct key *p = find_key(key, NULL);
361

362 363 364 365 366 367
	if (p == NULL)
		return NULL;
	else
		return p->value;
}

368
/* Forget the current thread's association for key, if any. */
369 370
void
PyThread_delete_key_value(int key)
371 372 373
{
	long id = PyThread_get_thread_ident();
	struct key *p, **q;
374

375 376 377 378 379 380 381 382 383 384 385 386 387 388 389
	PyThread_acquire_lock(keymutex, 1);
	q = &keyhead;
	while ((p = *q) != NULL) {
		if (p->key == key && p->id == id) {
			*q = p->next;
			free((void *)p);
			/* NB This does *not* free p->value! */
			break;
		}
		else
			q = &p->next;
	}
	PyThread_release_lock(keymutex);
}

390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420
/* Forget everything not associated with the current thread id.
 * This function is called from PyOS_AfterFork().  It is necessary
 * because other thread ids which were in use at the time of the fork
 * may be reused for new threads created in the forked process.
 */
void
PyThread_ReInitTLS(void)
{
	long self = PyThread_get_thread_ident();
	struct key **link;
	struct key *cur;

	if (!keymutex)
		return;

	/* As with interpreter_lock in PyEval_ReInitThreads()
	   we just create a new lock without freeing the old one */
	keymutex = PyThread_allocate_lock();

	/* Drop every record belonging to some other thread id; only the
	 * current thread survived the fork.  NB:  the values themselves
	 * are *not* freed. */
	link = &keyhead;
	while ((cur = *link) != NULL) {
		if (cur->id == self) {
			link = &cur->next;
		}
		else {
			*link = cur->next;
			free((void *)cur);
		}
	}
}

421
#endif /* Py_HAVE_NATIVE_TLS */