diff --git a/src/libImaging/Imaging.h b/src/libImaging/Imaging.h
index 4ac772af3..4b2db9531 100644
--- a/src/libImaging/Imaging.h
+++ b/src/libImaging/Imaging.h
@@ -52,17 +52,17 @@ extern "C" {
  */
 
 #ifdef Py_GIL_DISABLED
-    #if defined(__cplusplus)
-        #define IMAGING_TLS thread_local
-    #elif defined(HAVE_THREAD_LOCAL)
-        #define IMAGING_TLS thread_local
-    #elif defined(HAVE__THREAD_LOCAL)
-        #define IMAGING_TLS _Thread_local
-    #elif defined(HAVE___THREAD)
-        #define IMAGING_TLS __thread
-    #elif defined(HAVE___DECLSPEC_THREAD_)
-        #define IMAGING_TLS __declspec(thread)
-    #endif
+#if defined(__cplusplus)
+#define IMAGING_TLS thread_local
+#elif defined(HAVE_THREAD_LOCAL)
+#define IMAGING_TLS thread_local
+#elif defined(HAVE__THREAD_LOCAL)
+#define IMAGING_TLS _Thread_local
+#elif defined(HAVE___THREAD)
+#define IMAGING_TLS __thread
+#elif defined(HAVE___DECLSPEC_THREAD_)
+#define IMAGING_TLS __declspec(thread)
+#endif
 #endif
 
 /* Handles */
@@ -200,8 +200,9 @@ extern struct ImagingMemoryArena ImagingArenas[IMAGING_ARENAS_COUNT];
 /* Provide a macro that loops through each arena that has been
  * statically-allocated. This is necessary to properly handle stats.
  */
-#define IMAGING_ARENAS_FOREACH(index, arena) \
-    for (index = 0, (arena) = &ImagingArenas[index]; index < IMAGING_ARENAS_COUNT; (arena) = &ImagingArenas[++index])
+#define IMAGING_ARENAS_FOREACH(index, arena)                                       \
+    for (index = 0, (arena) = &ImagingArenas[index]; index < IMAGING_ARENAS_COUNT; \
+         (arena) = &ImagingArenas[++index])
 #else
 /* In this case we either have the GIL or do not have thread-local storage, in
  * which case we will only allocate a single arena.
@@ -213,10 +214,11 @@ extern struct ImagingMemoryArena ImagingDefaultArena;
  * effectively a single block of code.
  */
 #define IMAGING_ARENAS_FOREACH(index, arena) \
-    for ((void) index, (arena) = &ImagingDefaultArena; (arena); (arena) = NULL)
+    for ((void)index, (arena) = &ImagingDefaultArena; (arena); (arena) = NULL)
 #endif
 
-ImagingMemoryArena ImagingGetArena(void);
+ImagingMemoryArena
+ImagingGetArena(void);
 
 extern int
 ImagingMemorySetBlocksMax(ImagingMemoryArena arena, int blocks_max);
diff --git a/src/libImaging/Storage.c b/src/libImaging/Storage.c
index 3495fd444..28f2ea56f 100644
--- a/src/libImaging/Storage.c
+++ b/src/libImaging/Storage.c
@@ -274,23 +274,25 @@ static IMAGING_TLS uint64_t ImagingArenaThreadIndex = UINT64_MAX;
 /* These are the statically-allocated arenas.
  */
 struct ImagingMemoryArena ImagingArenas[IMAGING_ARENAS_COUNT] = {
-    { 1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 0, {0} },
-    { 1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 1, {0} },
-    { 1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 2, {0} },
-    { 1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 3, {0} },
-    { 1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 4, {0} },
-    { 1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 5, {0} },
-    { 1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 6, {0} },
-    { 1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 7, {0} }
+    {1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 0, {0}},
+    {1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 1, {0}},
+    {1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 2, {0}},
+    {1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 3, {0}},
+    {1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 4, {0}},
+    {1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 5, {0}},
+    {1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 6, {0}},
+    {1, IMAGING_ARENA_BLOCK_SIZE, 0, 0, NULL, 0, 0, 0, 0, 0, 7, {0}}
 };
 
 /* Get a pointer to the correct arena for this context. In this case where we
  * are using a round-robin approach to the statically allocated arenas, we will
  * return the arena that is assigned to the thread on first use.
  */
-ImagingMemoryArena ImagingGetArena(void) {
+ImagingMemoryArena
+ImagingGetArena(void) {
     if (ImagingArenaThreadIndex == UINT64_MAX) {
-        ImagingArenaThreadIndex = _Py_atomic_add_uint64(&ImagingArenaIndex, 1) % IMAGING_ARENAS_COUNT;
+        ImagingArenaThreadIndex =
+            _Py_atomic_add_uint64(&ImagingArenaIndex, 1) % IMAGING_ARENAS_COUNT;
     }
     return &ImagingArenas[ImagingArenaThreadIndex];
 }
@@ -298,7 +300,8 @@ ImagingMemoryArena ImagingGetArena(void) {
 /* Return the arena associated with the given image. In this case the index of
  * the arena is stored on the image itself.
  */
-ImagingMemoryArena ImagingGetArenaFromImaging(Imaging im) {
+ImagingMemoryArena
+ImagingGetArenaFromImaging(Imaging im) {
     int arenaindex = im->arenaindex;
     assert(arenaindex >= 0 && arenaindex < IMAGING_ARENAS_COUNT);
     return &ImagingArenas[arenaindex];
@@ -308,7 +311,8 @@ ImagingMemoryArena ImagingGetArenaFromImaging(Imaging im) {
  * is necessary in order to return the blocks to the correct arena when the
  * image is destroyed.
  */
-static void ImagingSetArenaOnImaging(Imaging im, ImagingMemoryArena arena) {
+static void
+ImagingSetArenaOnImaging(Imaging im, ImagingMemoryArena arena) {
     im->arenaindex = arena->index;
 }
 #else
@@ -339,7 +343,8 @@ struct ImagingMemoryArena ImagingDefaultArena = {
  * either have the GIL or we do not have TLS, we will return only the default
  * arena.
  */
-ImagingMemoryArena ImagingGetArena(void) {
+ImagingMemoryArena
+ImagingGetArena(void) {
     return &ImagingDefaultArena;
 }
 
@@ -588,9 +593,7 @@ ImagingNewInternal(const char *mode, int xsize, int ysize, int dirty) {
     ImagingSetArenaOnImaging(im, arena);
 
     MUTEX_LOCK(&arena->mutex);
-    Imaging tmp = ImagingAllocateArray(
-        im, arena, dirty, arena->block_size
-    );
+    Imaging tmp = ImagingAllocateArray(im, arena, dirty, arena->block_size);
     MUTEX_UNLOCK(&arena->mutex);
     if (tmp) {
         return im;
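For context on the pattern this patch reformats: under Py_GIL_DISABLED, each thread
claims one of the statically allocated arenas on first use via an atomic round-robin
counter and caches the resulting slot in thread-local storage, so every later lookup
is lock-free. Below is a minimal standalone sketch of that scheme, not Pillow's
actual code: it substitutes C11 <stdatomic.h> and _Thread_local for CPython's
_Py_atomic_add_uint64 and the IMAGING_TLS macro, and all names (Arena, get_arena,
ARENAS_COUNT) are hypothetical.

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ARENAS_COUNT 8

    /* Placeholder for struct ImagingMemoryArena; only the slot index is kept. */
    typedef struct {
        uint64_t index;
    } Arena;

    static Arena arenas[ARENAS_COUNT] = {
        {0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}
    };

    /* Monotonically increasing counter shared by all threads. */
    static _Atomic uint64_t arena_counter = 0;

    /* Per-thread cache of the assigned slot; UINT64_MAX means "unassigned". */
    static _Thread_local uint64_t thread_arena_index = UINT64_MAX;

    static Arena *
    get_arena(void) {
        if (thread_arena_index == UINT64_MAX) {
            /* First use on this thread: claim the next slot, wrapping around.
             * atomic_fetch_add returns the previous counter value, matching
             * the behavior of _Py_atomic_add_uint64 in the diff above. */
            thread_arena_index =
                atomic_fetch_add(&arena_counter, 1) % ARENAS_COUNT;
        }
        return &arenas[thread_arena_index];
    }

    int
    main(void) {
        /* Repeated calls on the same thread return the same arena. */
        printf("arena %llu\n", (unsigned long long)get_arena()->index);
        printf("arena %llu\n", (unsigned long long)get_arena()->index);
        return 0;
    }

Like ImagingGetArena in the diff, the sketch pays for a single atomic increment per
thread; afterwards each lookup reads only a thread-local index, which is what keeps
contention on the shared counter (and on any per-arena mutex) low in the
free-threaded build.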