path: root/Python/condvar.h
blob: 8cba19b84612dc77981a788d42de77052f52e418
/*
 * Portable condition variable support for windows and pthreads.
 * Everything is inline, this header can be included where needed.
 *
 * APIs generally return 0 on success and non-zero on error,
 * and the caller needs to use its platform's error mechanism to
 * discover the error (errno, or GetLastError())
 *
 * Note that some implementations cannot distinguish between a
 * condition variable wait time-out and successful wait. Most often
 * the difference is moot anyway since the wait condition must be
 * re-checked.
 * PyCOND_TIMEDWAIT, in addition to returning negative on error,
 * thus returns 0 on regular success, 1 on timeout
 * or 2 if it can't tell.
 *
 * There are at least two caveats with using these condition variables,
 * due to the fact that they may be emulated with Semaphores on
 * Windows:
 * 1) While PyCOND_SIGNAL() will wake up at least one thread, we
 *    cannot currently guarantee that it will be one of the threads
 *    already waiting in a PyCOND_WAIT() call.  It _could_ cause
 *    the wakeup of a subsequent thread to try a PyCOND_WAIT(),
 *    including the thread doing the PyCOND_SIGNAL() itself.
 *    The same applies to PyCOND_BROADCAST(), if N threads are waiting
 *    then at least N threads will be woken up, but not necessarily
 *    those already waiting.
 *    For this reason, don't make the scheduling assumption that a
 *    specific other thread will get the wakeup signal (see the usage
 *    sketch after this comment).
 * 2) The _mutex_ must be held when calling PyCOND_SIGNAL() and
 *    PyCOND_BROADCAST().
 *    While e.g. the posix standard strongly recommends that the mutex
 *    associated with the condition variable is held when a
 *    pthread_cond_signal() call is made, this is not a hard requirement,
 *    although scheduling will not be "reliable" if it isn't.  Here
 *    the mutex is used for internal synchronization of the emulated
 *    Condition Variable.
 */
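
/*
 * Usage sketch (illustrative only): the canonical pattern is to hold the
 * mutex, wait in a loop until the wait condition holds, and signal while
 * still holding the mutex.  The names 'work_mutex', 'work_cond' and
 * 'work_ready' below are hypothetical application state, not part of this
 * header.
 *
 *     static PyMUTEX_T work_mutex;
 *     static PyCOND_T work_cond;
 *     static int work_ready;          // protected by work_mutex
 *
 *     static void consumer(void)
 *     {
 *         PyMUTEX_LOCK(&work_mutex);
 *         while (!work_ready) {
 *             // Always re-check the predicate: the wait may return
 *             // spuriously, and with the emulated Windows CV the wakeup
 *             // may even go to a later waiter (see caveat 1 above).
 *             PyCOND_WAIT(&work_cond, &work_mutex);
 *         }
 *         work_ready = 0;
 *         PyMUTEX_UNLOCK(&work_mutex);
 *     }
 *
 *     static void producer(void)
 *     {
 *         PyMUTEX_LOCK(&work_mutex);
 *         work_ready = 1;
 *         // Signal while the mutex is held (required here, see caveat 2).
 *         PyCOND_SIGNAL(&work_cond);
 *         PyMUTEX_UNLOCK(&work_mutex);
 *     }
 */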

#ifndef _CONDVAR_IMPL_H_
#define _CONDVAR_IMPL_H_

#include "Python.h"
#include "pycore_condvar.h"

#ifdef _POSIX_THREADS
/*
 * POSIX support
 */

/* These private functions are implemented in Python/thread_pthread.h */
int _PyThread_cond_init(PyCOND_T *cond);
void _PyThread_cond_after(long long us, struct timespec *abs);

/* The following functions return 0 on success, nonzero on error */
#define PyMUTEX_INIT(mut)       pthread_mutex_init((mut), NULL)
#define PyMUTEX_FINI(mut)       pthread_mutex_destroy(mut)
#define PyMUTEX_LOCK(mut)       pthread_mutex_lock(mut)
#define PyMUTEX_UNLOCK(mut)     pthread_mutex_unlock(mut)

#define PyCOND_INIT(cond)       _PyThread_cond_init(cond)
#define PyCOND_FINI(cond)       pthread_cond_destroy(cond)
#define PyCOND_SIGNAL(cond)     pthread_cond_signal(cond)
#define PyCOND_BROADCAST(cond)  pthread_cond_broadcast(cond)
#define PyCOND_WAIT(cond, mut)  pthread_cond_wait((cond), (mut))

/* return 0 for success, 1 on timeout, -1 on error */
Py_LOCAL_INLINE(int)
PyCOND_TIMEDWAIT(PyCOND_T *cond, PyMUTEX_T *mut, long long us)
{
    struct timespec abs;
    _PyThread_cond_after(us, &abs);
    int ret = pthread_cond_timedwait(cond, mut, &abs);
    if (ret == ETIMEDOUT) {
        return 1;
    }
    if (ret) {
        return -1;
    }
    return 0;
}
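
/*
 * Timed-wait sketch (illustrative only), reusing the hypothetical
 * 'work_mutex' / 'work_cond' / 'work_ready' state from the sketch near the
 * top of this file.  Per the contract above, 1 (timeout) and 2 ("can't
 * tell", possible with the native Windows implementation) must be handled
 * alike: re-check the wait condition and decide whether to retry or stop.
 * The 1000000 below is one second expressed in microseconds.
 *
 *     PyMUTEX_LOCK(&work_mutex);
 *     while (!work_ready) {
 *         int r = PyCOND_TIMEDWAIT(&work_cond, &work_mutex, 1000000);
 *         if (r < 0) {
 *             // error; consult errno (POSIX) or GetLastError() (Windows)
 *             break;
 *         }
 *         if (r > 0 && !work_ready) {
 *             // timed out (or can't tell) and the condition still does
 *             // not hold: stop waiting (or retry, depending on policy)
 *             break;
 *         }
 *     }
 *     PyMUTEX_UNLOCK(&work_mutex);
 */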

#elif defined(NT_THREADS)
/*
 * Windows (XP, 2003 server and later, as well as (hopefully) CE) support
 *
 * Emulated condition variables that work with XP and later, plus
 * native condition variable support on Vista and onwards.
 */

#if _PY_EMULATED_WIN_CV

/* The mutex is a CriticalSection object and the condition variable
   is emulated with the help of a semaphore.

   This implementation still has the problem that the threads woken
   with a "signal" aren't necessarily those that are already
   waiting.  It corresponds to listing 2 in:
   http://birrell.org/andrew/papers/ImplementingCVs.pdf

   Generic emulations of the pthread_cond_* API using
   earlier Win32 functions can be found on the Web.
   The following read can give background information on these issues,
   but the implementations are all broken in some way.
   http://www.cse.wustl.edu/~schmidt/win32-cv-1.html
*/

Py_LOCAL_INLINE(int)
PyMUTEX_INIT(PyMUTEX_T *cs)
{
    InitializeCriticalSection(cs);
    return 0;
}

Py_LOCAL_INLINE(int)
PyMUTEX_FINI(PyMUTEX_T *cs)
{
    DeleteCriticalSection(cs);
    return 0;
}

Py_LOCAL_INLINE(int)
PyMUTEX_LOCK(PyMUTEX_T *cs)
{
    EnterCriticalSection(cs);
    return 0;
}

Py_LOCAL_INLINE(int)
PyMUTEX_UNLOCK(PyMUTEX_T *cs)
{
    LeaveCriticalSection(cs);
    return 0;
}


Py_LOCAL_INLINE(int)
PyCOND_INIT(PyCOND_T *cv)
{
    /* A semaphore with a "large" max value.  The positive value
     * is only needed to catch those "lost wakeup" events and
     * race conditions when a timed wait elapses.
     */
    cv->sem = CreateSemaphore(NULL, 0, 100000, NULL);
    if (cv->sem==NULL)
        return -1;
    cv->waiting = 0;
    return 0;
}

Py_LOCAL_INLINE(int)
PyCOND_FINI(PyCOND_T *cv)
{
    return CloseHandle(cv->sem) ? 0 : -1;
}

/* this implementation can detect a timeout.  Returns 1 on timeout,
 * 0 otherwise (and -1 on error)
 */
Py_LOCAL_INLINE(int)
_PyCOND_WAIT_MS(PyCOND_T *cv, PyMUTEX_T *cs, DWORD ms)
{
    DWORD wait;
    cv->waiting++;
    PyMUTEX_UNLOCK(cs);
    /* "lost wakeup bug" would occur if the caller were interrupted here,
     * but we are safe because we are using a semaphore which has an internal
     * count.
     */
    wait = WaitForSingleObjectEx(cv->sem, ms, FALSE);
    PyMUTEX_LOCK(cs);
    if (wait != WAIT_OBJECT_0)
        --cv->waiting;
        /* Here we have a benign race condition with PyCOND_SIGNAL.
         * On failure or timeout, it is possible that PyCOND_SIGNAL
         * also decrements this value and releases the semaphore.
         * This is benign because it just means an extra spurious
         * wakeup for a waiting thread.
         * ('waiting' corresponds to the semaphore's "negative" count and
         * we may end up with e.g. (waiting == -1 && sem.count == 1).  When
         * a new thread comes along, it will pass right through, having
         * adjusted it to (waiting == 0 && sem.count == 0).)
         */
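        /* Illustrative trace (one possible interleaving): suppose thread A
         * is waiting, so waiting == 1.
         *   1. B calls PyCOND_SIGNAL: waiting 1 -> 0 and ReleaseSemaphore
         *      brings sem.count to 1.
         *   2. A's WaitForSingleObjectEx times out before consuming the
         *      semaphore, so A decrements waiting to -1; we are now at
         *      (waiting == -1 && sem.count == 1).
         *   3. A later waiter C increments waiting to 0 and its
         *      WaitForSingleObjectEx returns immediately, consuming the
         *      count: (waiting == 0 && sem.count == 0).  C's wakeup is
         *      spurious, which is harmless because callers re-check
         *      their wait condition.
         */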

    if (wait == WAIT_FAILED)
        return -1;
    /* return 0 on success, 1 on timeout */
    return wait != WAIT_OBJECT_0;
}

Py_LOCAL_INLINE(int)
PyCOND_WAIT(PyCOND_T *cv, PyMUTEX_T *cs)
{
    int result = _PyCOND_WAIT_MS(cv, cs, INFINITE);
    return result >= 0 ? 0 : result;
}

Py_LOCAL_INLINE(int)
PyCOND_TIMEDWAIT(PyCOND_T *cv, PyMUTEX_T *cs, long long us)
{
    return _PyCOND_WAIT_MS(cv, cs, (DWORD)(us/1000));
}

Py_LOCAL_INLINE(int)
PyCOND_SIGNAL(PyCOND_T *cv)
{
    /* this test allows PyCOND_SIGNAL to be a no-op unless required
     * to wake someone up, thus preventing an unbounded increase of
     * the semaphore's internal counter.
     */
    if (cv->waiting > 0) {
        /* notifying thread decreases the cv->waiting count so that
         * a delay between notify and actual wakeup of the target thread
         * doesn't cause a number of extra ReleaseSemaphore calls.
         */
        cv->waiting--;
        return ReleaseSemaphore(cv->sem, 1, NULL) ? 0 : -1;
    }
    return 0;
}

Py_LOCAL_INLINE(int)
PyCOND_BROADCAST(PyCOND_T *cv)
{
    int waiting = cv->waiting;
    if (waiting > 0) {
        cv->waiting = 0;
        return ReleaseSemaphore(cv->sem, waiting, NULL) ? 0 : -1;
    }
    return 0;
}

#else /* !_PY_EMULATED_WIN_CV */

Py_LOCAL_INLINE(int)
PyMUTEX_INIT(PyMUTEX_T *cs)
{
    InitializeSRWLock(cs);
    return 0;
}

Py_LOCAL_INLINE(int)
PyMUTEX_FINI(PyMUTEX_T *cs)
{
    return 0;
}

Py_LOCAL_INLINE(int)
PyMUTEX_LOCK(PyMUTEX_T *cs)
{
    AcquireSRWLockExclusive(cs);
    return 0;
}

Py_LOCAL_INLINE(int)
PyMUTEX_UNLOCK(PyMUTEX_T *cs)
{
    ReleaseSRWLockExclusive(cs);
    return 0;
}


Py_LOCAL_INLINE(int)
PyCOND_INIT(PyCOND_T *cv)
{
    InitializeConditionVariable(cv);
    return 0;
}
Py_LOCAL_INLINE(int)
PyCOND_FINI(PyCOND_T *cv)
{
    return 0;
}

Py_LOCAL_INLINE(int)
PyCOND_WAIT(PyCOND_T *cv, PyMUTEX_T *cs)
{
    return SleepConditionVariableSRW(cv, cs, INFINITE, 0) ? 0 : -1;
}

/* This implementation cannot tell a normal wakeup apart from a
 * spurious one, so return 2 ("can't tell") when the wait succeeds,
 * 1 on timeout and -1 on error.
 */
Py_LOCAL_INLINE(int)
PyCOND_TIMEDWAIT(PyCOND_T *cv, PyMUTEX_T *cs, long long us)
{
    BOOL success = SleepConditionVariableSRW(cv, cs, (DWORD)(us/1000), 0);
    if (!success && GetLastError() == ERROR_TIMEOUT)
        return 1;
    return success ? 2 : -1;
}

Py_LOCAL_INLINE(int)
PyCOND_SIGNAL(PyCOND_T *cv)
{
     WakeConditionVariable(cv);
     return 0;
}

Py_LOCAL_INLINE(int)
PyCOND_BROADCAST(PyCOND_T *cv)
{
     WakeAllConditionVariable(cv);
     return 0;
}


#endif /* _PY_EMULATED_WIN_CV */

#endif /* _POSIX_THREADS, NT_THREADS */

#endif /* _CONDVAR_IMPL_H_ */