# Some simple Queue module tests, plus some failure conditions
# to ensure the Queue locks remain stable.
import Queue
import sys
import threading
import time

from test.test_support import verify, TestFailed, verbose

QUEUE_SIZE = 5

# A thread to run a function that unclogs a blocked Queue.
class _TriggerThread(threading.Thread):
    def __init__(self, fn, args):
        self.fn = fn
        self.args = args
        self.startedEvent = threading.Event()
        threading.Thread.__init__(self)

    def run(self):
        # The sleep isn't necessary, but is intended to give the blocking
        # function in the main thread a chance at actually blocking before
        # we unclog it.  But if the sleep is longer than the timeout-based
        # tests wait in their blocking functions, those tests will fail.
        # So we give them much longer timeout values compared to the
        # sleep here (I aimed at 10 seconds for blocking functions --
        # they should never actually wait that long; they should make
        # progress as soon as we call self.fn()).
        time.sleep(0.1)
        self.startedEvent.set()
        self.fn(*self.args)

# Execute a function that blocks, and in a separate thread, a function that
# triggers the release.  Returns the result of the blocking function.
# Caution:  block_func must guarantee to block until trigger_func is
# called, and trigger_func must guarantee to change queue state so that
# block_func can make enough progress to return.  In particular, a
# block_func that just raises an exception regardless of whether trigger_func
# is called will lead to timing-dependent sporadic failures; one such
# failure was rarely seen and went undiagnosed for years.  So block_func
# must not raise.  If block_func is supposed to raise an exception,
# call _doExceptionalBlockingTest() instead.
def _doBlockingTest(block_func, block_args, trigger_func, trigger_args):
    t = _TriggerThread(trigger_func, trigger_args)
    t.start()
    result = block_func(*block_args)
    # If block_func returned before our thread made the call, we failed!
    if not t.startedEvent.isSet():
        raise TestFailed("blocking function '%r' appeared not to block" %
                         block_func)
    t.join(10) # make sure the thread terminates
    if t.isAlive():
        raise TestFailed("trigger function '%r' appeared to not return" %
                         trigger_func)
    return result

# Call this instead if block_func is supposed to raise an exception.
def _doExceptionalBlockingTest(block_func, block_args, trigger_func,
                               trigger_args, expected_exception_class):
    t = _TriggerThread(trigger_func, trigger_args)
    t.start()
    try:
        try:
            block_func(*block_args)
        except expected_exception_class:
            raise
        else:
            raise TestFailed("expected exception of kind %r" %
                             expected_exception_class)
    finally:
        t.join(10) # make sure the thread terminates
        if t.isAlive():
            raise TestFailed("trigger function '%r' appeared to not return" %
                             trigger_func)
        if not t.startedEvent.isSet():
            raise TestFailed("trigger thread ended but event never set")

# A Queue subclass that can provoke failure at a moment's notice :)
class FailingQueueException(Exception):
    pass

class FailingQueue(Queue.Queue):
    def __init__(self, *args):
        self.fail_next_put = False
        self.fail_next_get = False
        Queue.Queue.__init__(self, *args)
    def _put(self, item):
        if self.fail_next_put:
            self.fail_next_put = False
            raise FailingQueueException("You Lose")
        return Queue.Queue._put(self, item)
    def _get(self):
        if self.fail_next_get:
            self.fail_next_get = False
            raise FailingQueueException("You Lose")
        return Queue.Queue._get(self)

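# Exercise the queue's failure paths: a _put() or _get() that raises must
# leave the queue's lock and bookkeeping in a usable state.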
def FailingQueueTest(q):
    if not q.empty():
        raise RuntimeError("Call this function with an empty queue")
    for i in range(QUEUE_SIZE-1):
        q.put(i)
    # Test a failing non-blocking put.
    q.fail_next_put = True
    try:
        q.put("oops", block=0)
        raise TestFailed("The queue didn't fail when it should have")
    except FailingQueueException:
        pass
    q.fail_next_put = True
    try:
        q.put("oops", timeout=0.1)
        raise TestFailed("The queue didn't fail when it should have")
    except FailingQueueException:
        pass
    q.put("last")
    verify(q.full(), "Queue should be full")
    # Test a failing blocking put
    q.fail_next_put = True
    try:
        _doBlockingTest(q.put, ("full",), q.get, ())
        raise TestFailed("The queue didn't fail when it should have")
    except FailingQueueException:
        pass
    # Check the Queue isn't damaged.
    # put failed, but get succeeded - re-add
    q.put("last")
    # Test a failing timeout put
    q.fail_next_put = True
    try:
        _doExceptionalBlockingTest(q.put, ("full", True, 10), q.get, (),
                                   FailingQueueException)
        raise TestFailed("The queue didn't fail when it should have")
    except FailingQueueException:
        pass
    # Check the Queue isn't damaged.
    # put failed, but get succeeded - re-add
    q.put("last")
    verify(q.full(), "Queue should be full")
    q.get()
    verify(not q.full(), "Queue should not be full")
    q.put("last")
    verify(q.full(), "Queue should be full")
    # Test a blocking put
    _doBlockingTest(q.put, ("full",), q.get, ())
    # Empty it
    for i in range(QUEUE_SIZE):
        q.get()
    verify(q.empty(), "Queue should be empty")
    q.put("first")
    q.fail_next_get = True
    try:
        q.get()
        raise TestFailed("The queue didn't fail when it should have")
    except FailingQueueException:
        pass
    verify(not q.empty(), "Queue should not be empty")
    q.fail_next_get = True
    try:
        q.get(timeout=0.1)
        raise TestFailed("The queue didn't fail when it should have")
    except FailingQueueException:
        pass
    verify(not q.empty(), "Queue should not be empty")
    q.get()
    verify(q.empty(), "Queue should be empty")
    q.fail_next_get = True
    try:
        _doExceptionalBlockingTest(q.get, (), q.put, ('empty',),
                                   FailingQueueException)
        raise TestFailed("The queue didn't fail when it should have")
    except FailingQueueException:
        pass
    # put succeeded, but get failed.
    verify(not q.empty(), "Queue should not be empty")
    q.get()
    verify(q.empty(), "Queue should be empty")

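# Basic FIFO sanity checks, plus the non-blocking, timeout and blocking
# variants of put() and get().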
def SimpleQueueTest(q):
    if not q.empty():
        raise RuntimeError("Call this function with an empty queue")
    # I guess we better check things actually queue correctly a little :)
    q.put(111)
    q.put(222)
    verify(q.get() == 111 and q.get() == 222,
           "Didn't seem to queue the correct data!")
    for i in range(QUEUE_SIZE-1):
        q.put(i)
        verify(not q.empty(), "Queue should not be empty")
    verify(not q.full(), "Queue should not be full")
    q.put("last")
    verify(q.full(), "Queue should be full")
    try:
        q.put("full", block=0)
        raise TestFailed("Didn't appear to block with a full queue")
    except Queue.Full:
        pass
    try:
        q.put("full", timeout=0.01)
        raise TestFailed("Didn't appear to time-out with a full queue")
    except Queue.Full:
        pass
    # Test a blocking put
    _doBlockingTest(q.put, ("full",), q.get, ())
    _doBlockingTest(q.put, ("full", True, 10), q.get, ())
    # Empty it
    for i in range(QUEUE_SIZE):
        q.get()
    verify(q.empty(), "Queue should be empty")
    try:
        q.get(block=0)
        raise TestFailed("Didn't appear to block with an empty queue")
    except Queue.Empty:
        pass
    try:
        q.get(timeout=0.01)
        raise TestFailed("Didn't appear to time-out with an empty queue")
    except Queue.Empty:
        pass
    # Test a blocking get
    _doBlockingTest(q.get, (), q.put, ('empty',))
    _doBlockingTest(q.get, (True, 10), q.put, ('empty',))

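# Shared accumulator and lock used by the QueueJoinTest worker threads.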
cum = 0
cumlock = threading.Lock()

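# Worker thread body: add queued numbers into cum until a None sentinel
# arrives, calling task_done() for every item consumed.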
def worker(q):
    global cum
    while True:
        x = q.get()
        if x is None:
            q.task_done()
            return
        cumlock.acquire()
        try:
            cum += x
        finally:
            cumlock.release()
        q.task_done()

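# join() must block until task_done() has been called for every item, and
# joining again after the workers have exited must return immediately.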
def QueueJoinTest(q):
    global cum
    cum = 0
    for i in (0,1):
        threading.Thread(target=worker, args=(q,)).start()
    for i in xrange(100):
        q.put(i)
    q.join()
    verify(cum == sum(range(100)),
           "q.join() did not block until all tasks were done")
    for i in (0,1):
        q.put(None)         # instruct the threads to close
    q.join()                # verify that you can join twice

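# task_done() without a matching get() must raise ValueError; otherwise the
# unfinished-task count would go negative.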
def QueueTaskDoneTest(q):
    try:
        q.task_done()
    except ValueError:
        pass
    else:
        raise TestFailed("Did not detect task count going negative")

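# Driver: run the join/task_done, simple and failing-queue tests, each
# twice against the same queue object.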
def test():
    q = Queue.Queue()
    QueueTaskDoneTest(q)
    QueueJoinTest(q)
    QueueJoinTest(q)
    QueueTaskDoneTest(q)

    q = Queue.Queue(QUEUE_SIZE)
    # Do it a couple of times on the same queue
    SimpleQueueTest(q)
    SimpleQueueTest(q)
    if verbose:
        print "Simple Queue tests seemed to work"
    q = FailingQueue(QUEUE_SIZE)
    FailingQueueTest(q)
    FailingQueueTest(q)
    if verbose:
        print "Failing Queue tests seemed to work"

test()