diff options
author | Stephan Bergmann <sbergman@redhat.com> | 2012-05-16 22:09:21 +0200 |
---|---|---|
committer | Caolán McNamara <caolanm@redhat.com> | 2012-05-19 20:53:03 +0100 |
commit | c4b4ab993b93e57de816d2ca47b0e1be8e24eaab (patch) | |
tree | de53bf7478b15df1df19dc92b4cd65e9682284f0 | |
parent | 14f078cd52b1471811871bd61cdb0b3ab4f26e1a (diff) |
Fixed ThreadPool (and dependent ORequestThread) life cycle
At least with sw_complex test under load, it happened that an ORequestThread
could still process a remote release request while the main thread was already
in exit(3). This was because (a) ThreadPool never joined with the spawned
worker threads (which has been rectified by calling uno_threadpool_dispose(0)
from the final uno_threadpool_destroy), and (b) binaryurp::Bridge called
uno_threadpool_destroy only from its destructor (which could go as late as
exit(3)) instead of from terminate.
Additional cleanup:
* Access to Bridge's threadPool_ is now cleanly controlled by mutex_ (even
though that might not be necessary in every case).
* ThreadPool's stopDisposing got renamed to destroy, to make meaning clearer.
(cherry picked from commit d015384e1d98fe77fd59339044f58efb1ab9fb25)
Conflicts:
binaryurp/source/bridge.cxx
Change-Id: I45fa76e80e790a11065e7bf8ac9d92af2e62f262
Signed-off-by: Caolán McNamara <caolanm@redhat.com>
-rw-r--r-- | binaryurp/source/bridge.cxx | 46 | ||||
-rw-r--r-- | binaryurp/source/bridge.hxx | 4 | ||||
-rw-r--r-- | cppu/source/threadpool/threadpool.cxx | 24 | ||||
-rw-r--r-- | cppu/source/threadpool/threadpool.hxx | 4 |
4 files changed, 52 insertions, 26 deletions
diff --git a/binaryurp/source/bridge.cxx b/binaryurp/source/bridge.cxx index b491a2adc77f..f591fe0e9e43 100644 --- a/binaryurp/source/bridge.cxx +++ b/binaryurp/source/bridge.cxx | |||
@@ -234,16 +234,28 @@ Bridge::Bridge( | |||
234 | } | 234 | } |
235 | 235 | ||
236 | void Bridge::start() { | 236 | void Bridge::start() { |
237 | assert(threadPool_ == 0 && !writer_.is() && !reader_.is()); | 237 | rtl::Reference< Reader > r(new Reader(this)); |
238 | threadPool_ = uno_threadpool_create(); | 238 | rtl::Reference< Writer > w(new Writer(this)); |
239 | assert(threadPool_ != 0); | 239 | { |
240 | writer_.set(new Writer(this)); | 240 | osl::MutexGuard g(mutex_); |
241 | writer_->create(); | 241 | assert(threadPool_ == 0 && !writer_.is() && !reader_.is()); |
242 | reader_.set(new Reader(this)); | 242 | threadPool_ = uno_threadpool_create(); |
243 | reader_->create(); | 243 | assert(threadPool_ != 0); |
244 | reader_ = r; | ||
245 | writer_ = w; | ||
246 | } | ||
247 | // It is important to call reader_->create() last here; both | ||
248 | // Writer::execute and Reader::execute can call Bridge::terminate, but | ||
249 | // Writer::execute is initially blocked in unblocked_.wait() until | ||
250 | // Reader::execute has called bridge_->sendRequestChangeRequest(), so | ||
251 | // effectively only reader_->create() can lead to an early call to | ||
252 | // Bridge::terminate | ||
253 | w->create(); | ||
254 | r->create(); | ||
244 | } | 255 | } |
245 | 256 | ||
246 | void Bridge::terminate() { | 257 | void Bridge::terminate() { |
258 | uno_ThreadPool tp; | ||
247 | rtl::Reference< Reader > r; | 259 | rtl::Reference< Reader > r; |
248 | rtl::Reference< Writer > w; | 260 | rtl::Reference< Writer > w; |
249 | Listeners ls; | 261 | Listeners ls; |
@@ -252,6 +264,7 @@ void Bridge::terminate() { | |||
252 | if (terminated_) { | 264 | if (terminated_) { |
253 | return; | 265 | return; |
254 | } | 266 | } |
267 | tp = threadPool_; | ||
255 | std::swap(reader_, r); | 268 | std::swap(reader_, r); |
256 | std::swap(writer_, w); | 269 | std::swap(writer_, w); |
257 | ls.swap(listeners_); | 270 | ls.swap(listeners_); |
@@ -266,8 +279,8 @@ void Bridge::terminate() { | |||
266 | w->stop(); | 279 | w->stop(); |
267 | joinThread(r.get()); | 280 | joinThread(r.get()); |
268 | joinThread(w.get()); | 281 | joinThread(w.get()); |
269 | assert(threadPool_ != 0); | 282 | assert(tp != 0); |
270 | uno_threadpool_dispose(threadPool_); | 283 | uno_threadpool_dispose(tp); |
271 | Stubs s; | 284 | Stubs s; |
272 | { | 285 | { |
273 | osl::MutexGuard g(mutex_); | 286 | osl::MutexGuard g(mutex_); |
@@ -294,6 +307,7 @@ void Bridge::terminate() { | |||
294 | "binaryurp", "caught runtime exception '" << e.Message << '\''); | 307 | "binaryurp", "caught runtime exception '" << e.Message << '\''); |
295 | } | 308 | } |
296 | } | 309 | } |
310 | uno_threadpool_destroy(tp); | ||
297 | } | 311 | } |
298 | 312 | ||
299 | css::uno::Reference< css::connection::XConnection > Bridge::getConnection() | 313 | css::uno::Reference< css::connection::XConnection > Bridge::getConnection() |
@@ -323,7 +337,8 @@ BinaryAny Bridge::mapCppToBinaryAny(css::uno::Any const & cppAny) { | |||
323 | return out; | 337 | return out; |
324 | } | 338 | } |
325 | 339 | ||
326 | uno_ThreadPool Bridge::getThreadPool() const { | 340 | uno_ThreadPool Bridge::getThreadPool() { |
341 | osl::MutexGuard g(mutex_); | ||
327 | assert(threadPool_ != 0); | 342 | assert(threadPool_ != 0); |
328 | return threadPool_; | 343 | return threadPool_; |
329 | } | 344 | } |
@@ -564,7 +579,8 @@ bool Bridge::makeCall( | |||
564 | { | 579 | { |
565 | std::auto_ptr< IncomingReply > resp; | 580 | std::auto_ptr< IncomingReply > resp; |
566 | { | 581 | { |
567 | AttachThread att(threadPool_); | 582 | uno_ThreadPool tp = getThreadPool(); |
583 | AttachThread att(tp); | ||
568 | PopOutgoingRequest pop( | 584 | PopOutgoingRequest pop( |
569 | outgoingRequests_, att.getTid(), | 585 | outgoingRequests_, att.getTid(), |
570 | OutgoingRequest(OutgoingRequest::KIND_NORMAL, member, setter)); | 586 | OutgoingRequest(OutgoingRequest::KIND_NORMAL, member, setter)); |
@@ -575,7 +591,7 @@ bool Bridge::makeCall( | |||
575 | incrementCalls(true); | 591 | incrementCalls(true); |
576 | incrementActiveCalls(); | 592 | incrementActiveCalls(); |
577 | void * job; | 593 | void * job; |
578 | uno_threadpool_enter(threadPool_, &job); | 594 | uno_threadpool_enter(tp, &job); |
579 | resp.reset(static_cast< IncomingReply * >(job)); | 595 | resp.reset(static_cast< IncomingReply * >(job)); |
580 | decrementActiveCalls(); | 596 | decrementActiveCalls(); |
581 | decrementCalls(); | 597 | decrementCalls(); |
@@ -806,8 +822,8 @@ bool Bridge::isCurrentContextMode() { | |||
806 | } | 822 | } |
807 | 823 | ||
808 | Bridge::~Bridge() { | 824 | Bridge::~Bridge() { |
809 | if (threadPool_ != 0) { | 825 | if (getThreadPool() != 0) { |
810 | uno_threadpool_destroy(threadPool_); | 826 | terminate(); |
811 | } | 827 | } |
812 | } | 828 | } |
813 | 829 | ||
@@ -934,7 +950,7 @@ void Bridge::sendProtPropRequest( | |||
934 | void Bridge::makeReleaseCall( | 950 | void Bridge::makeReleaseCall( |
935 | rtl::OUString const & oid, css::uno::TypeDescription const & type) | 951 | rtl::OUString const & oid, css::uno::TypeDescription const & type) |
936 | { | 952 | { |
937 | AttachThread att(threadPool_); | 953 | AttachThread att(getThreadPool()); |
938 | sendRequest( | 954 | sendRequest( |
939 | att.getTid(), oid, type, | 955 | att.getTid(), oid, type, |
940 | css::uno::TypeDescription( | 956 | css::uno::TypeDescription( |
diff --git a/binaryurp/source/bridge.hxx b/binaryurp/source/bridge.hxx index cf281f2febfd..8d667897d253 100644 --- a/binaryurp/source/bridge.hxx +++ b/binaryurp/source/bridge.hxx | |||
@@ -106,7 +106,7 @@ public: | |||
106 | 106 | ||
107 | BinaryAny mapCppToBinaryAny(com::sun::star::uno::Any const & cppAny); | 107 | BinaryAny mapCppToBinaryAny(com::sun::star::uno::Any const & cppAny); |
108 | 108 | ||
109 | uno_ThreadPool getThreadPool() const; | 109 | uno_ThreadPool getThreadPool(); |
110 | 110 | ||
111 | rtl::Reference< Writer > getWriter(); | 111 | rtl::Reference< Writer > getWriter(); |
112 | 112 | ||
@@ -258,11 +258,11 @@ private: | |||
258 | com::sun::star::uno::TypeDescription protPropType_; | 258 | com::sun::star::uno::TypeDescription protPropType_; |
259 | com::sun::star::uno::TypeDescription protPropRequest_; | 259 | com::sun::star::uno::TypeDescription protPropRequest_; |
260 | com::sun::star::uno::TypeDescription protPropCommit_; | 260 | com::sun::star::uno::TypeDescription protPropCommit_; |
261 | uno_ThreadPool threadPool_; | ||
262 | OutgoingRequests outgoingRequests_; | 261 | OutgoingRequests outgoingRequests_; |
263 | 262 | ||
264 | osl::Mutex mutex_; | 263 | osl::Mutex mutex_; |
265 | Listeners listeners_; | 264 | Listeners listeners_; |
265 | uno_ThreadPool threadPool_; | ||
266 | rtl::Reference< Writer > writer_; | 266 | rtl::Reference< Writer > writer_; |
267 | rtl::Reference< Reader > reader_; | 267 | rtl::Reference< Reader > reader_; |
268 | bool currentContextMode_; | 268 | bool currentContextMode_; |
diff --git a/cppu/source/threadpool/threadpool.cxx b/cppu/source/threadpool/threadpool.cxx index 312fd89c4375..f9f0be6c7d03 100644 --- a/cppu/source/threadpool/threadpool.cxx +++ b/cppu/source/threadpool/threadpool.cxx | |||
@@ -26,7 +26,10 @@ | |||
26 | * | 26 | * |
27 | ************************************************************************/ | 27 | ************************************************************************/ |
28 | 28 | ||
29 | #include "sal/config.h" | ||
30 | |||
29 | #include <boost/unordered_map.hpp> | 31 | #include <boost/unordered_map.hpp> |
32 | #include <cassert> | ||
30 | #include <stdio.h> | 33 | #include <stdio.h> |
31 | 34 | ||
32 | #include <osl/diagnose.h> | 35 | #include <osl/diagnose.h> |
@@ -73,7 +76,7 @@ namespace cppu_threadpool | |||
73 | m_lst.push_back( nDisposeId ); | 76 | m_lst.push_back( nDisposeId ); |
74 | } | 77 | } |
75 | 78 | ||
76 | void DisposedCallerAdmin::stopDisposing( sal_Int64 nDisposeId ) | 79 | void DisposedCallerAdmin::destroy( sal_Int64 nDisposeId ) |
77 | { | 80 | { |
78 | MutexGuard guard( m_mutex ); | 81 | MutexGuard guard( m_mutex ); |
79 | for( DisposedCallerList::iterator ii = m_lst.begin() ; | 82 | for( DisposedCallerList::iterator ii = m_lst.begin() ; |
@@ -172,9 +175,9 @@ namespace cppu_threadpool | |||
172 | } | 175 | } |
173 | } | 176 | } |
174 | 177 | ||
175 | void ThreadPool::stopDisposing( sal_Int64 nDisposeId ) | 178 | void ThreadPool::destroy( sal_Int64 nDisposeId ) |
176 | { | 179 | { |
177 | m_DisposedCallerAdmin->stopDisposing( nDisposeId ); | 180 | m_DisposedCallerAdmin->destroy( nDisposeId ); |
178 | } | 181 | } |
179 | 182 | ||
180 | /****************** | 183 | /****************** |
@@ -480,13 +483,14 @@ uno_threadpool_dispose( uno_ThreadPool hPool ) SAL_THROW_EXTERN_C() | |||
480 | extern "C" void SAL_CALL | 483 | extern "C" void SAL_CALL |
481 | uno_threadpool_destroy( uno_ThreadPool hPool ) SAL_THROW_EXTERN_C() | 484 | uno_threadpool_destroy( uno_ThreadPool hPool ) SAL_THROW_EXTERN_C() |
482 | { | 485 | { |
483 | ThreadPool::getInstance()->stopDisposing( | 486 | assert(hPool != 0); |
487 | |||
488 | ThreadPool::getInstance()->destroy( | ||
484 | sal::static_int_cast< sal_Int64 >( | 489 | sal::static_int_cast< sal_Int64 >( |
485 | reinterpret_cast< sal_IntPtr >(hPool)) ); | 490 | reinterpret_cast< sal_IntPtr >(hPool)) ); |
486 | 491 | ||
487 | if( hPool ) | 492 | bool empty; |
488 | { | 493 | { |
489 | // special treatment for 0 ! | ||
490 | OSL_ASSERT( g_pThreadpoolHashSet ); | 494 | OSL_ASSERT( g_pThreadpoolHashSet ); |
491 | 495 | ||
492 | MutexGuard guard( Mutex::getGlobalMutex() ); | 496 | MutexGuard guard( Mutex::getGlobalMutex() ); |
@@ -496,12 +500,18 @@ uno_threadpool_destroy( uno_ThreadPool hPool ) SAL_THROW_EXTERN_C() | |||
496 | g_pThreadpoolHashSet->erase( ii ); | 500 | g_pThreadpoolHashSet->erase( ii ); |
497 | delete hPool; | 501 | delete hPool; |
498 | 502 | ||
499 | if( g_pThreadpoolHashSet->empty() ) | 503 | empty = g_pThreadpoolHashSet->empty(); |
504 | if( empty ) | ||
500 | { | 505 | { |
501 | delete g_pThreadpoolHashSet; | 506 | delete g_pThreadpoolHashSet; |
502 | g_pThreadpoolHashSet = 0; | 507 | g_pThreadpoolHashSet = 0; |
503 | } | 508 | } |
504 | } | 509 | } |
510 | |||
511 | if( empty ) | ||
512 | { | ||
513 | uno_threadpool_dispose( 0 ); | ||
514 | } | ||
505 | } | 515 | } |
506 | 516 | ||
507 | /* vim:set shiftwidth=4 softtabstop=4 expandtab: */ | 517 | /* vim:set shiftwidth=4 softtabstop=4 expandtab: */ |
diff --git a/cppu/source/threadpool/threadpool.hxx b/cppu/source/threadpool/threadpool.hxx index 273798c81f8a..18bb47a1ff20 100644 --- a/cppu/source/threadpool/threadpool.hxx +++ b/cppu/source/threadpool/threadpool.hxx | |||
@@ -90,7 +90,7 @@ namespace cppu_threadpool { | |||
90 | static DisposedCallerAdminHolder getInstance(); | 90 | static DisposedCallerAdminHolder getInstance(); |
91 | 91 | ||
92 | void dispose( sal_Int64 nDisposeId ); | 92 | void dispose( sal_Int64 nDisposeId ); |
93 | void stopDisposing( sal_Int64 nDisposeId ); | 93 | void destroy( sal_Int64 nDisposeId ); |
94 | sal_Bool isDisposed( sal_Int64 nDisposeId ); | 94 | sal_Bool isDisposed( sal_Int64 nDisposeId ); |
95 | 95 | ||
96 | private: | 96 | private: |
@@ -109,7 +109,7 @@ namespace cppu_threadpool { | |||
109 | static ThreadPoolHolder getInstance(); | 109 | static ThreadPoolHolder getInstance(); |
110 | 110 | ||
111 | void dispose( sal_Int64 nDisposeId ); | 111 | void dispose( sal_Int64 nDisposeId ); |
112 | void stopDisposing( sal_Int64 nDisposeId ); | 112 | void destroy( sal_Int64 nDisposeId ); |
113 | 113 | ||
114 | void addJob( const ByteSequence &aThreadId, | 114 | void addJob( const ByteSequence &aThreadId, |
115 | sal_Bool bAsynchron, | 115 | sal_Bool bAsynchron, |