// libstdc++ <mutex> — doxygen-generated source listing ("Go to the
// documentation of this file" navigation links omitted).
1 // <mutex> -*- C++ -*-
2 
3 // Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009
4 // Free Software Foundation, Inc.
5 //
6 // This file is part of the GNU ISO C++ Library. This library is free
7 // software; you can redistribute it and/or modify it under the
8 // terms of the GNU General Public License as published by the
9 // Free Software Foundation; either version 3, or (at your option)
10 // any later version.
11 
12 // This library is distributed in the hope that it will be useful,
13 // but WITHOUT ANY WARRANTY; without even the implied warranty of
14 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 // GNU General Public License for more details.
16 
17 // Under Section 7 of GPL version 3, you are granted additional
18 // permissions described in the GCC Runtime Library Exception, version
19 // 3.1, as published by the Free Software Foundation.
20 
21 // You should have received a copy of the GNU General Public License and
22 // a copy of the GCC Runtime Library Exception along with this program;
23 // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
24 // <http://www.gnu.org/licenses/>.
25 
26 /** @file mutex
27  * This is a Standard C++ Library header.
28  */
29 
30 #ifndef _GLIBCXX_MUTEX
31 #define _GLIBCXX_MUTEX 1
32 
33 #pragma GCC system_header
34 
35 #ifndef __GXX_EXPERIMENTAL_CXX0X__
36 # include <c++0x_warning.h>
37 #else
38 
39 #include <tuple>
40 #include <cstddef>
41 #include <chrono>
42 #include <exception>
43 #include <type_traits>
44 #include <functional>
45 #include <system_error>
46 #include <bits/functexcept.h>
47 #include <bits/gthr.h>
48 #include <bits/move.h> // for std::swap
49 
50 #if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)
51 
52 namespace std
53 {
54  /**
55  * @defgroup mutexes Mutexes
56  * @ingroup concurrency
57  *
58  * Classes for mutex support.
59  * @{
60  */
61 
  /// mutex
  //
  // The standard non-recursive mutual-exclusion device, implemented as
  // a thin wrapper over the gthreads abstraction (__gthread_mutex_t).
  class mutex
  {
    typedef __gthread_mutex_t __native_type;
    __native_type _M_mutex;

  public:
    // Exposes the underlying gthread mutex to platform-specific code.
    typedef __native_type* native_handle_type;

    mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
#ifdef __GTHREAD_MUTEX_INIT
      // A static initializer macro exists: copy it into the member via
      // a temporary (the macro is an aggregate initializer and cannot
      // be assigned directly).
      __native_type __tmp = __GTHREAD_MUTEX_INIT;
      _M_mutex = __tmp;
#else
      // No static initializer on this target: use the init function.
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
#endif
    }

    // Mutexes are neither copyable nor assignable.
    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    // Blocks until the mutex is acquired; throws system_error with the
    // gthread error code on failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
        __throw_system_error(__e);
    }

    // Attempts to acquire without blocking; returns true on success.
    // Note that errors from the trylock call are folded into "false"
    // rather than thrown.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    // Releases the mutex; errors from the underlying call are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EPERM
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
113 
114  /// recursive_mutex
116  {
117  typedef __gthread_recursive_mutex_t __native_type;
118  __native_type _M_mutex;
119 
120  public:
121  typedef __native_type* native_handle_type;
122 
124  {
125  // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
126 #ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
127  __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
128  _M_mutex = __tmp;
129 #else
130  __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
131 #endif
132  }
133 
134  recursive_mutex(const recursive_mutex&) = delete;
135  recursive_mutex& operator=(const recursive_mutex&) = delete;
136 
137  void
138  lock()
139  {
140  int __e = __gthread_recursive_mutex_lock(&_M_mutex);
141 
142  // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
143  if (__e)
144  __throw_system_error(__e);
145  }
146 
147  bool
148  try_lock()
149  {
150  // XXX EINVAL, EAGAIN, EBUSY
151  return !__gthread_recursive_mutex_trylock(&_M_mutex);
152  }
153 
154  void
155  unlock()
156  {
157  // XXX EINVAL, EAGAIN, EBUSY
158  __gthread_recursive_mutex_unlock(&_M_mutex);
159  }
160 
161  native_handle_type
162  native_handle()
163  { return &_M_mutex; }
164  };
165 
166  /// timed_mutex
168  {
169  typedef __gthread_mutex_t __native_type;
170 
171 #ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
173 #else
175 #endif
176 
177  __native_type _M_mutex;
178 
179  public:
180  typedef __native_type* native_handle_type;
181 
182  timed_mutex()
183  {
184 #ifdef __GTHREAD_MUTEX_INIT
185  __native_type __tmp = __GTHREAD_MUTEX_INIT;
186  _M_mutex = __tmp;
187 #else
188  __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
189 #endif
190  }
191 
192  timed_mutex(const timed_mutex&) = delete;
193  timed_mutex& operator=(const timed_mutex&) = delete;
194 
195  void
196  lock()
197  {
198  int __e = __gthread_mutex_lock(&_M_mutex);
199 
200  // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
201  if (__e)
202  __throw_system_error(__e);
203  }
204 
205  bool
206  try_lock()
207  {
208  // XXX EINVAL, EAGAIN, EBUSY
209  return !__gthread_mutex_trylock(&_M_mutex);
210  }
211 
212  template <class _Rep, class _Period>
213  bool
214  try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
215  { return __try_lock_for_impl(__rtime); }
216 
217  template <class _Clock, class _Duration>
218  bool
219  try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
220  {
223 
224  chrono::nanoseconds __ns =
226 
227  __gthread_time_t __ts = {
228  static_cast<std::time_t>(__s.time_since_epoch().count()),
229  static_cast<long>(__ns.count())
230  };
231 
232  return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
233  }
234 
235  void
236  unlock()
237  {
238  // XXX EINVAL, EAGAIN, EBUSY
239  __gthread_mutex_unlock(&_M_mutex);
240  }
241 
242  native_handle_type
243  native_handle()
244  { return &_M_mutex; }
245 
246  private:
247  template<typename _Rep, typename _Period>
248  typename enable_if<
250  __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
251  {
252  __clock_t::time_point __atime = __clock_t::now()
254 
255  return try_lock_until(__atime);
256  }
257 
258  template <typename _Rep, typename _Period>
259  typename enable_if<
260  !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
261  __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
262  {
263  __clock_t::time_point __atime = __clock_t::now()
265 
266  return try_lock_until(__atime);
267  }
268  };
269 
270  /// recursive_timed_mutex
272  {
273  typedef __gthread_recursive_mutex_t __native_type;
274 
275 #ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
277 #else
279 #endif
280 
281  __native_type _M_mutex;
282 
283  public:
284  typedef __native_type* native_handle_type;
285 
287  {
288  // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
289 #ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
290  __native_type __tmp = __GTHREAD_RECURSIVE_MUTEX_INIT;
291  _M_mutex = __tmp;
292 #else
293  __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
294 #endif
295  }
296 
298  recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;
299 
300  void
301  lock()
302  {
303  int __e = __gthread_recursive_mutex_lock(&_M_mutex);
304 
305  // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
306  if (__e)
307  __throw_system_error(__e);
308  }
309 
310  bool
311  try_lock()
312  {
313  // XXX EINVAL, EAGAIN, EBUSY
314  return !__gthread_recursive_mutex_trylock(&_M_mutex);
315  }
316 
317  template <class _Rep, class _Period>
318  bool
319  try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
320  { return __try_lock_for_impl(__rtime); }
321 
322  template <class _Clock, class _Duration>
323  bool
324  try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
325  {
328 
329  chrono::nanoseconds __ns =
331 
332  __gthread_time_t __ts = {
333  static_cast<std::time_t>(__s.time_since_epoch().count()),
334  static_cast<long>(__ns.count())
335  };
336 
337  return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
338  }
339 
340  void
341  unlock()
342  {
343  // XXX EINVAL, EAGAIN, EBUSY
344  __gthread_recursive_mutex_unlock(&_M_mutex);
345  }
346 
347  native_handle_type
348  native_handle()
349  { return &_M_mutex; }
350 
351  private:
352  template<typename _Rep, typename _Period>
353  typename enable_if<
355  __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
356  {
357  __clock_t::time_point __atime = __clock_t::now()
359 
360  return try_lock_until(__atime);
361  }
362 
363  template <typename _Rep, typename _Period>
364  typename enable_if<
365  !ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
366  __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
367  {
368  __clock_t::time_point __atime = __clock_t::now()
370 
371  return try_lock_until(__atime);
372  }
373  };
374 
  // Tag types used to select lock-construction behavior in lock_guard
  // and unique_lock.

  /// Do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Assume the calling thread has already obtained mutex ownership
  /// and manage it.
  struct adopt_lock_t { };

  // Tag objects passed by callers; declared here, defined out of line
  // in the library.
  extern const defer_lock_t defer_lock;
  extern const try_to_lock_t try_to_lock;
  extern const adopt_lock_t adopt_lock;
388 
  /**
   * @brief Thrown to indicate errors with lock operations.
   *
   * @ingroup exceptions
   */
  class lock_error : public exception
  {
  public:
    // Description string; defined out of line in the library.
    virtual const char*
    what() const throw();
  };
400 
401  /// @brief Scoped lock idiom.
402  // Acquire the mutex here with a constructor call, then release with
403  // the destructor call in accordance with RAII style.
404  template<typename _Mutex>
406  {
407  public:
408  typedef _Mutex mutex_type;
409 
410  explicit lock_guard(mutex_type& __m) : _M_device(__m)
411  { _M_device.lock(); }
412 
413  lock_guard(mutex_type& __m, adopt_lock_t __a) : _M_device(__m)
414  { _M_device.lock(); }
415 
416  ~lock_guard()
417  { _M_device.unlock(); }
418 
419  lock_guard(const lock_guard&) = delete;
420  lock_guard& operator=(const lock_guard&) = delete;
421 
422  private:
423  mutex_type& _M_device;
424  };
425 
426  /// unique_lock
427  template<typename _Mutex>
429  {
430  public:
431  typedef _Mutex mutex_type;
432 
433  unique_lock()
434  : _M_device(0), _M_owns(false)
435  { }
436 
437  explicit unique_lock(mutex_type& __m)
438  : _M_device(&__m), _M_owns(false)
439  {
440  lock();
441  _M_owns = true;
442  }
443 
444  unique_lock(mutex_type& __m, defer_lock_t)
445  : _M_device(&__m), _M_owns(false)
446  { }
447 
448  unique_lock(mutex_type& __m, try_to_lock_t)
449  : _M_device(&__m), _M_owns(_M_device->try_lock())
450  { }
451 
452  unique_lock(mutex_type& __m, adopt_lock_t)
453  : _M_device(&__m), _M_owns(true)
454  {
455  // XXX calling thread owns mutex
456  }
457 
458  template<typename _Clock, typename _Duration>
459  unique_lock(mutex_type& __m,
461  : _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
462  { }
463 
464  template<typename _Rep, typename _Period>
465  unique_lock(mutex_type& __m,
466  const chrono::duration<_Rep, _Period>& __rtime)
467  : _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
468  { }
469 
470  ~unique_lock()
471  {
472  if (_M_owns)
473  unlock();
474  }
475 
476  unique_lock(const unique_lock&) = delete;
477  unique_lock& operator=(const unique_lock&) = delete;
478 
479  unique_lock(unique_lock&& __u)
480  : _M_device(__u._M_device), _M_owns(__u._M_owns)
481  {
482  __u._M_device = 0;
483  __u._M_owns = false;
484  }
485 
486  unique_lock& operator=(unique_lock&& __u)
487  {
488  if(_M_owns)
489  unlock();
490 
491  unique_lock(std::move(__u)).swap(*this);
492 
493  __u._M_device = 0;
494  __u._M_owns = false;
495 
496  return *this;
497  }
498 
499  void
500  lock()
501  {
502  if (!_M_device)
503  __throw_system_error(int(errc::operation_not_permitted));
504  else if (_M_owns)
505  __throw_system_error(int(errc::resource_deadlock_would_occur));
506  else
507  {
508  _M_device->lock();
509  _M_owns = true;
510  }
511  }
512 
513  bool
514  try_lock()
515  {
516  if (!_M_device)
517  __throw_system_error(int(errc::operation_not_permitted));
518  else if (_M_owns)
519  __throw_system_error(int(errc::resource_deadlock_would_occur));
520  else
521  {
522  _M_owns = _M_device->try_lock();
523  return _M_owns;
524  }
525  }
526 
527  template<typename _Clock, typename _Duration>
528  bool
529  try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
530  {
531  if (!_M_device)
532  __throw_system_error(int(errc::operation_not_permitted));
533  else if (_M_owns)
534  __throw_system_error(int(errc::resource_deadlock_would_occur));
535  else
536  {
537  _M_owns = _M_device->try_lock_until(__atime);
538  return _M_owns;
539  }
540  }
541 
542  template<typename _Rep, typename _Period>
543  bool
544  try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
545  {
546  if (!_M_device)
547  __throw_system_error(int(errc::operation_not_permitted));
548  else if (_M_owns)
549  __throw_system_error(int(errc::resource_deadlock_would_occur));
550  else
551  {
552  _M_owns = _M_device->try_lock_for(__rtime);
553  return _M_owns;
554  }
555  }
556 
557  void
558  unlock()
559  {
560  if (!_M_owns)
561  __throw_system_error(int(errc::operation_not_permitted));
562  else if (_M_device)
563  {
564  _M_device->unlock();
565  _M_owns = false;
566  }
567  }
568 
569  void
570  swap(unique_lock&& __u)
571  {
572  std::swap(_M_device, __u._M_device);
573  std::swap(_M_owns, __u._M_owns);
574  }
575 
576  mutex_type*
577  release()
578  {
579  mutex_type* __ret = _M_device;
580  _M_device = 0;
581  _M_owns = false;
582  return __ret;
583  }
584 
585  bool
586  owns_lock() const
587  { return _M_owns; }
588 
589  /* explicit */ operator bool () const
590  { return owns_lock(); }
591 
592  mutex_type*
593  mutex() const
594  { return _M_device; }
595 
596  private:
597  mutex_type* _M_device;
598  bool _M_owns; // XXX use atomic_bool
599  };
600 
601  template<typename _Mutex>
602  inline void
604  { __x.swap(__y); }
605 
  // Non-member swap accepting an rvalue unique_lock as the first
  // argument (part of the C++0x-era rvalue-reference overload set).
  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>&& __x, unique_lock<_Mutex>& __y)
    { __x.swap(__y); }
610 
  // Non-member swap accepting an rvalue unique_lock as the second
  // argument (part of the C++0x-era rvalue-reference overload set).
  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>&& __y)
    { __x.swap(__y); }
615 
  // Compile-time recursive helper: unlocks tuple elements _Idx,
  // _Idx-1, ..., 0 in that order.  Recursion terminates at the
  // __unlock_impl<-1> specialization below.
  template<int _Idx>
    struct __unlock_impl
    {
      template<typename... _Lock>
        static void
        __do_unlock(tuple<_Lock&...>& __locks)
        {
          std::get<_Idx>(__locks).unlock();
          __unlock_impl<_Idx - 1>::__do_unlock(__locks);
        }
    };
627 
  // Base case of the unlock recursion: index -1 means nothing left to
  // unlock.
  template<>
    struct __unlock_impl<-1>
    {
      template<typename... _Lock>
        static void
        __do_unlock(tuple<_Lock&...>&)
        { }
    };
636 
637  template<int _Idx, bool _Continue = true>
638  struct __try_lock_impl
639  {
640  template<typename... _Lock>
641  static int
642  __do_try_lock(tuple<_Lock&...>& __locks)
643  {
644  if(std::get<_Idx>(__locks).try_lock())
645  {
646  return __try_lock_impl<_Idx + 1,
647  _Idx + 2 < sizeof...(_Lock)>::__do_try_lock(__locks);
648  }
649  else
650  {
651  __unlock_impl<_Idx>::__do_unlock(__locks);
652  return _Idx;
653  }
654  }
655  };
656 
657  template<int _Idx>
658  struct __try_lock_impl<_Idx, false>
659  {
660  template<typename... _Lock>
661  static int
662  __do_try_lock(tuple<_Lock&...>& __locks)
663  {
664  if(std::get<_Idx>(__locks).try_lock())
665  return -1;
666  else
667  {
668  __unlock_impl<_Idx>::__do_unlock(__locks);
669  return _Idx;
670  }
671  }
672  };
673 
674  /** @brief Generic try_lock.
675  * @param __l1 Meets Mutex requirements (try_lock() may throw).
676  * @param __l2 Meets Mutex requirements (try_lock() may throw).
677  * @param __l3 Meets Mutex requirements (try_lock() may throw).
678  * @return Returns -1 if all try_lock() calls return true. Otherwise returns
679  * a 0-based index corresponding to the argument that returned false.
680  * @post Either all arguments are locked, or none will be.
681  *
682  * Sequentially calls try_lock() on each argument.
683  */
684  template<typename _Lock1, typename _Lock2, typename... _Lock3>
685  int
686  try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
687  {
688  tuple<_Lock1&, _Lock2&, _Lock3&...> __locks(__l1, __l2, __l3...);
689  return __try_lock_impl<0>::__do_try_lock(__locks);
690  }
691 
  /// lock
  // Generic lock of multiple lockables.  Declaration only here — the
  // definition is not in this part of the header; presumably it
  // acquires all arguments avoiding deadlock per the standard
  // contract (TODO confirm against the definition).
  template<typename _L1, typename _L2, typename ..._L3>
    void
    lock(_L1&, _L2&, _L3&...);
696 
  /// once_flag
  //
  // State object for call_once, wrapping the gthreads once primitive.
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    __native_type _M_once;

  public:
    // Copy the platform's static initializer into the member via a
    // temporary (presumably because __GTHREAD_ONCE_INIT is an
    // aggregate initializer — TODO confirm per target).
    once_flag()
    {
      __native_type __tmp = __GTHREAD_ONCE_INIT;
      _M_once = __tmp;
    }

    // Not copyable, not assignable.
    once_flag(const once_flag&) = delete;
    once_flag& operator=(const once_flag&) = delete;

    // call_once needs direct access to _M_once.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable __f, _Args&&... __args);
  };
718 
719 #ifdef _GLIBCXX_HAVE_TLS
720  extern __thread void* __once_callable;
721  extern __thread void (*__once_call)();
722 
723  template<typename _Callable>
724  inline void
725  __once_call_impl()
726  {
727  (*(_Callable*)__once_callable)();
728  }
729 #else
730  extern function<void()> __once_functor;
731 
732  extern void
733  __set_once_functor_lock_ptr(unique_lock<mutex>*);
734 
735  extern mutex&
736  __get_once_mutex();
737 #endif
738 
739  extern "C" void __once_proxy();
740 
  /// call_once
  //
  // Executes __f(__args...) at most once per once_flag, delegating the
  // once semantics to __gthread_once with __once_proxy as the C-level
  // callback.  Two strategies hand the bound functor to the proxy:
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      // TLS path: publish the functor's address and a typed
      // trampoline through the thread-local __once_callable /
      // __once_call pair (presumably read by __once_proxy — its
      // definition is not visible here).
      auto __bound_functor = bind(__f, __args...);
      __once_callable = &__bound_functor;
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      // Non-TLS path: a single global __once_functor serialized by
      // the library's once mutex; the lock pointer is handed to the
      // library so the proxy side can release it.
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      __once_functor = bind(__f, __args...);
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&(__once._M_once), &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      // If the lock is still owned here (e.g. the proxy never ran
      // because the once had already completed), withdraw the pointer
      // before __functor_lock unlocks on scope exit.
      if (__functor_lock)
        __set_once_functor_lock_ptr(0);
#endif

      // Surface any gthread error as a system_error.
      if (__e)
        __throw_system_error(__e);
    }
766 
767  // @} group mutexes
768 }
769 
770 #endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1
771 
772 #endif // __GXX_EXPERIMENTAL_CXX0X__
773 
774 #endif // _GLIBCXX_MUTEX