key.cpp
// bthread - A M:N threading library to make applications more concurrent.
// Copyright (c) 2014 Baidu, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Author: Ge,Jun (gejun@baidu.com)
// Date: Sun Aug 3 12:46:15 CST 2014

#include <pthread.h>
#include "butil/macros.h"
#include "butil/atomicops.h"
#include "bvar/passive_status.h"
#include "bthread/errno.h"       // EAGAIN
#include "bthread/task_group.h"  // TaskGroup

// Implement bthread_key_t related functions

namespace bthread {

class KeyTable;

// defined in task_group.cpp
extern __thread TaskGroup* tls_task_group;
extern __thread LocalStorage tls_bls;
static __thread bool tls_ever_created_keytable = false;

// We keep thread-specific data in a two-level array. The top-level array
// contains at most KEY_1STLEVEL_SIZE pointers to dynamically allocated
// arrays of at most KEY_2NDLEVEL_SIZE data pointers. Many applications
// occupy only one or two second-level arrays, so this mechanism keeps the
// memory footprint small and lets us raise KEY_1STLEVEL_SIZE more freely.
// The tradeoff is an additional memory indirection, negligible most of
// the time.
static const uint32_t KEY_2NDLEVEL_SIZE = 32;

// Notice that we're trying to make the memory of the second level and the
// first level both 256 bytes to make the memory allocator happier.
static const uint32_t KEY_1STLEVEL_SIZE = 31;

// Max number of TLS keys in one thread; currently 992, which should be
// enough for most projects throughout baidu.
static const uint32_t KEYS_MAX = KEY_2NDLEVEL_SIZE * KEY_1STLEVEL_SIZE;
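// For example, a key with index 70 lives in the second-level table
// _subs[70 / 32] = _subs[2], at slot 70 - 2 * 32 = 6 within that table
// (see KeyTable::get_data/set_data below).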
// Destructor and version of each TLS key.
struct KeyInfo {
    uint32_t version;
    void (*dtor)(void*, const void*);
    const void* dtor_args;
};
static KeyInfo s_key_info[KEYS_MAX] = {};

// For allocating keys.
static pthread_mutex_t s_key_mutex = PTHREAD_MUTEX_INITIALIZER;
static size_t nfreekey = 0;
static size_t nkey = 0;
static uint32_t s_free_keys[KEYS_MAX];

// Stats.
static butil::static_atomic<size_t> nkeytable = BUTIL_STATIC_ATOMIC_INIT(0);
static butil::static_atomic<size_t> nsubkeytable = BUTIL_STATIC_ATOMIC_INIT(0);

// The second-level array.
// Align with cacheline to avoid false sharing.
class BAIDU_CACHELINE_ALIGNMENT SubKeyTable {
public:
    SubKeyTable() {
        memset(_data, 0, sizeof(_data));
        nsubkeytable.fetch_add(1, butil::memory_order_relaxed);
    }

    // NOTE: Call clear() first; the destructor cannot do it because it
    // does not know the key offset of this table.
    ~SubKeyTable() {
        nsubkeytable.fetch_sub(1, butil::memory_order_relaxed);
    }

    void clear(uint32_t offset) {
        for (uint32_t i = 0; i < KEY_2NDLEVEL_SIZE; ++i) {
            void* p = _data[i].ptr;
            if (p) {
                // Set the position to NULL before calling the dtor, which
                // may set the position again.
                _data[i].ptr = NULL;
                KeyInfo info = bthread::s_key_info[offset + i];
                if (info.dtor && _data[i].version == info.version) {
                    info.dtor(p, info.dtor_args);
                }
            }
        }
    }

    bool cleared() const {
        // We need to iterate again to check if every slot is empty. An
        // alternative is to remember whether set_data() was called during
        // clear().
        for (uint32_t i = 0; i < KEY_2NDLEVEL_SIZE; ++i) {
            if (_data[i].ptr) {
                return false;
            }
        }
        return true;
    }
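    // Returns the stored pointer only if the slot was written under the same
    // key version; a mismatched version means the key was deleted (and
    // possibly re-created), so the stale value must not be returned.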
    inline void* get_data(uint32_t index, uint32_t version) const {
        if (_data[index].version == version) {
            return _data[index].ptr;
        }
        return NULL;
    }

    inline void set_data(uint32_t index, uint32_t version, void* data) {
        _data[index].version = version;
        _data[index].ptr = data;
    }

private:
    struct Data {
        uint32_t version;
        void* ptr;
    };
    Data _data[KEY_2NDLEVEL_SIZE];
};

// The first-level array.
// Align with cacheline to avoid false sharing.
class BAIDU_CACHELINE_ALIGNMENT KeyTable {
public:
    KeyTable() : next(NULL) {
        memset(_subs, 0, sizeof(_subs));
        nkeytable.fetch_add(1, butil::memory_order_relaxed);
    }

    ~KeyTable() {
        nkeytable.fetch_sub(1, butil::memory_order_relaxed);
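        // Like pthread keys, destructors may install new values; retry the
        // clearing pass up to PTHREAD_DESTRUCTOR_ITERATIONS rounds before
        // giving up.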
        for (int ntry = 0; ntry < PTHREAD_DESTRUCTOR_ITERATIONS; ++ntry) {
            for (uint32_t i = 0; i < KEY_1STLEVEL_SIZE; ++i) {
                if (_subs[i]) {
                    _subs[i]->clear(i * KEY_2NDLEVEL_SIZE);
                }
            }
            bool all_cleared = true;
            for (uint32_t i = 0; i < KEY_1STLEVEL_SIZE; ++i) {
                if (_subs[i] != NULL && !_subs[i]->cleared()) {
                    all_cleared = false;
                    break;
                }
            }
            if (all_cleared) {
                for (uint32_t i = 0; i < KEY_1STLEVEL_SIZE; ++i) {
                    delete _subs[i];
                }
                return;
            }
        }
        LOG(ERROR) << "Fail to destroy all objects in KeyTable[" << this << ']';
    }

    inline void* get_data(bthread_key_t key) const {
        const uint32_t subidx = key.index / KEY_2NDLEVEL_SIZE;
        if (subidx < KEY_1STLEVEL_SIZE) {
            const SubKeyTable* sub_kt = _subs[subidx];
            if (sub_kt) {
                return sub_kt->get_data(
                    key.index - subidx * KEY_2NDLEVEL_SIZE, key.version);
            }
        }
        return NULL;
    }

    inline int set_data(bthread_key_t key, void* data) {
        const uint32_t subidx = key.index / KEY_2NDLEVEL_SIZE;
        if (subidx < KEY_1STLEVEL_SIZE &&
            key.version == s_key_info[key.index].version) {
            SubKeyTable* sub_kt = _subs[subidx];
            if (sub_kt == NULL) {
                sub_kt = new (std::nothrow) SubKeyTable;
                if (NULL == sub_kt) {
                    return ENOMEM;
                }
                _subs[subidx] = sub_kt;
            }
            sub_kt->set_data(key.index - subidx * KEY_2NDLEVEL_SIZE,
                             key.version, data);
            return 0;
        }
        CHECK(false) << "bthread_setspecific is called on invalid " << key;
        return EINVAL;
    }

public:
    KeyTable* next;

private:
    SubKeyTable* _subs[KEY_1STLEVEL_SIZE];
};
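// Pops one KeyTable from the pool's free list, or returns NULL. The first
// check of free_keytables is done without the lock; the race is benign
// because the list is re-checked under the lock before popping.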
static KeyTable* borrow_keytable(bthread_keytable_pool_t* pool) {
    if (pool != NULL && pool->free_keytables) {
        BAIDU_SCOPED_LOCK(pool->mutex);
        KeyTable* p = (KeyTable*)pool->free_keytables;
        if (p) {
            pool->free_keytables = p->next;
            return p;
        }
    }
    return NULL;
}

// Referenced in task_group.cpp, must be extern.
// The caller of this function must own the KeyTable exclusively.
void return_keytable(bthread_keytable_pool_t* pool, KeyTable* kt) {
    if (NULL == kt) {
        return;
    }
    if (pool == NULL) {
        delete kt;
        return;
    }
    std::unique_lock<pthread_mutex_t> mu(pool->mutex);
    if (pool->destroyed) {
        mu.unlock();
        delete kt;
        return;
    }
    kt->next = (KeyTable*)pool->free_keytables;
    pool->free_keytables = kt;
}

static void cleanup_pthread() {
    KeyTable* kt = tls_bls.keytable;
    if (kt) {
        delete kt;
        // NULL the tls pointer after deletion: destructors may set it
        // again while the table is being deleted.
        tls_bls.keytable = NULL;
    }
}
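// Adapts the single-argument destructor accepted by bthread_key_create()
// to the two-argument form used by bthread_key_create2(): the original
// dtor is passed through the `arg` pointer.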
static void arg_as_dtor(void* data, const void* arg) {
    typedef void (*KeyDtor)(void*);
    return ((KeyDtor)arg)(data);
}

static int get_key_count(void*) {
    BAIDU_SCOPED_LOCK(bthread::s_key_mutex);
    return (int)nkey - (int)nfreekey;
}

static size_t get_keytable_count(void*) {
    return nkeytable.load(butil::memory_order_relaxed);
}

static size_t get_keytable_memory(void*) {
    const size_t n = nkeytable.load(butil::memory_order_relaxed);
    const size_t nsub = nsubkeytable.load(butil::memory_order_relaxed);
    return n * sizeof(KeyTable) + nsub * sizeof(SubKeyTable);
}

static bvar::PassiveStatus<int> s_bthread_key_count(
    "bthread_key_count", get_key_count, NULL);
static bvar::PassiveStatus<size_t> s_bthread_keytable_count(
    "bthread_keytable_count", get_keytable_count, NULL);
static bvar::PassiveStatus<size_t> s_bthread_keytable_memory(
    "bthread_keytable_memory", get_keytable_memory, NULL);

}  // namespace bthread

extern "C" {

int bthread_keytable_pool_init(bthread_keytable_pool_t* pool) {
    if (pool == NULL) {
        LOG(ERROR) << "Param[pool] is NULL";
        return EINVAL;
    }
    pthread_mutex_init(&pool->mutex, NULL);
    pool->free_keytables = NULL;
    pool->destroyed = 0;
    return 0;
}

int bthread_keytable_pool_destroy(bthread_keytable_pool_t* pool) {
    if (pool == NULL) {
        LOG(ERROR) << "Param[pool] is NULL";
        return EINVAL;
    }
    bthread::KeyTable* saved_free_keytables = NULL;
    {
        BAIDU_SCOPED_LOCK(pool->mutex);
        if (pool->free_keytables) {
            saved_free_keytables = (bthread::KeyTable*)pool->free_keytables;
            pool->free_keytables = NULL;
        }
        pool->destroyed = 1;
    }
    // Cheat get/setspecific and destroy the keytables.
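    // Each pooled KeyTable is temporarily installed as the current thread's
    // (and current task's) keytable, so that key destructors running inside
    // `delete kt` (which may call bthread_get/setspecific) operate on the
    // table being destroyed instead of the caller's own table.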
    bthread::TaskGroup* const g = bthread::tls_task_group;
    bthread::KeyTable* old_kt = bthread::tls_bls.keytable;
    while (saved_free_keytables) {
        bthread::KeyTable* kt = saved_free_keytables;
        saved_free_keytables = kt->next;
        bthread::tls_bls.keytable = kt;
        if (g) {
            g->current_task()->local_storage.keytable = kt;
        }
        delete kt;
        if (old_kt == kt) {
            old_kt = NULL;
        }
    }
    bthread::tls_bls.keytable = old_kt;
    if (g) {
        g->current_task()->local_storage.keytable = old_kt;
    }
    // TODO: return_keytable() may race with this function, so we don't
    // destroy the mutex for now.
    // pthread_mutex_destroy(&pool->mutex);
    return 0;
}

int bthread_keytable_pool_getstat(bthread_keytable_pool_t* pool,
                                  bthread_keytable_pool_stat_t* stat) {
    if (pool == NULL || stat == NULL) {
        LOG(ERROR) << "Param[pool] or Param[stat] is NULL";
        return EINVAL;
    }
    std::unique_lock<pthread_mutex_t> mu(pool->mutex);
    size_t count = 0;
    bthread::KeyTable* p = (bthread::KeyTable*)pool->free_keytables;
    for (; p; p = p->next, ++count) {}
    stat->nfree = count;
    return 0;
}

// TODO: this is not a strict `reserve' because we only check the number of
// free keytables. Currently there's no way to track KeyTables that may be
// returned to the pool in the future.
void bthread_keytable_pool_reserve(bthread_keytable_pool_t* pool,
                                   size_t nfree,
                                   bthread_key_t key,
                                   void* ctor(const void*),
                                   const void* ctor_args) {
    if (pool == NULL) {
        LOG(ERROR) << "Param[pool] is NULL";
        return;
    }
    bthread_keytable_pool_stat_t stat;
    if (bthread_keytable_pool_getstat(pool, &stat) != 0) {
        LOG(ERROR) << "Fail to getstat of pool=" << pool;
        return;
    }
    for (size_t i = stat.nfree; i < nfree; ++i) {
        bthread::KeyTable* kt = new (std::nothrow) bthread::KeyTable;
        if (kt == NULL) {
            break;
        }
        void* data = ctor(ctor_args);
        if (data) {
            kt->set_data(key, data);
        }  // else append kt w/o data.
        std::unique_lock<pthread_mutex_t> mu(pool->mutex);
        if (pool->destroyed) {
            mu.unlock();
            delete kt;
            break;
        }
        kt->next = (bthread::KeyTable*)pool->free_keytables;
        pool->free_keytables = kt;
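        // If the ctor failed, the (data-less) table was still pooled, but
        // stop reserving: further ctor calls would likely fail too.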
        if (data == NULL) {
            break;
        }
    }
}

int bthread_key_create2(bthread_key_t* key,
                        void (*dtor)(void*, const void*),
                        const void* dtor_args) __THROW {
    uint32_t index = 0;
    {
        BAIDU_SCOPED_LOCK(bthread::s_key_mutex);
        if (bthread::nfreekey > 0) {
            index = bthread::s_free_keys[--bthread::nfreekey];
        } else if (bthread::nkey < bthread::KEYS_MAX) {
            index = bthread::nkey++;
        } else {
            return EAGAIN;  // what pthread_key_create returns in this case.
        }
    }
    bthread::s_key_info[index].dtor = dtor;
    bthread::s_key_info[index].dtor_args = dtor_args;
    key->index = index;
    key->version = bthread::s_key_info[index].version;
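    // Version 0 is reserved as "never valid" so that a zero-initialized
    // bthread_key_t can never match a live key (bthread_key_delete skips
    // 0 on wrap-around for the same reason).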
    if (key->version == 0) {
        ++bthread::s_key_info[index].version;
        ++key->version;
    }
    return 0;
}

int bthread_key_create(bthread_key_t* key, void (*dtor)(void*)) __THROW {
    if (dtor == NULL) {
        return bthread_key_create2(key, NULL, NULL);
    } else {
        return bthread_key_create2(key, bthread::arg_as_dtor, (const void*)dtor);
    }
}

int bthread_key_delete(bthread_key_t key) __THROW {
    if (key.index < bthread::KEYS_MAX &&
        key.version == bthread::s_key_info[key.index].version) {
        BAIDU_SCOPED_LOCK(bthread::s_key_mutex);
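        // Re-check under the lock: another thread may have deleted this key
        // concurrently.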
        if (key.version == bthread::s_key_info[key.index].version) {
            if (++bthread::s_key_info[key.index].version == 0) {
                ++bthread::s_key_info[key.index].version;
            }
            bthread::s_key_info[key.index].dtor = NULL;
            bthread::s_key_info[key.index].dtor_args = NULL;
            bthread::s_free_keys[bthread::nfreekey++] = key.index;
            return 0;
        }
    }
    CHECK(false) << "bthread_key_delete is called on invalid " << key;
    return EINVAL;
}

// NOTE: Can't borrow_keytable() in bthread_setspecific(), otherwise the
// following memory leak may occur:
// -> bthread_getspecific() fails to borrow a keytable and returns NULL.
// -> bthread_setspecific() succeeds in borrowing a keytable and overwrites
//    the old data at the position with newly created data; the old data
//    is leaked.
int bthread_setspecific(bthread_key_t key, void* data) __THROW {
    bthread::KeyTable* kt = bthread::tls_bls.keytable;
    if (NULL == kt) {
        kt = new (std::nothrow) bthread::KeyTable;
        if (NULL == kt) {
            return ENOMEM;
        }
        bthread::tls_bls.keytable = kt;
        bthread::TaskGroup* const g = bthread::tls_task_group;
        if (g) {
            g->current_task()->local_storage.keytable = kt;
        }
        if (!bthread::tls_ever_created_keytable) {
            bthread::tls_ever_created_keytable = true;
            CHECK_EQ(0, butil::thread_atexit(bthread::cleanup_pthread));
        }
    }
    return kt->set_data(key, data);
}

void* bthread_getspecific(bthread_key_t key) __THROW {
    bthread::KeyTable* kt = bthread::tls_bls.keytable;
    if (kt) {
        return kt->get_data(key);
    }
    bthread::TaskGroup* const g = bthread::tls_task_group;
    if (g) {
        bthread::TaskMeta* const task = g->current_task();
        kt = bthread::borrow_keytable(task->attr.keytable_pool);
        if (kt) {
            g->current_task()->local_storage.keytable = kt;
            bthread::tls_bls.keytable = kt;
            return kt->get_data(key);
        }
    }
    return NULL;
}

void bthread_assign_data(void* data) __THROW {
    bthread::tls_bls.assigned_data = data;
    bthread::TaskGroup* const g = bthread::tls_task_group;
    if (g) {
        g->current_task()->local_storage.assigned_data = data;
    }
}

void* bthread_get_assigned_data() __THROW {
    return bthread::tls_bls.assigned_data;
}

}  // extern "C"
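// ---------------------------------------------------------------------------
// A minimal usage sketch of the API implemented above, assumed to run inside
// a bthread; `delete_counter` and the int counter are hypothetical names used
// only for illustration:
//
//   static void delete_counter(void* p) { delete static_cast<int*>(p); }
//
//   bthread_key_t key;
//   CHECK_EQ(0, bthread_key_create(&key, delete_counter));
//   if (bthread_getspecific(key) == NULL) {
//       // First access in this bthread: install a fresh value. The dtor
//       // runs when the bthread's KeyTable is cleared/destroyed.
//       CHECK_EQ(0, bthread_setspecific(key, new int(0)));
//   }
//   int* counter = static_cast<int*>(bthread_getspecific(key));
//   ++*counter;
//   // When no user of the key remains anywhere:
//   CHECK_EQ(0, bthread_key_delete(key));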