allocator_unittest.cc

// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   // for memset()
#include <algorithm>  // for min()
#include <new>        // for std::set_new_handler(), std::bad_alloc
#include "butil/atomicops.h"
#include "butil/compiler_specific.h"  // for ALLOW_UNUSED
#include <gtest/gtest.h>

// Number of bits in a size_t.
static const int kSizeBits = 8 * sizeof(size_t);
// The maximum size of a size_t.
static const size_t kMaxSize = ~static_cast<size_t>(0);
// Maximum positive size of a size_t if it were signed.
static const size_t kMaxSignedSize = ((size_t(1) << (kSizeBits-1)) - 1);
// An allocation size which is not too big to be reasonable.
static const size_t kNotTooBig = 100000;
// An allocation size which is just too big.
static const size_t kTooBig = ~static_cast<size_t>(0);

namespace {

using std::min;

// Fill a buffer of the specified size with a predetermined pattern
static void Fill(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    buffer[i] = (i & 0xff);
  }
}

// Check that the specified buffer has the predetermined pattern
// generated by Fill()
static bool Valid(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != (i & 0xff)) {
      return false;
    }
  }
  return true;
}

// Check that a buffer is completely zeroed.
static bool ALLOW_UNUSED IsZeroed(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != 0) {
      return false;
    }
  }
  return true;
}

// Check alignment
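// (align must be a power of two: p & (align-1) keeps only the low
// log2(align) bits of the address, which are all zero exactly when
// p is align-byte aligned.)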
static void CheckAlignment(void* p, int align) {
  EXPECT_EQ(0, reinterpret_cast<uintptr_t>(p) & (align-1));
}

// Return the next interesting size/delta to check. Returns -1 if no more.
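// The walk visits every size up to 100, then each power of two along with
// its neighbors: 100, 127, 128, 129, 255, 256, 257, ..., 65537, 131071,
// after which it returns -1 (sizes past 100000 end the iteration).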
static int NextSize(int size) {
  if (size < 100)
    return size+1;
  if (size < 100000) {
    // Find next power of two
    int power = 1;
    while (power < size)
      power <<= 1;
    // Yield (power-1, power, power+1)
    if (size < power-1)
      return power-1;
    if (size == power-1)
      return power;
    assert(size == power);
    return power+1;
  } else {
    return -1;
  }
}

template <class AtomicType>
static void TestAtomicIncrement() {
  // For now, we just test single-threaded execution.
  // Use a guard value to make sure the NoBarrier_AtomicIncrement doesn't go
  // outside the expected address bounds. This is in particular to
  // test that some future change to the asm code doesn't cause the
  // 32-bit NoBarrier_AtomicIncrement to do the wrong thing on 64-bit machines.
  struct {
    AtomicType prev_word;
    AtomicType count;
    AtomicType next_word;
  } s;

  AtomicType prev_word_value, next_word_value;
  memset(&prev_word_value, 0xFF, sizeof(AtomicType));
  memset(&next_word_value, 0xEE, sizeof(AtomicType));

  s.prev_word = prev_word_value;
  s.count = 0;
  s.next_word = next_word_value;

  EXPECT_EQ(butil::subtle::NoBarrier_AtomicIncrement(&s.count, 1), 1);
  EXPECT_EQ(s.count, 1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(butil::subtle::NoBarrier_AtomicIncrement(&s.count, 2), 3);
  EXPECT_EQ(s.count, 3);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(butil::subtle::NoBarrier_AtomicIncrement(&s.count, 3), 6);
  EXPECT_EQ(s.count, 6);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(butil::subtle::NoBarrier_AtomicIncrement(&s.count, -3), 3);
  EXPECT_EQ(s.count, 3);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(butil::subtle::NoBarrier_AtomicIncrement(&s.count, -2), 1);
  EXPECT_EQ(s.count, 1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(butil::subtle::NoBarrier_AtomicIncrement(&s.count, -1), 0);
  EXPECT_EQ(s.count, 0);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(butil::subtle::NoBarrier_AtomicIncrement(&s.count, -1), -1);
  EXPECT_EQ(s.count, -1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(butil::subtle::NoBarrier_AtomicIncrement(&s.count, -4), -5);
  EXPECT_EQ(s.count, -5);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(butil::subtle::NoBarrier_AtomicIncrement(&s.count, 5), 0);
  EXPECT_EQ(s.count, 0);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);
}

#define NUM_BITS(T) (sizeof(T) * 8)

template <class AtomicType>
static void TestCompareAndSwap() {
  AtomicType value = 0;
  AtomicType prev = butil::subtle::NoBarrier_CompareAndSwap(&value, 0, 1);
  EXPECT_EQ(1, value);
  EXPECT_EQ(0, prev);

  // Use test value that has non-zero bits in both halves, more for testing
  // 64-bit implementation on 32-bit platforms.
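  // (For Atomic32 the value works out to 0x4000000B and for Atomic64 to
  // 0x400000000000000B, so an implementation that mishandles either half
  // of the word would be caught.)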
  const AtomicType k_test_val = (static_cast<uint64_t>(1) <<
                                 (NUM_BITS(AtomicType) - 2)) + 11;
  value = k_test_val;
  prev = butil::subtle::NoBarrier_CompareAndSwap(&value, 0, 5);
  EXPECT_EQ(k_test_val, value);
  EXPECT_EQ(k_test_val, prev);

  value = k_test_val;
  prev = butil::subtle::NoBarrier_CompareAndSwap(&value, k_test_val, 5);
  EXPECT_EQ(5, value);
  EXPECT_EQ(k_test_val, prev);
}

template <class AtomicType>
static void TestAtomicExchange() {
  AtomicType value = 0;
  AtomicType new_value = butil::subtle::NoBarrier_AtomicExchange(&value, 1);
  EXPECT_EQ(1, value);
  EXPECT_EQ(0, new_value);

  // Use test value that has non-zero bits in both halves, more for testing
  // 64-bit implementation on 32-bit platforms.
  const AtomicType k_test_val = (static_cast<uint64_t>(1) <<
                                 (NUM_BITS(AtomicType) - 2)) + 11;
  value = k_test_val;
  new_value = butil::subtle::NoBarrier_AtomicExchange(&value, k_test_val);
  EXPECT_EQ(k_test_val, value);
  EXPECT_EQ(k_test_val, new_value);

  value = k_test_val;
  new_value = butil::subtle::NoBarrier_AtomicExchange(&value, 5);
  EXPECT_EQ(5, value);
  EXPECT_EQ(k_test_val, new_value);
}

template <class AtomicType>
static void TestAtomicIncrementBounds() {
  // Test increment at the half-width boundary of the atomic type.
  // It is primarily for testing at the 32-bit boundary for 64-bit atomic type.
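  // (Crossing from test_val - 1 to test_val clears every bit of the low
  // half and sets the lowest bit of the high half, so an increment built
  // from two half-width operations that drops the carry would fail here.)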
  AtomicType test_val = static_cast<uint64_t>(1) << (NUM_BITS(AtomicType) / 2);
  AtomicType value = test_val - 1;
  AtomicType new_value = butil::subtle::NoBarrier_AtomicIncrement(&value, 1);
  EXPECT_EQ(test_val, value);
  EXPECT_EQ(value, new_value);

  butil::subtle::NoBarrier_AtomicIncrement(&value, -1);
  EXPECT_EQ(test_val - 1, value);
}

// This is a simple sanity check that values are correct. Not testing
// atomicity.
template <class AtomicType>
static void TestStore() {
  const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
  const AtomicType kVal2 = static_cast<AtomicType>(-1);

  AtomicType value;

  butil::subtle::NoBarrier_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  butil::subtle::NoBarrier_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);

  butil::subtle::Acquire_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  butil::subtle::Acquire_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);

  butil::subtle::Release_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  butil::subtle::Release_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);
}

// This is a simple sanity check that values are correct. Not testing
// atomicity.
template <class AtomicType>
static void TestLoad() {
  const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
  const AtomicType kVal2 = static_cast<AtomicType>(-1);

  AtomicType value;

  value = kVal1;
  EXPECT_EQ(kVal1, butil::subtle::NoBarrier_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, butil::subtle::NoBarrier_Load(&value));

  value = kVal1;
  EXPECT_EQ(kVal1, butil::subtle::Acquire_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, butil::subtle::Acquire_Load(&value));

  value = kVal1;
  EXPECT_EQ(kVal1, butil::subtle::Release_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, butil::subtle::Release_Load(&value));
}

template <class AtomicType>
static void TestAtomicOps() {
  TestCompareAndSwap<AtomicType>();
  TestAtomicExchange<AtomicType>();
  TestAtomicIncrementBounds<AtomicType>();
  TestStore<AtomicType>();
  TestLoad<AtomicType>();
}

static void TestCalloc(size_t n, size_t s, bool ok) {
  char* p = reinterpret_cast<char*>(calloc(n, s));
  if (!ok) {
    EXPECT_EQ(NULL, p) << "calloc(n, s) should not succeed";
  } else {
    EXPECT_NE(reinterpret_cast<void*>(NULL), p) <<
        "calloc(n, s) should succeed";
    for (size_t i = 0; i < n*s; i++) {
      EXPECT_EQ('\0', p[i]);
    }
    free(p);
  }
}

// A global test counter for number of times the NewHandler is called.
static int news_handled = 0;

static void TestNewHandler() {
  ++news_handled;
  throw std::bad_alloc();
}
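
// The runtime invokes the installed new_handler when an allocation fails;
// a conforming handler must free up memory, terminate, or throw
// std::bad_alloc. TestNewHandler counts the call and then throws, so each
// failing attempt should invoke it exactly once.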
// Because we compile without exceptions, we expect these will not throw.
static void TestOneNewWithoutExceptions(void* (*func)(size_t),
                                        bool should_throw) {
  // success test
  try {
    void* ptr = (*func)(kNotTooBig);
    EXPECT_NE(reinterpret_cast<void*>(NULL), ptr) <<
        "allocation should not have failed.";
  } catch(...) {
    EXPECT_EQ(0, 1) << "allocation threw unexpected exception.";
  }

  // failure test
  try {
    void* rv = (*func)(kTooBig);
    EXPECT_EQ(NULL, rv);
    EXPECT_FALSE(should_throw) << "allocation should have thrown.";
  } catch(...) {
    EXPECT_TRUE(should_throw) << "allocation threw unexpected exception.";
  }
}

static void TestNothrowNew(void* (*func)(size_t)) {
  news_handled = 0;

  // test without new_handler:
  std::new_handler saved_handler = std::set_new_handler(0);
  TestOneNewWithoutExceptions(func, false);

  // test with new_handler:
  std::set_new_handler(TestNewHandler);
  TestOneNewWithoutExceptions(func, true);
  EXPECT_EQ(news_handled, 1) << "nothrow new_handler was not called.";
  std::set_new_handler(saved_handler);
}

}  // namespace

//-----------------------------------------------------------------------------

TEST(Atomics, AtomicIncrementWord) {
  TestAtomicIncrement<butil::subtle::AtomicWord>();
}

TEST(Atomics, AtomicIncrement32) {
  TestAtomicIncrement<butil::subtle::Atomic32>();
}

TEST(Atomics, AtomicOpsWord) {
  TestAtomicOps<butil::subtle::AtomicWord>();
}

TEST(Atomics, AtomicOps32) {
  TestAtomicOps<butil::subtle::Atomic32>();
}

TEST(Allocators, Malloc) {
  // Try allocating data with a bunch of alignments and sizes
  for (int size = 1; size < 1048576; size *= 2) {
    unsigned char* ptr = reinterpret_cast<unsigned char*>(malloc(size));
    CheckAlignment(ptr, 2);  // Should be 2 byte aligned
    Fill(ptr, size);
    EXPECT_TRUE(Valid(ptr, size));
    free(ptr);
  }
}

TEST(Allocators, Calloc) {
  TestCalloc(0, 0, true);
  TestCalloc(0, 1, true);
  TestCalloc(1, 1, true);
  TestCalloc(1<<10, 0, true);
  TestCalloc(1<<20, 0, true);
  TestCalloc(0, 1<<10, true);
  TestCalloc(0, 1<<20, true);
  TestCalloc(1<<20, 2, true);
  TestCalloc(2, 1<<20, true);
  TestCalloc(1000, 1000, true);
  // Does not work with glibc 2.12 (Red Hat 4.4.6-3, Linux 2.6.32):
  // TestCalloc(kMaxSize, 2, false);
  // TestCalloc(2, kMaxSize, false);
  // TestCalloc(kMaxSize, kMaxSize, false);
  // TestCalloc(kMaxSignedSize, 3, false);
  // TestCalloc(3, kMaxSignedSize, false);
  // TestCalloc(kMaxSignedSize, kMaxSignedSize, false);
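  // (Each disabled case multiplies to more than SIZE_MAX, so a conforming
  // calloc must detect the n*s overflow and return NULL rather than
  // allocating a truncated buffer.)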
}

TEST(Allocators, New) {
  TestNothrowNew(&::operator new);
  TestNothrowNew(&::operator new[]);
}

// This makes sure that reallocing a small number of bytes in either
// direction doesn't cause us to allocate new memory.
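// (This relies on the allocator rounding requests up to a size class, as
// tcmalloc-style allocators do; resizing within the same class keeps the
// same block.)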
TEST(Allocators, Realloc1) {
  int start_sizes[] = { 100, 1000, 10000, 100000 };
  int deltas[] = { 1, -2, 4, -8, 16, -32, 64, -128 };

  for (size_t s = 0; s < sizeof(start_sizes)/sizeof(*start_sizes); ++s) {
    void* p = malloc(start_sizes[s]);
    ASSERT_TRUE(p);
    // The larger the start-size, the larger the non-reallocing delta.
    for (size_t d = 0; d < (s+1)*2; ++d) {
      void* new_p = realloc(p, start_sizes[s] + deltas[d]);
      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
    }
    // Test again, but this time reallocing smaller first.
    for (size_t d = 0; d < (s+1)*2; ++d) {
      void* new_p = realloc(p, start_sizes[s] - deltas[d]);
      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
    }
    free(p);
  }
}

TEST(Allocators, Realloc2) {
  for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
    for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
      unsigned char* src = reinterpret_cast<unsigned char*>(malloc(src_size));
      Fill(src, src_size);
      unsigned char* dst =
          reinterpret_cast<unsigned char*>(realloc(src, dst_size));
      EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
      Fill(dst, dst_size);
      EXPECT_TRUE(Valid(dst, dst_size));
      if (dst != NULL) free(dst);
    }
  }

  // Now make sure realloc works correctly even when we overflow the
  // packed cache, so some entries are evicted from the cache.
  // The cache has 2^12 entries, keyed by page number.
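  // With 4KB pages, each 8192-byte allocation spans at least two pages, so
  // 2^14 allocations touch far more distinct pages than the 2^12 cache
  // entries and are guaranteed to force evictions.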
  const int kNumEntries = 1 << 14;
  int** p = reinterpret_cast<int**>(malloc(sizeof(*p) * kNumEntries));
  int sum = 0;
  for (int i = 0; i < kNumEntries; i++) {
    // No page size is likely to be bigger than 8192 bytes.
    p[i] = reinterpret_cast<int*>(malloc(8192));
    p[i][1000] = i;  // use memory deep in the heart of p
  }
  for (int i = 0; i < kNumEntries; i++) {
    p[i] = reinterpret_cast<int*>(realloc(p[i], 9000));
  }
  for (int i = 0; i < kNumEntries; i++) {
    sum += p[i][1000];
    free(p[i]);
  }
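  // sum accumulates p[i][1000] == i for every i, i.e. the Gauss sum
  // 0 + 1 + ... + (kNumEntries-1) = kNumEntries*(kNumEntries-1)/2;
  // dividing first keeps the integer arithmetic exact because kNumEntries
  // is even.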
  EXPECT_EQ(kNumEntries/2 * (kNumEntries - 1), sum);  // assumes kNumEntries is even
  free(p);
}

TEST(Allocators, ReallocZero) {
  // Test that realloc to zero does not return NULL.
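  // (The C standard allows realloc(ptr, 0) to return either NULL or a
  // valid pointer; this test pins down the tcmalloc-style behavior of
  // handing back a usable pointer to a minimal block.)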
  for (int size = 0; size >= 0; size = NextSize(size)) {
    char* ptr = reinterpret_cast<char*>(malloc(size));
    EXPECT_NE(static_cast<char*>(NULL), ptr);
    ptr = reinterpret_cast<char*>(realloc(ptr, 0));
    EXPECT_NE(static_cast<char*>(NULL), ptr);
    if (ptr)
      free(ptr);
  }
}

#ifdef WIN32

// Test recalloc
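// (_recalloc is a Windows CRT extension: it resizes like realloc while
// zero-initializing newly allocated space, hence the IsZeroed check on the
// freshly created block below.)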
TEST(Allocators, Recalloc) {
  for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
    for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
      unsigned char* src =
          reinterpret_cast<unsigned char*>(_recalloc(NULL, 1, src_size));
      EXPECT_TRUE(IsZeroed(src, src_size));
      Fill(src, src_size);
      unsigned char* dst =
          reinterpret_cast<unsigned char*>(_recalloc(src, 1, dst_size));
      EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
      Fill(dst, dst_size);
      EXPECT_TRUE(Valid(dst, dst_size));
      if (dst != NULL)
        free(dst);
    }
  }
}

// Test the Windows-specific _aligned_malloc() and _aligned_free() methods.
TEST(Allocators, AlignedMalloc) {
  // Try allocating data with a bunch of alignments and sizes
  static const int kTestAlignments[] = {8, 16, 256, 4096, 8192, 16384};
  for (int size = 1; size > 0; size = NextSize(size)) {
    for (int i = 0; i < ARRAYSIZE(kTestAlignments); ++i) {
      unsigned char* ptr = static_cast<unsigned char*>(
          _aligned_malloc(size, kTestAlignments[i]));
      CheckAlignment(ptr, kTestAlignments[i]);
      Fill(ptr, size);
      EXPECT_TRUE(Valid(ptr, size));

      // Make a second allocation of the same size and alignment to prevent
      // allocators from passing this test by accident. Per jar, tcmalloc
      // provides allocations for new (never before seen) sizes out of a thread
      // local heap of a given "size class." Each time the test requests a new
      // size, it will usually get the first element of a span, which is a
      // 4K aligned allocation.
      unsigned char* ptr2 = static_cast<unsigned char*>(
          _aligned_malloc(size, kTestAlignments[i]));
      CheckAlignment(ptr2, kTestAlignments[i]);
      Fill(ptr2, size);
      EXPECT_TRUE(Valid(ptr2, size));

      // Should never happen, but sanity check just in case.
      ASSERT_NE(ptr, ptr2);
      _aligned_free(ptr);
      _aligned_free(ptr2);
    }
  }
}

#endif  // WIN32